Initial infrastructure documentation - comprehensive homelab reference

This commit is contained in:
Funky (OpenClaw)
2026-02-23 03:42:22 +00:00
commit 0682c79580
169 changed files with 63913 additions and 0 deletions

141
infrastructure/.env.example Normal file
View File

@@ -0,0 +1,141 @@
# Homelab Infrastructure Environment Variables
# Copy to .env and fill in any secrets
# Generated from AGENT-REFERENCE.md
# =============================================================================
# VPS HOSTS
# =============================================================================
VPS_PRIMARY_IP=66.63.182.168
VPS_PRIMARY_DOMAIN=vps.nianticbooks.com
VPS_PRIMARY_USER=fred
VPS_GAMING_IP=51.222.12.162
VPS_GAMING_DOMAIN=deadeyeg4ming.vip
VPS_GAMING_USER=ubuntu
# =============================================================================
# PROXMOX HOSTS
# =============================================================================
PROXMOX_MAIN_IP=10.0.10.3
PROXMOX_MAIN_HOST=main-pve
PROXMOX_MAIN_USER=root
PROXMOX_ROUTER_IP=10.0.10.2
PROXMOX_ROUTER_HOST=pve-router
PROXMOX_ROUTER_USER=root
PROXMOX_STORAGE_IP=10.0.10.4
PROXMOX_STORAGE_HOST=pve-storage
PROXMOX_STORAGE_USER=root
# =============================================================================
# NETWORK
# =============================================================================
GATEWAY_IP=10.0.10.1
NETWORK_CIDR=10.0.10.0/24
DHCP_RANGE_START=10.0.10.50
DHCP_RANGE_END=10.0.10.254
# WireGuard
WIREGUARD_ACTIVE_NETWORK=10.0.9.0/24
WIREGUARD_LEGACY_NETWORK=10.0.8.0/24
WIREGUARD_ENDPOINT=51.222.12.162:51820
WIREGUARD_VPS_PROXY_IP=10.0.9.3
# =============================================================================
# INFRASTRUCTURE SERVICES
# =============================================================================
# Step-CA (CT 115)
STEPCA_IP=10.0.10.15
STEPCA_PORT=8443
STEPCA_ACME_URL=https://10.0.10.15:8443/acme/acme/directory
# PostgreSQL (CT 102) - Shared database
POSTGRES_IP=10.0.10.20
POSTGRES_PORT=5432
POSTGRES_USER=postgres
POSTGRES_PASSWORD=
# Authentik (CT 121)
AUTHENTIK_IP=10.0.10.21
AUTHENTIK_PORT=9000
AUTHENTIK_ADMIN_USER=akadmin
AUTHENTIK_ADMIN_PASSWORD=
# n8n (CT 106)
N8N_IP=10.0.10.22
N8N_PORT=5678
# RustDesk (CT 123)
RUSTDESK_IP=10.0.10.23
RUSTDESK_ID_PORT=21115
RUSTDESK_RELAY_PORT=21117
RUSTDESK_PUBKEY=sfYuCTMHxrA22kukomb/RAKYyUgr8iaMfm/U4CFLfL0=
# Prometheus/Grafana (CT 125)
PROMETHEUS_IP=10.0.10.25
PROMETHEUS_PORT=9090
GRAFANA_IP=10.0.10.25
GRAFANA_PORT=3000
# Uptime Kuma (CT 128)
UPTIME_KUMA_IP=10.0.10.26
UPTIME_KUMA_PORT=3001
# =============================================================================
# APPLICATION SERVICES
# =============================================================================
# Home Assistant (VM 104 on pve-router)
HOME_ASSISTANT_IP=10.0.10.24
HOME_ASSISTANT_PORT=8123
# Dockge/Vikunja (CT 127)
DOCKGE_IP=10.0.10.27
DOCKGE_PORT=5001
VIKUNJA_IP=10.0.10.27
VIKUNJA_PORT=3456
# Bar Assistant (CT 103)
BAR_ASSISTANT_IP=10.0.10.40
BAR_ASSISTANT_PORT=8080
# Minecraft Servers
MINECRAFT_FORGE_IP=10.0.10.41
MINECRAFT_FORGE_PORT=25565
MINECRAFT_STONEBLOCK_IP=10.0.10.42
MINECRAFT_STONEBLOCK_PORT=25565
# Pterodactyl (CT 105/107)
PTERODACTYL_PANEL_IP=10.0.10.45
PTERODACTYL_PANEL_PORT=80
PTERODACTYL_WINGS_IP=10.0.10.46
PTERODACTYL_WINGS_PORT=8080
# =============================================================================
# OTHER HOSTS
# =============================================================================
# OpenMediaVault (VM 400)
OMV_IP=10.0.10.5
OMV_NFS_PATH=/export/backups
# HOMELAB-COMMAND (Windows PC)
HOMELAB_COMMAND_IP=10.0.10.10
# Twingate (CT 101)
TWINGATE_IP=10.0.10.179
# =============================================================================
# PUBLIC DOMAINS (Caddy reverse proxy)
# =============================================================================
DOMAIN_BASE=nianticbooks.com
DOMAIN_PROXMOX=freddesk.nianticbooks.com
DOMAIN_HOME_ASSISTANT=bob.nianticbooks.com
DOMAIN_AUTHENTIK=auth.nianticbooks.com
DOMAIN_3D_PRINTER=ad5m.nianticbooks.com
DOMAIN_COCKTAILS=cocktails.nianticbooks.com
DOMAIN_VIKUNJA=tasks.nianticbooks.com
# =============================================================================
# TRUSTED PROXIES (for services behind WireGuard)
# =============================================================================
TRUSTED_PROXIES=10.0.9.0/24,10.0.8.0/24,10.0.9.3

50
infrastructure/.gitignore vendored Normal file
View File

@@ -0,0 +1,50 @@
# Sensitive Infrastructure Data
*.key
*.pem
*.crt
*.p12
*.pfx
# Environment and Configuration Files
.env
.env.local
*.conf
config.yaml
config.yml
secrets.yaml
secrets.yml
# SSH Keys
id_rsa
id_ed25519
*.pub
# API Keys and Tokens
*api-key*
*token*
*secret*
# Backup Files
*.bak
*.backup
*-backup.*
# Completed Audits with Sensitive Data
*-completed.md
*-filled.md
# OS Files
.DS_Store
Thumbs.db
# Editor Files
.vscode/
.idea/
*.swp
*.swo
*~
# Claude Code local settings (but allow commands to be shared)
.claude/*
!.claude/commands/
mc_server/modpack/*.zip

3
infrastructure/.gitmodules vendored Normal file
View File

@@ -0,0 +1,3 @@
[submodule "claude-shared"]
path = claude-shared
url = https://github.com/FredN9MRQ/claude-workflows.git

15
infrastructure/.mcp.json Normal file
View File

@@ -0,0 +1,15 @@
{
"mcpServers": {
"n8n-mcp": {
"command": "cmd",
"args": ["/c", "npx", "-y", "n8n-mcp"],
"env": {
"MCP_MODE": "stdio",
"LOG_LEVEL": "error",
"DISABLE_CONSOLE_OUTPUT": "true",
"N8N_API_URL": "http://10.0.10.22:5678",
"N8N_API_KEY": "eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJzdWIiOiI1ZTVjZTQ2Zi1iNmUyLTQyMGEtYmUzMC1iYzQzYThlMDA1YjMiLCJpc3MiOiJuOG4iLCJhdWQiOiJwdWJsaWMtYXBpIiwiaWF0IjoxNzY4MzUxNTgxfQ.IEInZiVdFy4KivDmcvYXlnrvMUr1H1krPyjLRxs_5d4"
}
}
}
}

Binary file not shown.

After

Width:  |  Height:  |  Size: 6.9 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 59 KiB

View File

@@ -0,0 +1,153 @@
# 3D Printing Setup - Quick Start Guide
Quick reference for setting up Orca Slicer with shared profiles on your homelab.
## First Time Setup (Run these commands)
### 1. Mount the 3DPrinting Share
```bash
# Mount the OMV share
sudo mount-3dprinting.sh
# OR set up automatic mounting on boot
sudo setup-3dprinting-automount.sh
sudo mount /mnt/3DPrinting
```
Verify it worked:
```bash
ls -la /mnt/3DPrinting/3DPrinting/
```
You should see: `profiles/`, `models/`, `gcode/`, `projects/`
### 2. Install Orca Slicer
```bash
# Install from the downloaded AppImage
sudo install-orca-slicer.sh
```
### 3. Launch Orca Slicer
```bash
/opt/OrcaSlicer/orca-slicer.AppImage
```
Or search for "Orca Slicer" in your application menu.
## Daily Usage
### Slicing a Model
1. Open Orca Slicer
2. File → Import → Select STL from `/mnt/3DPrinting/3DPrinting/models/`
3. Select profiles:
- Printer: AD5M
- Filament: PLA/PETG/etc
- Print: Quality level
4. Slice
5. Export gcode to `/mnt/3DPrinting/3DPrinting/gcode/queue/`
6. Transfer to printer and print!
### Syncing Profiles
**Get latest shared profiles:**
```bash
sync-orca-profiles.sh pull
```
**Share your updated profiles:**
```bash
sync-orca-profiles.sh push
```
**Check sync status:**
```bash
sync-orca-profiles.sh status
```
## File Locations
### On OMV (10.0.10.5)
```
/srv/dev-disk-by-uuid-1c893fab-9943-43df-8e24-3c9190869955/data/3DPrinting/
├── profiles/ # Shared Orca Slicer profiles
├── models/ # STL files
├── gcode/ # Sliced gcode files
└── projects/ # Work in progress
```
### On This Computer
```
/mnt/3DPrinting/3DPrinting/ # Mounted share (same as above)
~/.config/OrcaSlicer/user/ # Local Orca Slicer profiles
```
## Troubleshooting
### Share not mounted?
```bash
# Check if mounted
mount | grep 3DPrinting
# If not mounted
sudo mount-3dprinting.sh
```
### Can't find profiles in Orca Slicer?
```bash
# Pull profiles from shared storage
sync-orca-profiles.sh pull
# Restart Orca Slicer
```
### OMV server not accessible?
```bash
# Test connection
ping 10.0.10.5
# Check if SMB is running on OMV
ssh 10.0.10.5 "systemctl status smbd"
```
## Helper Scripts
All scripts are in `~/.local/bin/`:
| Script | Purpose |
|--------|---------|
| `mount-3dprinting.sh` | Mount the 3DPrinting share (requires sudo) |
| `setup-3dprinting-automount.sh` | Configure automatic mounting on boot |
| `install-orca-slicer.sh` | Install Orca Slicer from AppImage |
| `sync-orca-profiles.sh pull` | Download shared profiles |
| `sync-orca-profiles.sh push` | Upload your profiles to share |
| `sync-orca-profiles.sh status` | Check sync configuration |
## Next Steps
1. ✅ Mount share: `sudo mount-3dprinting.sh`
2. ✅ Install Orca Slicer: `sudo install-orca-slicer.sh`
3. 🔲 Launch Orca Slicer and configure AD5M printer profile
4. 🔲 Test slice a model
5. 🔲 Push your AD5M profiles: `sync-orca-profiles.sh push`
6. 🔲 Install Orca Slicer on other family computers
7. 🔲 On those computers: mount share, install Orca Slicer, pull profiles
## Family Members: Getting Started
If you're a family member setting up Orca Slicer on your computer:
1. **Mount the network share** (ask Fred for help with this part)
2. **Install Orca Slicer** - Download from https://github.com/SoftFever/OrcaSlicer/releases/latest
3. **Get the shared profiles**:
```bash
sync-orca-profiles.sh pull
```
4. **Start slicing!** All the AD5M profiles are ready to use
## Documentation
For complete setup details, see: [3D-PRINTING-SETUP.md](3D-PRINTING-SETUP.md)

View File

@@ -0,0 +1,325 @@
# 3D Printing - Shared Orca Slicer Setup
This document describes the setup for sharing Orca Slicer profiles across multiple computers in the homelab, while keeping installations local for best performance.
## Overview
**Goal**: Family members can use Orca Slicer on their own computers with shared profiles for the AD5M printer, with files stored on OMV.
**Approach**:
- Local Orca Slicer installations on each computer (best performance)
- Shared profiles stored on OMV NFS/SMB share
- Shared STL library and gcode output folder on OMV
## OMV Shared Folder Structure
Create the following structure on your OMV storage:
```
/srv/3DPrinting/
├── profiles/ # Shared Orca Slicer profiles
│ ├── filament/ # Filament profiles
│ ├── print/ # Print profiles (layer heights, speeds, etc.)
│ ├── printer/ # Printer profiles (AD5M config)
│ └── process/ # Process profiles
├── models/ # STL files library
│ ├── functional/
│ ├── decorative/
│ └── repairs/
├── gcode/ # Sliced output ready to print
│ ├── queue/ # Ready to print
│ └── archive/ # Completed prints
└── projects/ # Work-in-progress projects
```
## OMV Setup Steps
### 1. Create Shared Folder on OMV
SSH into your OMV server (pve-storage or wherever OMV is running):
```bash
# Create the directory structure
sudo mkdir -p /srv/3DPrinting/{profiles/{filament,print,printer,process},models/{functional,decorative,repairs},gcode/{queue,archive},projects}
# Set permissions (adjust user/group as needed)
sudo chown -R fred:users /srv/3DPrinting
sudo chmod -R 775 /srv/3DPrinting
```
### 2. Create NFS Share in OMV
Via OMV web interface:
1. Storage → Shared Folders → Create
- Name: `3DPrinting`
- Device: Your storage device
- Path: `/3DPrinting/`
- Permissions: fred (R/W), users (R/W)
2. Services → NFS → Shares → Create
- Shared folder: `3DPrinting`
- Client: `10.0.10.0/24` (adjust to your network)
- Privilege: Read/Write
- Extra options: `rw,sync,no_subtree_check,no_root_squash` (note: `no_root_squash` lets client root act as root on the share — acceptable on a trusted LAN like 10.0.10.0/24, but do not expose this export more widely)
### 3. Mount on Client Computers
Add to `/etc/fstab` on each client computer:
```bash
# Replace <OMV-IP> with your OMV server IP
<OMV-IP>:/export/3DPrinting /mnt/3DPrinting nfs defaults,user,auto,noatime 0 0
```
Mount the share:
```bash
sudo mkdir -p /mnt/3DPrinting
sudo mount /mnt/3DPrinting
```
Or use SMB/CIFS if preferred:
```bash
# Add to /etc/fstab
//<OMV-IP>/3DPrinting /mnt/3DPrinting cifs credentials=/home/fred/.smbcredentials,uid=1000,gid=1000 0 0
```
## Orca Slicer Installation
### On Ubuntu/Debian (including this computer)
1. Download the latest AppImage from GitHub:
```bash
cd ~/Downloads
wget https://github.com/SoftFever/OrcaSlicer/releases/download/v2.2.0/OrcaSlicer_Linux_V2.2.0.AppImage  # pinned release; check /releases/latest for a newer version
chmod +x OrcaSlicer_Linux_*.AppImage
```
2. Move to a permanent location:
```bash
sudo mkdir -p /opt/OrcaSlicer
sudo mv OrcaSlicer_Linux_*.AppImage /opt/OrcaSlicer/orca-slicer.AppImage
```
3. Create desktop entry:
```bash
cat > ~/.local/share/applications/orca-slicer.desktop <<EOF
[Desktop Entry]
Name=Orca Slicer
Comment=3D Printer Slicer
Exec=/opt/OrcaSlicer/orca-slicer.AppImage
Icon=orca-slicer
Terminal=false
Type=Application
Categories=Graphics;3DGraphics;
EOF
```
4. Launch Orca Slicer:
```bash
/opt/OrcaSlicer/orca-slicer.AppImage
```
### On Windows
1. Download installer from: https://github.com/SoftFever/OrcaSlicer/releases/latest
2. Run the installer
3. Default install location: `C:\Program Files\OrcaSlicer\`
### On macOS
1. Download DMG from: https://github.com/SoftFever/OrcaSlicer/releases/latest
2. Drag OrcaSlicer to Applications folder
## Profile Sync Setup
### Initial Profile Export (from homelab-command computer)
Once you have Orca Slicer installed with your AD5M profiles configured:
```bash
# Export profiles to shared location
cd ~/.config/OrcaSlicer # or check actual config location
cp -r user/* /mnt/3DPrinting/profiles/
```
### Profile Sync Script
Create `~/.local/bin/sync-orca-profiles.sh`:
```bash
#!/bin/bash
# Sync Orca Slicer profiles with the shared network storage on OMV.
# Usage: sync-orca-profiles.sh {push|pull|status}
set -euo pipefail

ORCA_CONFIG="$HOME/.config/OrcaSlicer/user"
SHARED_PROFILES="/mnt/3DPrinting/profiles"

# Refuse to sync when the network share is not mounted — otherwise
# `rsync --delete` against an empty mountpoint would silently wipe
# the destination (including your local profiles on a pull).
require_share() {
  if ! mountpoint -q /mnt/3DPrinting; then
    echo "ERROR: /mnt/3DPrinting is not mounted (run: sudo mount-3dprinting.sh)" >&2
    exit 1
  fi
}

case "${1:-}" in
  push)
    require_share
    echo "Pushing local profiles to shared storage..."
    rsync -av --delete "$ORCA_CONFIG/" "$SHARED_PROFILES/"
    echo "✓ Profiles pushed"
    ;;
  pull)
    require_share
    echo "Pulling profiles from shared storage..."
    rsync -av --delete "$SHARED_PROFILES/" "$ORCA_CONFIG/"
    echo "✓ Profiles pulled"
    ;;
  status)
    # Documented in the quick-start guide; report the sync configuration.
    echo "Local profiles : $ORCA_CONFIG"
    echo "Shared profiles: $SHARED_PROFILES"
    if mountpoint -q /mnt/3DPrinting; then
      echo "Share mounted  : yes"
    else
      echo "Share mounted  : no"
    fi
    ;;
  *)
    echo "Usage: sync-orca-profiles.sh {push|pull|status}"
    echo "  push   - Upload your local profiles to shared storage"
    echo "  pull   - Download shared profiles to your local config"
    echo "  status - Show sync configuration and mount state"
    exit 1
    ;;
esac
```
Make it executable:
```bash
chmod +x ~/.local/bin/sync-orca-profiles.sh
```
### Profile Management Workflow
**When you update profiles** (any computer):
```bash
sync-orca-profiles.sh push
```
**When starting on a new computer** or to get latest profiles:
```bash
sync-orca-profiles.sh pull
```
**Tip**: Add to your shell aliases:
```bash
alias orca-push='sync-orca-profiles.sh push'
alias orca-pull='sync-orca-profiles.sh pull'
```
## Orca Slicer Configuration
### First Launch Setup
1. Launch Orca Slicer
2. Skip the configuration wizard (we'll import profiles)
3. Pull shared profiles: `sync-orca-profiles.sh pull`
4. Restart Orca Slicer
### Configure Default Paths
In Orca Slicer preferences:
- **Default output directory**: `/mnt/3DPrinting/gcode/queue/`
- **STL search path**: `/mnt/3DPrinting/models/`
### Printer Profile - AD5M
If starting fresh, configure:
- Printer: Generic Ender 3 or Custom
- Bed size: 220 x 220 mm (adjust for your AD5M)
- Build height: 250 mm (adjust for your AD5M)
- Nozzle diameter: 0.4 mm (or your actual nozzle)
## Workflow for Family Members
### Printing a Model
1. **Find or add STL file**:
- Browse `/mnt/3DPrinting/models/`
- Or add new file to the models folder
2. **Slice in Orca Slicer**:
- Open STL from models folder
- Select printer profile: "AD5M"
- Select filament profile (PLA, PETG, etc.)
- Select print profile (quality level)
- Slice and export to `/mnt/3DPrinting/gcode/queue/`
3. **Send to printer**:
- Copy gcode to SD card, or
- Use OctoPrint/Mainsail if you set one up (optional)
4. **After printing**:
- Move gcode from `queue/` to `archive/`
### Sharing New Profiles
If someone creates a great new profile:
```bash
sync-orca-profiles.sh push
# Then others can: sync-orca-profiles.sh pull
```
## Advantages of This Setup
**Performance**: Native app speed, no web lag
**No Conflicts**: Each user has their own instance
**Shared Knowledge**: Everyone uses the same tested profiles
**Centralized Storage**: All files in one place, backed up by OMV
**Easy Updates**: Sync profiles when they improve
**Offline Work**: Can slice without network (if files are local)
## Optional Enhancements
### OctoPrint Integration (Optional)
If you want to skip SD cards and print over network:
1. Install OctoPrint on a Raspberry Pi or in a container
2. Connect to AD5M via USB
3. Configure Orca Slicer to upload directly to OctoPrint
4. Everyone can queue prints from anywhere
### Automatic Profile Sync (Optional)
Add to crontab to auto-pull profiles daily:
```bash
# Add to crontab -e
0 9 * * * /home/fred/.local/bin/sync-orca-profiles.sh pull
```
### Version Control for Profiles (Advanced)
Initialize git in the profiles folder to track changes:
```bash
cd /mnt/3DPrinting/profiles
git init
git add .
git commit -m "Initial profiles"
```
## Troubleshooting
### Profiles Not Showing Up
1. Check if share is mounted: `df -h | grep 3DPrinting`
2. Check Orca Slicer config location: May be `~/.config/OrcaSlicer` or `~/.local/share/OrcaSlicer`
3. Run `sync-orca-profiles.sh pull` manually
### Permission Issues
```bash
# Fix permissions on shared folder
sudo chown -R $USER:users /mnt/3DPrinting
sudo chmod -R 775 /mnt/3DPrinting
```
### Different Orca Slicer Versions
If different computers have different Orca Slicer versions, profiles may have compatibility issues. Keep all installations on the same major version.
## Next Steps
- [ ] Set up OMV 3DPrinting share
- [ ] Mount share on homelab-command computer
- [ ] Install Orca Slicer on homelab-command
- [ ] Configure AD5M printer profile
- [ ] Test print and tune profiles
- [ ] Export profiles to shared location
- [ ] Install Orca Slicer on family computers
- [ ] Test profile sync workflow
- [ ] (Optional) Set up OctoPrint for network printing
## References
- Orca Slicer GitHub: https://github.com/SoftFever/OrcaSlicer
- Orca Slicer Documentation: https://github.com/SoftFever/OrcaSlicer/wiki

View File

@@ -0,0 +1,165 @@
# Homelab Network - Agent Reference
**Last Updated:** 2026-01-20 | **Owner:** Fred | **Purpose:** Automation agent context
---
## HOSTS & HARDWARE
### VPS
- **Primary:** 66.63.182.168 (vps.nianticbooks.com) | 2vCPU/4GB | Caddy, WireGuard, RustDesk relay
- **Gaming:** 51.222.12.162 (deadeyeg4ming.vip) | WireGuard VPN (10.0.9.1)
### Proxmox
- **main-pve:** 10.0.10.3 | 32c/96GB | 13 LXC containers (CT 102-131)
- **pve-router:** 10.0.10.2 | 8c/8GB | Home Assistant VM, local dev
- **pve-storage:** 10.0.10.4 | OMV VM 400 (10.0.10.5, 12TB NFS)
### Network
- **Gateway:** 10.0.10.1 (UCG Ultra) | DHCP 10.0.10.50-254
- **WireGuard:** 10.0.9.0/24 (active), 10.0.8.0/24 (legacy)
---
## SERVICES (IP:PORT)
### Auth & Infrastructure
| IP | Service | CT | Ports | Notes |
|---|---|---|---|---|
| 10.0.10.15 | Step-CA | 115 | 8443 | ACME: https://10.0.10.15:8443/acme/acme/directory |
| 10.0.10.20 | PostgreSQL | 102 | 5432 | Shared DB (n8n, rustdesk, grafana, authentik) |
| 10.0.10.21 | Authentik | 121 | 9000 | SSO (admin: akadmin), OAuth2/OIDC |
| 10.0.10.22 | n8n | 106 | 5678 | Workflow automation |
| 10.0.10.23 | RustDesk | 123 | 21115-18 | ID server, pubkey: sfYuCTMHxrA22kukomb/RAKYyUgr8iaMfm/U4CFLfL0= |
| 10.0.10.25 | Prometheus/Grafana | 125 | 9090/3000 | Monitoring |
| 10.0.10.26 | Uptime Kuma | 128 | 3001 | Status monitoring |
### Apps & Gaming
| IP | Service | CT | Ports | Notes |
|---|---|---|---|---|
| 10.0.10.24 | Home Assistant | VM 104 | 8123 | Smart home (pve-router) |
| 10.0.10.27 | Dockge/Vikunja | 127 | 5001/3456 | Docker UI, tasks |
| 10.0.10.40 | Bar Assistant | 103 | 8080 | Cocktails (Meilisearch, Redis) |
| 10.0.10.41 | Minecraft Forge | 130 | 25565 | CFMRPGU modpack |
| 10.0.10.42 | Minecraft Stoneblock | 131 | 25565 | FTB Stoneblock 4 |
| 10.0.10.45/46 | Pterodactyl | 105/107 | 80/8080 | Game panel + wings |
### Other
- **OMV:** 10.0.10.5 (VM 400) | 12TB NFS backup target
- **HOMELAB-COMMAND:** 10.0.10.10 | Claude Code, Wyoming STT/TTS, Ollama
- **Twingate:** 10.0.10.179 (CT 101) | Zero-trust access
---
## PUBLIC DOMAINS (via Caddy @ 66.63.182.168)
| Domain | Backend | Service |
|---|---|---|
| freddesk.nianticbooks.com | 10.0.10.3:8006 | Proxmox |
| bob.nianticbooks.com | 10.0.10.24:8123 | Home Assistant |
| auth.nianticbooks.com | 10.0.10.21:9000 | Authentik |
| ad5m.nianticbooks.com | 10.0.10.30:80 | 3D Printer |
| cocktails.nianticbooks.com | 10.0.10.40 | Bar Assistant |
| tasks.nianticbooks.com | 10.0.10.27:3456 | Vikunja |
---
## SSH ACCESS (All key-based, NO passwords)
```bash
ssh fred@66.63.182.168 # VPS Primary
ssh ubuntu@51.222.12.162 # VPS Gaming
ssh root@10.0.10.3 # main-pve
ssh root@10.0.10.2 # pve-router
ssh root@10.0.10.4 # pve-storage
```
**Rule:** Generate/add SSH keys when adding new servers/devices.
---
## SERVICE CREDENTIALS & API ACCESS
### Authentik (10.0.10.21:9000)
- Admin: akadmin | Integrations: Proxmox (OIDC), Grafana (OAuth2)
### PostgreSQL (10.0.10.20:5432)
- Shared by: n8n, rustdesk, grafana, authentik
- Backup: Daily 2:00 AM → OMV NFS (~13MB)
### Step-CA (10.0.10.15:8443)
- ACME: `https://10.0.10.15:8443/acme/acme/directory`
- Provisioners: JWK (admin@nianticbooks.home), ACME
### RustDesk (10.0.10.23)
- Public endpoint: 66.63.182.168:21117 (relay)
- Pubkey: `sfYuCTMHxrA22kukomb/RAKYyUgr8iaMfm/U4CFLfL0=`
### Home Assistant (10.0.10.24:8123)
- Never commit secrets.yaml | Use secrets.yaml.example template
- LocalTuya: Requires device keys from Tuya IoT Platform
- Trusted proxies: 10.0.9.0/24, 10.0.8.0/24, 10.0.9.3
---
## CRITICAL CONSTRAINTS
- **VPS:** 2CPU/4GB only - lightweight services ONLY
- **Proxmox Storage:** ALWAYS `local`, NEVER `local-lvm`
- **SSH:** ALWAYS keys, NEVER passwords
- **Trusted Proxies:** Services must accept 10.0.9.0/24, 10.0.8.0/24, 10.0.9.3
- **HOMELAB-COMMAND:** SSH requires system restart to enable
---
## QUICK COMMANDS
### Container Ops
```bash
ssh root@10.0.10.3 "pct exec <CT> -- bash -c 'command'" # Execute in CT
pct status <CT> # Check status
pct exec <CT> -- docker logs --tail 50 <service> # View logs
pct exec <CT> -- docker restart <service> # Restart
```
### Caddy (VPS)
```bash
ssh fred@66.63.182.168 "nano /etc/caddy/Caddyfile" # Edit
ssh fred@66.63.182.168 "sudo systemctl reload caddy" # Reload
ssh fred@66.63.182.168 "sudo journalctl -u caddy --tail 50" # Logs
```
### Backups
- PostgreSQL: Daily 2:00 AM → 10.0.10.5:/export/backups (7d/4w/3m retention)
- Proxmox: Daily 2:30 AM → OMV NFS
- Log: `/var/log/homelab-backup.log`
---
## ACTIVE TODOS
### High Priority
1. Configure Prometheus targets + Grafana dashboards
2. Remove deprecated VMs (Spoolman 10.0.10.71, Authelia 10.0.10.112)
### Medium Priority
- DNS: omv.nianticbooks.home → 10.0.10.5
- n8n service monitoring workflow (#4833)
- Authentik SSO integrations (Home Assistant, others)
### Low Priority
- Tier 2/3 backups (off-site, cloud)
- Home Assistant HTTPS certificates
---
## AVAILABLE IPs
**Reserved blocks:**
- 10.0.10.6-9 (infrastructure)
- 10.0.10.11-12, 14, 16-19 (management)
- 10.0.10.28-29, 32-39, 43-44, 47-49 (utility)
---
**Source:** C:/Users/Fred/projects/infrastructure/.claude/docs/

View File

@@ -0,0 +1,267 @@
# BlueBubbles Server Setup Guide
**Last Updated:** 2026-01-31
**Service:** BlueBubbles iMessage Server
**Host:** Fred's iMac (10.0.10.11 / 10.0.10.144)
**Version:** v1.9.9
**Status:** In Setup
---
## Overview
BlueBubbles allows you to access iMessage on Android, Windows, and Linux devices by running a server on your Mac that forwards messages via WebSocket.
**Architecture:**
```
iMessage on iMac (10.0.10.11)
        ↓
BlueBubbles Server (Port 1234)
        ↓
Caddy VPS Proxy (bluebubbles.nianticbooks.com)
        ↓
BlueBubbles Clients (Android/Web/Desktop)
```
---
## Installation Steps
### Step 1: Download BlueBubbles Server
**IMPORTANT:** You need to download this via Safari on your iMac due to GitHub's download mechanism.
On your iMac:
1. Open Safari and navigate to:
```
https://github.com/BlueBubblesApp/bluebubbles-server/releases/tag/v1.9.9
```
2. Scroll down to "Assets" section
3. Click on **`BlueBubbles-v1.9.9.dmg`** (NOT the `.zip` file)
4. The file will download to your ~/Downloads folder
### Step 2: Install BlueBubbles Server
1. Open the downloaded `.dmg` file
2. Drag BlueBubbles to your Applications folder
3. Open BlueBubbles from Applications
4. If you get a security warning:
- Go to System Settings → Privacy & Security
- Click "Open Anyway" for BlueBubbles
### Step 3: Initial Configuration
When BlueBubbles opens for the first time:
1. **Grant Permissions:**
- Full Disk Access (required to read iMessage database)
- Accessibility (required for Private API features)
- Go to: System Settings → Privacy & Security → Full Disk Access
- Add BlueBubbles and enable it
2. **Server Settings:**
- **Port:** 1234 (default, or choose your own)
- **Password:** Create a strong password (save it securely)
- **Server Name:** "Fred's BlueBubbles" (or any name you prefer)
3. **Enable Private API** (Recommended):
- This enables advanced features like:
- Typing indicators
- Read receipts
- Message reactions
- Send messages as specific accounts
- Toggle "Enable Private API" in settings
- Follow the on-screen instructions to install the helper
4. **Auto-Start Settings:**
- ✅ Enable "Start server on login"
- ✅ Enable "Start minimized"
- This ensures BlueBubbles runs automatically
### Step 4: Configure Firewall (if enabled)
If macOS Firewall is enabled on your iMac:
1. Go to System Settings → Network → Firewall
2. Click "Options..."
3. Add BlueBubbles to allowed apps
4. OR allow incoming connections on port 1234
### Step 5: Test Local Access
Before setting up external access, verify local functionality:
1. Note your server URL shown in BlueBubbles (should be like `http://10.0.10.11:1234`)
2. Note your password
3. Install BlueBubbles client on your phone/another device
4. Connect using:
- **Server URL:** `http://10.0.10.11:1234`
- **Password:** [Your password]
5. Verify you can see your messages
---
## External Access Setup (via Caddy VPS)
Once local access works, we'll set up external access via your VPS.
### Option 1: Use BlueBubbles Built-in Proxy (Easiest)
BlueBubbles includes built-in support for Cloudflare Tunnel, Ngrok, and Zrok:
1. In BlueBubbles settings, go to "Proxy Service"
2. Choose one of the free options:
- **Cloudflare Tunnel** (recommended - free, reliable)
- **Zrok** (free, open source)
- **Ngrok** (free tier available)
3. Follow the on-screen setup for your chosen service
4. Use the generated URL for remote connections
### Option 2: Use Your Caddy VPS (More Control)
Add to your Caddyfile on VPS (66.63.182.168):
```caddyfile
bluebubbles.nianticbooks.com {
reverse_proxy 10.0.10.11:1234 {
header_up X-Forwarded-Host {host}
header_up X-Forwarded-Proto {scheme}
header_up X-Real-IP {remote_host}
header_up X-Forwarded-For {remote_host}
}
}
```
Then reload Caddy:
```bash
ssh fred@66.63.182.168
sudo systemctl reload caddy
```
**Client Connection:**
- **Server URL:** `https://bluebubbles.nianticbooks.com`
- **Password:** [Your password]
---
## Security Considerations
1. **Use HTTPS:** Always use HTTPS for external connections (Caddy handles this automatically)
2. **Strong Password:** Use a unique, strong password for BlueBubbles
3. **Keep iMac Secure:** Since it has access to all your messages:
- Enable FileVault disk encryption
- Use a strong user password
- Keep macOS updated
4. **Network Security:** Your iMac is already on static IP 10.0.10.11, which is in the static range
---
## Maintenance
### Keep BlueBubbles Running
BlueBubbles needs to run 24/7 to relay messages. To ensure it stays running:
1. Set iMac to never sleep:
- System Settings → Energy Saver → Prevent automatic sleeping
- OR: Use "caffeinate" command to keep it awake
2. Enable "Start server on login" in BlueBubbles settings
3. Optionally: Set up monitoring in Uptime Kuma (10.0.10.26)
### Updates
Check for updates periodically:
- BlueBubbles → Settings → Check for Updates
- Or visit: https://github.com/BlueBubblesApp/bluebubbles-server/releases
### Logs
If you encounter issues:
- BlueBubbles → Settings → Logs
- Logs are stored in: `~/Library/Application Support/bluebubbles-server/`
---
## Client Apps
Download BlueBubbles clients for your devices:
- **Android:** https://play.google.com/store/apps/details?id=com.bluebubbles.messaging
- **Web:** https://bluebubbles.app/web
- **Desktop (Windows/Linux):** https://github.com/BlueBubblesApp/bluebubbles-app/releases
---
## Troubleshooting
### Messages not syncing
- Verify Full Disk Access is granted
- Check iMessage is signed in and working on iMac
- Restart BlueBubbles server
### Can't connect remotely
- Verify local connection works first (http://10.0.10.11:1234)
- Check Caddy configuration and reload
- Verify WireGuard tunnel is up
- Check firewall settings on iMac
### Private API not working
- Ensure SIP (System Integrity Protection) is not blocking it
- Reinstall Private API helper from BlueBubbles settings
- Check macOS version compatibility
### iMac goes to sleep
- Disable sleep in Energy Saver settings
- Check "Prevent automatic sleeping when display is off"
- Use "caffeinate" command to prevent sleep
---
## Next Steps
After completing this setup:
1. ✅ Test local connection
2. ✅ Set up external access (choose Option 1 or 2)
3. ✅ Install client apps on your devices
4. ✅ Test sending/receiving messages remotely
5. ⬜ Set up monitoring in Uptime Kuma (optional)
6. ⬜ Document final configuration in SERVICES.md
---
## Service Information
**Service Details:**
- **Host:** Fred's iMac (Late 2013, 3.2GHz i5, 24GB RAM, macOS Sequoia)
- **IP Address (Ethernet):** 10.0.10.11 (configured, cable not connected)
- **IP Address (Wi-Fi):** 10.0.10.144 (currently active)
- **Port:** 1234 (default)
- **Version:** 1.9.9
- **Auto-Start:** Enabled
- **Private API:** Enabled (recommended)
**Public Access:**
- **Option 1:** Built-in proxy (Cloudflare/Ngrok/Zrok)
- **Option 2:** https://bluebubbles.nianticbooks.com (via Caddy VPS)
**Status:** Setup in progress
---
## References
- GitHub: https://github.com/BlueBubblesApp/bluebubbles-server
- Documentation: https://bluebubbles.app/faq/
- Web Client: https://bluebubbles.app/web
- Community: https://discord.gg/bluebubbles
---
**Installation Date:** 2026-01-31
**Installed By:** Fred
**Last Verified:** Pending initial setup

View File

@@ -0,0 +1,495 @@
# Infrastructure Brainstorming Session
**Date**: 2025-10-28
**Status**: Planning Phase
---
## Initial Claude Code Discovery
I watched this video, https://youtu.be/MsQACpcuTkU?si=2h5VUlgtIcpLbP1v literally took his word as fact and subscribed to Claude Pro. I need to set this up on my 2013 Mac Pro Running Sequoia (using Open Core Legacy Patcher) please outline the steps and process for making this happen
### Claude Code Interest - The /init Command
Specifically Claude Code, as a clarification, I am most intrigued by the use of the /init command
**Setup Requirements:**
- Homebrew installation
- Claude Code CLI tool
- API authentication (separate from Claude Pro subscription)
- Note: Claude Pro ≠ API access (separate billing)
---
## Infrastructure Expansion Plans
### Current Environment
**VPS:**
- 2 cores / 4GB RAM
- Running: Pangolin reverse proxy with Gerbil tunnels (WireGuard-based)
- Concern: RAM and CPU usage limits
**Home Lab (Proxmox):**
- **DL380p**: 32 cores, 96GB RAM (main cluster node)
- **i5**: 8 cores, 8GB RAM (secondary cluster node)
- **OMV**: 12TB storage node
**Development Machine:**
- Mac Pro 2013 running Sequoia (via Open Core Legacy Patcher)
### Proposed New Services
1. **RustDesk Server** - Self-hosted remote desktop
2. **n8n** - Workflow automation platform
3. **Authentik** - Single Sign-On (SSO) platform
4. **Obsidian Livesync** - Self-hosted note synchronization
---
## Architecture Decision: Hybrid Approach
### VPS (Lightweight Services Only)
- Pangolin reverse proxy (existing)
- Gerbil tunnels (existing, WireGuard-based)
- RustDesk relay server (hbbr) - ~30-50MB RAM for NAT traversal only
**Reasoning**: Keep VPS lightweight to avoid resource constraints
### DL380p Proxmox (Heavy Lifting)
- PostgreSQL (shared database server)
- Authentik SSO with WebAuthn support
- n8n workflow automation
- RustDesk ID server (hbbs) - handles registration and signaling
- Prometheus + Grafana monitoring
- Obsidian CouchDB sync server
**Reasoning**: Abundant resources (32 cores, 96GB RAM) available for all services
---
## Authentik SSO - Core Requirements
### WebAuthn/FIDO2 Hardware Authentication
**Critical Requirement**: Device-specific hardware 2FA
**Supported Devices:**
- iPhone with Face ID (biometric authentication)
- Windows 11 laptop with Windows Hello (fingerprint/face/PIN)
- No YubiKey required (but supported if needed later)
**Security Features:**
- Phishing-resistant (WebAuthn verifies domain)
- Each device has unique cryptographic key
- Keys stored in device secure enclave (iPhone) or TPM (Windows)
- Can revoke individual devices if lost/stolen
- TOTP as backup MFA method
### Integration Targets
**Priority 1 (Critical):**
- Proxmox VE (OpenID Connect)
- n8n (OAuth2)
- Pangolin admin dashboard (if supported)
**Priority 2 (Nice to have):**
- Grafana (OAuth2)
- HomeAssistant (OAuth2)
- Any future services
**SSO Policies:**
- External access (via Pangolin): WebAuthn REQUIRED
- Internal network access: WebAuthn preferred, TOTP acceptable
- Admin operations: Always require WebAuthn
---
## Network Architecture
### Flow Diagram
```
Internet → VPS (Pangolin Reverse Proxy)
        ↓
   Gerbil Tunnel (WireGuard)
        ↓
   DL380p Proxmox Home Lab
        ↓
   Authentik SSO ←→ All Services
        ├─→ n8n
        ├─→ RustDesk (hbbs)
        ├─→ Grafana
        ├─→ Proxmox Web UI
        └─→ HomeAssistant (future)
```
### Service Endpoints
- `auth.yourdomain.com` → Authentik SSO
- `n8n.yourdomain.com` → n8n workflows
- `grafana.yourdomain.com` → Monitoring dashboards
- `obsidian.yourdomain.com` → Note sync (CouchDB)
---
## Implementation Strategy: 8 Phases
### Phase 1: Planning & Preparation
- Document current infrastructure
- Make architecture decisions (LXC vs Docker, shared vs separate PostgreSQL)
- Create project structure with Claude Code
- Plan network layout and port assignments
### Phase 2: Infrastructure Foundation on Proxmox
- Deploy PostgreSQL 15 (shared database server)
- Network and port planning
- Reserve static IPs for all services
### Phase 3: Deploy Core Services on Proxmox
- Authentik SSO with WebAuthn/FIDO2 support
- n8n workflow automation
- RustDesk ID server (hbbs)
### Phase 4: VPS Configuration
- RustDesk relay server (hbbr) - lightweight
- Update Pangolin reverse proxy routes
- DNS record creation
- SSL certificate management
### Phase 5: SSO Integration & WebAuthn Enrollment
- Configure Authentik OAuth2/OIDC providers
- Integrate Proxmox with OpenID Connect
- Integrate n8n with OAuth2
- Enroll all personal devices (iPhone, Windows laptop)
- Set up TOTP backup
### Phase 6: Monitoring, Security & Hardening
- Deploy Prometheus + Grafana monitoring stack
- Security hardening (firewall rules, Fail2ban, SSL)
- WebAuthn policies and device management
- Configure alerts
### Phase 7: Backup, Documentation & Testing
- Comprehensive backup solution to OMV (NFS)
- Complete infrastructure documentation
- Testing and validation procedures
- Disaster recovery drills
### Phase 8: Future Integrations
- HomeAssistant integration with Authentik
- Obsidian Livesync deployment
- Additional services as needed
---
## Resource Allocation Plan
### Proxmox DL380p Services
| Service | Cores | RAM | Storage | Purpose |
|---------|-------|-----|---------|---------|
| PostgreSQL | 2 | 4GB | 20GB | Shared database for all services |
| Authentik | 2 | 3GB | 30GB | SSO platform with WebAuthn |
| n8n | 4 | 4GB | 40GB | Workflow automation |
| RustDesk (hbbs) | 2 | 2GB | 10GB | Remote desktop ID server |
| Monitoring | 2 | 4GB | 50GB | Prometheus + Grafana |
| Obsidian Sync | 2 | 2GB | 50GB | CouchDB for note synchronization |
| **Total** | **14** | **19GB** | **200GB** | |
| **Available** | **18/32** | **77GB/96GB** | - | Still plenty of headroom! |
### VPS Resource Usage
| Service | Cores | RAM | Purpose |
|---------|-------|-----|---------|
| Pangolin | ~1 | ~2GB | Reverse proxy |
| Gerbil | ~0.5 | ~256MB | WireGuard tunnels |
| RustDesk (hbbr) | ~0.5 | ~128MB | NAT traversal relay |
| **Total** | **~2** | **~2.4GB** | |
| **Limit** | **2** | **4GB** | Within safe limits ✅ |
---
## Obsidian Implementation Details
### Why Obsidian for Infrastructure Documentation?
- Native markdown checkbox support
- Real-time sync across all devices (Mac, Windows, iPhone)
- Self-hosted sync (no subscription needed)
- Can store infrastructure checklist, notes, diagrams
- Works offline
- End-to-end encrypted
### Obsidian Livesync Architecture
- CouchDB server on Proxmox (backend)
- Obsidian apps on all devices (clients)
- Self-hosted sync via Pangolin reverse proxy
- Database: `obsidian-vault`
- Backup to OMV storage
### Device Setup
1. Mac Pro: Primary documentation device
2. Windows 11 Laptop: Access from work/travel
3. iPhone: Mobile access to infrastructure notes and checklists
### Integration with Infrastructure Project
- Implementation checklist (190+ tasks) stored in Obsidian
- Real-time updates across devices as tasks are completed
- Can attach network diagrams, screenshots, configs
- Version history via CouchDB replication
---
## Security Considerations
### Authentication Layers
1. **Network Level**: Gerbil tunnel encryption (WireGuard)
2. **Application Level**: Authentik SSO with WebAuthn
3. **Device Level**: Hardware-based authentication (Face ID, Windows Hello)
4. **Backup Level**: TOTP authenticator app
### Firewall Strategy
- VPS: Only expose Pangolin ports (80, 443, Gerbil tunnel port)
- Proxmox: Internal network only, no direct external access
- LXC containers: Isolated, only necessary inter-container communication
- Fail2ban on Authentik and VPS SSH
### Backup Security
- Daily backups to OMV (12TB NFS storage)
- Weekly and monthly rotation
- PostgreSQL dumps (compressed)
- Authentik media and config backups
- n8n workflow backups (credentials encrypted)
- RustDesk encryption keys (CRITICAL)
- Grafana dashboards
- Off-site backup optional (cloud via rclone)
### Certificate Management
- Let's Encrypt via Pangolin
- Automated renewal
- HSTS headers enabled
- TLS 1.3 enforcement
---
## Development Approach: Claude Code Usage
### Primary Use Cases
1. Generate complete deployment scripts for each service
2. Create LXC container configurations
3. Generate Docker Compose files
4. Create backup automation scripts
5. Generate comprehensive documentation
6. Create testing and validation scripts
### Example /init Commands
**PostgreSQL Deployment:**
```
/init Create PostgreSQL 15 deployment for Proxmox LXC container with:
- Debian 12 base
- Separate databases for authentik, n8n, rustdesk, grafana
- Optimized for 4GB RAM
- Backup scripts to NFS mount
```
**Authentik with WebAuthn:**
```
/init Create Authentik SSO server deployment for Proxmox LXC with WebAuthn/FIDO2 support:
- Docker Compose setup
- External PostgreSQL connection
- WebAuthn enrollment flows
- OAuth2/OIDC provider configurations
- Integration templates for Proxmox, n8n, Grafana
```
**Complete Infrastructure:**
```
/init Create comprehensive project structure for self-hosted infrastructure:
- Folder organization for all services
- Deployment phase documentation
- Environment templates
- Backup automation
- Monitoring dashboards
- Security hardening checklists
```
---
## Timeline Estimate
### Week 1: Foundation (Phases 1-3)
- Day 1-2: Planning and documentation
- Day 3-4: PostgreSQL and network setup
- Day 5-7: Deploy Authentik, n8n, RustDesk on Proxmox
### Week 2: Integration (Phases 4-5)
- Day 1-2: VPS services and Pangolin configuration
- Day 3-5: SSO integration and WebAuthn enrollment
- Day 6-7: Testing and troubleshooting
### Week 3: Finalization (Phases 6-7)
- Day 1-3: Monitoring, security hardening, backup automation
- Day 4-5: Complete documentation
- Day 6-7: Comprehensive testing and disaster recovery drill
### Week 4+: Expansion (Phase 8)
- HomeAssistant integration
- Obsidian Livesync deployment
- Additional services as needed
**Note**: This is a methodical, careful rollout. No rushing. Test each phase thoroughly before proceeding.
---
## Success Metrics
### Technical Metrics
- All services accessible externally via SSO
- WebAuthn works on all enrolled devices
- No single service exceeding allocated resources
- VPS CPU/RAM usage under control (<50% / <3GB)
- Backups running successfully (100% success rate)
- All monitoring dashboards populated with data
- Zero unplanned downtime during deployment
### User Experience Metrics
- Single sign-on across all services
- Face ID / Windows Hello authentication works seamlessly
- No password fatigue (SSO handles everything)
- Mobile access to all services via Authentik
- Infrastructure documentation accessible from any device (Obsidian)
- Fast response times (<2s for service access)
### Security Metrics
- All external access requires WebAuthn
- No default passwords remaining
- Fail2ban protecting critical services
- SSL certificates valid and auto-renewing
- Audit logging enabled in Authentik
- Regular backup verification (monthly)
---
## Open Questions / Decisions Needed
### To Decide Before Starting:
- [ ] Confirm domain names to use (auth.domain.com, n8n.domain.com, etc.)
- [ ] LXC containers vs Docker VMs? (Recommendation: LXC for efficiency)
- [ ] Shared PostgreSQL or separate instances? (Recommendation: Shared)
- [ ] Separate VLAN for services? (Recommendation: Yes, if possible)
- [ ] Let's Encrypt via Pangolin or internal CA? (Recommendation: Let's Encrypt)
- [ ] Off-site backup strategy? (Cloud, second location, etc.)
### To Document During Setup:
- [ ] IP addresses assigned to each service
- [ ] Database credentials (store securely)
- [ ] OAuth Client IDs and secrets
- [ ] Authentik admin credentials
- [ ] RustDesk encryption keys (CRITICAL!)
- [ ] Backup schedule and retention
- [ ] Emergency access procedures
---
## Lessons Learned / Notes
### Why Hybrid Architecture?
- VPS is resource-constrained (2 cores / 4GB RAM)
- DL380p has abundant resources (32 cores / 96GB RAM)
- Gerbil tunnels already provide secure connectivity
- Minimizes VPS costs while maximizing home lab utilization
- Services stay responsive (no resource contention on VPS)
### Why Authentik over Alternatives?
- **vs Keycloak**: Much lighter weight (Keycloak needs 1-2GB+ RAM)
- **vs Authelia**: More feature-complete, better app support
- Native WebAuthn/FIDO2 support
- Modern UI
- Active development
- Good documentation
- Self-hosted (privacy and control)
### Why LXC Containers?
- More efficient than VMs (less overhead)
- Native Proxmox integration
- Easier backups and snapshots
- Better resource utilization
- Faster boot times
- Still provides isolation
### Why Shared PostgreSQL?
- Single database server to manage
- Easier backups (one dump for all databases)
- Resource efficiency (connection pooling)
- Simpler monitoring
- Adequate for home lab scale
- Can migrate to separate instances later if needed
---
## Reference Links
### Tools & Services
- **Claude Code**: https://docs.claude.com/en/docs/claude-code
- **Authentik**: https://goauthentik.io/
- **n8n**: https://n8n.io/
- **RustDesk**: https://rustdesk.com/
- **Obsidian**: https://obsidian.md/
- **Prometheus**: https://prometheus.io/
- **Grafana**: https://grafana.com/
### Documentation Created
- CLAUDE.md - Repository guidance for Claude Code
- RUNBOOK.md - Operational procedures
- DISASTER-RECOVERY.md - Recovery procedures
- SERVICES.md - Service configuration templates
- IMPROVEMENTS.md - Infrastructure recommendations
- MONITORING.md - Monitoring setup guide
- infrastructure-audit.md - Infrastructure audit checklist
- Infrastructure-Implementation-Checklist.md - Complete deployment checklist
### Automation Scripts
- backup-proxmox.sh - VM/container backups
- backup-vps.sh - VPS configuration backups
- health-check.sh - Service health monitoring
- cert-check.sh - SSL certificate expiration
- tunnel-monitor.sh - Gerbil tunnel monitoring
- resource-report.sh - Weekly resource reports
---
## Next Immediate Actions
1. **Review and finalize architecture decisions**
- Confirm domain names
- Decide on LXC vs Docker
- Plan network/VLAN layout
2. **Start with Claude Code project structure**
```bash
cd ~/proxmox-infrastructure
claude
/init Create comprehensive project structure...
```
3. **Fill out infrastructure audit checklist**
- Current VPS details
- Proxmox network configuration
- Available IP addresses
- DNS provider details
4. **Set up Obsidian for documentation**
- Install on Mac Pro
- Import implementation checklist
- Begin checking off tasks as completed
5. **Begin Phase 1: Planning & Preparation**
- Document current state
- Make final decisions
- Create project scaffolding
---
**Status**: Ready to begin implementation!
**Excitement Level**: 🚀🚀🚀
**Last Updated**: 2025-10-28

View File

@@ -0,0 +1,231 @@
# CA Certificate Deployment Summary
**Deployment Date:** 2026-01-25
**Deployment Status:** ✅ Complete - Phase 1
## What Was Deployed
### 1. Homelab Internal CA Root Certificate Distribution
The internal CA root certificate from your Step-CA server (10.0.10.15, CT 115) has been installed on:
#### LXC Containers
- ✅ CT 102 - PostgreSQL (10.0.10.20)
- ✅ CT 106 - n8n (10.0.10.22)
- ✅ CT 127 - Dockge (10.0.10.27)
- ✅ CT 128 - Uptime Kuma (10.0.10.26)
- ⚠️ CT 104 - Authentik (10.0.10.21) - Not running during deployment
#### Proxmox Hosts
- ✅ main-pve (10.0.10.3)
- ✅ pve-router (10.0.10.2)
- ✅ pve-storage (10.0.10.4)
#### VPS
- ✅ 66.63.182.168 (vps.nianticbooks.com)
**Location:** `/usr/local/share/ca-certificates/homelab-ca.crt` on all systems
### 2. Internal HTTPS Reverse Proxy Deployment
**Service:** Caddy Internal Proxy
**Location:** Docker container on CT 127 (10.0.10.27)
**Container Name:** caddy-internal
**Configuration:** `/opt/caddy-internal/` on CT 127
#### Services Now Available via HTTPS
All services are accessible at `https://<service>.nianticbooks.home`:
| Service | HTTPS URL | Backend Port |
|---------|-----------|--------------|
| Sonarr | https://sonarr.nianticbooks.home | 8989 |
| Radarr | https://radarr.nianticbooks.home | 7878 |
| Prowlarr | https://prowlarr.nianticbooks.home | 9696 |
| Bazarr | https://bazarr.nianticbooks.home | 6767 |
| Deluge | https://deluge.nianticbooks.home | 8112 |
| Calibre-Web | https://calibre.nianticbooks.home | 8083 |
| Vikunja | https://vikunja.nianticbooks.home | 3456 |
| Dockge | https://dockge.nianticbooks.home | 5001 |
**Certificate Type:** Caddy Internal PKI (self-signed)
**Certificate Authority:** Caddy Local Authority - 2026 ECC Root
## Client Configuration Required
To access these services without certificate warnings, you need to install the Caddy Internal CA certificate on your client devices.
### CA Certificate Location
The Caddy internal root CA certificate is saved at:
- **Infrastructure Repo:** `~/projects/infrastructure/Caddy-Internal-Root-CA.crt`
- **On Server:** Extract with `docker exec caddy-internal cat /data/caddy/pki/authorities/local/root.crt`
### Installation Instructions
#### Windows
1. Download `Caddy-Internal-Root-CA.crt` from the infrastructure repo
2. Double-click the certificate file
3. Click "Install Certificate"
4. Select "Local Machine" (requires admin)
5. Choose "Place all certificates in the following store"
6. Click "Browse" and select "Trusted Root Certification Authorities"
7. Click "Next" and "Finish"
#### Linux/WSL
```bash
sudo cp Caddy-Internal-Root-CA.crt /usr/local/share/ca-certificates/
sudo update-ca-certificates
```
#### macOS
```bash
sudo security add-trusted-cert -d -r trustRoot -k /Library/Keychains/System.keychain Caddy-Internal-Root-CA.crt
```
#### Firefox (All Platforms)
Firefox uses its own certificate store:
1. Open Firefox Settings → Privacy & Security → Certificates → View Certificates
2. Click "Authorities" tab → "Import"
3. Select `Caddy-Internal-Root-CA.crt`
4. Check "Trust this CA to identify websites"
5. Click OK
## DNS Configuration
For the `.nianticbooks.home` domains to resolve, add to your DNS server (UCG Ultra DHCP/DNS):
```
sonarr.nianticbooks.home → 10.0.10.27
radarr.nianticbooks.home → 10.0.10.27
prowlarr.nianticbooks.home → 10.0.10.27
bazarr.nianticbooks.home → 10.0.10.27
deluge.nianticbooks.home → 10.0.10.27
calibre.nianticbooks.home → 10.0.10.27
vikunja.nianticbooks.home → 10.0.10.27
dockge.nianticbooks.home → 10.0.10.27
```
Or add a wildcard entry:
```
*.nianticbooks.home → 10.0.10.27
```
Alternatively, add to your local `/etc/hosts` (Linux/Mac) or `C:\Windows\System32\drivers\etc\hosts` (Windows):
```
10.0.10.27 sonarr.nianticbooks.home radarr.nianticbooks.home prowlarr.nianticbooks.home bazarr.nianticbooks.home deluge.nianticbooks.home calibre.nianticbooks.home vikunja.nianticbooks.home dockge.nianticbooks.home
```
## Management Commands
### View Caddy Logs
```bash
ssh root@10.0.10.3 "pct exec 127 -- docker logs caddy-internal -f"
```
### Restart Caddy
```bash
ssh root@10.0.10.3 "pct exec 127 -- docker restart caddy-internal"
```
### Update Caddyfile
```bash
# Edit on server
ssh root@10.0.10.3
pct exec 127 -- bash
cd /opt/caddy-internal
nano Caddyfile
docker restart caddy-internal
```
### View Generated Certificates
```bash
ssh root@10.0.10.3 "pct exec 127 -- docker exec caddy-internal ls -la /data/caddy/certificates/local/"
```
## What's Still Needed (Phase 2)
### Step-CA ACME Integration
The current setup uses Caddy's internal PKI (self-signed certificates). For better integration with your existing Step-CA server, we need to:
1. **Fix CA Server Certificate:** The Step-CA server certificate needs an IP SAN for 10.0.10.15
2. **Configure ACME Client:** Update Caddy to use Step-CA ACME endpoint
3. **Trust Chain:** Ensure Caddy trusts the Step-CA root certificate
**Benefit:** Single CA for the entire homelab instead of two separate CAs.
### Services Still Needing SSL
**Proxmox Hosts:**
- ✅ main-pve (10.0.10.3) - Already has SSL, needs CA-signed cert
- ✅ pve-router (10.0.10.2) - Already has SSL, needs CA-signed cert
- ✅ pve-storage (10.0.10.4) - Already has SSL, needs CA-signed cert
**LXC Services:**
- ✅ Home Assistant (10.0.10.24) - Already has SSL, needs CA-signed cert
- ⚠️ n8n (10.0.10.22) - HTTP only
- ⚠️ Authentik (10.0.10.21) - HTTP only
- ⚠️ Grafana (10.0.10.25) - HTTP only
**VPS Caddy:**
- Update VPS Caddy to use internal CA for public services
- Avoids "invalid certificate" warnings when accessing services remotely
### Documentation
- [ ] Update SERVICES.md with new HTTPS endpoints
- [ ] Create quick-start guide for new devices
- [ ] Add monitoring for certificate expiration
## Scripts Created
- **`scripts/deploy-ca-certificates.sh`** - Deploys homelab CA root to all containers
- **`scripts/setup-internal-caddy.sh`** - Interactive Caddy deployment (not used - manual deployment preferred)
## Troubleshooting
### Certificate Warnings Still Appear
1. Verify CA certificate is installed on client device
2. Check that DNS resolves to 10.0.10.27
3. Ensure you're using `https://` (not `http://`)
4. Clear browser cache and restart browser
### Service Not Accessible
1. Check Caddy is running: `docker ps | grep caddy-internal`
2. Check Caddy logs: `docker logs caddy-internal`
3. Verify backend service is running: `docker ps` or `systemctl status <service>`
4. Check firewall rules on CT 127
### Connection Refused
- Caddy listens on port 443 only (no port 80)
- Ensure you're using HTTPS URLs
- Verify Caddy container is in `host` network mode
## Security Considerations
**Current State:**
- ✅ All internal traffic encrypted
- ✅ CA certificates properly distributed
- ⚠️ Using Caddy internal PKI (self-signed) instead of Step-CA
**Recommendations:**
- Install CA certificate on all client devices immediately
- Do NOT expose Caddy internal proxy ports publicly (internal use only)
- Regularly update Caddy container for security patches
## Next Steps
1. **Immediate:** Install Caddy CA certificate on your primary devices
2. **Short-term:** Add DNS entries or hosts file entries
3. **Medium-term:** Migrate from Caddy internal PKI to Step-CA ACME
4. **Long-term:** Add remaining services (n8n, Authentik, Grafana) to HTTPS
---
**Deployment Completed By:** Fred (with Claude Code)
**Last Updated:** 2026-01-25
**Status:** ✅ Phase 1 Complete - Services accessible via HTTPS with self-signed certificates

View File

@@ -0,0 +1,153 @@
# CA Integration Work - Status & Resume Guide
**Last Updated:** 2026-01-25
**Status:** Phase 1 Complete ✅ - Ready for Phase 2
## Quick Summary
You were concerned that your internal CA server (10.0.10.15, CT 115) wasn't being used by services, causing certificate warnings when accessing them remotely. We've completed Phase 1 of fixing this.
## What's Been Done ✅
### Phase 1: Internal HTTPS (Complete)
- ✅ Distributed homelab CA root cert to all containers, Proxmox hosts, and VPS
- ✅ Deployed Caddy reverse proxy on CT 127 with SSL termination
- ✅ All Docker services (Sonarr, Radarr, Prowlarr, Bazarr, Deluge, Calibre, Vikunja, Dockge) now have HTTPS
**Key Files:**
- [CA-DEPLOYMENT-SUMMARY.md](CA-DEPLOYMENT-SUMMARY.md) - Complete documentation
- [scripts/deploy-ca-certificates.sh](scripts/deploy-ca-certificates.sh) - Automation script
- Caddy config: `/opt/caddy-internal/Caddyfile` on CT 127
**Services:** All accessible at `https://<service>.nianticbooks.home`
**Caddy Container:**
- Running on CT 127 (10.0.10.27)
- Container name: `caddy-internal`
- Listens on port 443 (HTTPS only)
- Uses Caddy internal PKI (self-signed certificates)
## What You Still Need To Do
**Immediate (to remove browser warnings):**
1. Install CA certificate on your devices: `Caddy-Internal-Root-CA.crt` (in this directory)
- See CA-DEPLOYMENT-SUMMARY.md for platform-specific instructions
2. Add DNS entries or edit hosts file:
```
10.0.10.27 sonarr.nianticbooks.home radarr.nianticbooks.home prowlarr.nianticbooks.home ...
```
## What's Still Needed (Phase 2)
### High Priority
1. **Migrate to Step-CA ACME** (instead of Caddy internal PKI)
- Problem: CA server cert at 10.0.10.15 doesn't have IP SAN
- Solution: Regenerate CA server cert with IP SAN, or use DNS name
- Benefit: Single CA for entire homelab
2. **Add SSL to remaining services:**
- Home Assistant (10.0.10.24) - has SSL, needs CA-signed cert
- Proxmox hosts (10.0.10.2, 10.0.10.3, 10.0.10.4) - have SSL, need CA-signed certs
- n8n (10.0.10.22) - HTTP only
- Authentik (10.0.10.21) - HTTP only
- Grafana (10.0.10.25) - HTTP only
3. **Update VPS Caddy:**
- Configure VPS Caddy to use internal CA for public services
- Fixes cert warnings when accessing services from outside network
### Lower Priority
- Update SERVICES.md with new HTTPS endpoints
- Set up certificate expiration monitoring
- Add more services as needed
## How To Resume This Work
**To continue CA integration:**
```bash
cd ~/projects/infrastructure
# Review current state
cat CA-DEPLOYMENT-SUMMARY.md
cat CA-WORK-IN-PROGRESS.md # This file
# Check Caddy status
ssh root@10.0.10.3 "pct exec 127 -- docker logs caddy-internal"
# Continue with Phase 2 tasks above
```
**To modify Caddy configuration:**
```bash
ssh root@10.0.10.3
pct exec 127 -- bash
cd /opt/caddy-internal
nano Caddyfile
docker restart caddy-internal
```
**To add more services:**
1. Edit `/opt/caddy-internal/Caddyfile` on CT 127
2. Add new service block (see existing examples)
3. Restart: `docker restart caddy-internal`
4. Add DNS entry or hosts file entry
## Key Locations
**CA Certificates:**
- Homelab CA root: `/usr/local/share/ca-certificates/homelab-ca.crt` (on all systems)
- Caddy internal CA: Extract with `docker exec caddy-internal cat /data/caddy/pki/authorities/local/root.crt`
- Step-CA root: `/etc/step-ca/.step/certs/root_ca.crt` on CT 115
**Caddy Configuration:**
- Config directory: `/opt/caddy-internal/` on CT 127
- Caddyfile: `/opt/caddy-internal/Caddyfile`
- Docker compose: `/opt/caddy-internal/docker-compose.yml`
- Certificate storage: Inside container at `/data/caddy/certificates/local/`
**Services:**
- CA Server: 10.0.10.15 (CT 115) - ACME endpoint: https://10.0.10.15:8443/acme/acme/directory
- Caddy proxy: 10.0.10.27 (CT 127) - Port 443
## Problem Context (Why We Did This)
**Original Issue:**
- You have many Docker services with web UIs
- They were HTTP only, causing download/connection issues
- When accessed remotely through VPS Caddy, certificate warnings appeared
- Your internal CA server wasn't being utilized by services
**Solution Implemented:**
- Deployed SSL reverse proxy for all internal services
- Distributed CA certificates to trust the proxy
- Now all services have HTTPS with valid certificates (once CA cert installed on clients)
## Commands Reference
**View Caddy logs:**
```bash
ssh root@10.0.10.3 "pct exec 127 -- docker logs caddy-internal -f"
```
**Restart Caddy:**
```bash
ssh root@10.0.10.3 "pct exec 127 -- docker restart caddy-internal"
```
**Check listening ports:**
```bash
ssh root@10.0.10.3 "pct exec 127 -- ss -tlnp | grep caddy"
```
**Redeploy CA certificates (if needed):**
```bash
cd ~/projects/infrastructure
./scripts/deploy-ca-certificates.sh
```
---
**Git Commit:** `2418b48` - Deploy CA certificates and internal HTTPS reverse proxy
**Branch:** master
**Files Modified:** CA-DEPLOYMENT-SUMMARY.md, scripts/deploy-ca-certificates.sh, scripts/setup-internal-caddy.sh

162
infrastructure/CLAUDE.md Normal file
View File

@@ -0,0 +1,162 @@
# CLAUDE.md
This file provides guidance to Claude Code (claude.ai/code) when working with code in this repository.
## Quick Reference
This is an infrastructure documentation and automation repository for a self-hosted home lab environment.
**When you need specific information, refer to these specialized docs:**
### Core Architecture
- [.claude/docs/ARCHITECTURE.md](.claude/docs/ARCHITECTURE.md) - Infrastructure components, network config, WireGuard VPN
- [.claude/docs/SERVICES.md](.claude/docs/SERVICES.md) - Deployed services, planned deployments, service-specific details
- [.claude/docs/IP-QUICK-REF.md](.claude/docs/IP-QUICK-REF.md) - Essential IP addresses (< 20 most-used IPs)
### Workflows & Operations
- [.claude/docs/COMMON-TASKS.md](.claude/docs/COMMON-TASKS.md) - SSH access, Caddy management, git workflow, script deployment
- [.claude/docs/HOME-ASSISTANT.md](.claude/docs/HOME-ASSISTANT.md) - HA config management, sync workflows, integrations
- [.claude/docs/CERTIFICATES.md](.claude/docs/CERTIFICATES.md) - Step-CA setup, ACME provisioner, SSL configuration
### Detailed References (load when needed)
- `IP-ALLOCATION.md` - Complete network IP allocation (10.0.10.0/24) - **SOURCE OF TRUTH**
- `infrastructure-audit.md` - Current infrastructure state, running containers, service details
- `SERVICES.md` - Comprehensive service documentation with startup/health check commands
- `RUNBOOK.md` - Operational procedures
- `scripts/README.md` - Script usage and automation
- `guides/HOMELAB-BACKUP-STRATEGY.md` - Backup system architecture
## Essential Quick Facts
**Primary Infrastructure:**
- VPS: fred@66.63.182.168 (Caddy reverse proxy)
- Gaming VPS: ubuntu@51.222.12.162 (WireGuard server, deadeyeg4ming.vip)
- Proxmox: root@10.0.10.3 (main-pve), root@10.0.10.2 (pve-router), root@10.0.10.4 (pve-storage)
- Network: 10.0.10.0/24 (DHCP: .50-.254, Static: .1-.49)
**Critical IPs:**
- 10.0.10.1 - UCG Ultra (gateway)
- 10.0.10.3 - main-pve (primary Proxmox)
- 10.0.10.10 - HOMELAB-COMMAND (Gaming PC, Windows 11, Claude Code host)
- 10.0.10.24 - Home Assistant
**WireGuard Tunnel:**
- Tunnel: 10.0.9.0/24 (Gaming VPS .1, UCG Ultra .2, VPS proxy IP .3)
- Endpoint: 51.222.12.162:51820
## Documentation Structure
**Living Docs:** infrastructure-audit.md, IP-ALLOCATION.md, IMPROVEMENTS.md, MORNING-REMINDER.md
**Reference Guides:** BRAINSTORM.md, RUNBOOK.md, DISASTER-RECOVERY.md, MONITORING.md
**Automation:** scripts/README.md (all scripts require customization, use --dry-run)
## Key Constraints
- VPS: 2 CPU / 4GB RAM - lightweight services only
- Proxmox storage: `local` only (never `local-lvm`)
- SSH: Always use key-based auth
- Scripts: Test with `--dry-run` before production
## Important Service Patterns
### Internal HTTPS Access (Caddy Internal Proxy)
Services on CT 127 (Dockge) use Caddy Internal Proxy for HTTPS with self-signed certificates:
- **Location:** CT 127 (10.0.10.27)
- **Config:** `/opt/caddy-internal/Caddyfile`
- **Reload:** `ssh root@10.0.10.3 "pct exec 127 -- docker exec caddy-internal caddy reload --config /etc/caddy/Caddyfile"`
- **CA Certificate:** Extract with `pct exec 127 -- docker exec caddy-internal cat /data/caddy/pki/authorities/local/root.crt`
**Internal domains pattern:** `https://<service>.nianticbooks.home` → `10.0.10.27:<port>`
To enable HTTPS access:
1. Extract CA cert from Caddy Internal
2. Install on client devices (Windows: Trusted Root Certification Authorities)
3. Add entries to `C:\Windows\System32\drivers\etc\hosts` pointing to 10.0.10.27
**TODO:** Dockge container (CT 127) is not configured with the internal CA certificate. This needs to be addressed to allow Dockge UI to trust internal services using Caddy Internal PKI certificates.
### Public HTTPS Access (via VPS Caddy)
Services exposed publicly use Caddy on VPS (66.63.182.168):
- **Config:** `/etc/caddy/Caddyfile` on VPS
- **Reload:** `ssh fred@66.63.182.168 "sudo systemctl reload caddy"`
- **Certificates:** Automatic Let's Encrypt via ACME
- **Traffic Flow:** Internet → VPS Caddy → WireGuard tunnel (10.0.9.3) → Gaming VPS (10.0.9.1) → Home network (10.0.10.x)
**Current routing limitation:** WireGuard tunnel between Gaming VPS and home network has routing issues. Services work through port forwarding (DNAT) but general HTTP proxying times out. For new services requiring public access, prefer internal-only setup until routing is fixed.
### LXC Container Management
**Execute commands in containers:**
```bash
ssh root@10.0.10.3 "pct exec <CT-ID> -- <command>"
```
**Common container IDs:**
- CT 127 (Dockge): Media stack, internal proxy, Docker Compose management
- CT 106 (n8n): Workflow automation
- CT 102 (PostgreSQL): Shared database
- CT 121 (Authentik): SSO/authentication
- CT 128 (Uptime Kuma): Monitoring
**Docker in LXC:** Dockge (CT 127) runs Docker. Access containers:
```bash
pct exec 127 -- docker ps
pct exec 127 -- docker logs <container-name>
pct exec 127 -- docker restart <container-name>
```
## Network Troubleshooting
### WireGuard Tunnel Status
**Gaming VPS acts as hub:**
- 10.0.9.1 (Gaming VPS server)
- 10.0.9.2 (UCG Ultra client - gateway to home network)
- 10.0.9.3 (Old VPS proxy IP)
**Check tunnel:**
```bash
# From Gaming VPS
ssh ubuntu@51.222.12.162 "sudo wg show"
# From old VPS
ssh fred@66.63.182.168 "sudo wg show"
```
**Known issue:** Gaming VPS cannot currently route traffic between WireGuard peers (10.0.9.3 → home network 10.0.10.x). Handshakes work but ICMP/HTTP traffic times out. UCG Ultra firewall allows traffic; issue is likely routing configuration on Gaming VPS.
### DNS Resolution for Internal Services
Internal `.home` domains require hosts file entries:
- Windows: `C:\Windows\System32\drivers\etc\hosts`
- Linux/Mac: `/etc/hosts`
Format: `10.0.10.27 <service>.nianticbooks.home`
## Documentation Maintenance
**Living documents (update frequently):**
- `IP-ALLOCATION.md` - Network IP assignments (source of truth)
- `infrastructure-audit.md` - Current infrastructure state
- `IMPROVEMENTS.md` - Planned enhancements
- `MORNING-REMINDER.md` - Daily workflow checklist
**When adding new services:**
1. Reserve IP in `IP-ALLOCATION.md`
2. Document in `SERVICES.md` with startup/health check commands
3. Update `infrastructure-audit.md` with deployment date
4. Add to relevant .claude/docs files
5. If using internal HTTPS: add to Caddy Internal config
6. If using public HTTPS: add to VPS Caddy config

View File

@@ -0,0 +1,456 @@
# Disaster Recovery Plan
This document outlines procedures for recovering from various disaster scenarios affecting your infrastructure.
## Table of Contents
- [Emergency Contact Information](#emergency-contact-information)
- [Recovery Time Objectives](#recovery-time-objectives)
- [Backup Locations](#backup-locations)
- [Disaster Scenarios](#disaster-scenarios)
- [Recovery Procedures](#recovery-procedures)
- [Post-Recovery Checklist](#post-recovery-checklist)
---
## Emergency Contact Information
### Primary Contacts
| Role | Name | Phone | Email | Availability |
|------|------|-------|-------|--------------|
| Infrastructure Owner | _____________ | _____________ | _____________ | 24/7 |
| Network Admin | _____________ | _____________ | _____________ | Business Hours |
| Backup Contact | _____________ | _____________ | _____________ | 24/7 |
### Service Provider Contacts
| Provider | Service | Support Number | Account ID | Notes |
|----------|---------|----------------|------------|-------|
| VPS Provider | _____________ | _____________ | _____________ | _____________ |
| DNS Provider | _____________ | _____________ | _____________ | _____________ |
| Domain Registrar | _____________ | _____________ | _____________ | _____________ |
| ISP (Home Lab) | _____________ | _____________ | _____________ | _____________ |
---
## Recovery Time Objectives
Define acceptable downtime for each service tier:
| Tier | Service Type | RTO (Recovery Time Objective) | RPO (Recovery Point Objective) |
|------|-------------|------------------------------|--------------------------------|
| Critical | Public-facing services, Authentication | 1 hour | 15 minutes |
| Important | Internal services, Databases | 4 hours | 1 hour |
| Standard | Development, Testing | 24 hours | 24 hours |
| Low Priority | Monitoring, Logging | 48 hours | 24 hours |
---
## Backup Locations
### Primary Backup Location
- **Location**: _____________ (e.g., OMV storage node, external drive)
- **Path**: _____________
- **Retention**: _____________
- **Access Method**: _____________
### Secondary Backup Location (Off-site)
- **Location**: _____________ (e.g., Cloud storage, remote server)
- **Path**: _____________
- **Retention**: _____________
- **Access Method**: _____________
### Backup Schedule
- **Proxmox VMs/Containers**: Daily at _____
- **Configuration Files**: Weekly on _____
- **Critical Data**: Hourly/Daily
- **Off-site Sync**: Daily/Weekly
### Critical Items to Backup
- [ ] Proxmox VM/Container configurations and disks
- [ ] Pangolin reverse proxy configurations
- [ ] Gerbil tunnel configurations and keys
- [ ] SSL/TLS certificates and keys
- [ ] SSH keys and authorized_keys files
- [ ] Network configuration files
- [ ] DNS zone files (if self-hosted)
- [ ] Database dumps
- [ ] Application data and configurations
- [ ] Documentation and credentials (encrypted)
---
## Disaster Scenarios
### Scenario 1: VPS Complete Failure
**Impact**: All public-facing services down, no external access to home lab services
**Recovery Procedure**:
1. **Immediate Actions (0-15 minutes)**
- Verify VPS is actually down (ping, SSH, web checks)
- Contact VPS provider support
- Check VPS provider status page
- Notify users if necessary
2. **Short-term Mitigation (15-60 minutes)**
- If hardware failure, request provider rebuild
- If account issue, resolve with provider
- Consider spinning up temporary VPS with another provider
3. **VPS Rebuild (1-4 hours)**
```bash
# On new VPS:
# 1. Update system
sudo apt update && sudo apt upgrade -y
# 2. Install Pangolin
# [Installation commands]
# 3. Restore Pangolin configuration from backup
scp backup-server:/backups/pangolin-config.tar.gz .
sudo tar -xzf pangolin-config.tar.gz -C /
# 4. Install Gerbil server
# [Installation commands]
# 5. Restore Gerbil configuration
scp backup-server:/backups/gerbil-config.tar.gz .
sudo tar -xzf gerbil-config.tar.gz -C /
# 6. Restore SSL certificates
sudo tar -xzf ssl-certs-backup.tar.gz -C /etc/letsencrypt/
# 7. Configure firewall
sudo ufw allow 22/tcp
sudo ufw allow 80/tcp
sudo ufw allow 443/tcp
sudo ufw allow [GERBIL_PORT]/tcp
sudo ufw enable
# 8. Start services
sudo systemctl enable --now pangolin
sudo systemctl enable --now gerbil
# 9. Update DNS A record to new VPS IP
# [DNS provider steps]
# 10. Reconnect Gerbil tunnels from home lab
# [See Gerbil reconnection below]
```
4. **Verification**
- Test all public routes
- Verify Gerbil tunnels are connected
- Check SSL certificates are valid
- Monitor logs for errors
---
### Scenario 2: Home Lab Network Outage
**Impact**: All home lab services unreachable, Gerbil tunnels down
**Recovery Procedure**:
1. **Immediate Actions (0-15 minutes)**
- Check router/modem status
- Verify ISP is not having outage
- Check physical connections
- Reboot router/modem if necessary
2. **ISP Outage (Variable duration)**
- Contact ISP support
- Consider failover to mobile hotspot if critical
- Notify users of expected downtime
3. **Restore Gerbil Tunnels**
```bash
# On each home lab machine with tunnels:
# 1. Verify local services are running
systemctl status [service-name]
# 2. Test VPS connectivity
ping [VPS_IP]
# 3. Restart Gerbil tunnels
sudo systemctl restart gerbil-tunnel-*
# 4. Verify tunnels are connected
gerbil status
# 5. Check logs for errors
journalctl -u gerbil-tunnel-* -n 50
```
---
### Scenario 3: Proxmox Node Failure (DL380p or i5)
**Impact**: All VMs/containers on failed node are down
**Recovery Procedure**:
1. **Immediate Actions (0-30 minutes)**
- Identify which node has failed
- Determine cause (power, hardware, network)
- Check if other cluster nodes are healthy
2. **If Node Can Be Recovered**
```bash
# Try to boot node
# If successful, check cluster status:
pvecm status
# Check VM/Container status
qm list
pct list
# Start critical VMs/Containers
qm start VMID
pct start CTID
```
3. **If Node Cannot Be Recovered - Migrate Services**
```bash
# On working node:
# 1. Check available resources
pvesh get /nodes/NODE/status
# 2. Restore VMs from backup to working node
qmrestore /path/to/backup/vzdump-qemu-VMID.vma.zst NEW_VMID --storage local-lvm
# 3. Restore containers from backup
pct restore NEW_CTID /path/to/backup/vzdump-lxc-CTID.tar.zst --storage local-lvm
# 4. Start restored VMs/containers
qm start NEW_VMID
pct start NEW_CTID
# 5. Update internal DNS/documentation with new IPs if changed
```
4. **Resource Constraints**
- If insufficient resources on remaining node:
- Prioritize critical services only
- Consider scaling down VM resources temporarily
- Plan for hardware replacement/repair
---
### Scenario 4: Storage Node (OMV) Failure
**Impact**: Shared storage unavailable, backups inaccessible, data loss risk
**Recovery Procedure**:
1. **Immediate Actions (0-30 minutes)**
- Verify storage node is down
- Check if disks are healthy (if node boots)
- Identify affected services using shared storage
2. **If Disk Failure**
- Check RAID status (if configured)
- Replace failed disk
- Rebuild RAID array
- Restore from off-site backup if necessary
3. **If Complete Storage Loss**
```bash
# 1. Rebuild OMV on new hardware/disks
# [OMV installation]
# 2. Configure network shares
# [NFS/CIFS setup]
# 3. Restore data from off-site backup
rsync -avz backup-location:/backups/ /mnt/storage/
# 4. Remount shares on Proxmox nodes
# Update /etc/fstab on each node
mount -a
# 5. Verify Proxmox can access storage
pvesm status
```
---
### Scenario 5: DNS Provider Failure
**Impact**: Domain not resolving, all services unreachable by domain name
**Recovery Procedure**:
1. **Immediate Actions (0-15 minutes)**
- Check DNS provider status page
- Test DNS resolution: `nslookup domain.com`
- Verify it's provider issue, not configuration
2. **Short-term Mitigation (15-60 minutes)**
- Share direct IP addresses with users temporarily
- Set up temporary DNS using Cloudflare (free tier)
3. **Migrate to New DNS Provider**
```bash
# 1. Export zone file from old provider (if possible)
# 2. Create account with new DNS provider
# 3. Import zone file or manually create records:
# A record: domain.com -> VPS_IP
# A record: *.domain.com -> VPS_IP (if using wildcard)
# Other records as needed
# 4. Update nameservers at domain registrar
# (Propagation takes 24-48 hours)
# 5. Monitor DNS propagation
dig domain.com @8.8.8.8
```
---
### Scenario 6: Complete Data Center Loss (Home Lab)
**Impact**: All home lab infrastructure destroyed (fire, flood, etc.)
**Recovery Procedure**:
1. **Immediate Actions**
- Ensure safety of personnel
- Contact insurance provider
- Assess extent of damage
- Secure remaining equipment
2. **Short-term (Services that must continue)**
- Move critical services to VPS temporarily
- Use cloud providers for temporary hosting
- Restore from off-site backups
3. **Long-term (Infrastructure Rebuild)**
- Procure replacement hardware
- Rebuild Proxmox cluster
- Restore VMs/containers from off-site backups
- Reconfigure network
- Re-establish Gerbil tunnels
- Full testing and verification
---
## Recovery Procedures
### General Recovery Steps
1. **Assess the Situation**
- Identify what has failed
- Determine scope of impact
- Estimate recovery time
2. **Communicate**
- Notify affected users
- Update status page if available
- Keep stakeholders informed
3. **Prioritize**
- Focus on critical services first
- Use RTO/RPO objectives
- Document decisions
4. **Execute Recovery**
- Follow specific scenario procedures
- Document all actions taken
- Keep logs of commands executed
5. **Verify**
- Test all restored services
- Check data integrity
- Monitor for issues
6. **Document**
- Record what happened
- Document what worked/didn't work
- Update this document with lessons learned
---
## Post-Recovery Checklist
After any disaster recovery, complete the following:
### Immediate Post-Recovery (0-24 hours)
- [ ] All critical services are operational
- [ ] All services are monitored
- [ ] Temporary workarounds documented
- [ ] Incident logged with timeline
### Short-term (1-7 days)
- [ ] All services fully restored
- [ ] Performance is normal
- [ ] Backups are running
- [ ] Security review completed
- [ ] Post-mortem meeting scheduled
### Long-term (1-4 weeks)
- [ ] Post-mortem completed
- [ ] Lessons learned documented
- [ ] Disaster recovery plan updated
- [ ] Preventive measures implemented
- [ ] Training updated if needed
- [ ] Backup/monitoring improvements made
---
## Post-Mortem Template
After each disaster recovery event, complete a post-mortem:
**Incident Date**: _____________
**Recovery Completed**: _____________
**Total Downtime**: _____________
### What Happened?
[Detailed description of the incident]
### Timeline
| Time | Event |
|------|-------|
| _____ | _____ |
| _____ | _____ |
### Root Cause
[What caused the failure?]
### What Went Well?
-
-
### What Went Poorly?
-
-
### Action Items
| Action | Owner | Due Date | Status |
|--------|-------|----------|--------|
| _______ | _____ | ________ | ______ |
### Improvements to This Plan
[What should be updated in the disaster recovery plan?]
---
## Testing Schedule
Regular disaster recovery testing ensures procedures work when needed:
| Test Type | Frequency | Last Test | Next Test | Status |
|-----------|-----------|-----------|-----------|--------|
| Backup restore test | Quarterly | _________ | _________ | ______ |
| VPS failover drill | Semi-annually | _________ | _________ | ______ |
| Node failure simulation | Annually | _________ | _________ | ______ |
| Full DR scenario | Annually | _________ | _________ | ______ |
---
## Document Maintenance
**Last Updated**: _____________
**Updated By**: _____________
**Next Review Date**: _____________
**Version**: 1.0

View File

@@ -0,0 +1,269 @@
# DNS over TLS Configuration Guide
This guide covers setting up DNS over TLS (DoT) on your UCG Ultra gateway to encrypt DNS queries network-wide.
## What is DNS over TLS?
DNS over TLS encrypts DNS queries between your network and upstream DNS servers, preventing ISPs and other third parties from monitoring or logging your DNS lookups. Once configured on the UCG Ultra, all devices on your network automatically benefit without per-device configuration.
## Prerequisites
- UCG Ultra at 10.0.10.1
- Admin access to UniFi Network application
- Internet connectivity
## Configuration Steps
### 1. Access UCG Ultra Web Interface
```
URL: https://10.0.10.1
or access via UniFi Network application
```
### 2. Navigate to DNS Settings
1. Click **Settings** (gear icon)
2. Select **Internet**
3. Click on **WAN** (or **Primary WAN1**)
4. Scroll to **DNS Servers** section
### 3. Enable DNS over TLS
1. Toggle **DNS over TLS** to **ON**
2. Configure upstream DNS servers (see options below)
3. Click **Apply Changes**
## Recommended DNS Providers
### Option 1: Cloudflare (Recommended - Privacy-focused, fastest)
**DNS Servers:**
- Primary: `1.1.1.1`
- Secondary: `1.0.0.1`
**TLS Hostname:** `cloudflare-dns.com`
**Features:**
- Fastest global DNS resolver
- Privacy-focused (doesn't log queries)
- DNSSEC validation
- Malware blocking available (1.1.1.2/1.0.0.2)
- Family filtering available (1.1.1.3/1.0.0.3)
### Option 2: Quad9 (Security-focused)
**DNS Servers:**
- Primary: `9.9.9.9`
- Secondary: `149.112.112.112`
**TLS Hostname:** `dns.quad9.net`
**Features:**
- Blocks known malicious domains
- Privacy-focused (based in Switzerland)
- DNSSEC validation
- Threat intelligence from multiple sources
### Option 3: Google Public DNS
**DNS Servers:**
- Primary: `8.8.8.8`
- Secondary: `8.8.4.4`
**TLS Hostname:** `dns.google`
**Features:**
- High reliability and uptime
- Fast global network
- DNSSEC validation
- Note: Google logs queries for 24-48 hours
### Option 4: AdGuard DNS (Ad Blocking)
**DNS Servers (Default - Ad Blocking):**
- Primary: `94.140.14.14`
- Secondary: `94.140.15.15`
**TLS Hostname:** `dns.adguard-dns.com`
**Features:**
- Built-in ad and tracker blocking
- Family protection mode available
- No logging
- Free tier available
## Additional Recommended Settings
### Enable DNSSEC
While in DNS settings:
1. Enable **DNSSEC** (if available)
2. This cryptographically validates DNS responses to prevent spoofing
### Configure Local DNS Records
For easier access to infrastructure services:
**Settings → Networks → LAN → DHCP → Local DNS Records**
Add custom entries:
- `proxmox.home` → `10.0.10.3` (main-pve)
- `proxmox-router.home` → `10.0.10.2` (pve-router)
- `storage.home` → `10.0.10.5` (OpenMediaVault)
- `homeassistant.home` → `10.0.10.24` (Home Assistant)
- `esphome.home` → `10.0.10.28` (ESPHome)
- `auth.home` → `10.0.10.21` (Authentik - when deployed)
This allows you to access services via friendly names instead of IP addresses.
## Verification
### Test DNS over TLS is Working
**Method 1: Cloudflare Test (if using Cloudflare DNS)**
1. Visit: https://1.1.1.1/help
2. Look for "Using DNS over TLS (DoT)" - should show **Yes**
**Method 2: Command Line Test**
From any device on your network:
```bash
# Test DNS resolution
nslookup google.com
# Test DNSSEC validation — dnssec-failed.org is deliberately broken,
# so a validating resolver should return SERVFAIL (that means DNSSEC works)
dig @1.1.1.1 dnssec-failed.org
# Check if queries are encrypted (requires tcpdump)
sudo tcpdump -i any port 853
# Should show TLS traffic on port 853, not plaintext DNS on port 53
```
**Method 3: Check UCG Ultra Logs**
In UniFi Network application:
1. Navigate to **System Settings → Logs**
2. Filter for DNS-related events
3. Should see successful DoT connections
## Advanced: Local DNS Resolver Option
For even more control (ad blocking, custom filtering, detailed logging), consider deploying a local DNS resolver:
### Pi-hole or AdGuard Home
**Deployment:**
- LXC container on pve-router (10.0.10.2)
- IP: 10.0.10.26 (available in allocation plan)
- Resources: 1 CPU, 512MB-1GB RAM, 4GB storage
**Configuration:**
1. Deploy Pi-hole/AdGuard Home on 10.0.10.26
2. Configure it to use DoT upstream (Cloudflare, Quad9, etc.)
3. Point UCG Ultra DNS to 10.0.10.26
4. All network traffic → Pi-hole → DoT upstream
**Benefits:**
- Network-wide ad blocking
- Detailed query logging and statistics
- Custom blocklists and whitelists
- Local DNS record management
- DoT encryption for upstream queries
See `PIHOLE-SETUP.md` (future document) for deployment guide.
## Troubleshooting
### DNS Resolution Fails After Enabling DoT
**Symptoms:** Websites won't load, "DNS resolution failed" errors
**Solutions:**
1. Verify upstream DNS servers are correct
2. Check UCG Ultra has internet connectivity
3. Temporarily disable DoT to isolate issue
4. Try different DNS provider (Cloudflare vs Quad9)
5. Check firewall rules allow outbound port 853 (DoT)
### Slow DNS Resolution
**Possible Causes:**
- Upstream DNS server is slow/distant
- Network latency issues
- DNSSEC validation overhead
**Solutions:**
1. Try different DNS provider closer to your region
2. Use DNS benchmark tool: https://www.grc.com/dns/benchmark.htm
3. Check UCG Ultra CPU/memory usage
### Some Devices Can't Resolve DNS
**Check:**
1. Device is using UCG Ultra as DNS (10.0.10.1)
2. Device has valid DHCP lease
3. No hardcoded DNS servers on the device
4. Firewall rules aren't blocking DNS
## Security Considerations
### What DoT Protects Against
- ISP DNS query logging and selling data
- DNS query snooping on local network
- Man-in-the-middle DNS hijacking
### What DoT Does NOT Protect Against
- Website tracking (cookies, fingerprinting)
- ISP seeing which websites you visit (they see IP addresses)
- Malware/phishing (use Quad9 or filtering DNS for this)
### Additional Privacy Measures
- Use HTTPS everywhere (encrypted web traffic)
- Consider VPN for full traffic encryption
- Use privacy-focused browsers (Firefox, Brave)
- Enable tracking protection in browsers
## Maintenance
### Regular Checks
- **Monthly:** Verify DoT is still active (check test sites)
- **Quarterly:** Review DNS provider performance
- **As Needed:** Update local DNS records for new services
### When to Reconfigure
- Moving to new internet provider
- DNS provider changes policies
- Performance degrades
- Adding local DNS resolver (Pi-hole)
## Network-Wide Impact
Once configured, DNS over TLS benefits:
- All computers (Windows, Mac, Linux)
- Mobile devices (phones, tablets)
- IoT devices (smart home, cameras, etc.)
- Guest network devices
- All VLANs managed by UCG Ultra
**No per-device configuration needed.**
## Related Documentation
- `IP-ALLOCATION.md` - Network addressing plan
- `RUNBOOK.md` - General network troubleshooting
- `SERVICES.md` - Service configuration reference
- Future: `PIHOLE-SETUP.md` - Local DNS resolver deployment
## References
- [Cloudflare DNS over TLS](https://developers.cloudflare.com/1.1.1.1/encryption/dns-over-tls/)
- [Quad9 Documentation](https://www.quad9.net/support/faq/)
- [DNS over TLS RFC 7858](https://datatracker.ietf.org/doc/html/rfc7858)
- [UniFi Gateway Documentation](https://help.ui.com/hc/en-us/categories/200320654-UniFi-Gateway)
---
**Last Updated:** 2025-11-18
**Status:** Ready for deployment
**Priority:** Medium (privacy/security enhancement)

View File

@@ -0,0 +1,451 @@
# Infrastructure Improvement Recommendations
Based on the infrastructure audit checklist, this document outlines recommended improvements for security, reliability, and operational efficiency.
## Table of Contents
- [High Priority Improvements](#high-priority-improvements)
- [Security Enhancements](#security-enhancements)
- [Reliability & Availability](#reliability--availability)
- [Monitoring & Observability](#monitoring--observability)
- [Automation Opportunities](#automation-opportunities)
- [Documentation & Knowledge Management](#documentation--knowledge-management)
- [Capacity Planning](#capacity-planning)
- [Cost Optimization](#cost-optimization)
---
## High Priority Improvements
### 1. Implement Automated Backups
**Current State**: Manual or ad-hoc backups
**Target State**: Automated, scheduled backups with verification
**Action Items**:
- [ ] Set up automated Proxmox VM/Container backups (see `scripts/backup-proxmox.sh`)
- [ ] Configure automatic backup of VPS configurations
- [ ] Implement off-site backup sync (to cloud storage or remote location)
- [ ] Schedule regular backup restoration tests
- [ ] Set up backup monitoring and alerting
**Priority**: 🔴 Critical
**Estimated Effort**: 4-8 hours
**Benefits**: Data loss prevention, faster disaster recovery
---
### 2. SSL Certificate Auto-Renewal
**Current State**: Manual certificate management
**Target State**: Automated certificate renewal with monitoring
**Action Items**:
- [ ] Install and configure certbot with auto-renewal
- [ ] Set up certbot systemd timer: `systemctl enable certbot.timer`
- [ ] Configure renewal hooks to reload services
- [ ] Monitor certificate expiration dates
- [ ] Consider wildcard certificates to simplify management
**Priority**: 🔴 Critical
**Estimated Effort**: 2-4 hours
**Benefits**: Prevent service outages from expired certificates
**Implementation**:
```bash
# Enable auto-renewal
sudo systemctl enable certbot.timer
sudo systemctl start certbot.timer
# Test renewal
sudo certbot renew --dry-run
# Add renewal hook for Pangolin (include a shebang — certbot executes deploy hooks directly)
printf '#!/bin/sh\nsystemctl reload pangolin\n' | sudo tee /etc/letsencrypt/renewal-hooks/deploy/reload-pangolin.sh
sudo chmod +x /etc/letsencrypt/renewal-hooks/deploy/reload-pangolin.sh
```
---
### 3. Implement Basic Monitoring
**Current State**: No centralized monitoring
**Target State**: Uptime monitoring with alerts for critical services
**Action Items**:
- [ ] Deploy Uptime Kuma for service monitoring (lightweight, easy to set up)
- [ ] Configure health checks for all public services
- [ ] Set up alerting (email, SMS, or Slack)
- [ ] Monitor VPS resources (CPU, RAM, disk)
- [ ] Monitor Proxmox node resources
- [ ] Track Gerbil tunnel status
**Priority**: 🟠 High
**Estimated Effort**: 4-6 hours
**Benefits**: Early detection of issues, reduced downtime
See [MONITORING.md](MONITORING.md) for detailed setup instructions.
---
## Security Enhancements
### 4. Harden SSH Access
**Recommendations**:
- [ ] Disable password authentication (key-only)
- [ ] Change default SSH port on VPS
- [ ] Implement fail2ban for brute force protection
- [ ] Use SSH certificate authority for easier key management
- [ ] Enable 2FA for SSH (Google Authenticator)
**Implementation**:
```bash
# /etc/ssh/sshd_config
PasswordAuthentication no
PubkeyAuthentication yes
PermitRootLogin prohibit-password
Port 2222 # Non-standard port
# Install fail2ban
sudo apt install fail2ban
sudo systemctl enable fail2ban
```
**Priority**: 🟠 High
**Estimated Effort**: 2-3 hours
---
### 5. Implement Network Segmentation
**Current State**: Flat network
**Target State**: VLANs separating different service tiers
**Recommendations**:
- [ ] VLAN 10: Management (Proxmox, OMV admin interfaces)
- [ ] VLAN 20: Production Services
- [ ] VLAN 30: Development/Testing
- [ ] VLAN 40: IoT/Untrusted devices
- [ ] Configure firewall rules between VLANs
**Priority**: 🟡 Medium
**Estimated Effort**: 8-12 hours
**Benefits**: Improved security, network isolation, easier troubleshooting
---
### 6. Secrets Management
**Current State**: Credentials in config files or documentation
**Target State**: Centralized secrets management
**Recommendations**:
- [ ] Use environment variables for sensitive data
- [ ] Implement Bitwarden/Vaultwarden for password management
- [ ] Consider HashiCorp Vault for API keys and certificates
- [ ] Encrypt sensitive files with GPG or age
- [ ] Never commit secrets to git
**Priority**: 🟠 High
**Estimated Effort**: 4-6 hours
---
### 7. Regular Security Updates
**Recommendations**:
- [ ] Enable unattended-upgrades for security patches
- [ ] Schedule monthly maintenance windows for updates
- [ ] Subscribe to security mailing lists for critical software
- [ ] Implement vulnerability scanning
**Implementation**:
```bash
# Enable automatic security updates
sudo apt install unattended-upgrades
sudo dpkg-reconfigure --priority=low unattended-upgrades
```
**Priority**: 🟠 High
**Estimated Effort**: 2-3 hours
---
## Reliability & Availability
### 8. Implement High Availability for Critical Services
**Recommendations**:
- [ ] Run critical services on both Proxmox nodes
- [ ] Set up floating IP or load balancing
- [ ] Configure automatic failover
- [ ] Use Proxmox HA features for critical VMs
**Priority**: 🟡 Medium
**Estimated Effort**: 8-16 hours
---
### 9. Backup VPS Provider Relationship
**Recommendations**:
- [ ] Document procedures for spinning up with alternate VPS provider
- [ ] Keep configuration backups accessible outside primary VPS
- [ ] Test VPS migration annually
- [ ] Consider multi-region deployment for critical services
**Priority**: 🟡 Medium
**Estimated Effort**: 4-6 hours
---
### 10. UPS and Power Management
**Recommendations**:
- [ ] Install UPS on all Proxmox nodes
- [ ] Configure Network UPS Tools (NUT) for graceful shutdown
- [ ] Test power failure procedures
- [ ] Document power-on sequence after outage
**Priority**: 🟠 High (if not already implemented)
**Estimated Effort**: 3-4 hours (plus hardware cost)
---
## Monitoring & Observability
### 11. Comprehensive Monitoring Stack
**Recommendations**:
- [ ] Deploy Prometheus for metrics collection
- [ ] Set up Grafana for visualization
- [ ] Configure Loki for log aggregation
- [ ] Implement Alertmanager for alerting
- [ ] Create dashboards for key metrics
**Dashboards to Create**:
- VPS resource utilization
- Proxmox cluster overview
- Storage capacity trends
- Service uptime and response times
- Gerbil tunnel status
**Priority**: 🟡 Medium
**Estimated Effort**: 12-16 hours
**See**: [MONITORING.md](MONITORING.md)
---
### 12. Centralized Logging
**Recommendations**:
- [ ] Aggregate logs from all services to central location
- [ ] Implement log retention policies
- [ ] Set up log-based alerts for errors
- [ ] Create log analysis dashboards
**Priority**: 🟡 Medium
**Estimated Effort**: 6-8 hours
---
## Automation Opportunities
### 13. Infrastructure as Code
**Current State**: Manual configuration
**Target State**: Automated, version-controlled infrastructure
**Recommendations**:
- [ ] Document VPS setup as Ansible playbooks
- [ ] Use Terraform for DNS and cloud resources
- [ ] Create Proxmox VM templates with cloud-init
- [ ] Version control all automation
**Priority**: 🟡 Medium
**Estimated Effort**: 16-24 hours
**Benefits**: Reproducible infrastructure, faster recovery, documentation
---
### 14. Automated Health Checks
**Recommendations**:
- [ ] Create scheduled health check scripts (see `scripts/health-check.sh`)
- [ ] Automated service restart on failure
- [ ] Self-healing for common issues
- [ ] Integration with monitoring system
**Priority**: 🟡 Medium
**Estimated Effort**: 4-6 hours
---
### 15. Certificate Management Automation
**Recommendations**:
- [ ] Automate certificate deployment to all services
- [ ] Automated service reloads after certificate renewal
- [ ] Certificate expiration monitoring
- [ ] Automated DNS validation for wildcard certs
**Priority**: 🟠 High
**Estimated Effort**: 3-4 hours
---
## Documentation & Knowledge Management
### 16. Living Documentation
**Current State**: Basic documentation
**Target State**: Comprehensive, up-to-date documentation
**Action Items**:
- [x] Complete infrastructure audit checklist
- [x] Create RUNBOOK.md with operational procedures
- [x] Create DISASTER-RECOVERY.md
- [x] Create SERVICES.md
- [ ] Fill in all service details in SERVICES.md
- [ ] Document network topology diagram
- [ ] Create quick reference cards for common tasks
- [ ] Schedule quarterly documentation reviews
**Priority**: 🟠 High
**Estimated Effort**: Ongoing
---
### 17. Runbook Automation
**Recommendations**:
- [ ] Convert manual procedures to scripts where possible
- [ ] Create interactive troubleshooting guides
- [ ] Document lessons learned from incidents
- [ ] Share knowledge across team
**Priority**: 🟡 Medium
**Estimated Effort**: Ongoing
---
## Capacity Planning
### 18. Resource Monitoring and Trending
**Recommendations**:
- [ ] Track resource utilization over time
- [ ] Set up alerts for capacity thresholds (80%, 90%)
- [ ] Create capacity planning reports
- [ ] Plan for growth based on trends
**Metrics to Track**:
- CPU utilization per node
- RAM usage per node
- Storage growth rate (OMV)
- Network bandwidth utilization
- Number of VMs/containers
**Priority**: 🟡 Medium
**Estimated Effort**: 4-6 hours (plus ongoing)
---
### 19. Resource Right-Sizing
**Recommendations**:
- [ ] Review VM/container resource allocations
- [ ] Identify over-provisioned VMs
- [ ] Identify resource-constrained VMs
- [ ] Adjust allocations based on actual usage
**Priority**: 🟢 Low
**Estimated Effort**: 2-4 hours
---
## Cost Optimization
### 20. VPS Cost Review
**Recommendations**:
- [ ] Compare current VPS pricing with alternatives
- [ ] Consider reserved instances or annual billing
- [ ] Evaluate if all VPS resources are utilized
- [ ] Review bandwidth usage and overage costs
**Priority**: 🟢 Low
**Estimated Effort**: 2-3 hours
---
### 21. Power Consumption Optimization
**Recommendations**:
- [ ] Enable CPU power management features
- [ ] Schedule non-critical services for off-peak hours
- [ ] Consider shutting down development VMs overnight
- [ ] Monitor power consumption
**Priority**: 🟢 Low
**Estimated Effort**: 3-4 hours
---
## Implementation Roadmap
### Phase 1: Critical (Weeks 1-2)
1. Automated backups with off-site storage
2. SSL certificate auto-renewal
3. SSH hardening and fail2ban
4. Basic uptime monitoring
### Phase 2: High Priority (Weeks 3-6)
1. Comprehensive monitoring stack
2. Security updates automation
3. Secrets management
4. Documentation completion
5. Health check automation
### Phase 3: Medium Priority (Weeks 7-12)
1. Network segmentation with VLANs
2. High availability for critical services
3. Infrastructure as Code implementation
4. Centralized logging
5. Capacity planning processes
### Phase 4: Ongoing
1. Regular security audits
2. Documentation maintenance
3. Performance optimization
4. Cost reviews
5. DR testing
---
## Success Metrics
Track the following to measure improvement:
| Metric | Current | Target |
|--------|---------|--------|
| Mean Time To Recovery (MTTR) | _____ | < 1 hour |
| Backup success rate | _____ | 100% |
| Service uptime | _____ | 99.9% |
| Certificate renewal failures | _____ | 0 |
| Security patches applied within | _____ | 7 days |
| Unplanned outages per month | _____ | < 1 |
| Time to detect issues | _____ | < 5 minutes |
---
## Notes
- Prioritize improvements based on your specific needs and risk tolerance
- Review and update this document quarterly
- Track implementation progress
- Measure impact of improvements
**Last Updated**: _____________
**Next Review**: _____________
**Version**: 1.0

View File

@@ -0,0 +1,360 @@
# Infrastructure TODO List
**Created:** 2025-12-29
**Last Updated:** 2025-12-29
**Status:** Active development tasks
This document tracks all incomplete infrastructure tasks and future improvements.
---
## ✅ Completed Items
### 1. Fix Home Assistant Public Domain Access
**Status**: ✅ COMPLETED (2025-12-29)
**What was done**:
1. Updated Caddy to use HTTPS backend for Home Assistant
2. Added VPS WireGuard IP (10.0.8.1) to Home Assistant's trusted_proxies
3. Verified bob.nianticbooks.com is accessible
**Result**: All 5 public domains now working:
- ✅ freddesk.nianticbooks.com → Proxmox
- ✅ bob.nianticbooks.com → Home Assistant
- ✅ ad5m.nianticbooks.com → 3D Printer
- ✅ auth.nianticbooks.com → Authentik SSO
- ✅ bible.nianticbooks.com → Bible reading plan
### 2. Deploy RustDesk ID Server
**Status**: ✅ COMPLETED (2025-12-25)
**What was deployed**:
1. ID Server (hbbs) on main-pve LXC 123 at 10.0.10.23
2. Relay Server (hbbr) on VPS at 66.63.182.168:21117
3. Generated encryption key pair
4. Verified client connectivity
**Result**: RustDesk fully operational
- ✅ ID Server (hbbs): 10.0.10.23 ports 21115, 21116, 21118
- ✅ Relay Server (hbbr): VPS port 21117
- ✅ Public Key: `sfYuCTMHxrA22kukomb/RAKYyUgr8iaMfm/U4CFLfL0=`
- ✅ Client Configuration: ID Server `66.63.182.168`, Key included
- ✅ Version: 1.1.14 (both servers)
**Documentation**:
- SERVICES.md - Service inventory and health checks
- guides/RUSTDESK-DEPLOYMENT-COMPLETE.md - Complete deployment guide
---
## Medium Priority
### 3. Deploy Prometheus + Grafana Monitoring
**Status**: ✅ DISCOVERED - Already deployed (2025-12-29)
**Current State**:
- **Location**: 10.0.10.25 (responding to ping)
- **Grafana**: Port 3000 ✅ Running (redirects to /login)
- **Prometheus**: Port 9090 ✅ Running
- **Deployment Method**: TBD (need to investigate)
**Remaining Configuration Tasks**:
1. Document deployment method (Docker Compose, systemd, VM/Container type)
2. Configure PostgreSQL database on 10.0.10.20 for Grafana (if not already done)
3. Set up Authentik SSO for Grafana
4. Configure Prometheus monitoring targets:
- Proxmox nodes (via node_exporter)
- VPS (WireGuard tunnel metrics)
- PostgreSQL
- Home Assistant
- Other services
5. Import Grafana dashboards:
- Proxmox overview
- PostgreSQL metrics
- Network metrics
6. Set up alerting (email/Slack)
7. Optionally add Caddy public route
**Priority**: Low-Medium (services running, configuration needed)
**Note**: This was discovered during the infrastructure audit. The basic services are operational, but monitoring targets and dashboards need configuration.
---
## Low Priority (Cleanup)
### 4. Remove Deprecated VMs
**Objective**: Reclaim resources from unused services
**Status**: ⏸️ Deferred - Non-critical
#### 4.1 Remove Spoolman VM
**Current State**:
- IP: 10.0.10.71 (allocated but not in use)
- Reason: Bambu printer incompatible, service no longer needed
**Steps**:
1. Verify no dependencies: `pct/qm status <VMID>`
2. Backup if needed: `vzdump <VMID> --storage backup`
3. Stop VM/container: `pct stop <VMID>` or `qm stop <VMID>`
4. Delete: `pct destroy <VMID>` or `qm destroy <VMID>`
5. Remove Pangolin route (if exists)
6. Update IP-ALLOCATION.md to mark 10.0.10.71 as available
7. Update documentation
**Priority**: Low
**Estimated Time**: 15 minutes
#### 4.2 Remove Authelia VM
**Current State**:
- IP: 10.0.10.112 (allocated but not in use)
- Reason: Replaced by Authentik SSO
**Steps**:
1. Verify Authentik is working for all services
2. Backup Authelia config for reference (if needed)
3. Stop VM/container: `pct stop <VMID>` or `qm stop <VMID>`
4. Delete: `pct destroy <VMID>` or `qm destroy <VMID>`
5. Update IP-ALLOCATION.md to mark 10.0.10.112 as available (or remove from list)
6. Update documentation
**Priority**: Low
**Estimated Time**: 15 minutes
---
## Future Enhancements
### 5. n8n + Claude Code Advanced Features
**Objective**: Enhance n8n and Claude Code integration
**Status**: ✅ Basic integration working, advanced features optional
**Remaining Optional Tasks** (from MIGRATION-CHECKLIST.md 6.4):
- [ ] Session management workflow (UUID generation, multi-turn conversations)
- [ ] Slack integration (Slack → n8n → Claude Code → Slack)
- [ ] Tool deployment with `--dangerously-skip-permissions` flag
- [ ] Error handling (network disconnect, invalid commands)
- [ ] Resource monitoring during heavy Claude operations
- [ ] Production hardening:
- SSH timeout configuration
- Output length limits
- Logging for Claude executions
- Error notifications
- Optional Caddy route for public n8n access (with Authentik SSO)
**Reference**:
- MIGRATION-CHECKLIST.md section 6.4
- N8N-CLAUDE-STATUS.md
**Priority**: Low (nice-to-have, basic functionality working)
**Estimated Time**: 2-4 hours for each feature
---
### 6. Home Assistant Enhancements
#### 6.1 Configure Local HTTPS Certificates
**Objective**: Use local CA certificates for internal HTTPS access
**Status**: ⏸️ Deferred (CA setup complete, deployment pending)
**Details**:
- CA already set up (HTTPS-SETUP-STATUS.md from 2025-12-06)
- Certificates generated for services
- Need to deploy certificates to Home Assistant and other services
**Steps** (from HTTPS-SETUP-STATUS.md):
1. Copy certificates to Home Assistant:
```bash
scp ~/certs/bob.crt ~/certs/bob.key root@10.0.10.24:/config/ssl/
```
2. Update Home Assistant configuration:
```yaml
http:
ssl_certificate: /config/ssl/bob.crt
ssl_key: /config/ssl/bob.key
server_port: 8123
```
3. Restart Home Assistant
4. Trust CA on client devices
**Note**: Current setup uses local CA certificate. Public domain uses Caddy with Let's Encrypt.
**Priority**: Low (HTTPS already working with local CA cert)
**Estimated Time**: 30 minutes
#### 6.2 Integrate More Services with Authentik SSO
**Objective**: Single sign-on for additional services
**Status**: 📋 Planned
**Completed**:
- ✅ Proxmox (all 3 hosts)
- ✅ Grafana (OAuth2 configured)
**Not Possible**:
- ❌ n8n (requires Enterprise license for OIDC/SSO)
**Pending**:
- [ ] Home Assistant (complex - requires proxy provider or LDAP)
- [ ] Other services as they're deployed
**Priority**: Low (manual login acceptable for now)
**Estimated Time**: 1-2 hours per service
---
### 7. Backup Strategy Completion
**Objective**: Implement full 3-tier backup system
**Status**: ✅ Tier 1 complete, Tier 2-3 planned
**Current State** (from CLAUDE.md):
- ✅ Tier 1 (Local/OMV NFS): Fully operational
- PostgreSQL backups: Daily 2:00 AM
- Proxmox VM/container backups: Daily 2:30 AM
- Retention: 7 days daily, 4 weeks weekly, 3 months monthly
**Remaining Tiers**:
- [ ] Tier 2: Off-site external drives (manual rotation)
- [ ] Tier 3: Backblaze B2 cloud storage (automated)
**Reference**:
- guides/HOMELAB-BACKUP-STRATEGY.md
- guides/BACKUP-QUICK-START.md
**Priority**: Medium (Tier 1 provides good protection, Tier 2-3 for disaster recovery)
**Estimated Time**: 2-4 hours for Tier 3 cloud setup
---
### 8. Monitoring & Alerting
**Objective**: Proactive monitoring of infrastructure health
**Status**: 📋 Planned (prerequisite: Prometheus + Grafana deployment)
**Components**:
- [ ] Service uptime monitoring
- [ ] Resource utilization (CPU, RAM, disk)
- [ ] Network connectivity (WireGuard tunnel status)
- [ ] Backup success/failure alerts
- [ ] Certificate expiration warnings
- [ ] Disk space alerts (OMV storage)
**Alerting Methods**:
- Email
- Slack/Discord webhook
- Home Assistant notifications
**Priority**: Medium (blocked by Prometheus deployment)
**Estimated Time**: 2-3 hours (after Prometheus is deployed)
---
### 9. Cleanup and Archive Old Documentation
**Objective**: Remove or archive outdated status documents
**Status**: 📋 Pending
**Files to Archive or Update**:
1. **wireguard-setup-progress.md**
- Status: Outdated (from November 2025)
- Contains old troubleshooting info that's no longer relevant
- WireGuard now operational (verified 2025-12-29)
- Action: Archive to `docs/archive/` or delete
2. **HTTPS-SETUP-STATUS.md**
- Status: Partially outdated (from December 6, 2025)
- CA setup complete, but local cert deployment not done
- Services using Caddy with Let's Encrypt for public access
- Action: Archive or update with current HTTPS status
3. **N8N-CLAUDE-STATUS.md**
- Status: Partially outdated
- Basic integration complete
- Many "TODO" items that are now optional
- Action: Archive or consolidate into SERVICES.md
**Priority**: Low
**Estimated Time**: 30 minutes
---
## Documentation Maintenance
### 10. Keep Documentation Updated
**Objective**: Maintain accurate infrastructure documentation
**Regular Tasks**:
- [ ] Update SERVICES.md when services change
- [ ] Update IP-ALLOCATION.md for new devices
- [ ] Update MIGRATION-CHECKLIST.md for completed phases
- [ ] Update INFRASTRUCTURE-TODO.md (this file) as tasks are completed
- [ ] Update CLAUDE.md when architecture changes
**Frequency**: As changes occur
**Priority**: Ongoing
---
## Quick Reference: IP Addresses Still Available
### Reserved but Unused (Available for new services):
- 10.0.10.6-9 (infrastructure expansion)
- 10.0.10.11-12, 10.0.10.14, 10.0.10.16-19 (management; 10.0.10.15 is in use by Step-CA)
- ~~10.0.10.23~~ — now IN USE (RustDesk ID Server, see completed item 2)
- ~~10.0.10.25~~ — now IN USE (Prometheus + Grafana, see item 3)
- 10.0.10.26 (production services)
- 10.0.10.28 (was ESPHome - now runs as HA add-on, IP available)
- 10.0.10.31-39 (IoT devices)
- 10.0.10.41-49 (utility services)
### To Be Reclaimed (after cleanup):
- 10.0.10.71 (Spoolman - to be removed)
- 10.0.10.112 (Authelia - to be removed)
---
## Notes
- All critical infrastructure is operational (verified 2025-12-29)
- WireGuard tunnel stable and functional
- Public domains working (except Home Assistant HTTPS backend)
- PostgreSQL shared database serving multiple services
- Authentik SSO integrated with Proxmox cluster
- Automated backups operational (Tier 1 local/NFS)
**Next High-Value Tasks**:
1. ~~Fix Home Assistant public domain~~ - COMPLETED
2. ~~Discover/Document Prometheus + Grafana~~ - COMPLETED
3. ~~Discover/Document RustDesk~~ - COMPLETED
4. Configure Prometheus monitoring targets and Grafana dashboards
5. Cleanup deprecated VMs (Spoolman, Authelia)
---
**Last Updated**: 2025-12-29
**Updated By**: Fred (with Claude Code)

View File

@@ -0,0 +1,262 @@
# Network IP Allocation Plan
**Last Updated:** 2026-01-18
**Status:** Active - Source of Truth
**Network:** 10.0.10.0/24
**Gateway:** 10.0.10.1 (UCG Ultra)
---
## IP Range Allocation
| Range | Purpose | Count | Method |
|-------|---------|-------|--------|
| 10.0.10.1-9 | **Core Infrastructure** | 9 | Static on device |
| 10.0.10.10-19 | **Management & Remote Access** | 10 | Static on device |
| 10.0.10.20-29 | **Production Services** | 10 | Static on device |
| 10.0.10.30-39 | **IoT & 3D Printing** | 10 | Static/Reserved |
| 10.0.10.40-49 | **Utility Services & Gaming** | 10 | Static on device |
| 10.0.10.50-254 | **DHCP Pool** | 205 | Dynamic |
**Note:** IPs 10.0.10.1-49 use static configuration on devices, NOT DHCP reservations on UCG Ultra.
---
## Detailed IP Assignments
### Core Infrastructure (10.0.10.1-9)
| IP | Hostname | Device/Service | Location | CT/VM ID | Status |
|----|----------|----------------|----------|----------|--------|
| 10.0.10.1 | ucg-ultra | UCG Ultra Gateway | - | - | Active |
| 10.0.10.2 | pve-router | i5 Proxmox Node (8c/8GB) | Office | Host | Active |
| 10.0.10.3 | main-pve | DL380p Proxmox (32c/96GB) | Remote | Host | Active |
| 10.0.10.4 | pve-storage | Proxmox Host for OMV | - | Host | Active |
| 10.0.10.5 | omv | OpenMediaVault (12TB) | pve-storage | VM 400 | Active |
| 10.0.10.6 | - | AVAILABLE | - | - | - |
| 10.0.10.7 | - | AVAILABLE | - | - | - |
| 10.0.10.8 | - | AVAILABLE | - | - | - |
| 10.0.10.9 | - | AVAILABLE | - | - | - |
### Management & Remote Access (10.0.10.10-19)
| IP | Hostname | Device/Service | Location | CT/VM ID | Status |
|----|----------|----------------|----------|----------|--------|
| 10.0.10.10 | homelab-command | Gaming PC (RTX 5060, Wyoming, Ollama) | Office | Physical | Active |
| 10.0.10.11 | freds-imac | Fred's iMac (Late 2013, 3.2GHz i5, 24GB RAM, OpenClaw Desktop, user: fredi5) - Ethernet | Office | Physical | Configured |
| 10.0.10.12 | - | AVAILABLE | - | - | - |
| 10.0.10.13 | ilo | HP iLO (DL380p Management) | Remote | Physical | Active |
| 10.0.10.14 | - | AVAILABLE | - | - | - |
| 10.0.10.15 | ca-server | Step-CA Certificate Authority | main-pve | CT 115 | Active |
| 10.0.10.16 | - | AVAILABLE | - | - | - |
| 10.0.10.17 | - | AVAILABLE | - | - | - |
| 10.0.10.18 | - | AVAILABLE | - | - | - |
| 10.0.10.19 | - | AVAILABLE | - | - | - |
**Note on Fred's iMac:**
- **Ethernet (en0)**: 10.0.10.11 (Static) - MAC: ac:87:a3:2b:43:62 - **Status: Configured, cable not connected**
- **Wi-Fi (en1)**: 10.0.10.144 (DHCP) - MAC: b8:09:8a:ca:6c:53 - **Status: Active**
- When Ethernet cable is connected, both interfaces will be active simultaneously
- OpenClaw Desktop client accessible via either IP
### Production Services (10.0.10.20-29)
| IP | Hostname | Service | Location | CT/VM ID | Status |
|----|----------|---------|----------|----------|--------|
| 10.0.10.20 | postgresql | PostgreSQL (Shared DB) | main-pve | CT 102 | Active |
| 10.0.10.21 | authentik | Authentik SSO | main-pve | CT 121 | Active |
| 10.0.10.22 | n8n | n8n Workflow Automation | main-pve | CT 106 | Active |
| 10.0.10.23 | rustdesk | RustDesk ID Server (hbbs) | main-pve | CT 123 | Active |
| 10.0.10.24 | homeassistant | Home Assistant OS | pve-router | VM 104 | Active |
| 10.0.10.25 | prometheus | Prometheus + Grafana | main-pve | CT 125 | Active |
| 10.0.10.26 | uptime-kuma | Uptime Kuma Monitoring | main-pve | CT 128 | Active |
| 10.0.10.27 | dockge | Dockge + Media Stack (Sonarr, Radarr, Prowlarr, Bazarr, Deluge, Calibre-Web) + Vikunja (deprecated) + Dashboard + Caddy Internal Proxy | main-pve | CT 127 | Active |
| 10.0.10.28 | openclaw | OpenClaw Gateway (Multi-Agent AI Coordinator) - Port 18789 | main-pve | CT 130 | Active |
| 10.0.10.29 | - | AVAILABLE | - | - | - |
### IoT & 3D Printing (10.0.10.30-39)
| IP | Hostname | Device | MAC Address | Status |
|----|----------|--------|-------------|--------|
| 10.0.10.30 | ad5m | Flashforge AD5M 3D Printer | 88:a9:a7:99:c3:64 | Active |
| 10.0.10.31 | bambu-a1 | Bambu Lab A1 3D Printer | cc:ba:97:21:4c:f8 | Active |
| 10.0.10.32 | - | AVAILABLE | - | - |
| 10.0.10.33 | - | AVAILABLE | - | - |
| 10.0.10.34 | - | AVAILABLE | - | - |
| 10.0.10.35 | vehicle-tracker | Vehicle Maintenance Tracker (FastAPI) - CT 135 main-pve | - | Planned |
| 10.0.10.36 | - | AVAILABLE | - | - |
| 10.0.10.37 | - | AVAILABLE | - | - |
| 10.0.10.38 | - | AVAILABLE | - | - |
| 10.0.10.39 | - | AVAILABLE | - | - |
### Utility Services & Gaming (10.0.10.40-49)
| IP | Hostname | Service | Location | CT/VM ID | Status |
|----|----------|---------|----------|----------|--------|
| 10.0.10.40 | bar-assistant | Cocktail Recipe Manager | main-pve | CT 103 | Active |
| 10.0.10.41 | minecraft-forge | Minecraft Forge (CFMRPGU) | main-pve | CT 131 | Active |
| 10.0.10.42 | minecraft-stoneblock4 | Minecraft Stoneblock 4 | main-pve | CT 132 | Active |
| 10.0.10.43 | - | AVAILABLE | - | - | - |
| 10.0.10.44 | - | AVAILABLE | - | - | - |
| 10.0.10.45 | pterodactyl-panel | Pterodactyl Game Panel | main-pve | CT 105 | Active |
| 10.0.10.46 | pterodactyl-wings | Pterodactyl Wings (Node) | main-pve | CT 107 | Active |
| 10.0.10.47 | - | AVAILABLE | - | - | - |
| 10.0.10.48 | - | AVAILABLE | - | - | - |
| 10.0.10.49 | - | AVAILABLE | - | - | - |
---
## DHCP Pool Devices (10.0.10.50-254)
These devices receive dynamic IPs from UCG Ultra DHCP. Some have DHCP reservations.
### Fixed DHCP Reservations (on UCG Ultra)
| IP | Hostname | Device | MAC Address | DNS Record |
|----|----------|--------|-------------|------------|
| 10.0.10.179 | twingate-connector | Twingate Zero-Trust | bc:24:11:26:54:60 | - |
| 10.0.10.204 | cutter | Cutter iMac | 7c:c3:a1:af:d6:93 | cutter.nianticbooks.home |
### Known Dynamic Devices (as of 2026-01-13)
**Computers & Workstations:**
| IP | Hostname | Device | MAC Address |
|----|----------|--------|-------------|
| .105 | Freds-Mac-Pro | Jill's MacPro | 80:00:6e:f2:13:52 |
| .116 | HP8610 | HP Printer | 6c:c2:17:53:4e:f8 |
| .144 | Freds-iMac-WiFi | Fred's iMac Wi-Fi (Late 2013, 3.2GHz i5, 24GB RAM, OpenClaw Desktop, macOS Sequoia, user: fredi5) | b8:09:8a:ca:6c:53 |
| .156 | KobePC | Kobe's PC | 64:5d:86:15:de:20 |
| .157 | TP15 | ThinkPad 15 | 78:20:51:f6:9d:d0 |
| .162 | TP25 | ThinkPad 25 | b0:19:21:df:79:30 |
| .213 | Kevin-PC | Kevin's PC | a0:ad:9f:30:8c:af |
**Smart Home & IoT:**
| IP | Device | MAC Address |
|----|--------|-------------|
| .62 | SolarEdge SE7K Inverter | 84:d6:c5:4a:70:32 |
| .170 | TY_WR (Tuya Device) | 68:57:2d:b4:dd:25 |
| .185 | GoveeLife Tower Fan | 98:17:3c:90:5e:aa |
| .190 | Ecobee Thermostat | 44:61:32:90:e0:a3 |
| .154 | Blink XT Camera | ac:41:6a:69:3a:8e |
| .176 | Blink Sync Module 2 | e8:4c:4a:12:03:32 |
| .189 | Sony PlayStation 5 | 70:66:2a:b2:3f:ec |
| .235 | Jill's Monitor | a8:2c:3e:bc:e2:bf |
**Mesh WiFi (eero):**
| IP | Device | MAC Address |
|----|--------|-------------|
| .101 | eero node | 64:da:ed:29:12:ad |
| .216 | eero node | 64:da:ed:29:2e:8d |
| .227 | eero node | 64:da:ed:1c:b5:6d |
**ESP/Raspberry Pi Devices:**
| IP | Hostname | MAC Address | Purpose |
|----|----------|-------------|---------|
| .81 | wlan0 | 70:89:76:ba:0f:d4 | Unknown Pi |
| .90 | ESP_C1DDAA | 84:f3:eb:c1:dd:aa | ESPHome device |
| .171 | raspberrypi | b8:27:eb:a9:03:66 | Unknown |
| .207 | esphome-web-055c68 | 6c:c8:40:05:5c:68 | ESPHome device |
| .246 | raspberrypi | b8:27:eb:fc:56:33 | Unknown |
**Mobile Devices:** Various iPhones, iPads, Watches in DHCP pool (transient)
---
## External Infrastructure
### VPS (Hudson Valley Host)
| IP | Hostname | Service |
|----|----------|---------|
| 66.63.182.168 | vps.nianticbooks.com | Caddy Reverse Proxy |
### Gaming VPS (deadeyeg4ming.vip)
| IP | Hostname | Service |
|----|----------|---------|
| 51.222.12.162 | deadeyeg4ming.vip | WireGuard Server (unlimited bandwidth) |
### WireGuard Tunnel (10.0.9.0/24)
| IP | Endpoint | Role |
|----|----------|------|
| 10.0.9.1 | Gaming VPS | WireGuard Server |
| 10.0.9.2 | UCG Ultra | WireGuard Client |
| 10.0.9.3 | VPS Proxy | Internal proxy IP (used by Caddy) |
---
## Public Domain Routes (via Caddy on VPS)
| Domain | Backend | Status |
|--------|---------|--------|
| freddesk.nianticbooks.com | 10.0.10.3:8006 | Active |
| ad5m.nianticbooks.com | 10.0.10.30:80 | Active |
| bob.nianticbooks.com | 10.0.10.24:8123 | Active |
| auth.nianticbooks.com | 10.0.10.21:9000 | Active |
| cocktails.nianticbooks.com | 10.0.10.40 | Active |
| tasks.nianticbooks.com | 10.0.10.27:3456 | Active (Vikunja - no longer actively used) |
## Internal HTTPS Routes (via Caddy Internal Proxy on CT 127)
| Domain | Backend | Purpose | Certificate |
|--------|---------|---------|-------------|
| sonarr.nianticbooks.home | 10.0.10.27:8989 | TV automation | Caddy Internal PKI |
| radarr.nianticbooks.home | 10.0.10.27:7878 | Movie automation | Caddy Internal PKI |
| prowlarr.nianticbooks.home | 10.0.10.27:9696 | Indexer manager | Caddy Internal PKI |
| bazarr.nianticbooks.home | 10.0.10.27:6767 | Subtitle automation | Caddy Internal PKI |
| deluge.nianticbooks.home | 10.0.10.27:8112 | BitTorrent client | Caddy Internal PKI |
| calibre.nianticbooks.home | 10.0.10.27:8083 | eBook library | Caddy Internal PKI |
| vikunja.nianticbooks.home | 10.0.10.27:3456 | Task management (deprecated) | Caddy Internal PKI |
| dockge.nianticbooks.home | 10.0.10.27:5001 | Docker stack mgmt | Caddy Internal PKI |
---
## Container/VM Quick Reference
### main-pve (10.0.10.3)
| CT ID | Name | IP |
|-------|------|-----|
| 102 | postgresql | 10.0.10.20 |
| 103 | bar-assistant | 10.0.10.40 |
| 105 | pterodactyl-panel | 10.0.10.45 |
| 106 | n8n | 10.0.10.22 |
| 107 | pterodactyl-wings | 10.0.10.46 |
| 115 | ca-server | 10.0.10.15 |
| 121 | authentik | 10.0.10.21 |
| 123 | rustdesk | 10.0.10.23 |
| 125 | prometheus | 10.0.10.25 |
| 127 | dockge | 10.0.10.27 |
| 128 | uptime-kuma | 10.0.10.26 |
| 130 | openclaw | 10.0.10.28 |
| 131 | minecraft-forge | 10.0.10.41 |
| 132 | minecraft-stoneblock4 | 10.0.10.42 |
| 135 | vehicle-tracker | 10.0.10.35 |
### pve-router (10.0.10.2)
| ID | Name | IP |
|----|------|-----|
| VM 104 | haos16.2 (Home Assistant) | 10.0.10.24 |
| CT 101 | twingate-connector | 10.0.10.179 |
### pve-storage (10.0.10.4)
| ID | Name | IP |
|----|------|-----|
| VM 400 | OMV | 10.0.10.5 |
---
## Deprecated/Removed
| Date | Item | Reason |
|------|------|--------|
| 2026-01-13 | CT 100 pve-scripts-local | Unused experiment, caused IP conflict with bar-assistant |
| - | 10.0.10.71 spoolman | Bambu printer incompatible |
| - | 10.0.10.112 authelia | Failed experiment |
---
## Audit History
| Date | Action | Notes |
|------|--------|-------|
| 2026-01-13 | Full network audit | Compared UCG DHCP export vs documentation, verified all running services |
| 2026-01-13 | Removed CT 100 | pve-scripts-local on pve-router - IP conflict resolved |
| 2025-12-29 | Initial audit | Infrastructure audit template completed |

View File

@@ -0,0 +1,460 @@
# Local Certificate Authority Setup
This document describes the setup of a local Certificate Authority (CA) for issuing HTTPS certificates to internal services.
## Overview
**Problem:** Internal services (Home Assistant, AD5M printer, Dockge, etc.) use HTTP, causing:
- Browser security warnings
- Some services refuse to work without HTTPS
- Insecure credential transmission
**Solution:** Local CA using `step-ca` that issues trusted certificates for `.nianticbooks.home` domain.
## Architecture
### CA Server Location
- **Host:** main-pve (10.0.10.3) - most reliable server
- **Container:** LXC container for isolation
- **IP:** 10.0.10.15 (reserved for ca-server)
- **Domain:** ca.nianticbooks.home
### Services Requiring Certificates
| Service | Current IP | Domain | Port |
|---------|-----------|---------|------|
| Home Assistant | 10.0.10.24 | bob.nianticbooks.home | 8123 |
| AD5M Printer | 10.0.10.30 | ad5m.nianticbooks.home | 80 |
| Dockge | 10.0.10.27 | dockge.nianticbooks.home | 5001 |
| Proxmox (main-pve) | 10.0.10.3 | freddesk.nianticbooks.home | 8006 |
| Proxmox (pve-router) | 10.0.10.2 | pve-router.nianticbooks.home | 8006 |
| OMV | 10.0.10.5 | omv.nianticbooks.home | 80 |
## Implementation Plan
### Phase 1: CA Server Setup
1. Create LXC container on main-pve
2. Install step-ca
3. Initialize CA with:
- CA name: "Homelab Internal CA"
- Domain: nianticbooks.home
- Validity: 10 years (root), 1 year (leaf certificates)
### Phase 2: Certificate Generation
For each service, generate:
- Server certificate
- Private key
- Auto-renewal script
### Phase 3: Service Configuration
Configure each service to:
- Use generated certificate
- Listen on HTTPS port
- Optionally redirect HTTP → HTTPS
### Phase 4: Client Trust
Distribute root CA certificate to:
- Linux clients (this computer, servers)
- Windows clients
- macOS clients
- Mobile devices (iOS/Android)
## Step-by-Step Implementation
### 1. Create CA Server Container
```bash
# On main-pve (10.0.10.3)
pct create 115 local:vztmpl/ubuntu-22.04-standard_22.04-1_amd64.tar.zst \
--hostname ca-server \
--cores 1 \
--memory 512 \
--swap 512 \
--storage local-lvm \
--rootfs 8 \
--net0 name=eth0,bridge=vmbr0,ip=10.0.10.15/24,gw=10.0.10.1 \
--unprivileged 1 \
--features nesting=1 \
--onboot 1
# Start container
pct start 115
# Enter container
pct enter 115
```
### 2. Install step-ca
```bash
# Inside container
apt update && apt install -y wget
# Download step-ca
wget https://dl.smallstep.com/gh-release/cli/gh-release-header/v0.25.0/step-cli_0.25.0_amd64.deb
wget https://dl.smallstep.com/gh-release/certificates/gh-release-header/v0.25.0/step-ca_0.25.0_amd64.deb
# Install
dpkg -i step-cli_0.25.0_amd64.deb step-ca_0.25.0_amd64.deb
# Verify
step version
step-ca version
```
### 3. Initialize CA
```bash
# Create CA user
useradd -r -s /bin/bash -m -d /etc/step-ca step
# Initialize CA (as step user)
su - step
step ca init
# Configuration prompts:
# Name: Homelab Internal CA
# DNS: ca.nianticbooks.home
# Address: :8443   (match the deployed Step-CA port — STEPCA_PORT and the ACME URL use 8443)
# Provisioner: admin@nianticbooks.home
# Password: [generate strong password, save to password manager]
```
### 4. Configure CA Service
```bash
# Create systemd service
cat > /etc/systemd/system/step-ca.service <<'EOF'
[Unit]
Description=step-ca Certificate Authority
After=network.target
[Service]
Type=simple
User=step
Group=step
WorkingDirectory=/etc/step-ca
ExecStart=/usr/bin/step-ca /etc/step-ca/config/ca.json --password-file=/etc/step-ca/password.txt
Restart=on-failure
RestartSec=10
[Install]
WantedBy=multi-user.target
EOF
# Store CA password securely
echo "YOUR_CA_PASSWORD" > /etc/step-ca/password.txt
chown step:step /etc/step-ca/password.txt
chmod 600 /etc/step-ca/password.txt
# Enable and start
systemctl daemon-reload
systemctl enable step-ca
systemctl start step-ca
systemctl status step-ca
```
### 5. Issue Certificates for Services
#### Home Assistant (bob.nianticbooks.home)
```bash
# Generate certificate
step certificate create bob.nianticbooks.home \
bob.crt bob.key \
--profile leaf \
--not-after 8760h \
--ca /etc/step-ca/certs/intermediate_ca.crt \
--ca-key /etc/step-ca/secrets/intermediate_ca_key \
--bundle
# Copy to Home Assistant
scp bob.crt bob.key homeassistant.local:/ssl/
```
Configure Home Assistant (`configuration.yaml`):
```yaml
http:
ssl_certificate: /ssl/bob.crt
ssl_key: /ssl/bob.key
server_port: 8123
```
#### AD5M Printer (Fluidd/Klipper)
```bash
step certificate create ad5m.nianticbooks.home \
ad5m.crt ad5m.key \
--profile leaf \
--not-after 8760h \
--ca /etc/step-ca/certs/intermediate_ca.crt \
--ca-key /etc/step-ca/secrets/intermediate_ca_key \
--bundle
# Copy to printer
scp ad5m.crt ad5m.key root@10.0.10.30:/etc/nginx/ssl/
```
Configure nginx for Fluidd:
```nginx
server {
listen 443 ssl;
server_name ad5m.nianticbooks.home;
ssl_certificate /etc/nginx/ssl/ad5m.crt;
ssl_certificate_key /etc/nginx/ssl/ad5m.key;
location / {
proxy_pass http://localhost:80;
}
}
```
#### Dockge
```bash
step certificate create dockge.nianticbooks.home \
dockge.crt dockge.key \
--profile leaf \
--not-after 8760h \
--ca /etc/step-ca/certs/intermediate_ca.crt \
--ca-key /etc/step-ca/secrets/intermediate_ca_key \
--bundle
# Copy to Dockge host
```
#### Proxmox
```bash
# Main PVE
step certificate create freddesk.nianticbooks.home \
freddesk.crt freddesk.key \
--profile leaf \
--not-after 8760h \
--ca /etc/step-ca/certs/intermediate_ca.crt \
--ca-key /etc/step-ca/secrets/intermediate_ca_key \
--bundle
# Copy to Proxmox
scp freddesk.crt root@10.0.10.3:/etc/pve/local/pveproxy-ssl.pem
scp freddesk.key root@10.0.10.3:/etc/pve/local/pveproxy-ssl.key
# Restart Proxmox web service
ssh root@10.0.10.3 "systemctl restart pveproxy"
```
### 6. Certificate Auto-Renewal
Create renewal script on CA server:
```bash
cat > /usr/local/bin/renew-certs.sh <<'EOF'
#!/bin/bash
# Auto-renew certificates for homelab services
SERVICES=(
"bob.nianticbooks.home:10.0.10.24:/ssl/"
"ad5m.nianticbooks.home:10.0.10.30:/etc/nginx/ssl/"
"dockge.nianticbooks.home:10.0.10.27:/etc/ssl/"
"freddesk.nianticbooks.home:10.0.10.3:/etc/pve/local/"
)
for service in "${SERVICES[@]}"; do
IFS=':' read -r domain ip path <<< "$service"
echo "Renewing certificate for $domain..."
# Check if certificate expires in < 30 days
if step certificate needs-renewal "${domain}.crt"; then
# Generate new certificate
step certificate create "$domain" \
"${domain}.crt" "${domain}.key" \
--profile leaf \
--not-after 8760h \
--ca /etc/step-ca/certs/intermediate_ca.crt \
--ca-key /etc/step-ca/secrets/intermediate_ca_key \
--bundle --force
# Deploy to service
scp "${domain}.crt" "${domain}.key" "root@${ip}:${path}"
# Restart service
case "$domain" in
bob.*)
ssh root@${ip} "systemctl restart home-assistant@homeassistant"
;;
ad5m.*)
ssh root@${ip} "systemctl restart nginx"
;;
freddesk.*)
ssh root@${ip} "systemctl restart pveproxy"
;;
esac
echo "✓ Renewed $domain"
fi
done
EOF
chmod +x /usr/local/bin/renew-certs.sh
# Add to system crontab (weekly check) — /etc/crontab entries require a user field
echo "0 2 * * 0 root /usr/local/bin/renew-certs.sh" >> /etc/crontab
```
### 7. Client Trust Configuration
#### Linux (Ubuntu/Debian)
```bash
# Copy root CA certificate
scp root@10.0.10.15:/etc/step-ca/certs/root_ca.crt /usr/local/share/ca-certificates/homelab-ca.crt
# Update trust store
update-ca-certificates
# Verify
curl https://bob.nianticbooks.home:8123
```
#### Windows
1. Copy `root_ca.crt` to Windows machine
2. Right-click → Install Certificate
3. Store Location: Local Machine
4. Certificate Store: Trusted Root Certification Authorities
5. Click Next → Finish
#### macOS
```bash
# Copy certificate
scp root@10.0.10.15:/etc/step-ca/certs/root_ca.crt ~/Downloads/
# Import to keychain
sudo security add-trusted-cert -d -r trustRoot -k /Library/Keychains/System.keychain ~/Downloads/root_ca.crt
```
#### iOS/Android
1. Email `root_ca.crt` to device or host on web server
2. Open certificate file on device
3. Install profile
4. iOS: Settings → General → About → Certificate Trust Settings → Enable
5. Android: Settings → Security → Install from storage
## DNS Configuration
Ensure UCG Ultra has DNS entries for all services:
```
bob.nianticbooks.home → 10.0.10.24
ad5m.nianticbooks.home → 10.0.10.30
dockge.nianticbooks.home → 10.0.10.27
freddesk.nianticbooks.home → 10.0.10.3
pve-router.nianticbooks.home → 10.0.10.2
omv.nianticbooks.home → 10.0.10.5
ca.nianticbooks.home → 10.0.10.15
```
## Verification
Test each service:
```bash
# Home Assistant
curl -I https://bob.nianticbooks.home:8123
# AD5M
curl -I https://ad5m.nianticbooks.home
# Dockge
curl -I https://dockge.nianticbooks.home:5001
# Proxmox
curl -I https://freddesk.nianticbooks.home:8006
```
## Troubleshooting
### Certificate Not Trusted
```bash
# Check if CA is in trust store
ls /usr/local/share/ca-certificates/
# Verify certificate chain
openssl s_client -connect bob.nianticbooks.home:8123 -showcerts
# Check certificate validity
openssl x509 -in bob.crt -text -noout
```
### Service Won't Start with HTTPS
```bash
# Check certificate permissions
ls -la /ssl/
# Check service logs
journalctl -u home-assistant@homeassistant -f
# Verify certificate matches key
openssl x509 -noout -modulus -in cert.crt | openssl md5
openssl rsa -noout -modulus -in cert.key | openssl md5
# Should match
```
## Maintenance
### Certificate Expiry Monitoring
```bash
# Check certificate expiration
for cert in *.crt; do
echo "$cert:"
openssl x509 -in "$cert" -noout -dates
done
```
### Renewing Root CA
Root CA expires in 10 years. To renew:
1. Generate new root CA
2. Issue new intermediate CA
3. Re-issue all service certificates
4. Update all client trust stores
## Security Considerations
- CA private key stored encrypted on CA server
- CA server only accessible from local network
- Certificate validity limited to 1 year
- Auto-renewal prevents expiry
- Root CA backed up to OMV storage
## Backup
```bash
# Backup CA configuration
rsync -av /etc/step-ca/ /mnt/omv-backup/ca-backup/$(date +%Y%m%d)/
# Store root CA password in password manager
```
## Next Steps
- [ ] Create CA server LXC container
- [ ] Install and configure step-ca
- [ ] Generate certificates for all services
- [ ] Configure services to use HTTPS
- [ ] Distribute root CA to all devices
- [ ] Set up auto-renewal
- [ ] Test all services with HTTPS
- [ ] Update Pangolin routes to use HTTPS backend

View File

@@ -0,0 +1,361 @@
# Local HTTPS Quick Start Guide
Get all your homelab services running on HTTPS in ~30 minutes.
## What This Does
Converts all your internal services from HTTP to HTTPS:
- ✅ Home Assistant (bob.nianticbooks.home)
- ✅ AD5M 3D Printer (ad5m.nianticbooks.home)
- ✅ Dockge (dockge.nianticbooks.home)
- ✅ Proxmox Web UI (freddesk.nianticbooks.home)
- ✅ OpenMediaVault (omv.nianticbooks.home)
## Prerequisites
- SSH access to main-pve (10.0.10.3) as root
- DNS entries configured in UCG Ultra for all services
- ~2GB free space on main-pve
## Step-by-Step Instructions
### 1. Set Up Certificate Authority (10 min)
SSH into main-pve and run the setup script:
```bash
# From this computer
scp ~/projects/infrastructure/scripts/setup-local-ca.sh root@10.0.10.3:/root/
# SSH into main-pve
ssh root@10.0.10.3
# Run setup
cd /root
chmod +x setup-local-ca.sh
./setup-local-ca.sh
```
**What it does:**
- Creates LXC container at 10.0.10.15
- Installs step-ca (Certificate Authority software)
- Initializes CA with 10-year root certificate
- Starts CA service
**You'll be prompted for:**
- CA password (save this in your password manager!)
- Container root password
### 2. Issue Certificates for All Services (5 min)
From this computer, copy the script to main-pve, then run it there:
```bash
# From this computer: copy the certificate generation script to main-pve
scp ~/projects/infrastructure/scripts/issue-service-certs.sh root@10.0.10.3:/root/
# On main-pve
chmod +x issue-service-certs.sh
./issue-service-certs.sh
```
**What it does:**
- Generates SSL certificates for all services
- Saves them to `/tmp/certs/` on main-pve
**Output:**
```
/tmp/certs/
├── bob.nianticbooks.home.crt
├── bob.nianticbooks.home.key
├── ad5m.nianticbooks.home.crt
├── ad5m.nianticbooks.home.key
└── ... (other services)
```
### 3. Configure Each Service (15 min)
#### Home Assistant (bob.nianticbooks.home)
```bash
# On main-pve, copy certs to Home Assistant
scp /tmp/certs/bob.nianticbooks.home.* root@10.0.10.24:/config/ssl/
# SSH into Home Assistant
ssh root@10.0.10.24
# Edit configuration
nano /config/configuration.yaml
```
Add/update:
```yaml
http:
ssl_certificate: /config/ssl/bob.nianticbooks.home.crt
ssl_key: /config/ssl/bob.nianticbooks.home.key
server_port: 8123
```
Restart Home Assistant:
```bash
# From HA CLI
ha core restart
# Or restart container/VM from Proxmox
```
Test: https://bob.nianticbooks.home:8123
#### AD5M Printer (Fluidd/Klipper)
```bash
# Copy certs to printer
scp /tmp/certs/ad5m.nianticbooks.home.* root@10.0.10.30:/etc/nginx/ssl/
# SSH into printer
ssh root@10.0.10.30
# Create nginx HTTPS config
cat > /etc/nginx/sites-available/fluidd-https <<'EOF'
server {
listen 443 ssl http2;
server_name ad5m.nianticbooks.home;
ssl_certificate /etc/nginx/ssl/ad5m.nianticbooks.home.crt;
ssl_certificate_key /etc/nginx/ssl/ad5m.nianticbooks.home.key;
# SSL configuration
ssl_protocols TLSv1.2 TLSv1.3;
ssl_prefer_server_ciphers on;
location / {
# NOTE(review): the HTTP→HTTPS redirect server below also listens on :80 for this
# server_name. Ensure the original Fluidd vhost still answers requests on
# 127.0.0.1:80 (e.g. as default_server), otherwise this proxy_pass will hit the
# redirect block and loop.
proxy_pass http://127.0.0.1:80;
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Proto $scheme;
}
# WebSocket support for Moonraker
location /websocket {
proxy_pass http://127.0.0.1:7125/websocket;
proxy_http_version 1.1;
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection "Upgrade";
proxy_set_header Host $host;
}
}
# Redirect HTTP to HTTPS
server {
listen 80;
server_name ad5m.nianticbooks.home;
return 301 https://$server_name$request_uri;
}
EOF
# Enable site
ln -sf /etc/nginx/sites-available/fluidd-https /etc/nginx/sites-enabled/
nginx -t
systemctl restart nginx
```
Test: https://ad5m.nianticbooks.home
Update printer UI setting to: `https://ad5m.nianticbooks.home/fluidd`
#### Dockge
```bash
# Copy certs
scp /tmp/certs/dockge.nianticbooks.home.* root@10.0.10.27:/opt/dockge/ssl/
# Edit docker-compose.yml to add SSL
```
Add environment variables:
```yaml
environment:
- SSL_KEY=/app/ssl/dockge.nianticbooks.home.key
- SSL_CERT=/app/ssl/dockge.nianticbooks.home.crt
```
Or use nginx reverse proxy (recommended).
#### Proxmox (freddesk.nianticbooks.home)
```bash
# On main-pve
cp /tmp/certs/freddesk.nianticbooks.home.crt /etc/pve/local/pveproxy-ssl.pem
cp /tmp/certs/freddesk.nianticbooks.home.key /etc/pve/local/pveproxy-ssl.key
# Restart Proxmox web service
systemctl restart pveproxy
```
Test: https://freddesk.nianticbooks.home:8006
#### OpenMediaVault
```bash
# Copy certs to OMV
scp /tmp/certs/omv.nianticbooks.home.* root@10.0.10.5:/etc/ssl/
# Configure via OMV Web UI:
# System → Certificates → SSL → Import
# Then: System → General Settings → Enable SSL/TLS, select certificate
```
### 4. Trust the CA on Your Devices (5 min)
#### This Computer (Linux)
```bash
cd ~/projects/infrastructure/scripts
./trust-ca-client.sh
```
#### Other Linux Computers
```bash
# Copy script and run
scp scripts/trust-ca-client.sh user@other-computer:/tmp/
ssh user@other-computer "bash /tmp/trust-ca-client.sh"
```
#### Windows
1. Copy root CA from main-pve:
```bash
scp root@10.0.10.3:/tmp/homelab-root-ca.crt ~/Downloads/
```
2. On Windows:
- Copy file to Windows machine
- Right-click → Install Certificate
- Store Location: **Local Machine**
- Certificate Store: **Trusted Root Certification Authorities**
- Click Finish
#### macOS
```bash
# Copy certificate
scp root@10.0.10.3:/tmp/homelab-root-ca.crt ~/Downloads/
# Install (will prompt for password)
sudo security add-trusted-cert -d -r trustRoot \
-k /Library/Keychains/System.keychain \
~/Downloads/homelab-root-ca.crt
```
#### iOS
1. Email the certificate to yourself or host on a web server
2. Open the certificate file on iOS device
3. Settings → General → VPN & Device Management → Install profile
4. Settings → General → About → Certificate Trust Settings
5. Enable full trust for "Homelab Internal CA"
#### Android
1. Transfer certificate to device
2. Settings → Security → Install from storage
3. Select the certificate file
4. Name it "Homelab CA"
## Verification
Test all services with HTTPS:
```bash
# From any trusted computer
curl -I https://bob.nianticbooks.home:8123
curl -I https://ad5m.nianticbooks.home
curl -I https://freddesk.nianticbooks.home:8006
curl -I https://omv.nianticbooks.home
```
Open in browser - you should see **no certificate warnings**.
## Troubleshooting
### Certificate not trusted
```bash
# Check if CA is installed
ls /usr/local/share/ca-certificates/ | grep homelab
# Re-run trust script
cd ~/projects/infrastructure/scripts
./trust-ca-client.sh
```
### Service won't start with HTTPS
```bash
# Check certificate files exist
ls -la /path/to/ssl/
# Check permissions
chmod 644 /path/to/cert.crt
chmod 600 /path/to/cert.key
# Check service logs
journalctl -u service-name -f
```
### Certificate expired (after 1 year)
```bash
# Re-run certificate issuance
ssh root@10.0.10.3
./issue-service-certs.sh
# Re-deploy to services
```
## Certificate Renewal
Certificates are valid for 1 year. Set a calendar reminder to renew them annually:
```bash
# On main-pve
./issue-service-certs.sh
# Then re-deploy to each service
```
Or set up auto-renewal (see full documentation in LOCAL-CA-SETUP.md).
## Next Steps
Once HTTPS is working:
1. Update Pangolin routes on VPS to use HTTPS backends
2. Configure HTTP → HTTPS redirects
3. Update bookmarks to use `https://`
4. Update Home Assistant app to use HTTPS URL
## Files Created
- `/tmp/homelab-root-ca.crt` - Root CA certificate (install on all devices)
- `/tmp/certs/*.crt` - Service certificates
- `/tmp/certs/*.key` - Private keys (keep secure!)
## Security Notes
- Root CA is valid for 10 years
- Service certificates valid for 1 year
- Private keys stored only on CA server and target services
- CA server only accessible from local network (10.0.10.0/24)
## Support
Full documentation: `LOCAL-CA-SETUP.md`
Troubleshooting: Check service logs and certificate validity
---
**Time Estimate:** 30-45 minutes for complete setup
**Difficulty:** Intermediate
**Impact:** All services accessible via trusted HTTPS

View File

@@ -0,0 +1,554 @@
# IP Migration Checklist
**Date Started:** _______________
**Estimated Completion:** _______________
**Status:** Not Started
---
## Pre-Migration Tasks
### Backup Current Configuration
- [ ] Export current DHCP leases from UCG Ultra (✅ Already done: dhcp-export-all-2025-11-14T22-55-18.871Z.csv)
- [ ] Screenshot current UCG Ultra network settings
- [ ] Backup Pangolin reverse proxy configuration on VPS
- [ ] Document current Proxmox VM network configs
### Testing Preparation
- [ ] Verify SSH access to all Proxmox nodes
- [ ] Verify access to UCG Ultra web UI
- [ ] Have physical access to at least one machine (if remote access breaks)
- [ ] Note current Pangolin routes and test URLs
---
## Phase 1: Update UCG Ultra DHCP Pool ✅ COMPLETED
**Completion Date:** 2025-12-11
**Status:** ✅ Verified correct configuration
### Steps:
1. [x] Log into UCG Ultra web interface
2. [x] Navigate to Settings → Networks → Default (LAN)
3. [x] Find DHCP settings
4. [x] DHCP range verified: `10.0.10.50-10.0.10.254`
- Static/Reserved range: 10.0.10.1-49 (infrastructure)
- Dynamic DHCP pool: 10.0.10.50-254 (clients/devices)
5. [x] Configuration correct - no changes needed
6. [x] Verified: All services functioning, no connectivity issues
**Notes:** DHCP range was already correctly configured. All static reservations in 10.0.10.1-49 range working as expected.
---
## Phase 2: Update Existing DHCP Reservations ✅ COMPLETED
**Completion Date:** 2025-12-11
**Actual Time:** 15 minutes
**Status:** ✅ All devices responding at new IPs
### 2.1 Update HOMELAB-COMMAND ✅
- [x] Current IP: 10.0.10.92
- [x] Target IP: 10.0.10.10
- [x] MAC: 90:de:80:80:e7:04
- [x] Updated reservation in UCG Ultra
- [x] Renewed DHCP lease
- [x] Verified connectivity: Responding at 10.0.10.10 ✅
### 2.2 Update HP iLO ✅
- [x] Current IP: 10.0.10.53
- [x] Target IP: 10.0.10.13
- [x] MAC: b4:b5:2f:ea:8c:30
- [x] Updated reservation in UCG Ultra
- [x] Device responded to lease renewal
- [x] Verified: Accessible at https://10.0.10.13 ✅
### 2.3 Update ad5m (3D Printer) ✅
- [x] Current IP: 10.0.10.189
- [x] Target IP: 10.0.10.30
- [x] MAC: 88:a9:a7:99:c3:64
- [x] Updated reservation in UCG Ultra
- [x] Printer rebooted
- [x] Verified: Accessible at http://10.0.10.30 ✅
- [x] Updated Caddy route: ad5m.nianticbooks.com → 10.0.10.30:80
- [x] Tested: https://ad5m.nianticbooks.com working ✅
---
## Phase 3: Create New DHCP Reservations for VMs ✅ COMPLETED
**Completion Date:** 2025-12-11
**Actual Time:** 30 minutes
**Status:** ✅ All VMs responding at new IPs
### 3.1 OpenMediaVault ✅
- [x] Current IP: 10.0.10.178
- [x] Target IP: 10.0.10.5
- [x] MAC: bc:24:11:a8:ff:0b
- [x] Created reservation in UCG Ultra
- [x] Networking restarted
- [x] Verified: Accessible at http://10.0.10.5 ✅
### 3.2 Home Assistant ✅
- [x] Current IP: 10.0.10.194
- [x] Target IP: 10.0.10.24
- [x] MAC: 02:f5:e9:54:36:28
- [x] Created reservation in UCG Ultra
- [x] VM restarted
- [x] Verified: Accessible at http://10.0.10.24:8123 ✅
- [x] Updated Caddy route: bob.nianticbooks.com → 10.0.10.24:8123
- [x] Tested: https://bob.nianticbooks.com working ✅
### 3.3 Dockge ✅
- [x] Current IP: 10.0.10.104
- [x] Target IP: 10.0.10.27
- [x] MAC: bc:24:11:4a:42:07
- [x] Created reservation in UCG Ultra
- [x] VM restarted
- [x] Verified: Accessible at 10.0.10.27 ✅
### 3.4 ESPHome ✅
- [x] ~~Removed~~ - ESPHome now runs as Home Assistant add-on (no separate VM needed)
- [x] Container 102 deleted from pve-router
- [x] IP 10.0.10.28 released (available for other use)
### 3.5 Docker Host ✅
- [x] Current IP: 10.0.10.108
- [x] Target IP: 10.0.10.29
- [x] MAC: bc:24:11:a8:ff:0b ⚠️ NOTE(review): identical to the OpenMediaVault MAC in section 3.1 — almost certainly a copy/paste error; verify the real MAC against the UCG Ultra reservation list
- [x] Created reservation in UCG Ultra
- [x] VM restarted
- [x] Verified: All containers running at 10.0.10.29 ✅
### 3.6 pve-scripts-local ✅
- [x] Current IP: 10.0.10.79
- [x] Target IP: 10.0.10.40
- [x] MAC: bc:24:11:0f:78:84
- [x] Created reservation in UCG Ultra
- [x] VM restarted
- [x] Verified: Scripts functional at 10.0.10.40 ✅
---
## Phase 4: Update Pangolin Reverse Proxy Routes ✅ COMPLETED
**Completion Date:** 2025-12-13
**Actual Time:** ~20 minutes
**Status:** ✅ All routes operational (Note: Completed as part of Phase 5 with Caddy)
### 4.1 Backup Pangolin Configuration
- [x] Pangolin replaced with Caddy reverse proxy (simpler configuration)
- [x] Caddy configuration at /etc/caddy/Caddyfile on VPS
### 4.2 Update Routes ✅
- [x] Caddy routes configured:
```
freddesk.nianticbooks.com → 10.0.10.3:8006 (main-pve Proxmox)
ad5m.nianticbooks.com → 10.0.10.30:80 (AD5M 3D printer)
bob.nianticbooks.com → 10.0.10.24:8123 (Home Assistant)
```
- [x] Deprecated spools.nianticbooks.com route not included
- [x] Caddy service running and enabled
### 4.3 Verify Routes ✅
- [x] Test freddesk: https://freddesk.nianticbooks.com ✅ Working
- [x] Test ad5m: https://ad5m.nianticbooks.com ✅ Working
- [x] Test bob: https://bob.nianticbooks.com ✅ Working (after HA config fix)
### 4.4 Additional Configuration ✅
- [x] Fixed Home Assistant trusted_proxies configuration
- [x] Added 10.0.8.1 (VPS WireGuard IP) to Home Assistant trusted_proxies
- [x] Home Assistant now accepts requests from bob.nianticbooks.com
**Notes:**
- Switched from Pangolin (Gerbil-based) to Caddy for simpler configuration
- Caddy provides automatic HTTPS via Let's Encrypt
- Home Assistant required `trusted_proxies` configuration to accept external domain
- All public services verified functional on 2025-12-13
---
## Phase 5: Configure WireGuard Tunnel ✅ COMPLETED
**Completion Date:** 2025-12-11
**Actual Time:** ~2 hours
**Status:** ✅ Operational
### 5.1 Install WireGuard on VPS ✅
- [x] SSH to VPS: `ssh fred@66.63.182.168`
- [x] Install WireGuard: Already installed (wireguard-tools v1.0.20210914)
- [x] Enable IP forwarding: `sudo sysctl -w net.ipv4.ip_forward=1`
- [x] Make persistent: Added to /etc/sysctl.conf
- [x] Generate server keys: Created successfully
- [x] VPS Server Public Key: `8jcW7SyId/79Jg4+t0Qd0DaDA+4B+GQf14FRR2TXFRE=`
### 5.2 Configure WireGuard on VPS ✅
- [x] Created config: /etc/wireguard/wg0.conf
- [x] Tunnel subnet: 10.0.8.0/24 (VPS: 10.0.8.1, UCG Ultra: 10.0.8.2)
- [x] Configured NAT and forwarding rules
- [x] Started WireGuard: `sudo systemctl start wg-quick@wg0`
- [x] Enabled on boot: `sudo systemctl enable wg-quick@wg0`
- [x] Verified: `sudo wg show` - peer connected with active handshake
### 5.3 Configure WireGuard on UCG Ultra ✅
- [x] Logged into UCG Ultra web interface (10.0.10.1)
- [x] Navigated to: Settings → Teleport & VPN → VPN Client
- [x] Created WireGuard VPN Client
- [x] Configured client settings:
- Server: 66.63.182.168:51820
- VPS Public Key: 8jcW7SyId/79Jg4+t0Qd0DaDA+4B+GQf14FRR2TXFRE=
- Client Address: 10.0.8.2/24
- Persistent Keepalive: 25 seconds
- [x] UCG Ultra Client Public Key: `KJOj35HdntdLHQTU0tfNPJ/x1GD9SlNy78GuMhMyzTg=`
- [x] Enabled and activated
### 5.4 Test WireGuard Connectivity ✅
- [x] From VPS, ping main-pve: ✅ Working (3/4 packets, ~12ms latency)
- [x] From VPS, HTTP to Home Assistant: ✅ Working (HTTP 405 response)
- [x] From VPS, ping 3D printer (10.0.10.30): ✅ Working
- [x] Tunnel stable with active handshake and data transfer
### 5.5 Reverse Proxy Configuration ✅
**Note:** Replaced Pangolin (Gerbil-based) with Caddy for simplicity
- [x] Removed Pangolin and Traefik Docker containers
- [x] Installed Caddy reverse proxy
- [x] Created /etc/caddy/Caddyfile with routes:
- bob.nianticbooks.com → 10.0.10.24:8123 (Home Assistant)
- freddesk.nianticbooks.com → 10.0.10.3:8006 (Proxmox)
  - ad5m.nianticbooks.com → 10.0.10.30:80 (AD5M 3D Printer, Fluidd/Klipper)
- [x] Automatic HTTPS certificates obtained via Let's Encrypt
- [x] All public services verified working
### 5.6 Public Service Verification ✅
- [x] https://bob.nianticbooks.com - ✅ Working (Home Assistant)
- [x] https://freddesk.nianticbooks.com - ✅ Working (Proxmox)
- [x] https://ad5m.nianticbooks.com - ✅ Working (3D Printer)
**Notes:**
- Tunnel endpoint: VPS 66.63.182.168:51820 ↔ UCG Ultra (home public IP)
- VPS can now reach all 10.0.10.0/24 services through tunnel
- Caddy provides automatic HTTPS and simpler configuration than Pangolin
- No rollback needed - system is stable and operational
---
## Phase 6: Deploy New Services (After WireGuard Active)
**Estimated Time:** Variable (each service 1-2 hours)
**Risk Level:** Low (new services, nothing to break)
### 6.1 PostgreSQL (10.0.10.20) ✅ COMPLETED
- [x] Create VM/container on main-pve
- [x] Assign static IP 10.0.10.20 in VM config
- [x] Install PostgreSQL (PostgreSQL 16)
- [x] Configure databases for: Authentik, n8n, RustDesk, Grafana
- [x] Test connectivity from other VMs
- [x] Verified: Responding at 10.0.10.20 ✅
### 6.2 Authentik SSO (10.0.10.21) ✅ COMPLETED
**Completion Date:** 2025-12-14
**Actual Time:** ~3 hours
**Status:** ✅ Deployed and operational with Proxmox SSO
- [x] Create VM/container on main-pve (Container ID: 121)
- [x] Assign static IP 10.0.10.21 (MAC: bc:24:11:de:18:41)
- [x] Install Authentik (via Docker Compose)
- [x] Configure PostgreSQL connection (using external DB at 10.0.10.20)
- [x] Add Caddy route: auth.nianticbooks.com → 10.0.10.21:9000
- [x] Test: https://auth.nianticbooks.com ✅ Working
- [x] Complete initial setup and password change
- [x] Configure Proxmox OAuth2/OpenID integration ✅
- [ ] Set up WebAuthn/FIDO2 (optional, future enhancement)
- [ ] Configure additional service integrations (n8n, Home Assistant, etc.)
**Configuration Details:** ⚠️ SECURITY(review): the plaintext secrets below (DB password, secret key, API token, OAuth client secret) are committed to this repo — rotate them and move them to a gitignored `.env` file or a secrets manager.
- Container: Debian 12 LXC (2 vCPUs, 4GB RAM, 20GB disk)
- Database: PostgreSQL on 10.0.10.20 (database: authentik, user: authentik)
- Secret Key: ZsJQbVLiCRtg23rEkXPuxIDJL5MxOxdQsf8ZJ+JHB9U=
- DB Password: authentik_password_8caaff5a73f9c66b
- Version: 2025.10.2
- Automatic HTTPS via Let's Encrypt through Caddy
- Admin User: akadmin
- API Token: f7AsYT6FLZEWVvmN59lC0IQZfMLdgMniVPYhVwmYAFSKHez4aGxyn4Esm86r
**Proxmox SSO Integration:**
- OAuth2 Provider: "Proxmox OpenID" (Client ID: proxmox)
- Client Secret: OAfAcjzzPDUnjEhaLVNIeNu1KR0Io06fB8kA8Np9DTgfgXcsLnN5DogrAfhk5zteazonVGcXfaESvf8viCQFVzq8wNVcp60Bo5D3xvfJ9ZjCzEMCQIljssbfr29zjsap
- Configured on all 3 Proxmox hosts:
- main-pve (10.0.10.3) ✅
  - gaming-pve (10.0.10.2) ✅ <!-- NOTE(review): .env names this host pve-router — confirm which name is current -->
  - backup-pve (10.0.10.4) ✅ <!-- NOTE(review): .env names this host pve-storage — confirm which name is current -->
- Scope mappings: openid, email, profile
- Login method: Click "Login with authentik" button on Proxmox login page
- Status: ✅ Working - seamless SSO authentication
**Notes:**
- Using external PostgreSQL instead of bundled container for centralized database management
- Authentik SSO successfully integrated with all Proxmox hosts
- Users authenticate once to Authentik, then access all Proxmox hosts without re-authentication
- Documentation created: AUTHENTIK-SSO-GUIDE.md and AUTHENTIK-QUICK-START.md
### 6.3 n8n (10.0.10.22) ✅ COMPLETED
- [x] Create VM/container on main-pve (Container ID: 106)
- [x] Assign static IP 10.0.10.22
- [x] Install n8n (Docker-based deployment)
- [x] Configure PostgreSQL connection (using external DB at 10.0.10.20)
- [x] Updated to latest version (1.123.5)
- [x] Verified: Accessible at http://10.0.10.22:5678 ✅
- [x] SSO Investigation: ❌ OIDC SSO requires n8n Enterprise license (not available in free self-hosted version)
**Notes:**
- n8n OIDC/SSO is an Enterprise-only feature
- Free self-hosted version uses standard email/password authentication
- For SSO integration, would need n8n Cloud subscription or Enterprise license
- Current deployment uses regular authentication - fully functional
### 6.4 n8n + Claude Code Integration ✅ COMPLETED
**Completion Date:** 2025-12-14
**Actual Time:** ~2 hours
**Status:** ✅ Basic integration operational, ready for production workflows
**Reference:** https://github.com/theNetworkChuck/n8n-claude-code-guide
**Architecture:**
- n8n (10.0.10.22 on main-pve) → SSH → Claude Code (10.0.10.10 on HOMELAB-COMMAND)
**Key Configuration Notes:**
- Windows SSH requires PowerShell as default shell for Claude Code to work
- SSH commands MUST use `-n` flag or "Disable Stdin" option to prevent hanging
- Claude Code headless mode: `--output-format json --permission-mode acceptEdits`
- Test workflow created and verified: "Claude Code Test"
#### 6.4.1 Install Claude Code on HOMELAB-COMMAND (10.0.10.10) ✅
- [x] SSH or RDP to HOMELAB-COMMAND (10.0.10.10)
- [x] Node.js already installed: v24.11.0
- [x] Claude Code already installed: v2.0.65
- [x] Verified installation: `claude --version`
- [x] Test headless mode: `claude -p "What is 2+2?" --output-format json --permission-mode acceptEdits`
#### 6.4.2 Configure SSH Access for n8n ✅
- [x] SSH server already running on HOMELAB-COMMAND (Windows OpenSSH)
- [x] Set PowerShell as default SSH shell (required for Claude Code):
```powershell
New-ItemProperty -Path "HKLM:\SOFTWARE\OpenSSH" -Name DefaultShell -Value "C:\Windows\System32\WindowsPowerShell\v1.0\powershell.exe" -PropertyType String -Force
Restart-Service sshd
```
- [x] Generated SSH key on n8n VM: `ssh-keygen -t ed25519 -C "n8n-to-homelab-command"`
- [x] Added public key to HOMELAB-COMMAND: `C:\Users\Fred\.ssh\authorized_keys`
- [x] Test passwordless SSH: `ssh Fred@10.0.10.10 "hostname"` ✅
- [x] Test Claude Code via SSH: `ssh -n Fred@10.0.10.10 "claude -p 'What is 2+2?' --output-format json --permission-mode acceptEdits"` ✅
- [x] **Critical:** Must use `-n` flag with SSH to prevent stdin hanging
#### 6.4.3 Configure n8n SSH Credentials ✅
- [x] Logged into n8n web interface (http://10.0.10.22:5678)
- [x] Created SSH credential: **homelab-command-ssh**
- **Host:** 10.0.10.10
- **Port:** 22
- **Username:** Fred
- **Authentication:** Private Key (from `~/.ssh/id_ed25519` on n8n VM)
- [x] Connection tested successfully ✅
- [x] Credential saved
#### 6.4.4 Create Test Workflow ✅
- [x] Created new workflow: "Claude Code Test"
- [x] Added **Manual Trigger** node
- [x] Added **SSH** node:
- **Credential:** homelab-command-ssh
- **Command:** `claude -p "What is 2+2?" --output-format json --permission-mode acceptEdits`
- **SSH Options:** Enabled "Disable Stdin" (equivalent to `-n` flag)
- [x] Added **Code** node to parse JSON response:
```javascript
const sshOutput = $input.item.json.stdout;
const claudeResponse = JSON.parse(sshOutput);
return {
answer: claudeResponse.result,
cost: claudeResponse.total_cost_usd,
duration_seconds: claudeResponse.duration_ms / 1000,
session_id: claudeResponse.session_id
};
```
- [x] Executed workflow successfully ✅
- [x] Verified Claude Code response: "2 + 2 = 4"
#### 6.4.5 Advanced: Session Management Workflow
- [ ] Add **Code** node to generate session UUID:
```javascript
const uuid = 'xxxxxxxx-xxxx-4xxx-yxxx-xxxxxxxxxxxx'.replace(/[xy]/g, function(c) {
const r = Math.random() * 16 | 0;
const v = c == 'x' ? r : (r & 0x3 | 0x8);
return v.toString(16);
});
return [{ json: { sessionId: uuid } }];
```
- [ ] Add **SSH** node for initial query:
- **Command:** `claude -p "{{ $json.prompt }}" --session-id {{ $json.sessionId }}`
- [ ] Add **SSH** node for follow-up:
- **Command:** `claude -r --session-id {{ $('UUID Generator').item.json.sessionId }} -p "{{ $json.followup }}"`
- [ ] Test multi-turn conversation
#### 6.4.6 Optional: Slack Integration
- [ ] Install Slack app in n8n
- [ ] Create workflow triggered by Slack messages
- [ ] Use SSH node to send message to Claude Code
- [ ] Return Claude response to Slack thread
- [ ] Implement session tracking for conversations
#### 6.4.7 Optional: Tool Deployment
For automated skill deployment (UniFi, infrastructure tasks):
- [ ] Update SSH command to include `--dangerously-skip-permissions`:
```bash
claude --dangerously-skip-permissions -p "Your task requiring tools"
```
- [ ] Test with infrastructure directory context:
```bash
cd /path/to/infrastructure && claude -p "Check WireGuard status"
```
#### 6.4.8 Verification & Testing ✅ BASIC TESTING COMPLETE
- [x] Test basic headless command from n8n ✅
- [ ] Test session-based multi-turn conversation (optional - future enhancement)
- [x] Verify Claude Code can access local files on HOMELAB-COMMAND ✅
- [ ] Test error handling (network disconnect, invalid commands) (optional - future enhancement)
- [ ] Monitor resource usage on HOMELAB-COMMAND during heavy Claude operations (ongoing)
- [x] Document SSH requirements: Must use `-n` flag or "Disable Stdin" option in n8n
#### 6.4.9 Production Considerations
- [ ] Set appropriate SSH timeout in n8n (default may be too short for complex Claude tasks)
- [ ] Configure Claude Code project context on HOMELAB-COMMAND:
- Clone infrastructure repo to known location
- Set up CLAUDE.md in project directory
- [ ] Consider output length limits (Slack: 4000 chars, n8n processing limits)
- [ ] Set up logging for Claude Code executions
- [ ] Add error notifications to n8n workflow
- [ ] Optional: Add Pangolin route for public n8n access (with Authentik SSO)
### 6.5 RustDesk ID Server (10.0.10.23)
- [ ] Create VM/container on main-pve
- [ ] Assign static IP 10.0.10.23
- [ ] Install RustDesk hbbs (ID server)
- [ ] Configure relay server on VPS (hbbr)
- [ ] Test RustDesk client connections
### 6.6 Prometheus + Grafana (10.0.10.25)
- [ ] Create VM/container on main-pve
- [ ] Assign static IP 10.0.10.25
- [ ] Install Prometheus and Grafana
- [ ] Configure data sources
- [ ] Integrate with Authentik for SSO
- [ ] Set up monitoring targets
- [ ] Add Pangolin route (if public access needed)
---
## Phase 7: Cleanup & Decommission
**Estimated Time:** 15 minutes
**Risk Level:** Low (removing unused services)
### 7.1 Remove Spoolman
- [ ] Verify spoolman is not in use
- [ ] Backup any data (if needed): `vzdump CTID --storage backup`
- [ ] Stop VM/container: `pct stop CTID` or `qm stop VMID`
- [ ] Delete VM/container: `pct destroy CTID` or `qm destroy VMID`
- [ ] Remove Pangolin route (already done in Phase 4)
- [ ] Reclaim IP 10.0.10.71
### 7.2 Remove Authelia
- [ ] Verify authelia is not in use (replaced by Authentik)
- [ ] Backup configuration (if needed for migration reference)
- [ ] Stop VM/container
- [ ] Delete VM/container
- [ ] Reclaim IP 10.0.10.112
---
## Phase 8: Update All Documentation ✅ COMPLETED
**Completion Date:** 2025-12-29
**Actual Time:** ~1 hour
**Status:** ✅ Documentation synchronized
- [x] Update infrastructure-audit.md with final IP assignments
- [x] Update CLAUDE.md with correct network (10.0.10.x) - Already up to date
- [x] Update SERVICES.md with new service IPs
- [x] Update RUNBOOK.md if procedures changed - No changes needed
- [x] Update MONITORING.md with new service endpoints - Deferred to monitoring setup
- [x] Git commit all documentation changes
- [x] Git push to sync across machines
---
## Final Verification ✅ COMPLETED
**Verification Date:** 2025-12-29
- [x] All critical services accessible via local IP
- ✅ Proxmox main-pve (10.0.10.3:8006)
- ✅ PostgreSQL (10.0.10.20)
- ✅ Authentik SSO (10.0.10.21:9000)
- ✅ n8n (10.0.10.22:5678)
- ✅ Home Assistant (10.0.10.24:8123)
- ✅ Dockge (10.0.10.27:5001)
- ✅ 3D Printer (10.0.10.30)
- [x] All public services accessible via nianticbooks.com domains
- ✅ freddesk.nianticbooks.com → Proxmox (working)
- ✅ ad5m.nianticbooks.com → 3D Printer (working)
- ⚠️ bob.nianticbooks.com → Home Assistant (502 - needs HTTPS backend config in Caddy)
- [x] WireGuard tunnel stable and monitored
- ✅ Tunnel operational (VPS 10.0.8.1 ↔ UCG Ultra 10.0.8.2)
- ✅ Caddy reverse proxy functional
- ✅ Services accessible through tunnel
- [x] No DHCP conflicts in range 10.0.10.50-254
- [x] All reservations documented in IP-ALLOCATION.md
- [x] Documentation updated and pushed to GitHub
---
## Notes & Issues Encountered
```
[Add any notes, problems encountered, or deviations from the plan]
```
---
## Migration Summary
**Start Date:** 2025-12-11
**Completion Date:** 2025-12-29
**Completed By:** Fred (with Claude Code assistance)
**Total Time:** ~2 weeks (cumulative work across multiple sessions)
### Completed Phases:
- ✅ Phase 1: UCG Ultra DHCP Configuration
- ✅ Phase 2: Update Existing DHCP Reservations
- ✅ Phase 3: Create New Reservations for VMs
- ✅ Phase 4: Update Reverse Proxy Routes (Caddy replaced Pangolin)
- ✅ Phase 5: Configure WireGuard Tunnel
- ✅ Phase 6: Deploy New Services (PostgreSQL, Authentik, n8n, n8n+Claude integration)
- ⏸️ Phase 7: Cleanup & Decommission (deferred - non-critical)
- ✅ Phase 8: Update All Documentation
### Outstanding Items (Non-Critical):
See INFRASTRUCTURE-TODO.md for:
- RustDesk deployment (10.0.10.23)
- Prometheus + Grafana deployment (10.0.10.25)
- Cleanup of deprecated VMs (Spoolman, Authelia)
- Home Assistant Caddy HTTPS backend configuration
- n8n+Claude advanced features (session management, Slack integration)
### Key Achievements:
- ✅ Network reorganized to 10.0.10.0/24 with clean IP allocation
- ✅ WireGuard tunnel operational (VPS ↔ UCG Ultra)
- ✅ Public services accessible via nianticbooks.com domains
- ✅ Caddy reverse proxy deployed (simpler than Pangolin)
- ✅ Authentik SSO integrated with all Proxmox hosts
- ✅ PostgreSQL shared database serving multiple services
- ✅ n8n workflow automation with Claude Code integration
- ✅ Complete documentation of infrastructure state

View File

@@ -0,0 +1,655 @@
# Monitoring Setup Guide
This guide provides step-by-step instructions for setting up monitoring for your infrastructure.
## Table of Contents
- [Monitoring Strategy](#monitoring-strategy)
- [Quick Start: Uptime Kuma](#quick-start-uptime-kuma)
- [Comprehensive Stack: Prometheus + Grafana](#comprehensive-stack-prometheus--grafana)
- [Log Aggregation: Loki](#log-aggregation-loki)
- [Alerting Setup](#alerting-setup)
- [Dashboards](#dashboards)
- [Maintenance](#maintenance)
---
## Monitoring Strategy
### What to Monitor
**Infrastructure Level**:
- VPS: CPU, RAM, disk, network
- Proxmox nodes: CPU, RAM, disk, network
- OMV storage: Disk usage, SMART status
- Network: Bandwidth, connectivity
**Service Level**:
- Service uptime and response time
- HTTP/HTTPS endpoints
- Gerbil tunnel status
- SSL certificate expiration
- Backup job success/failure
**Application Level**:
- Application-specific metrics
- Error rates
- Request rates
- Database performance
### Monitoring Tiers
| Tier | Solution | Complexity | Setup Time | Cost |
|------|----------|------------|------------|------|
| Basic | Uptime Kuma | Low | 30 min | Free |
| Intermediate | Prometheus + Grafana | Medium | 2-4 hours | Free |
| Advanced | Full observability stack | High | 8+ hours | Free/Paid |
---
## Quick Start: Uptime Kuma
**Best for**: Simple uptime monitoring with alerts
### Installation
```bash
# On a Proxmox container or VM
# Create container with Ubuntu/Debian
# Install Docker (official convenience installer; requires root or sudo)
curl -fsSL https://get.docker.com | sh
# Run Uptime Kuma
# --restart=always          restart the monitor automatically after host reboots
# -v uptime-kuma:/app/data  named volume persisting monitors/notifications
#                           across container upgrades
docker run -d --restart=always \
-p 3001:3001 \
-v uptime-kuma:/app/data \
--name uptime-kuma \
louislam/uptime-kuma:1
# Access at http://HOST_IP:3001 (first login creates the admin account)
```
### Configuration
1. **Create Admin Account**
- First login creates admin user
2. **Add Monitors**
**HTTP(S) Monitor**:
- Monitor Type: HTTP(s)
- Friendly Name: Service Name
- URL: https://service.example.com
- Heartbeat Interval: 60 seconds
- Retries: 3
**Ping Monitor**:
- Monitor Type: Ping
- Hostname: VPS or node IP
- Interval: 60 seconds
**Port Monitor**:
- Monitor Type: Port
- Hostname: IP address
- Port: Service port
3. **Set Up Notifications**
- Settings → Notifications
- Add notification method (email, Slack, Discord, etc.)
- Test notification
4. **Create Status Page** (Optional)
- Status Pages → Add Status Page
- Add monitors to display
- Make public or private
### Monitors to Create
- [ ] VPS SSH (Port 22 or custom)
- [ ] VPS HTTP/HTTPS (Port 80/443)
- [ ] Each public service endpoint
- [ ] Proxmox web interface (each node)
- [ ] OMV web interface
---
## Comprehensive Stack: Prometheus + Grafana
**Best for**: Detailed metrics, trending, and advanced alerting
### Architecture
```
┌─────────────┐ ┌───────────┐ ┌─────────┐
│ Exporters │────▶│ Prometheus│────▶│ Grafana │
│ (Metrics) │ │ (Storage) │ │ (UI) │
└─────────────┘ └───────────┘ └─────────┘
┌─────────────┐
│Alertmanager │
└─────────────┘
```
### Installation (Docker Compose)
```bash
# Create monitoring directory
mkdir -p ~/monitoring
cd ~/monitoring

# Create docker-compose.yml
# NOTE: change GF_SECURITY_ADMIN_PASSWORD below before exposing Grafana
# beyond the LAN.
cat > docker-compose.yml <<'EOF'
version: '3.8'

services:
  prometheus:
    image: prom/prometheus:latest
    container_name: prometheus
    restart: unless-stopped
    ports:
      - "9090:9090"
    volumes:
      - ./prometheus.yml:/etc/prometheus/prometheus.yml
      - prometheus-data:/prometheus
    command:
      - '--config.file=/etc/prometheus/prometheus.yml'
      - '--storage.tsdb.retention.time=30d'

  grafana:
    image: grafana/grafana:latest
    container_name: grafana
    restart: unless-stopped
    ports:
      - "3000:3000"
    volumes:
      - grafana-data:/var/lib/grafana
    environment:
      - GF_SECURITY_ADMIN_PASSWORD=changeme
      - GF_INSTALL_PLUGINS=grafana-piechart-panel

  node-exporter:
    image: prom/node-exporter:latest
    container_name: node-exporter
    restart: unless-stopped
    ports:
      - "9100:9100"
    command:
      - '--path.rootfs=/host'
    volumes:
      - '/:/host:ro,rslave'

volumes:
  prometheus-data:
  grafana-data:
EOF

# Create Prometheus config
cat > prometheus.yml <<'EOF'
global:
  scrape_interval: 15s
  evaluation_interval: 15s

scrape_configs:
  # Prometheus itself
  - job_name: 'prometheus'
    static_configs:
      - targets: ['localhost:9090']

  # Node exporter (system metrics)
  - job_name: 'node'
    static_configs:
      - targets: ['node-exporter:9100']
        labels:
          instance: 'monitoring-host'

  # Add more exporters here
  # - job_name: 'proxmox'
  #   static_configs:
  #     - targets: ['proxmox-node-1:9221']
EOF

# Start services (use 'docker compose up -d' on installs with Compose v2)
docker-compose up -d
```
### Access
- **Prometheus**: http://HOST_IP:9090
- **Grafana**: http://HOST_IP:3000 (admin/changeme)
### Configure Grafana
1. **Add Prometheus Data Source**
- Configuration → Data Sources → Add data source
- Select Prometheus
- URL: http://prometheus:9090
- Click "Save & Test"
2. **Import Dashboards**
- Dashboard → Import
- Import these popular dashboards:
- 1860: Node Exporter Full
- 10180: Proxmox via Prometheus
- 763: Disk I/O performance
### Install Exporters
**Node Exporter** (on each host to monitor):
```bash
# Download
wget https://github.com/prometheus/node_exporter/releases/download/v1.7.0/node_exporter-1.7.0.linux-amd64.tar.gz
tar xvfz node_exporter-1.7.0.linux-amd64.tar.gz
sudo cp node_exporter-1.7.0.linux-amd64/node_exporter /usr/local/bin/

# Create systemd service.
# NOTE: 'sudo cat > file <<EOF' does NOT work — the redirection is performed
# by the unprivileged shell, not by sudo. Use 'sudo tee' so the file write
# itself runs as root.
sudo tee /etc/systemd/system/node_exporter.service >/dev/null <<'EOF'
[Unit]
Description=Node Exporter
After=network.target

[Service]
Type=simple
ExecStart=/usr/local/bin/node_exporter

[Install]
WantedBy=multi-user.target
EOF

sudo systemctl daemon-reload
sudo systemctl enable node_exporter
sudo systemctl start node_exporter

# Verify
curl http://localhost:9100/metrics
```
**Proxmox VE Exporter**:
```bash
# On Proxmox node
# NOTE(review): prometheus-pve-exporter is distributed as a Python package;
# confirm the GitHub release actually ships a standalone 'pve_exporter'
# binary, or install with 'pip install prometheus-pve-exporter' instead.
wget https://github.com/prometheus-pve/prometheus-pve-exporter/releases/latest/download/pve_exporter
chmod +x pve_exporter
sudo mv pve_exporter /usr/local/bin/

# Create config.
# 'sudo cat > file' does not work — the redirection is performed by the
# unprivileged shell, not by sudo. Use 'sudo tee' instead.
sudo mkdir -p /etc/prometheus
sudo tee /etc/prometheus/pve.yml >/dev/null <<'EOF'
default:
  user: monitoring@pve
  password: your_password
  verify_ssl: false
EOF
# The file holds a plaintext password — restrict access to root only.
sudo chmod 600 /etc/prometheus/pve.yml

# Create systemd service ('sudo tee' for the same reason as above)
sudo tee /etc/systemd/system/pve_exporter.service >/dev/null <<'EOF'
[Unit]
Description=Proxmox VE Exporter
After=network.target

[Service]
Type=simple
ExecStart=/usr/local/bin/pve_exporter /etc/prometheus/pve.yml

[Install]
WantedBy=multi-user.target
EOF

sudo systemctl daemon-reload
sudo systemctl enable pve_exporter
sudo systemctl start pve_exporter
```
**Blackbox Exporter** (for HTTP/HTTPS probing):
```yaml
# Add to docker-compose.yml
blackbox:
image: prom/blackbox-exporter:latest
container_name: blackbox-exporter
restart: unless-stopped
ports:
- "9115:9115"
volumes:
- ./blackbox.yml:/etc/blackbox_exporter/config.yml
```
```yaml
# blackbox.yml
modules:
http_2xx:
prober: http
timeout: 5s
http:
valid_http_versions: ["HTTP/1.1", "HTTP/2.0"]
valid_status_codes: []
method: GET
tcp_connect:
prober: tcp
timeout: 5s
```
### Add Scrape Targets
Add to `prometheus.yml`:
```yaml
# VPS Node Exporter
- job_name: 'vps'
static_configs:
- targets: ['VPS_IP:9100']
# Proxmox Nodes
- job_name: 'proxmox'
static_configs:
- targets: ['PROXMOX_IP_1:9221', 'PROXMOX_IP_2:9221']
# HTTP Endpoints
- job_name: 'blackbox'
metrics_path: /probe
params:
module: [http_2xx]
static_configs:
- targets:
- https://service1.example.com
- https://service2.example.com
relabel_configs:
- source_labels: [__address__]
target_label: __param_target
- source_labels: [__param_target]
target_label: instance
- target_label: __address__
replacement: blackbox:9115
```
---
## Log Aggregation: Loki
**Best for**: Centralized logging from all services
### Installation
Add to `docker-compose.yml`:
```yaml
loki:
image: grafana/loki:latest
container_name: loki
restart: unless-stopped
ports:
- "3100:3100"
volumes:
- ./loki-config.yml:/etc/loki/local-config.yaml
- loki-data:/loki
promtail:
image: grafana/promtail:latest
container_name: promtail
restart: unless-stopped
volumes:
- ./promtail-config.yml:/etc/promtail/config.yml
- /var/log:/var/log
command: -config.file=/etc/promtail/config.yml
volumes:
loki-data:
```
### Configure Loki in Grafana
1. Configuration → Data Sources → Add data source
2. Select Loki
3. URL: http://loki:3100
4. Save & Test
### Query Logs
In Grafana Explore:
```logql
# All logs
{job="varlogs"}
# Filter by service
{job="varlogs"} |= "pangolin"
# Error logs
{job="varlogs"} |= "error"
```
---
## Alerting Setup
### Prometheus Alerting Rules
Create `alerts.yml`:
```yaml
groups:
  - name: infrastructure
    interval: 30s
    rules:
      # Node down
      - alert: InstanceDown
        expr: up == 0
        for: 5m
        labels:
          severity: critical
        annotations:
          summary: "Instance {{ $labels.instance }} down"
          description: "{{ $labels.instance }} has been down for more than 5 minutes"

      # High CPU (100% minus average idle over 5m)
      - alert: HighCPU
        expr: 100 - (avg by(instance) (rate(node_cpu_seconds_total{mode="idle"}[5m])) * 100) > 80
        for: 10m
        labels:
          severity: warning
        annotations:
          summary: "High CPU on {{ $labels.instance }}"
          description: "CPU usage is above 80% for 10 minutes"

      # High Memory — despite the name, this fires when AVAILABLE memory
      # drops below 10% of total
      - alert: HighMemory
        expr: (node_memory_MemAvailable_bytes / node_memory_MemTotal_bytes) * 100 < 10
        for: 5m
        labels:
          severity: warning
        annotations:
          summary: "Low memory on {{ $labels.instance }}"
          description: "Available memory is below 10%"

      # Disk Space — exclude pseudo-filesystems (tmpfs/overlay) to avoid
      # false positives from RAM-backed and container mounts
      - alert: LowDiskSpace
        expr: (node_filesystem_avail_bytes{fstype!~"tmpfs|overlay"} / node_filesystem_size_bytes{fstype!~"tmpfs|overlay"}) * 100 < 10
        for: 5m
        labels:
          severity: critical
        annotations:
          summary: "Low disk space on {{ $labels.instance }}"
          description: "Disk space is below 10% on {{ $labels.mountpoint }}"

      # SSL Certificate Expiring (requires blackbox exporter probe metrics)
      - alert: SSLCertExpiringSoon
        expr: probe_ssl_earliest_cert_expiry - time() < 86400 * 30
        for: 1h
        labels:
          severity: warning
        annotations:
          summary: "SSL certificate expiring soon"
          description: "Certificate for {{ $labels.instance }} expires in less than 30 days"
### Alertmanager Configuration
```yaml
# alertmanager.yml
global:
resolve_timeout: 5m
route:
group_by: ['alertname', 'cluster']
group_wait: 10s
group_interval: 10s
repeat_interval: 12h
receiver: 'default'
receivers:
- name: 'default'
email_configs:
- to: 'your-email@example.com'
from: 'alertmanager@example.com'
smarthost: 'smtp.example.com:587'
auth_username: 'username'
auth_password: 'password'
# Slack
- name: 'slack'
slack_configs:
- api_url: 'https://hooks.slack.com/services/YOUR/WEBHOOK/URL'
channel: '#alerts'
text: '{{ range .Alerts }}{{ .Annotations.description }}{{ end }}'
```
---
## Dashboards
### Essential Dashboards
1. **Infrastructure Overview**
- All nodes status
- Overall resource utilization
- Service uptime
2. **VPS Dashboard**
- CPU, RAM, disk, network
- Running services
- Firewall connections
3. **Proxmox Cluster**
- Cluster health
- VM/container count and status
- Resource allocation vs usage
4. **Storage**
- Disk space trends
- I/O performance
- SMART status
5. **Services**
- Uptime percentage
- Response times
- Error rates
6. **Tunnels**
- Gerbil tunnel status
- Connection count
- Bandwidth usage
### Creating Custom Dashboard
1. Grafana → Create → Dashboard
2. Add Panel → Select visualization
3. Write PromQL query
4. Configure thresholds and alerts
5. Save dashboard
---
## Maintenance
### Regular Tasks
**Daily**:
- Review alerts
- Check dashboard for anomalies
**Weekly**:
- Review resource trends
- Check for unused monitors
- Update dashboards
**Monthly**:
- Review and tune alert thresholds
- Clean up old metrics
- Update monitoring stack
- Test alerting
**Quarterly**:
- Review monitoring strategy
- Evaluate new monitoring tools
- Update documentation
### Troubleshooting
**Prometheus not scraping**:
```bash
# Check targets
curl http://localhost:9090/targets
# Check Prometheus logs
docker logs prometheus
```
**Grafana dashboard empty**:
- Verify data source connection
- Check time range
- Verify metrics exist in Prometheus
**No alerts firing**:
- Check alerting rules syntax
- Verify Alertmanager connection
- Test alert evaluation
---
## Monitoring Checklist
### Initial Setup
- [ ] Choose monitoring tier (Basic/Intermediate/Advanced)
- [ ] Deploy monitoring stack
- [ ] Install exporters on all hosts
- [ ] Configure Grafana data sources
- [ ] Import/create dashboards
- [ ] Set up alerting
- [ ] Configure notification channels
- [ ] Test alerts
### Monitors to Configure
- [ ] VPS uptime and resources
- [ ] Proxmox node resources
- [ ] OMV storage capacity
- [ ] All public HTTP(S) endpoints
- [ ] Gerbil tunnel status
- [ ] SSL certificate expiration
- [ ] Backup job success
- [ ] Network connectivity
- [ ] Service-specific metrics
### Alerts to Configure
- [ ] Service down (>5 min)
- [ ] High CPU (>80% for 10 min)
- [ ] High memory (>90% for 5 min)
- [ ] Low disk space (<10%)
- [ ] SSL cert expiring (<30 days)
- [ ] Backup failure
- [ ] Tunnel disconnected
---
## Cost Considerations
### Free Tier Options
- **Uptime Kuma**: Fully free, self-hosted
- **Prometheus + Grafana**: Free, self-hosted
- **Grafana Cloud**: Free tier available (limited)
### Paid Options (if needed)
- **Datadog**: $15/host/month
- **New Relic**: $99/month+
- **Better Uptime**: $10/month+
**Recommendation**: Start with free self-hosted tools, upgrade only if needed.
---
**Last Updated**: _____________
**Next Review**: _____________
**Version**: 1.0

View File

@@ -0,0 +1,70 @@
# Morning Reminder
## Questions to Ask Claude Code
### GitHub Sync for Multiple Claude Code Sessions
**Question:** How can I use GitHub to synchronize my Claude Code sessions across different machines?
**Context:**
- I have Claude Code running on multiple machines:
- VPS (ubuntu-24.04 - 66.63.182.168)
- Mac Pro 2013 (Sequoia via Open Core Legacy Patcher)
- Potentially other development machines
**What I want to understand:**
- How to keep this infrastructure repository in sync across all machines
- Best practices for Git workflow with Claude Code on multiple machines
- How to ensure changes made in one Claude Code session are available in others
- Whether I should use branches for machine-specific work or work directly on main
- How to handle potential merge conflicts when working from different locations
- Whether Claude Code has any built-in features for multi-machine workflows
**Use Cases:**
1. Start work on VPS, continue on Mac Pro
2. Document infrastructure changes from whichever machine I'm on
3. Run scripts and update documentation in the same repo from different locations
4. Ensure consistency across all documentation and automation scripts
---
## Git Workflow for Multiple Machines (Quick Reference)
**On laptop (first time):**
```bash
git clone https://github.com/FredN9MRQ/infrastructure.git
cd infrastructure
claude # Start Claude Code (install first if needed)
```
**When switching between machines:**
**Before leaving current machine:**
```bash
git add . # Stage all changes
git commit -m "Description here" # Save snapshot
git push # Upload to GitHub
```
**On new machine:**
```bash
cd infrastructure
git pull # Download latest changes
claude # Continue working
```
**All your work is now synced!** Any changes on desktop → laptop → VPS stay in sync.
---
## Today's Priority Tasks
1. **Configure WireGuard tunnel between UCG Ultra and VPS** ← CRITICAL - services are down
2. Continue filling out infrastructure-audit.md (Proxmox, network config)
3. Plan IP addressing scheme and DHCP pool boundaries
---
**Created:** 2025-11-14 (midnight)
**Status:** ✅ All changes committed and pushed to GitHub - ready for laptop!
**Read this first thing in the morning!**

View File

@@ -0,0 +1,744 @@
# MQTT Broker Setup Guide (Mosquitto)
This guide covers deploying a Mosquitto MQTT broker for Home Assistant and other IoT integrations on your Proxmox infrastructure.
## What is MQTT?
MQTT (Message Queuing Telemetry Transport) is a lightweight messaging protocol designed for IoT devices. It enables efficient publish/subscribe communication between devices and services like Home Assistant.
**Common Uses:**
- Smart home device communication (sensors, switches, lights)
- ESP8266/ESP32 devices via ESPHome
- Zigbee2MQTT and Z-Wave integration
- Custom automation scripts
- Inter-service messaging
## Deployment Overview
**Service:** Mosquitto MQTT Broker
**Location:** pve-router (i5 Proxmox node at 10.0.10.2)
**IP Address:** 10.0.10.26
**Deployment Method:** LXC Container (lightweight, efficient)
## Prerequisites
- Access to Proxmox web interface (https://10.0.10.2:8006)
- Ubuntu/Debian LXC template downloaded on Proxmox
- DHCP reservation configured on UCG Ultra for 10.0.10.26
## Part 1: Create LXC Container
### 1.1 Download Container Template
In Proxmox web interface on pve-router:
1. Select **pve-router** node
2. Click **local (pve-router)** storage
3. Click **CT Templates**
4. Click **Templates** button
5. Search for and download: **ubuntu-22.04-standard** or **debian-12-standard**
### 1.2 Create Container
1. Click **Create CT** (top right)
2. Configure as follows:
**General Tab:**
- Node: `pve-router`
- CT ID: (next available, e.g., 105)
- Hostname: `mosquitto`
- Password: (set a strong root password)
- SSH public key: (optional, recommended)
**Template Tab:**
- Storage: `local`
- Template: `ubuntu-22.04-standard` (or debian-12)
**Disks Tab:**
- Storage: `local-lvm` (or your preferred storage)
- Disk size: `2 GiB` (plenty for MQTT logs and config)
**CPU Tab:**
- Cores: `1`
**Memory Tab:**
- Memory (MiB): `512`
- Swap (MiB): `512`
**Network Tab:**
- Bridge: `vmbr0`
- IPv4: `Static`
- IPv4/CIDR: `10.0.10.26/24`
- Gateway: `10.0.10.1`
- IPv6: `SLAAC` (or disable if not using IPv6)
**DNS Tab:**
- DNS domain: `home` (or leave blank)
- DNS servers: `10.0.10.1` (UCG Ultra)
3. Click **Finish** (uncheck "Start after created" - we'll configure first)
### 1.3 Configure UCG Ultra DHCP Reservation (Optional Backup)
Even though we're using static IP, create a DHCP reservation as backup:
1. Access UCG Ultra at https://10.0.10.1
2. Settings → Networks → LAN → DHCP
3. Add reservation:
- **IP Address:** 10.0.10.26
- **MAC Address:** (get from Proxmox container network tab after first start)
- **Hostname:** mosquitto
- **Description:** MQTT Broker (Mosquitto)
## Part 2: Install and Configure Mosquitto
### 2.1 Start Container and Login
In Proxmox:
1. Select the mosquitto container
2. Click **Start**
3. Click **Console** (or SSH via `ssh root@10.0.10.26`)
### 2.2 Update System
```bash
# Update package lists
apt update
# Upgrade existing packages
apt upgrade -y
# Install basic utilities
apt install -y curl wget nano htop
```
### 2.3 Install Mosquitto
```bash
# Install Mosquitto broker and clients
apt install -y mosquitto mosquitto-clients
# Enable and start service
systemctl enable mosquitto
systemctl start mosquitto
# Verify it's running
systemctl status mosquitto
```
### 2.4 Configure Mosquitto
**Create configuration file:**
```bash
# Backup original config
cp /etc/mosquitto/mosquitto.conf /etc/mosquitto/mosquitto.conf.bak
# Create new configuration
nano /etc/mosquitto/mosquitto.conf
```
**Basic Configuration:**
```conf
# /etc/mosquitto/mosquitto.conf
# Basic MQTT broker configuration
# Listener settings
listener 1883
protocol mqtt
# Allow anonymous connections (for initial testing only)
allow_anonymous true
# Persistence settings
persistence true
persistence_location /var/lib/mosquitto/
# Logging
log_dest file /var/log/mosquitto/mosquitto.log
log_dest stdout
log_type error
log_type warning
log_type notice
log_type information
# Connection, protocol, and logging settings
connection_messages true
log_timestamp true
```
**Save and exit** (Ctrl+X, Y, Enter)
### 2.5 Restart Mosquitto
```bash
systemctl restart mosquitto
systemctl status mosquitto
```
## Part 3: Secure MQTT with Authentication
### 3.1 Create MQTT User
```bash
# Create password file for user 'homeassistant'
mosquitto_passwd -c /etc/mosquitto/passwd homeassistant
# You'll be prompted to enter a password twice
# Save this password - you'll need it for Home Assistant
```
**Add additional users:**
```bash
# Add more users (without -c flag to avoid overwriting)
mosquitto_passwd /etc/mosquitto/passwd esphome
mosquitto_passwd /etc/mosquitto/passwd automation
```
### 3.2 Update Configuration for Authentication
Edit config:
```bash
nano /etc/mosquitto/mosquitto.conf
```
**Change this line:**
```conf
allow_anonymous true
```
**To:**
```conf
allow_anonymous false
password_file /etc/mosquitto/passwd
```
**Save and restart:**
```bash
systemctl restart mosquitto
systemctl status mosquitto
```
## Part 4: Testing MQTT
### 4.1 Test Locally on Container
**Terminal 1 - Subscribe to test topic:**
```bash
mosquitto_sub -h localhost -t test/topic -u homeassistant -P YOUR_PASSWORD
```
**Terminal 2 - Publish message:**
```bash
mosquitto_pub -h localhost -t test/topic -m "Hello MQTT!" -u homeassistant -P YOUR_PASSWORD
```
You should see "Hello MQTT!" appear in Terminal 1.
### 4.2 Test from Another Machine
From any computer on your network:
```bash
# Subscribe
mosquitto_sub -h 10.0.10.26 -t test/topic -u homeassistant -P YOUR_PASSWORD
# Publish (from another terminal)
mosquitto_pub -h 10.0.10.26 -t test/topic -m "Remote test" -u homeassistant -P YOUR_PASSWORD
```
## Part 5: Integrate with Home Assistant
### 5.1 Add MQTT Integration
1. Open Home Assistant at http://10.0.10.24:8123
2. Go to **Settings → Devices & Services**
3. Click **Add Integration**
4. Search for **MQTT**
5. Configure:
- **Broker:** `10.0.10.26`
- **Port:** `1883`
- **Username:** `homeassistant`
- **Password:** (password you set earlier)
- Leave other settings as default
6. Click **Submit**
### 5.2 Verify Connection
In Home Assistant:
1. Go to **Settings → Devices & Services**
2. Click on **MQTT**
3. You should see "Connected"
### 5.3 Test MQTT in Home Assistant
1. Developer Tools → **MQTT**
2. **Listen to topic:** `homeassistant/#`
3. Click **Start Listening**
4. In **Publish** section:
- **Topic:** `homeassistant/test`
- **Payload:** `{"message": "Hello from HA"}`
- Click **Publish**
5. You should see the message appear in the listen section
## Part 6: ESPHome Integration
If you're using ESPHome (10.0.10.28):
**Add to your ESPHome device YAML:**
```yaml
mqtt:
broker: 10.0.10.26
port: 1883
username: esphome
password: YOUR_ESPHOME_PASSWORD
discovery: true
discovery_prefix: homeassistant
```
This enables ESPHome devices to publish data to Home Assistant via MQTT.
## Part 7: Advanced Configuration (Optional)
### 7.1 Enable TLS/SSL Encryption
**Generate self-signed certificate (for internal use):**
```bash
# Install certbot or use openssl
apt install -y openssl
# Create directory for certificates
mkdir -p /etc/mosquitto/certs
cd /etc/mosquitto/certs
# Generate CA key and certificate
openssl req -new -x509 -days 3650 -extensions v3_ca -keyout ca.key -out ca.crt
# Generate server key and certificate
openssl genrsa -out server.key 2048
openssl req -new -out server.csr -key server.key
openssl x509 -req -in server.csr -CA ca.crt -CAkey ca.key -CAcreateserial -out server.crt -days 3650
# Set permissions
chmod 600 /etc/mosquitto/certs/*.key
chown mosquitto:mosquitto /etc/mosquitto/certs/*
```
**Update mosquitto.conf:**
```conf
# Add TLS listener
listener 8883
protocol mqtt
cafile /etc/mosquitto/certs/ca.crt
certfile /etc/mosquitto/certs/server.crt
keyfile /etc/mosquitto/certs/server.key
```
Restart: `systemctl restart mosquitto`
### 7.2 WebSocket Support (for Browser Clients)
**Add to mosquitto.conf:**
```conf
# WebSocket listener
listener 9001
protocol websockets
```
Useful for web-based MQTT clients or browser automation.
### 7.3 Access Control Lists (ACLs)
**Create ACL file:**
```bash
nano /etc/mosquitto/acl
```
**Example ACL:**
```conf
# homeassistant can access everything
user homeassistant
topic readwrite #
# esphome can only publish sensor data
user esphome
topic write sensors/#
topic read homeassistant/status
# automation user for scripts
user automation
topic readwrite automation/#
```
**Update mosquitto.conf:**
```conf
acl_file /etc/mosquitto/acl
```
Restart: `systemctl restart mosquitto`
## Part 8: Monitoring and Maintenance
### 8.1 Check Logs
```bash
# Real-time log monitoring
tail -f /var/log/mosquitto/mosquitto.log
# Check systemd logs
journalctl -u mosquitto -f
# Last 100 lines
journalctl -u mosquitto -n 100
```
### 8.2 Monitor MQTT Traffic
```bash
# Subscribe to all topics (careful in production!)
mosquitto_sub -h localhost -t '#' -u homeassistant -P YOUR_PASSWORD -v
# Monitor specific namespace
mosquitto_sub -h localhost -t 'homeassistant/#' -u homeassistant -P YOUR_PASSWORD -v
```
### 8.3 Container Resource Monitoring
```bash
# Check CPU/memory usage
htop
# Check disk usage
df -h
# Check mosquitto process
ps aux | grep mosquitto
```
### 8.4 Backup Configuration
```bash
# Create backup script
nano /root/backup-mqtt.sh
```
**Backup script:**
```bash
#!/bin/bash
# MQTT configuration backup: archives Mosquitto config, credentials, ACLs,
# and TLS certs into /root/mqtt-backups, keeping the 10 most recent archives.
set -euo pipefail

BACKUP_DIR="/root/mqtt-backups"
DATE=$(date +%Y%m%d-%H%M%S)
mkdir -p "$BACKUP_DIR"

# Back up only the paths that actually exist — acl and certs/ are optional,
# and tar would otherwise exit non-zero on the missing ones.
sources=()
for path in /etc/mosquitto/mosquitto.conf /etc/mosquitto/passwd \
            /etc/mosquitto/acl /etc/mosquitto/certs/; do
  [[ -e "$path" ]] && sources+=("$path")
done
tar -czf "$BACKUP_DIR/mqtt-config-$DATE.tar.gz" "${sources[@]}"

# Keep only the 10 most recent backups. Filenames are timestamped, so
# reverse lexical order == newest first.
ls -1 "$BACKUP_DIR"/mqtt-config-*.tar.gz | sort -r | tail -n +11 | xargs -r rm --

echo "Backup completed: mqtt-config-$DATE.tar.gz"
```
Make executable: `chmod +x /root/backup-mqtt.sh`
**Add to crontab:**
```bash
crontab -e
```
Add: `0 2 * * 0 /root/backup-mqtt.sh` (weekly at 2 AM Sunday)
## Part 9: Firewall Configuration
### 9.1 UFW Firewall (Optional but Recommended)
```bash
# Install UFW
apt install -y ufw
# Allow SSH (IMPORTANT - don't lock yourself out!)
ufw allow 22/tcp
# Allow MQTT
ufw allow 1883/tcp comment 'MQTT'
# If using TLS
ufw allow 8883/tcp comment 'MQTT TLS'
# If using WebSockets
ufw allow 9001/tcp comment 'MQTT WebSocket'
# Enable firewall
ufw enable
# Check status
ufw status
```
### 9.2 UCG Ultra Firewall
The UCG Ultra should allow internal LAN traffic by default. No additional rules needed for 10.0.10.0/24 communication.
## Part 10: Troubleshooting
### MQTT Service Won't Start
```bash
# Check for syntax errors in config
mosquitto -c /etc/mosquitto/mosquitto.conf -v
# Check permissions
ls -la /etc/mosquitto/
ls -la /var/lib/mosquitto/
```
### Cannot Connect from Home Assistant
**Check network connectivity:**
```bash
# From Home Assistant container/VM
ping 10.0.10.26
# Check if port is open
nc -zv 10.0.10.26 1883
```
**Verify Mosquitto is listening:**
```bash
# On MQTT container
ss -tlnp | grep 1883
```
### Authentication Failures
```bash
# Check password file exists and has correct permissions
ls -la /etc/mosquitto/passwd
# Test credentials locally
mosquitto_sub -h localhost -t test -u homeassistant -P YOUR_PASSWORD -d
```
**Check logs for specific error:**
```bash
tail -f /var/log/mosquitto/mosquitto.log
```
### High Resource Usage
MQTT is very lightweight. If seeing high CPU/RAM:
```bash
# Check for message loops or excessive traffic
mosquitto_sub -h localhost -t '#' -u homeassistant -P YOUR_PASSWORD -v | head -100
# Check connection count
ss -tn | grep :1883 | wc -l
```
## Part 11: Integration Examples
### 11.1 Basic Sensor in Home Assistant
```yaml
# configuration.yaml
mqtt:
sensor:
- name: "Temperature Sensor"
state_topic: "home/sensor/temperature"
unit_of_measurement: "°F"
value_template: "{{ value_json.temperature }}"
```
**Publish test data:**
```bash
mosquitto_pub -h 10.0.10.26 -t home/sensor/temperature -m '{"temperature":72.5}' -u homeassistant -P YOUR_PASSWORD
```
### 11.2 MQTT Switch in Home Assistant
```yaml
# configuration.yaml
mqtt:
switch:
- name: "Test Switch"
state_topic: "home/switch/test"
command_topic: "home/switch/test/set"
payload_on: "ON"
payload_off: "OFF"
```
### 11.3 ESP8266/ESP32 Example (Arduino)
```cpp
#include <ESP8266WiFi.h>
#include <PubSubClient.h>

const char* ssid = "YourWiFi";
const char* password = "YourPassword";
const char* mqtt_server = "10.0.10.26";
const char* mqtt_user = "esphome";
const char* mqtt_password = "YOUR_PASSWORD";

WiFiClient espClient;
PubSubClient client(espClient);

void setup() {
  // Join the WiFi network first — without WiFi.begin() the ssid/password
  // above are never used and MQTT can never connect.
  WiFi.begin(ssid, password);
  while (WiFi.status() != WL_CONNECTED) {
    delay(500);
  }
  client.setServer(mqtt_server, 1883);
}

void loop() {
  if (!client.connected()) {
    client.connect("ESP8266Client", mqtt_user, mqtt_password);
  }
  client.loop();  // service MQTT keepalive and incoming messages
  client.publish("home/sensor/esp", "Hello from ESP8266");
  delay(5000);
}
```
## Part 12: Performance Tuning
### 12.1 Connection Limits
Edit `/etc/mosquitto/mosquitto.conf`:
```conf
# Maximum simultaneous client connections
max_connections 100
# Maximum QoS 1 and 2 messages in flight
max_inflight_messages 20
# Maximum queued messages
max_queued_messages 1000
```
### 12.2 Memory Optimization
```conf
# Limit memory usage
memory_limit 256M
# Message size limit (bytes)
message_size_limit 1024000
```
## Network Diagram
```
┌─────────────────────────────────────────────┐
│ UCG Ultra (10.0.10.1) │
│ Gateway / DNS / DHCP │
└─────────────────┬───────────────────────────┘
┌───────────┼───────────────┬─────────────┐
│ │ │ │
┌─────▼─────┐ ┌──▼──────────┐ ┌──▼──────┐ ┌───▼─────────┐
│ Home │ │ MQTT Broker │ │ ESPHome │ │ Proxmox │
│ Assistant │ │ 10.0.10.26 │ │.28 │ │ Nodes │
│ .24 │ │ Mosquitto │ │ │ │ .2, .3, .4 │
└───────────┘ └─────────────┘ └─────────┘ └─────────────┘
│ │ │
└──────────────┴──────────────┘
MQTT Protocol (Port 1883)
Topics: homeassistant/*, sensors/*
```
## Quick Reference
### Common Commands
```bash
# Service management
systemctl status mosquitto
systemctl restart mosquitto
systemctl stop mosquitto
systemctl start mosquitto
# View logs
journalctl -u mosquitto -f
tail -f /var/log/mosquitto/mosquitto.log
# User management
mosquitto_passwd /etc/mosquitto/passwd USERNAME
# Testing
mosquitto_sub -h 10.0.10.26 -t '#' -u USER -P PASS -v
mosquitto_pub -h 10.0.10.26 -t test -m "message" -u USER -P PASS
```
### Configuration Files
- **Main config:** `/etc/mosquitto/mosquitto.conf`
- **Passwords:** `/etc/mosquitto/passwd`
- **ACLs:** `/etc/mosquitto/acl`
- **Logs:** `/var/log/mosquitto/mosquitto.log`
- **Data:** `/var/lib/mosquitto/`
### Network Details
- **IP:** 10.0.10.26
- **Hostname:** mosquitto
- **MQTT Port:** 1883 (standard)
- **MQTT TLS Port:** 8883 (if configured)
- **WebSocket Port:** 9001 (if configured)
## Related Documentation
- [IP-ALLOCATION.md](IP-ALLOCATION.md) - Network IP plan (MQTT at .26)
- [SERVICES.md](SERVICES.md) - Service inventory
- [infrastructure-audit.md](infrastructure-audit.md) - Current infrastructure state
- [Home Assistant Documentation](https://www.home-assistant.io/integrations/mqtt/)
- [Mosquitto Documentation](https://mosquitto.org/documentation/)
## Security Best Practices
1. **Never use `allow_anonymous true` in production**
2. **Use strong passwords** for MQTT users (16+ characters)
3. **Enable TLS/SSL** if accessing over internet or untrusted networks
4. **Use ACLs** to limit user permissions
5. **Regular backups** of configuration and password files
6. **Monitor logs** for suspicious connection attempts
7. **Keep Mosquitto updated:** `apt update && apt upgrade mosquitto`
## Future Enhancements
- [ ] Configure TLS encryption for external access
- [ ] Set up Mosquitto bridge to cloud MQTT broker (if needed)
- [ ] Integrate with Authentik SSO (when deployed at 10.0.10.21)
- [ ] Add to Prometheus/Grafana monitoring (when deployed at 10.0.10.25)
- [ ] Configure message retention policies
- [ ] Set up MQTT-based automation scripts
---
**Last Updated:** 2025-11-18
**Status:** Ready for deployment
**Priority:** Medium (required for Home Assistant IoT integrations)
**Deployment Location:** pve-router (10.0.10.2)
**Resource Requirements:** 1 CPU core, 512MB RAM, 2GB storage

View File

@@ -0,0 +1,181 @@
# OpenClaw Automation Prompts
**Setup Guide for Fred's OpenClaw Desktop**
**Date:** 2026-01-31
---
## Overview
This document contains all 4 automation workflows to configure in your OpenClaw desktop app.
**Quick Start:** Copy each prompt below and paste it into a new conversation in OpenClaw. The agent will understand the scheduling requirements from the prompt.
---
## 1. Morning Brief ☀️
**Schedule:** Every morning at 8:00 AM (your local time)
**Prompt:**
```
I want you to send me a morning brief every morning at 8am my time. I want this morning brief to include:
- The local weather for the day
- A list of a few trending YouTube videos about my interests
- A list of tasks I need to get done today based on my todo list
- Tasks that you think you can do for me today that will be helpful based on what you know about me
- A list of trending stories based on my interests
- Recommendations you can make for me that will make today super productive
```
**What it does:**
- Provides daily weather forecast
- Curates YouTube content based on your interests
- Reviews your todo list and suggests priorities
- Identifies automation opportunities
- Delivers relevant news stories
- Offers productivity recommendations
---
## 2. Proactive Coder 🌙
**Schedule:** Every night at 11:00 PM (while you sleep)
**Prompt:**
```
I am a 1 man business. I work from the moment I wake up to the moment I go to sleep. I need an employee taking as much off my plate and being as proactive as possible.
Please take everything you know about me and just do work you think would make my life easier or improve my business and make me money. I want to wake up every morning and be like "wow, you got a lot done while I was sleeping."
Don't be afraid to monitor my business and build things that would help improve our workflow. Just create PRs for me to review, don't push anything live. I'll test and commit. Every night when I go to bed, build something cool I can test. Schedule time to work every night at 11pm.
```
**What it does:**
- Works on improvements while you sleep
- Creates pull requests for code changes (never pushes directly)
- Monitors infrastructure and suggests optimizations
- Builds tools to improve your workflows
- Provides morning summary of overnight work
- Focuses on revenue-generating improvements
---
## 3. Second Brain 🧠
**Type:** One-time project setup (not scheduled)
**Prompt:**
```
I want you to build me a 2nd brain. This should be a NextJS app that shows a list of documents you create as we work together in a nice document viewer that feels like a mix of Obsidian and Linear.
I want you to create a folder where all the documents in that folder are viewable in this 2nd brain. Update your memories/skills so that as we talk every day, you create documents in that 2nd brain that explore some of the more important concepts we discuss.
You should also create daily journal entries that record from a high level all our daily discussions.
```
**What it does:**
- Creates a NextJS document viewer app
- Auto-generates concept exploration documents from conversations
- Creates daily journal entries automatically
- Provides Obsidian/Linear-style interface
- Stores knowledge base in organized folder structure
- Enables search and navigation of your knowledge
---
## 4. Afternoon Research Report 📚
**Schedule:** Every afternoon at 2:00 PM
**Prompt:**
```
I want a daily research report sent to me every afternoon. Based on what you know about me I want you to research and give me a report about a concept that would improve me, processes that would improve our working relationship, or anything else that would be helpful for me.
Examples would be:
- Deep dives on concepts I'm interested in like machine learning
- A new workflow we can implement together that will improve our productivity
```
**What it does:**
- Delivers daily research on topics of interest
- Provides deep dives on technical concepts
- Suggests workflow improvements
- Researches business optimization strategies
- Includes sources and citations
- Tailored to your current projects and interests
---
## How to Configure
### Method 1: Desktop App (Recommended)
1. Open OpenClaw desktop app on your iMac
2. Start a new conversation
3. Paste one of the prompts above
4. The agent will understand the scheduling from the prompt
5. Repeat for each automation
### Method 2: CLI (Advanced)
From the Gateway container:
```bash
ssh root@10.0.10.3
pct exec 130 -- openclaw cron add "0 8 * * *" "Morning Brief"
pct exec 130 -- openclaw cron add "0 23 * * *" "Proactive Coder"
pct exec 130 -- openclaw cron add "0 14 * * *" "Afternoon Research"
```
### Method 3: Web Dashboard
Access at: https://openclaw.nianticbooks.home
---
## Tips for Best Results
1. **Morning Brief:**
- Share your interests and preferences with the agent
- Connect your todo list (n8n, task manager, etc.)
- Provide weather location preferences
2. **Proactive Coder:**
- Grant the agent access to your git repositories
- Set up PR notifications
- Review and approve changes each morning
3. **Second Brain:**
- This is a one-time setup, not a scheduled task
- Once built, it runs automatically during conversations
- Check the document viewer regularly to review captured knowledge
4. **Afternoon Research:**
- Mention topics you're curious about
- The agent learns your interests over time
- Use research reports as learning materials
---
## Gateway Configuration
**Gateway URL:** ws://10.0.10.28:18789
**Dashboard:** https://openclaw.nianticbooks.home
**Container:** CT 130 on main-pve (10.0.10.28)
**Desktop Client:** Fred's iMac (10.0.10.11 Ethernet / 10.0.10.144 Wi-Fi)
---
## Resources
- **Full Setup Guide:** [OPENCLAW-SETUP.md](OPENCLAW-SETUP.md)
- **Quick Start:** [OPENCLAW-QUICKSTART.md](OPENCLAW-QUICKSTART.md)
- **Service Docs:** [SERVICES.md](SERVICES.md)
- **Network Info:** [IP-ALLOCATION.md](IP-ALLOCATION.md)
- **Official Docs:** https://docs.openclaw.ai
- **GitHub:** https://github.com/openclaw/openclaw
- **YouTube Tutorials:**
- Introduction: https://www.youtube.com/watch?v=tEjg56ZYKJo
- Tutorial with Prompts: https://www.youtube.com/watch?v=b-l9sGh1-UY
---
**Last Updated:** 2026-01-31
**Status:** Ready to configure

View File

@@ -0,0 +1,242 @@
# OpenClaw Quick Start Guide
**Container**: CT 130 (10.0.10.28)
**Auth Token**: `0b0259af04929be8424b51b6520b4bb48c70d0f595dde9fb6f4c3d5d6410a9fa` — ⚠️ stored here in plaintext; rotate this token if the document is ever shared or committed outside the homelab
**Status**: Gateway running and accessible on LAN
## Gateway Status
✅ **Onboarding Complete**
✅ **Gateway Running** (check with `pct exec 130 -- pgrep -f 'openclaw gateway'`)
✅ **LAN Accessible** at ws://10.0.10.28:18789
✅ **Model Configured**: claude-sonnet-4-5
✅ **Hooks Enabled**: boot-md, command-logger, session-memory
✅ **Commands Enabled**: bash (native commands enabled)
✅ **User Profile**: Created at `/root/USER.md` with Fred's preferences
### Current Configuration
- **Bind Address**: LAN (0.0.0.0:18789)
- **Auth Mode**: Token-based
- **Tailscale**: Disabled (using direct LAN access)
- **Dashboard**: <http://10.0.10.28:18789>
### Verify Gateway Status
```bash
# Check if Gateway is running
pct exec 130 -- pgrep -f "openclaw gateway"
# Test connectivity from Proxmox host
curl -I http://10.0.10.28:18789
# Or from your local machine
curl -I http://10.0.10.28:18789
```
## Manual Start/Stop Commands
If you need to manually control the Gateway:
```bash
# Start Gateway in foreground (for testing)
ssh root@10.0.10.3 "pct exec 130 -- openclaw gateway"
# Start Gateway in background
ssh root@10.0.10.3 "pct exec 130 -- nohup openclaw gateway > /var/log/openclaw-gateway.log 2>&1 &"
# Stop Gateway
ssh root@10.0.10.3 "pct exec 130 -- pkill -f 'openclaw gateway'"
# View logs
ssh root@10.0.10.3 "pct exec 130 -- tail -f /var/log/openclaw-gateway.log"
```
## Systemd Service (If Installed)
If the onboarding installed a systemd service:
```bash
# Check status
ssh root@10.0.10.3 "pct exec 130 -- systemctl status openclaw-gateway"
# Start/Stop/Restart
ssh root@10.0.10.3 "pct exec 130 -- systemctl start openclaw-gateway"
ssh root@10.0.10.3 "pct exec 130 -- systemctl stop openclaw-gateway"
ssh root@10.0.10.3 "pct exec 130 -- systemctl restart openclaw-gateway"
# Enable auto-start on boot
ssh root@10.0.10.3 "pct exec 130 -- systemctl enable openclaw-gateway"
# View logs
ssh root@10.0.10.3 "pct exec 130 -- journalctl -u openclaw-gateway -f"
```
## Installing Desktop Client (iMac)
After Gateway is running:
1. **Download OpenClaw for macOS**:
- Visit: https://openclaw.bot
- Or: https://github.com/openclaw/openclaw/releases
- Download the `.dmg` installer
2. **Install**:
- Open the `.dmg` file
- Drag OpenClaw to Applications folder
- Open OpenClaw from Applications
3. **Configure Connection**:
- Gateway URL: `ws://10.0.10.28:18789`
- Auth Token: `0b0259af04929be8424b51b6520b4bb48c70d0f595dde9fb6f4c3d5d6410a9fa`
- Or use the auto-discovery (should find Gateway on LAN)
4. **Device Pairing**:
- iMac should auto-pair (local network)
- Check Gateway dashboard to approve pairing if needed
## Configuring Your 4 Automation Workflows
Once desktop client is connected, configure these workflows:
### 1. Morning Brief (8:00 AM)
Create a new agent or scheduled task with this prompt:
```
I want you to send me a morning brief every morning at 8am my time. I want this morning brief to include:
- The local weather for the day
- A list of a few trending YouTube videos about my interests
- A list of tasks I need to get done today based on my todo list
- Tasks that you think you can do for me today that will be helpful based on what you know about me
- A list of trending stories based on my interests
- Recommendations you can make for me that will make today super productive
```
**Schedule**: Daily at 8:00 AM (use cron: `0 8 * * *`)
### 2. Proactive Coder (11:00 PM)
Create a scheduled coding agent:
```
I am a 1 man business. I work from the moment I wake up to the moment I go to sleep. I need an employee taking as much off my plate and being as proactive as possible.
Please take everything you know about me and just do work you think would make my life easier or improve my business and make me money. I want to wake up every morning and be like "wow, you got a lot done while I was sleeping."
Don't be afraid to monitor my business and build things that would help improve our workflow. Just create PRs for me to review, don't push anything live. I'll test and commit. Every night when I go to bed, build something cool I can test. Schedule time to work every night at 11pm.
```
**Schedule**: Daily at 11:00 PM (use cron: `0 23 * * *`)
**Tools Enabled**: file, bash, git (for creating PRs)
### 3. Second Brain (NextJS Document Viewer)
Initial setup prompt:
```
I want you to build me a 2nd brain. This should be a NextJS app that shows a list of documents you create as we work together in a nice document viewer that feels like a mix of Obsidian and Linear.
I want you to create a folder where all the documents in that folder are viewable in this 2nd brain. Update your memories/skills so that as we talk every day, you create documents in that 2nd brain that explore some of the more important concepts we discuss.
You should also create daily journal entries that record from a high level all our daily discussions.
```
**This is a project task, not a scheduled task**
**Agent should**: Create the NextJS app, set up document folder, configure auto-documentation
### 4. Afternoon Research Report
Create a scheduled research agent:
```
I want a daily research report sent to me every afternoon. Based on what you know about me I want you to research and give me a report about a concept that would improve me, processes that would improve our working relationship, or anything else that would be helpful for me.
Examples would be:
- Deep dives on concepts I'm interested in like machine learning
- A new workflow we can implement together that will improve our productivity
```
**Schedule**: Daily at 2:00 PM (use cron: `0 14 * * *`)
**Tools Enabled**: web search, web fetch
## Troubleshooting
### Gateway won't start
```bash
# Check config
ssh root@10.0.10.3 "pct exec 130 -- openclaw doctor"
# Fix configuration issues
ssh root@10.0.10.3 "pct exec 130 -- openclaw doctor --fix"
# Check logs
ssh root@10.0.10.3 "pct exec 130 -- cat /var/log/openclaw-gateway.log"
```
### Can't connect from iMac
```bash
# Verify Gateway is listening on LAN interface
ssh root@10.0.10.3 "pct exec 130 -- netstat -tlnp | grep 18789"
# Test from Proxmox host
ssh root@10.0.10.3 "curl -I http://10.0.10.28:18789"
# Check firewall (container shouldn't have firewall by default)
ssh root@10.0.10.3 "pct exec 130 -- iptables -L"
```
### Auth token issues
```bash
# Verify token is set
ssh root@10.0.10.3 "pct exec 130 -- openclaw config get gateway.auth.token"
# Reset token if needed
ssh root@10.0.10.3 "pct exec 130 -- openclaw config set gateway.auth.token NEW_TOKEN_HERE"
```
## Configuration Files
**Main config**: `/root/.openclaw/openclaw.json`
**Gateway state**: `/root/.openclaw/gateway/`
**Agent configs**: `/root/.openclaw/agents/`
**Credentials**: `/root/.openclaw/credentials/`
## Backup Important Files
Regularly backup the OpenClaw state:
```bash
# From Proxmox host
ssh root@10.0.10.3 "pct exec 130 -- tar -czf /tmp/openclaw-backup.tar.gz /root/.openclaw"
ssh root@10.0.10.3 "pct exec 130 -- mv /tmp/openclaw-backup.tar.gz /mnt/omv-backups/openclaw/"
```
## Next Steps After Setup
1. ✅ Complete onboarding
2. ✅ Verify Gateway is running and accessible
3. Install desktop client on iMac
4. Configure device pairing
5. Set up 4 automation workflows
6. Test voice integration on iMac
7. Configure Home Assistant integration
8. Set up n8n webhook integration
9. Test all scheduled tasks
## Resources
- **Full Setup Guide**: [OPENCLAW-SETUP.md](OPENCLAW-SETUP.md)
- **Service Docs**: [SERVICES.md](SERVICES.md)
- **Network Info**: [IP-ALLOCATION.md](IP-ALLOCATION.md)
- **Official Docs**: https://docs.openclaw.ai
- **GitHub**: https://github.com/openclaw/openclaw
- **Security Guide**: https://docs.openclaw.ai/gateway/security
---
**Status**: Gateway running and ready for iMac client connection
**Last Updated**: 2026-01-31

View File

@@ -0,0 +1,329 @@
# OpenClaw AI Agent Setup
**Last Updated:** 2026-01-30
**Status:** Planned
## Overview
OpenClaw is an open-source AI agent coordination platform that enables multi-agent AI workflows with voice integration, smart home control, and cross-platform messaging.
**Architecture:**
- **Gateway (Control Plane)**: LXC container on main-pve (CT 130, 10.0.10.28)
- **Desktop Client**: macOS app on Fred's iMac (10.0.10.11)
- **Communication**: WebSocket on port 18789
## Infrastructure Details
### Gateway Server (LXC Container)
**Container Specifications:**
- **Host**: main-pve (10.0.10.3)
- **Container ID**: CT 130
- **IP Address**: 10.0.10.28
- **Hostname**: openclaw
- **Resources**: 2 vCPUs, 4GB RAM, 16GB storage
- **OS**: Debian 12 (Bookworm)
- **Port**: 18789 (WebSocket/HTTP)
**Service Details:**
- **Service Name**: `openclaw-gateway` (systemd)
- **Configuration**: `~/.openclaw/` directory
- **State Storage**: `~/.openclaw/` (backed up to OMV)
- **Node Version**: ≥22.12.0 LTS
- **Package Manager**: pnpm 10.23.0+
### Desktop Client (iMac)
**Hardware:**
- **Device**: Late 2013 iMac
- **CPU**: 3.2GHz Quad Core Intel i5
- **GPU**: Nvidia GeForce GT 755M (1GB)
- **RAM**: 24GB
- **OS**: macOS Sequoia (via OpenCore)
- **IP Address**: 10.0.10.11 (static on device)
- **Hostname**: freds-imac
**Client Features:**
- Voice input/output for AI interactions
- Morning briefing automation
- macOS system integration
- Desktop notifications
## Installation Plan
### Phase 1: Gateway Deployment
1. **Create LXC Container**
```bash
ssh root@10.0.10.3
pct create 130 local:vztmpl/debian-12-standard_12.2-1_amd64.tar.zst \
--hostname openclaw \
--memory 4096 \
--cores 2 \
--rootfs local:16 \
--net0 name=eth0,bridge=vmbr0,ip=10.0.10.28/24,gw=10.0.10.1 \
--nameserver 10.0.10.1 \
--features nesting=1 \
--unprivileged 1 \
--start 1
```
2. **Install OpenClaw Gateway**
```bash
pct exec 130 -- bash -c "curl -fsSL https://openclaw.bot/install.sh | bash"
pct exec 130 -- openclaw onboard --install-daemon
```
3. **Configure Network Access**
- Default: Loopback only (`127.0.0.1:18789`)
- LAN Access: Bind to `10.0.10.28:18789` with authentication
- Security: Token-based auth for non-loopback access
4. **Verify Installation**
```bash
pct exec 130 -- openclaw doctor
pct exec 130 -- openclaw status
pct exec 130 -- systemctl status openclaw-gateway
```
### Phase 2: Desktop Client Setup
1. **Install OpenClaw Desktop App on iMac**
- Download from openclaw.bot or GitHub releases
- Install to Applications folder
- Configure Gateway connection: `10.0.10.28:18789`
2. **Device Pairing**
- iMac connects to Gateway
- Local network: Auto-approval expected
- Verify pairing in Gateway dashboard
3. **Voice Integration**
- Configure microphone input
- Test voice commands
- Set up wake word (if supported)
### Phase 3: Agent Configuration & Automation
**YouTube Tutorial Resources:**
- Introduction Video: https://www.youtube.com/watch?v=tEjg56ZYKJo
- Tutorial with Prompts: https://www.youtube.com/watch?v=b-l9sGh1-UY
#### 1. Morning Brief (8:00 AM Daily)
**Schedule:** Every morning at 8:00 AM (Fred's local time)
**Prompt Template:**
```
I want you to send me a morning brief every morning at 8am my time. I want this morning brief to include:
- The local weather for the day
- A list of a few trending YouTube videos about my interests
- A list of tasks I need to get done today based on my todo list
- Tasks that you think you can do for me today that will be helpful based on what you know about me
- A list of trending stories based on my interests
- Recommendations you can make for me that will make today super productive
```
**Implementation:**
- Configure as recurring agent task or cron job in OpenClaw
- Pull weather from Weather API or Home Assistant integration
- YouTube trending: Use YouTube Data API filtered by interests
- Todo list: Integrate with n8n or local todo system
- News: RSS feeds or news API filtered by interests
#### 2. Proactive Coder (11:00 PM Nightly)
**Schedule:** Every night at 11:00 PM (while Fred is sleeping)
**Prompt Template:**
```
I am a 1 man business. I work from the moment I wake up to the moment I go to sleep. I need an employee taking as much off my plate and being as proactive as possible.
Please take everything you know about me and just do work you think would make my life easier or improve my business and make me money. I want to wake up every morning and be like "wow, you got a lot done while I was sleeping."
Don't be afraid to monitor my business and build things that would help improve our workflow. Just create PRs for me to review, don't push anything live. I'll test and commit. Every night when I go to bed, build something cool I can test. Schedule time to work every night at 11pm.
```
**Implementation:**
- Scheduled agent session starting at 11:00 PM
- Agent has access to:
- Code repositories (via SSH or git)
- Infrastructure monitoring data
- Business metrics and analytics
- Output: Pull requests for review (not direct commits)
- Morning summary of work completed overnight
#### 3. Second Brain (NextJS Document Viewer)
**Purpose:** Obsidian/Linear-style document viewer for daily knowledge capture
**Prompt Template:**
```
I want you to build me a 2nd brain. This should be a NextJS app that shows a list of documents you create as we work together in a nice document viewer that feels like a mix of Obsidian and Linear.
I want you to create a folder where all the documents in that folder are viewable in this 2nd brain. Update your memories/skills so that as we talk every day, you create documents in that 2nd brain that explore some of the more important concepts we discuss.
You should also create daily journal entries that record from a high level all our daily discussions.
```
**Technical Implementation:**
- NextJS app deployed as container or static site
- Document storage: Local filesystem or database
- Features:
- Markdown document viewer
- Daily journal entries (auto-created)
- Concept exploration documents (auto-created during conversations)
- Search and navigation
- Obsidian/Linear-inspired UI
- Integration: OpenClaw agent writes to document folder automatically
#### 4. Afternoon Research Report (Daily)
**Schedule:** Every afternoon (time TBD, suggest 2:00 PM)
**Prompt Template:**
```
I want a daily research report sent to me every afternoon. Based on what you know about me I want you to research and give me a report about a concept that would improve me, processes that would improve our working relationship, or anything else that would be helpful for me.
Examples would be:
- Deep dives on concepts I'm interested in like machine learning
- A new workflow we can implement together that will improve our productivity
```
**Implementation:**
- Scheduled agent research session
- Topics determined by:
- Recent conversations and interests
- Current projects and challenges
- Learning goals
- Business improvement opportunities
- Output: Well-researched report with sources
- Delivery: Morning briefing channel or dedicated report folder
## Integration Points
### Planned Integrations
1. **Home Assistant** (10.0.10.24)
- Custom plugin for device control
- Voice commands for smart home
- Morning briefing with home status
2. **n8n Workflows** (10.0.10.22)
- Webhook triggers from OpenClaw
- Automated task execution
- Custom workflow integration
3. **Calendar Integration**
- Morning briefing with daily schedule
- Meeting reminders
- Task coordination
4. **Weather Integration**
- Morning weather briefing
- Daily forecast
## Security Configuration
**Gateway Authentication:**
- Token-based auth for LAN access
- No public internet exposure (internal only)
- Optional: Reverse proxy via Caddy Internal (10.0.10.27)
**Network Access:**
- Loopback: `127.0.0.1:18789` (no auth required)
- LAN Binding: `10.0.10.28:18789` (auth required)
- Firewall: Restrict to 10.0.10.0/24 network
**Device Pairing:**
- Local clients: Auto-approve
- Remote clients: Manual approval with 1-hour expiration codes
## Startup & Management
**Gateway Service:**
```bash
# Check status
pct exec 130 -- systemctl status openclaw-gateway
# Start/stop/restart
pct exec 130 -- systemctl start openclaw-gateway
pct exec 130 -- systemctl stop openclaw-gateway
pct exec 130 -- systemctl restart openclaw-gateway
# View logs
pct exec 130 -- journalctl -u openclaw-gateway -f
# Dashboard
curl http://10.0.10.28:18789/
```
**Desktop Client:**
- Launch OpenClaw app from macOS Applications
- Check connection status in app
- Configure preferences and voice settings
## Health Checks
```bash
# Gateway accessibility
curl -I http://10.0.10.28:18789
# WebSocket test
wscat -c ws://10.0.10.28:18789
# Service status
ssh root@10.0.10.3 'pct exec 130 -- openclaw status'
# Check Gateway sessions
ssh root@10.0.10.3 'pct exec 130 -- openclaw sessions'
```
## Backup Strategy
**Gateway State Backup:**
- Location: `/root/.openclaw/` on CT 130
- Backup to: `/mnt/omv-backups/openclaw/`
- Frequency: Daily
- Retention: 7 days
**Configuration Files:**
- OpenClaw config: `~/.openclaw/`
- Agent profiles: `~/.openclaw/agents/`
- Gateway state: `~/.openclaw/gateway/`
## Resources
- **GitHub Repository**: https://github.com/openclaw/openclaw
- **Documentation**: https://docs.openclaw.ai
- **Installation Guide**: https://docs.openclaw.ai/start
- **Gateway Docs**: https://docs.openclaw.ai/gateway
- **Security Guide**: https://docs.openclaw.ai/security
- **Plugin System**: https://docs.openclaw.ai/plugins
## YouTube Tutorials
- **Introduction**: https://www.youtube.com/watch?v=tEjg56ZYKJo
- **Tutorial & Prompts**: https://www.youtube.com/watch?v=b-l9sGh1-UY
## Notes
- **Node.js Version**: Must use ≥22.12.0 for critical security patches (CVE-2025-59466, CVE-2026-21636)
- **Resource Requirements**: Lightweight - 2 vCPUs and 4GB RAM sufficient
- **Multi-Instance**: Can run multiple Gateways on different ports if needed
- **Docker Alternative**: Could use Docker Compose instead of native install if preferred
## Next Steps
1. Review YouTube tutorial prompts manually (WebFetch unable to access YouTube)
2. Extract morning briefing templates
3. Create LXC container CT 130
4. Install OpenClaw Gateway
5. Install desktop client on iMac
6. Configure voice integration
7. Set up morning briefing automation
8. Test Home Assistant integration
---
**Status**: Documentation complete, awaiting deployment

View File

@@ -0,0 +1,728 @@
# Proxmox Recovery Guide
Detailed procedures for recovering Proxmox VE installations, VMs, and containers from various failure scenarios.
## Table of Contents
- [Overview](#overview)
- [Backup Strategy](#backup-strategy)
- [Recovery Scenarios](#recovery-scenarios)
- [Tools and Commands](#tools-and-commands)
- [Preventive Measures](#preventive-measures)
## Overview
This guide covers recovery procedures for Proxmox VE environments, specifically:
- Proxmox node failures (hardware issues, corruption, etc.)
- VM/Container restoration
- Cluster recovery
- Configuration restoration
## Backup Strategy
### What to Backup
**1. Proxmox Configuration**
```bash
# Backup Proxmox configs
tar -czf /backup/proxmox-etc-$(date +%Y%m%d).tar.gz /etc/pve/
# Backup network configuration
cp /etc/network/interfaces /backup/interfaces.$(date +%Y%m%d)
# Backup storage configuration
pvesm status > /backup/storage-status.$(date +%Y%m%d).txt
```
**2. VM/Container Backups**
```bash
# Backup all VMs/containers
vzdump --all --mode snapshot --compress zstd --storage [backup-storage]
# Backup specific VM
vzdump VMID --mode snapshot --compress zstd --storage [backup-storage]
# Backup to network location
vzdump VMID --dumpdir /mnt/backup --mode snapshot --compress zstd
```
**3. Boot Configuration**
```bash
# Backup boot loader
dd if=/dev/sda of=/backup/mbr-backup.img bs=512 count=1
# Backup partition table
sfdisk -d /dev/sda > /backup/partition-table.$(date +%Y%m%d).txt
```
### Automated Backup Script
Create `/usr/local/bin/backup-proxmox.sh`:
```bash
#!/bin/bash
# Automated Proxmox backup script
#
# Snapshots host-level configuration (pmxcfs, network, storage, firewall)
# plus per-VM/CT config dumps into a dated directory, then prunes old runs.
# Run daily from cron on each Proxmox node.
set -euo pipefail

readonly BACKUP_DIR="/mnt/backup/proxmox"
readonly RETENTION_DAYS=30
DATE=$(date +%Y%m%d-%H%M%S)
readonly DATE

# Create backup directory for this run
mkdir -p "$BACKUP_DIR/$DATE"

# Backup Proxmox configuration.
# /etc/pve is the pmxcfs FUSE mount; tar may emit permission warnings on
# special entries, so this is deliberately best-effort.
tar -czf "$BACKUP_DIR/$DATE/pve-config.tar.gz" /etc/pve/ 2>/dev/null || true

# Backup network config
cp /etc/network/interfaces "$BACKUP_DIR/$DATE/interfaces"

# Backup storage config
pvesm status > "$BACKUP_DIR/$DATE/storage-status.txt"

# Backup firewall rules
iptables-save > "$BACKUP_DIR/$DATE/iptables-rules"

# List all VMs and containers
qm list > "$BACKUP_DIR/$DATE/vm-list.txt"
pct list > "$BACKUP_DIR/$DATE/ct-list.txt"

# Backup VM configs (metadata only — disks are covered by vzdump)
for vm in $(qm list | awk 'NR > 1 {print $1}'); do
  qm config "$vm" > "$BACKUP_DIR/$DATE/vm-$vm-config.txt"
done

# Backup container configs
for ct in $(pct list | awk 'NR > 1 {print $1}'); do
  pct config "$ct" > "$BACKUP_DIR/$DATE/ct-$ct-config.txt"
done

# Prune expired runs: only the dated top-level directories, never
# BACKUP_DIR itself or anything nested inside a run.
find "$BACKUP_DIR" -mindepth 1 -maxdepth 1 -type d -mtime +"$RETENTION_DAYS" -exec rm -rf -- {} +

echo "Backup completed: $BACKUP_DIR/$DATE"
```
Set up cron job:
```bash
# Daily backup at 2 AM
0 2 * * * /usr/local/bin/backup-proxmox.sh
```
## Recovery Scenarios
### Scenario 1: Single VM/Container Recovery
**Symptoms:**
- VM won't start
- VM corrupted
- Accidental deletion
**Recovery Procedure:**
**1. From Proxmox Backup**
```bash
# List available backups
ls -lh /var/lib/vz/dump/
# Restore VM from backup
qmrestore /var/lib/vz/dump/vzdump-qemu-VMID-DATE.vma.zst NEW_VMID \
--storage local-lvm
# Restore container from backup
pct restore NEW_CTID /var/lib/vz/dump/vzdump-lxc-CTID-DATE.tar.zst \
--storage local-lvm
# Start restored VM/container
qm start NEW_VMID
pct start NEW_CTID
```
**2. From External Backup Location**
```bash
# Mount backup location if needed
mount /dev/sdX1 /mnt/backup
# Or mount network share
mount -t nfs backup-server:/backups /mnt/backup
# Restore from external location
qmrestore /mnt/backup/vzdump-qemu-VMID.vma.zst NEW_VMID \
--storage local-lvm
```
**3. Restore to Different Storage**
```bash
# List available storage
pvesm status
# Restore to specific storage
qmrestore /path/to/backup.vma.zst NEW_VMID --storage [storage-name]
```
### Scenario 2: Proxmox Node Complete Failure
**Symptoms:**
- Hardware failure (motherboard, CPU, RAM)
- Disk controller failure
- Proxmox installation corrupted
**Recovery Options:**
**Option A: Reinstall Proxmox and Restore VMs**
**1. Reinstall Proxmox VE**
```bash
# Boot from Proxmox ISO
# Follow installation wizard
# Configure same network settings as before
# Configure same hostname
# After installation, update system
apt update && apt full-upgrade
```
**2. Restore Network Configuration**
```bash
# Copy backed up network config
scp backup-server:/backup/interfaces /etc/network/interfaces
# Restart networking
systemctl restart networking
```
**3. Configure Storage**
```bash
# Recreate storage configurations
# Web UI: Datacenter → Storage → Add
# Or via command line
pvesm add dir backup --path /mnt/backup --content backup
pvesm add nfs shared-storage --server NFS_IP --export /export/path --content images,backup
```
**4. Restore VMs/Containers**
```bash
# Copy backups if needed
scp -r backup-server:/backups/* /var/lib/vz/dump/
# Restore each VM
# Restore each VM. Extract the VMID from the dump filename
# (vzdump-qemu-<VMID>-<timestamp>.vma.zst) by anchoring on the
# "vzdump-qemu-" prefix; a bare 'grep -oP \d+' would also match the
# timestamp digits and produce a multi-line, invalid VMID.
for backup in /var/lib/vz/dump/vzdump-qemu-*.vma.zst; do
  VMID=$(basename "$backup" | grep -oP 'vzdump-qemu-\K\d+')
  echo "Restoring VM $VMID..."
  qmrestore "$backup" "$VMID" --storage local-lvm
done
# Restore each container
# Restore each container. Anchor the CTID extraction on the
# "vzdump-lxc-" prefix so only the container ID is captured — an
# unanchored 'grep -oP \d+' also matches the date/time digits and
# yields a multi-line CTID that breaks 'pct restore'.
for backup in /var/lib/vz/dump/vzdump-lxc-*.tar.zst; do
  CTID=$(basename "$backup" | grep -oP 'vzdump-lxc-\K\d+')
  echo "Restoring CT $CTID..."
  pct restore "$CTID" "$backup" --storage local-lvm
done
```
**Option B: Disk Recovery (If disks are intact)**
**1. Boot from Proxmox Live ISO**
```bash
# Don't install - boot to rescue mode
```
**2. Mount Proxmox System Disk**
```bash
# Identify system disk
lsblk
# Mount root filesystem
mkdir /mnt/pve-root
mount /dev/sdX3 /mnt/pve-root # Adjust partition number
# Mount boot partition
mount /dev/sdX2 /mnt/pve-root/boot/efi
```
**3. Chroot into System**
```bash
# Mount proc, sys, dev
mount -t proc proc /mnt/pve-root/proc
mount -t sysfs sys /mnt/pve-root/sys
mount -o bind /dev /mnt/pve-root/dev
mount -t devpts devpts /mnt/pve-root/dev/pts
# Chroot
chroot /mnt/pve-root
# Try to repair
proxmox-boot-tool refresh
update-grub
update-initramfs -u
# Exit chroot
exit
# Unmount and reboot
umount -R /mnt/pve-root
reboot
```
### Scenario 3: ZFS Pool Recovery
**Symptoms:**
- ZFS pool degraded
- Missing or failed disk in ZFS mirror/RAID
**Recovery Procedure:**
**1. Check Pool Status**
```bash
# Check ZFS pool health
zpool status
# Example output showing degraded pool:
# pool: rpool
# state: DEGRADED
# scan: scrub in progress since...
```
**2. Replace Failed Disk in ZFS Mirror**
```bash
# Identify failed disk
zpool status rpool
# Replace disk (assuming /dev/sdb failed, replacing with /dev/sdc)
zpool replace rpool /dev/sdb /dev/sdc
# Monitor resilvering progress
watch zpool status rpool
```
**3. Import Pool from Backup Disks**
```bash
# If pool is not automatically imported
zpool import
# Import specific pool
zpool import rpool
# Force import if needed (use cautiously)
zpool import -f rpool
```
**4. Scrub Pool After Recovery**
```bash
# Start scrub to verify data integrity
zpool scrub rpool
# Monitor scrub progress
zpool status
```
### Scenario 4: LVM Recovery
**Symptoms:**
- LVM volume group issues
- Corrupted LVM metadata
- Missing physical volumes
**Recovery Procedure:**
**1. Scan for Volume Groups**
```bash
# Scan for all volume groups
vgscan
# Activate all volume groups
vgchange -ay
```
**2. Restore LVM Metadata**
```bash
# LVM automatically backs up metadata to /etc/lvm/archive/
# List available metadata backups
ls -lh /etc/lvm/archive/
# Restore from backup
vgcfgrestore pve -f /etc/lvm/archive/pve_XXXXX.vg
# Activate volume group
vgchange -ay pve
```
**3. Recover from Failed Disk**
```bash
# Remove failed physical volume from volume group
vgreduce pve /dev/sdX
# Add new physical volume
pvcreate /dev/sdY
vgextend pve /dev/sdY
# Move data from old to new disk (if old disk still readable)
pvmove /dev/sdX /dev/sdY
vgreduce pve /dev/sdX
```
### Scenario 5: Cluster Node Recovery
**Symptoms:**
- Node removed from cluster
- Cluster quorum lost
- Split-brain scenario
**Recovery Procedure:**
**1. Check Cluster Status**
```bash
# Check cluster status
pvecm status
# Check quorum
pvecm nodes
```
**2. Restore Single Node from Cluster**
```bash
# If node was removed from cluster and you want to use it standalone
# Stop cluster services
systemctl stop pve-cluster
systemctl stop corosync
# Start in local mode
pmxcfs -l
# Remove cluster configuration
rm /etc/pve/corosync.conf
rm -rf /etc/corosync/*
# Restart services
killall pmxcfs
systemctl start pve-cluster
```
**3. Rejoin Node to Cluster**
```bash
# On the node to be rejoined
pvecm add CLUSTER_NODE_IP
# Enter cluster network information when prompted
# Node will rejoin cluster and sync configuration
```
**4. Recover Lost Quorum (Emergency Only)**
```bash
# If majority of cluster nodes are down and you need to continue
# WARNING: This can cause split-brain if other nodes come back
# Set expected votes to current online nodes
pvecm expected 1
# This allows single node to have quorum temporarily
```
### Scenario 6: Configuration Recovery Without Backups
**If /etc/pve/ is lost but VMs/containers intact:**
**1. Identify Existing VMs/Containers**
```bash
# List LVM volumes
lvs
# List ZFS datasets
zfs list -t all
# VM disks typically in:
# LVM: pve/vm-XXX-disk-Y
# ZFS: rpool/data/vm-XXX-disk-Y
```
**2. Recreate VM Configuration**
```bash
# Create new VM with same VMID
qm create VMID --name "recovered-vm" --memory 4096 --cores 2
# Attach existing disk (LVM example)
qm set VMID --scsi0 local-lvm:vm-VMID-disk-0
# For ZFS
qm set VMID --scsi0 local-zfs:vm-VMID-disk-0
# Set other options as needed
qm set VMID --net0 virtio,bridge=vmbr0
qm set VMID --boot c --bootdisk scsi0
# Try to start VM
qm start VMID
```
**3. Recreate Container Configuration**
```bash
# Containers are stored in /var/lib/vz/ or ZFS dataset
# Check for rootfs
# Create container pointing to existing rootfs
pct create CTID /var/lib/vz/template/cache/[template].tar.gz \
--rootfs local-lvm:vm-CTID-disk-0 \
--hostname recovered-ct \
--memory 2048
# Start container
pct start CTID
```
## Tools and Commands
### Essential Proxmox Commands
**VM Management:**
```bash
# List all VMs
qm list
# Show VM config
qm config VMID
# Start/stop VM
qm start VMID
qm stop VMID
qm shutdown VMID
# Clone VM
qm clone VMID NEW_VMID --name new-vm-name
# Migrate VM (in cluster)
qm migrate VMID TARGET_NODE
```
**Container Management:**
```bash
# List all containers
pct list
# Show container config
pct config CTID
# Start/stop container
pct start CTID
pct stop CTID
pct shutdown CTID
# Enter container
pct enter CTID
```
**Storage Management:**
```bash
# List storage
pvesm status
# Add storage
pvesm add [type] [storage-id] [options]
# Scan for storage
pvesm scan [type]
```
**Backup/Restore:**
```bash
# Create backup
vzdump VMID --mode snapshot --compress zstd
# Restore backup
qmrestore /path/to/backup.vma.zst NEW_VMID
# List backups
pvesh get /nodes/NODE/storage/STORAGE/content --content backup
```
### Diagnostic Commands
```bash
# Check Proxmox version
pveversion -v
# Check system resources
pvesh get /nodes/NODE/status
# Check running processes
pvesh get /nodes/NODE/tasks
# Check logs
journalctl -u pve-cluster
journalctl -u pvedaemon
journalctl -u pveproxy
# Check disk health
smartctl -a /dev/sdX
# Check network
ip addr
ip route
```
### Recovery Tools
**SystemRescue CD:**
- Boot from SystemRescue ISO
- Access to ZFS, LVM, and filesystem tools
- Can mount and repair Proxmox installations
**Proxmox Live ISO:**
- Boot without installing
- Can mount existing installations
- Repair bootloader and configurations
**TestDisk/PhotoRec:**
- Recover deleted files
- Repair partition tables
## Preventive Measures
### Regular Maintenance
**1. Daily Checks**
```bash
# Check cluster/node status
pvecm status
# Check VM/CT status
qm list
pct list
# Check storage health
pvesm status
```
**2. Weekly Tasks**
```bash
# Update Proxmox
apt update && apt dist-upgrade
# Check for failed systemd services
systemctl --failed
# Review logs
journalctl -p err -b
```
**3. Monthly Tasks**
```bash
# Test backup restore
qmrestore [backup] 999 --storage local-lvm
qm start 999
# Verify VM boots correctly
qm stop 999
qm destroy 999
# Check disk health
for disk in /dev/sd?; do smartctl -H $disk; done
# Check ZFS scrub
zpool scrub rpool
```
### Backup Best Practices
**1. 3-2-1 Backup Strategy**
- 3 copies of data
- 2 different media types
- 1 off-site copy
**2. Automated Backups**
- Schedule regular VM/CT backups
- Backup Proxmox configuration
- Test restore procedures regularly
**3. Documentation**
- Keep network diagrams updated
- Document IP allocations
- Maintain runbooks for common tasks
- Store documentation off-site
### Monitoring Setup
**1. Setup Email Alerts**
```bash
# Configure postfix for email
apt install postfix
# Test email
echo "Test" | mail -s "Proxmox Alert Test" your@email.com
```
**2. Monitor Resources**
- Set up monitoring for CPU, RAM, disk usage
- Alert on high resource consumption
- Monitor backup job success/failure
**3. Health Checks**
```bash
# Create health check script
cat > /usr/local/bin/health-check.sh << 'EOF'
#!/bin/bash
# Proxmox Health Check
# Check cluster status
if ! pvecm status &>/dev/null; then
echo "WARNING: Cluster status check failed"
fi
# Check storage (NOTE: the 'pvesm status' header line also lacks the word "active",
# so this check may always warn — refine the grep, e.g. match "inactive"/"disabled")
pvesm status | grep -v active && echo "WARNING: Storage issue detected"
# Check for failed VMs
qm list | grep stopped && echo "INFO: Stopped VMs detected"
# Check system load
LOAD=$(cat /proc/loadavg | awk '{print $1}')
if (( $(echo "$LOAD > 8" | bc -l) )); then
echo "WARNING: High system load: $LOAD"
fi
# Check disk space
df -h | awk '$5 ~ /^9[0-9]%/ || $5 ~ /^100%/ {print "WARNING: Disk space low on " $6 ": " $5}'
EOF
chmod +x /usr/local/bin/health-check.sh
# Add to crontab (WARNING: 'crontab -' replaces the ENTIRE crontab;
# if entries already exist, append instead: (crontab -l; echo "...") | crontab -)
echo "*/15 * * * * /usr/local/bin/health-check.sh | mail -s 'Proxmox Health Alert' your@email.com" | crontab -
```
## Emergency Contacts
### Proxmox Resources
- Proxmox Forums: https://forum.proxmox.com/
- Proxmox Documentation: https://pve.proxmox.com/pve-docs/
- Proxmox Wiki: https://pve.proxmox.com/wiki/
### Hardware Support
- Document hardware vendor support contacts
- Keep warranty information accessible
- Maintain spare parts inventory
## Recovery Time Objectives
| Scenario | Target Recovery Time | Notes |
|----------|---------------------|-------|
| Single VM restore | 30 minutes | From local backup |
| Complete node rebuild | 4-8 hours | Including OS reinstall |
| ZFS pool recovery | 1-6 hours | Depends on resilvering time |
| Cluster rejoin | 1-2 hours | Network reconfiguration |
| Full disaster recovery | 24-48 hours | From off-site backups |
## Recent Recovery Events
### Event Log Template
**Date:** YYYY-MM-DD
**Affected System:** [Proxmox node/VM/CT]
**Issue:** [Description]
**Resolution:** [Steps taken]
**Downtime:** [Duration]
**Lessons Learned:** [Improvements for next time]
---
**Last Updated:** 2025-12-13
**Version:** 1.0

174
infrastructure/README.md Normal file
View File

@@ -0,0 +1,174 @@
# Home Infrastructure
Comprehensive documentation and configuration management for home network infrastructure, Home Assistant, and smart home automation.
## Repository Structure
```
infrastructure/
├── home-assistant/ # Home Assistant configuration files
│ ├── configuration.yaml
│ ├── automations.yaml
│ ├── scripts.yaml
│ ├── switches.yaml
│ └── secrets.yaml (gitignored)
├── esphome/ # ESPHome device configurations
│ ├── garage-controller.yaml
│ └── README.md
├── voice-assistant/ # Local voice assistant system
│ ├── gaming-pc-setup/ # Docker services (GPU-accelerated AI)
│ ├── surface-go-setup/ # Wyoming satellite installation
│ ├── home-assistant-config/ # Voice pipeline HA config (merge with main)
│ ├── docs/ # Voice system documentation
│ ├── README.md # Voice system overview
│ └── QUICK_START.md # 30-minute voice setup guide
├── docs/ # Infrastructure documentation
│ ├── FURNACE-PROJECT.md # Furnace control integration project
│ ├── HOME-ASSISTANT-CONFIG-MERGE.md # Guide to merge voice HA config
│ ├── MQTT-SETUP.md # MQTT broker configuration
│ ├── DNS-OVER-TLS-SETUP.md # DNS security setup
│ ├── MONITORING.md # System monitoring setup
│ ├── SERVICES.md # Services inventory
│ ├── DISASTER-RECOVERY.md # Backup and recovery procedures
│ ├── RUNBOOK.md # Operational procedures
│ └── IMPROVEMENTS.md # Future improvements tracking
├── scripts/ # Automation and utility scripts
└── claude-shared/ # Shared resources with Claude Code assistant
```
## Quick Start
### Home Assistant
Configuration files are in `/home-assistant/`.
**Key Files:**
- `configuration.yaml` - Main HA configuration
- `automations.yaml` - All automations
- `scripts.yaml` - Reusable scripts
- `secrets.yaml` - Sensitive data (create from secrets.yaml.example)
**Deployment:**
Copy files to your Home Assistant config directory (typically `/config/` in HA OS).
### ESPHome Devices
Device configurations are in `/esphome/`.
**Current Devices:**
- **garage-controller** - ESP32 with 8-relay board controlling garage doors, lights, and planned furnace integration
**Deployment:**
```bash
cd esphome
esphome run garage-controller.yaml
```
Or use Home Assistant ESPHome integration for OTA updates.
### Voice Assistant
**Local GPU-accelerated voice assistant system** with Home Assistant integration.
**System Components:**
- **Gaming PC (RTX 5060):** Docker containers for Ollama LLM, Whisper STT, Piper TTS, OpenWakeWord
- **Home Assistant:** Voice pipeline coordinator
- **Surface Go:** Wyoming satellite (microphone/speaker interface)
**Quick Start:**
1. Set up Gaming PC AI services: `/voice-assistant/gaming-pc-setup/`
2. Set up Surface Go satellite: `/voice-assistant/surface-go-setup/`
3. Merge voice HA config: See `/docs/HOME-ASSISTANT-CONFIG-MERGE.md`
4. Full documentation: `/voice-assistant/README.md`
**Current Status:** Operational - needs HA config merge for full integration
## Active Projects
### 🔥 Furnace Control Integration
- **Status:** Planning phase
- **Goal:** Replace failed furnace board with ESP32-based smart control
- **Documentation:** [docs/FURNACE-PROJECT.md](docs/FURNACE-PROJECT.md)
- **Hardware:** ESP32-WROOM-32E with relay board, 6x temp/humidity sensors
- **Features:** Multi-zone monitoring (garage + shed), HA integration, safety interlocks
### 🎤 Local Voice Assistant
- **Status:** Operational, pending HA config merge
- **Goal:** GPU-accelerated local voice control with LLM
- **Documentation:** [voice-assistant/README.md](voice-assistant/README.md)
- **Hardware:** Gaming PC (RTX 5060), Surface Go (Wyoming satellite)
- **Features:** Ollama LLM, Whisper STT, Piper TTS, OpenWakeWord, no cloud dependencies
- **Integration:** See [docs/HOME-ASSISTANT-CONFIG-MERGE.md](docs/HOME-ASSISTANT-CONFIG-MERGE.md)
### 🌐 Network Infrastructure
- **MQTT:** Mosquitto broker for device communication
- **DNS:** DNS-over-TLS with Unbound
- **Documentation:** See docs/MQTT-SETUP.md and docs/DNS-OVER-TLS-SETUP.md
## Documentation
All infrastructure documentation is in the `/docs/` directory:
- **[FURNACE-PROJECT.md](docs/FURNACE-PROJECT.md)** - ESP32 furnace control integration
- **[HOME-ASSISTANT-CONFIG-MERGE.md](docs/HOME-ASSISTANT-CONFIG-MERGE.md)** - Merge voice assistant HA config
- **[MQTT-SETUP.md](docs/MQTT-SETUP.md)** - MQTT broker setup and configuration
- **[DNS-OVER-TLS-SETUP.md](docs/DNS-OVER-TLS-SETUP.md)** - Secure DNS configuration
- **[MONITORING.md](docs/MONITORING.md)** - System monitoring and alerting
- **[SERVICES.md](docs/SERVICES.md)** - Inventory of all services
- **[DISASTER-RECOVERY.md](docs/DISASTER-RECOVERY.md)** - Backup and recovery procedures
- **[RUNBOOK.md](docs/RUNBOOK.md)** - Operational procedures and troubleshooting
**Voice assistant documentation** is in `/voice-assistant/docs/`:
- Installation, commands, calendar setup, troubleshooting
## Network Information
Current network setup and IP allocations are documented in:
- [IP-ALLOCATION.md](IP-ALLOCATION.md) - IP address assignments
- DHCP exports in repository root
## Contributing
This is a personal infrastructure repository. Updates are made through:
1. Local testing and validation
2. Git commits with descriptive messages
3. Push to GitHub for backup and history
## Secrets Management
Sensitive information (passwords, API keys, etc.) is stored in `secrets.yaml` files which are gitignored.
**Template files:**
- `/home-assistant/secrets.yaml.example`
- Create your own `secrets.yaml` from the template
## Backup Strategy
See [DISASTER-RECOVERY.md](docs/DISASTER-RECOVERY.md) for:
- Backup procedures
- Recovery steps
- Critical system information
## Support & Notes
- **Experience Level:** 20+ years low voltage wiring, network infrastructure
- **Tools:** Home Assistant, ESPHome, MQTT, Unbound DNS
- **Approach:** Document everything, safety-first, incremental improvements
## Recent Updates
- **2025-11-28:**
- Consolidated all smart home projects into monorepo
- Added ESPHome directory and furnace control project documentation
- Integrated voice assistant project (Gaming PC AI + Surface Go)
- Created Home Assistant config merge guide
- **2025-11-27:** Home Assistant configuration updates
- **2025-11-18:** MQTT and DNS-over-TLS setup documentation
---
*Maintained by: Fred N9MRQ*
*Repository: https://github.com/FredN9MRQ/infrastructure*

479
infrastructure/RUNBOOK.md Normal file
View File

@@ -0,0 +1,479 @@
# Infrastructure Runbook
This runbook provides step-by-step procedures for common operational tasks in your infrastructure.
## Table of Contents
- [Pangolin Reverse Proxy Operations](#pangolin-reverse-proxy-operations)
- [Gerbil Tunnel Management](#gerbil-tunnel-management)
- [Proxmox Operations](#proxmox-operations)
- [SSL/TLS Certificate Management](#ssltls-certificate-management)
- [Network Troubleshooting](#network-troubleshooting)
- [Security Procedures](#security-procedures)
- [Backup Operations](#backup-operations)
---
## Pangolin Reverse Proxy Operations
### Add a New Route
```bash
# 1. SSH into VPS
ssh user@your-vps-ip
# 2. Edit Pangolin configuration
sudo nano /path/to/pangolin/config.yml
# 3. Add new route configuration
# domain.example.com -> backend:port
# 4. Test configuration
sudo pangolin config test
# 5. Reload Pangolin
sudo systemctl reload pangolin
# OR
sudo pangolin reload
# 6. Verify route is active
curl -I https://domain.example.com
```
### Remove a Route
```bash
# 1. Edit configuration and comment out or remove route
sudo nano /path/to/pangolin/config.yml
# 2. Reload Pangolin
sudo systemctl reload pangolin
# 3. Verify route is removed
curl -I https://domain.example.com
```
### View Pangolin Logs
```bash
# Real-time logs
sudo tail -f /var/log/pangolin/access.log
sudo tail -f /var/log/pangolin/error.log
# Search for specific domain
grep "domain.example.com" /var/log/pangolin/access.log
# Check last 100 errors
sudo tail -n 100 /var/log/pangolin/error.log
```
### Restart Pangolin Service
```bash
# Check status
sudo systemctl status pangolin
# Restart
sudo systemctl restart pangolin
# Verify it's running
sudo systemctl is-active pangolin
```
---
## Gerbil Tunnel Management
### Check Active Tunnels
```bash
# On VPS - check listening Gerbil server
ss -tlnp | grep gerbil
# On home lab - check active tunnel connections
gerbil status
# OR
ps aux | grep gerbil
```
### Start a Tunnel
```bash
# On home lab machine
gerbil connect --name tunnel-name \
--local localhost:PORT \
--remote VPS_IP:REMOTE_PORT \
--auth-key /path/to/auth.key
# Start as systemd service
sudo systemctl start gerbil-tunnel-name
```
### Stop a Tunnel
```bash
# If running as service
sudo systemctl stop gerbil-tunnel-name
# If running manually
pkill -f "gerbil.*tunnel-name"
```
### Restart a Tunnel
```bash
sudo systemctl restart gerbil-tunnel-name
# Verify tunnel is active
gerbil status tunnel-name
# OR
ss -tn | grep REMOTE_PORT
```
### Debug Tunnel Connection Issues
```bash
# 1. Check if local service is running
curl http://localhost:LOCAL_PORT
# 2. Check if tunnel process is running
ps aux | grep gerbil
# 3. Check tunnel logs
journalctl -u gerbil-tunnel-name -n 50
# 4. Test VPS endpoint
# On VPS:
curl http://localhost:REMOTE_PORT
# 5. Check firewall on VPS
sudo ufw status
sudo iptables -L -n | grep REMOTE_PORT
```
---
## Proxmox Operations
### Create a New VM
```bash
# Via Proxmox web UI: https://PROXMOX_IP:8006
# Via CLI on Proxmox node:
qm create VMID --name vm-name --memory 2048 --cores 2 --net0 virtio,bridge=vmbr0
# Attach disk
qm set VMID --scsi0 local-lvm:32
# Set boot order
qm set VMID --boot order=scsi0
# Start VM
qm start VMID
```
### Create a New Container (LXC)
```bash
# Download template (use the exact versioned name shown by 'pveam available',
# e.g. ubuntu-22.04-standard_22.04-1_amd64.tar.zst)
pveam update
pveam available
pveam download local ubuntu-22.04-standard
# Create container
pct create CTID local:vztmpl/ubuntu-22.04-standard.tar.gz \
--hostname ct-name \
--memory 1024 \
--cores 2 \
--net0 name=eth0,bridge=vmbr0,ip=dhcp
# Start container
pct start CTID
# Enter container
pct enter CTID
```
### Stop/Start VM or Container
```bash
# VM operations
qm stop VMID # Stop
qm start VMID # Start
qm shutdown VMID # Graceful shutdown
qm reboot VMID # Reboot
qm status VMID # Check status
# Container operations
pct stop CTID
pct start CTID
pct shutdown CTID
pct reboot CTID
pct status CTID
```
### Migrate VM Between Nodes
```bash
# Online migration (VM stays running)
qm migrate VMID target-node --online
# Offline migration
qm migrate VMID target-node
# Check migration status
qm status VMID
```
### Check Resource Usage
```bash
# Overall cluster resources
pvesh get /cluster/resources
# Specific node resources
pvesh get /nodes/NODE_NAME/status
# VM resource usage
qm status VMID --verbose
# Storage usage
pvesm status
```
### Backup VM or Container
```bash
# Backup VM
vzdump VMID --storage STORAGE_NAME --mode snapshot
# Backup container
vzdump CTID --storage STORAGE_NAME
# List backups
pvesm list STORAGE_NAME
```
### Restore from Backup
```bash
# Restore VM
qmrestore /path/to/backup/vzdump-qemu-VMID.vma.zst VMID
# Restore container
pct restore CTID /path/to/backup/vzdump-lxc-CTID.tar.zst
```
---
## SSL/TLS Certificate Management
### Request New Let's Encrypt Certificate
```bash
# Install certbot if needed
sudo apt install certbot
# Request certificate (HTTP-01 challenge)
sudo certbot certonly --standalone -d domain.example.com
# Request wildcard certificate (DNS-01 challenge)
sudo certbot certonly --manual --preferred-challenges dns -d "*.example.com"
# Certificates are stored in: /etc/letsencrypt/live/domain.example.com/
```
### Renew Certificates
```bash
# Dry run to test renewal
sudo certbot renew --dry-run
# Renew all certificates
sudo certbot renew
# Renew specific certificate
sudo certbot renew --cert-name domain.example.com
# Set up auto-renewal (check if already configured)
sudo systemctl status certbot.timer
```
### Check Certificate Expiration
```bash
# Check local certificate
sudo certbot certificates
# Check remote certificate
echo | openssl s_client -servername domain.example.com -connect domain.example.com:443 2>/dev/null | openssl x509 -noout -dates
# Check all certificates expiring in 30 days
sudo certbot certificates | grep "Expiry Date"
```
### Deploy Certificate to Service
```bash
# Copy certificate to service location
sudo cp /etc/letsencrypt/live/domain.example.com/fullchain.pem /path/to/service/cert.pem
sudo cp /etc/letsencrypt/live/domain.example.com/privkey.pem /path/to/service/key.pem
# Set permissions
sudo chmod 644 /path/to/service/cert.pem
sudo chmod 600 /path/to/service/key.pem
# Reload service
sudo systemctl reload service-name
```
---
## Network Troubleshooting
### Check Network Connectivity
```bash
# Ping test
ping -c 4 8.8.8.8
# DNS resolution
nslookup domain.example.com
dig domain.example.com
# Trace route
traceroute domain.example.com
mtr domain.example.com
```
### Check Open Ports
```bash
# Check listening ports
ss -tlnp
netstat -tlnp
# Check if specific port is open
ss -tlnp | grep :PORT
nc -zv localhost PORT
# Check firewall rules
sudo ufw status numbered
sudo iptables -L -n -v
```
### Test Service Availability
```bash
# HTTP/HTTPS test
curl -I https://domain.example.com
curl -v https://domain.example.com
# Test specific port
nc -zv host PORT
telnet host PORT
# Check service status
sudo systemctl status service-name
```
### Check Network Interface Status
```bash
# List all interfaces
ip addr show
ip link show
# Check interface statistics
ip -s link show eth0
# Restart interface
sudo ip link set eth0 down
sudo ip link set eth0 up
```
---
## Security Procedures
### Update SSH Key
```bash
# Generate new SSH key
ssh-keygen -t ed25519 -C "description"
# Copy to server
ssh-copy-id -i ~/.ssh/new_key.pub user@server
# Test new key
ssh -i ~/.ssh/new_key user@server
# Update SSH config
nano ~/.ssh/config
```
### Review Failed Login Attempts
```bash
# Check auth logs
sudo grep "Failed password" /var/log/auth.log
sudo journalctl -u ssh -n 100
# Check fail2ban status (if installed)
sudo fail2ban-client status sshd
```
### Update Firewall Rules
```bash
# Add new rule
sudo ufw allow PORT/tcp
sudo ufw allow from IP_ADDRESS to any port PORT
# Remove rule
sudo ufw delete allow PORT/tcp
sudo ufw status numbered
sudo ufw delete NUMBER
# Reload firewall
sudo ufw reload
```
### Security Updates
```bash
# Check for updates
sudo apt update
sudo apt list --upgradable
# Install all available upgrades (for automatic security-only updates,
# configure the unattended-upgrades package instead)
sudo apt upgrade -y
# Reboot if kernel updated
sudo needrestart -r a
```
---
## Backup Operations
### Manual Backup
```bash
# Backup specific VM/Container
vzdump VMID --storage STORAGE_NAME --mode snapshot --compress zstd
# Backup configuration files
tar -czf config-backup-$(date +%Y%m%d).tar.gz /etc/pangolin /etc/gerbil
# Backup to remote location
rsync -avz /path/to/data/ user@backup-server:/path/to/backup/
```
### Verify Backup
```bash
# List backup contents
tar -tzf backup.tar.gz | less
# Check backup integrity
tar -tzf backup.tar.gz > /dev/null && echo "OK" || echo "CORRUPTED"
# Check vzdump backup
cat /path/to/backup/vzdump-qemu-VMID.log
```
### Restore Specific Files
```bash
# Extract specific file from backup
tar -xzf backup.tar.gz path/to/specific/file
# Restore from rsync backup
rsync -avz user@backup-server:/path/to/backup/ /path/to/restore/
```
---
## Emergency Contacts
- Infrastructure Owner: _______________
- Network Administrator: _______________
- VPS Provider Support: _______________
- DNS Provider Support: _______________
## Additional Resources
- Pangolin Documentation: _______________
- Gerbil Documentation: _______________
- Proxmox Documentation: https://pve.proxmox.com/pve-docs/
- Internal Wiki: _______________

1120
infrastructure/SERVICES.md Normal file

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,3 @@
SSH pub key
ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIIl9GHvvTkyHLW/XHTOaN/1ylqzzj7lQJYpHEDRHLPW0 surface-go-assistant
H

View File

@@ -0,0 +1,400 @@
# SSH Setup Guide - Homelab Infrastructure
Complete guide for setting up SSH connections between Windows machines in the homelab.
## Overview
This guide documents the SSH configuration between:
- **HOMELAB-COMMAND** (10.0.10.10) - Windows client/workstation
- **M6800** - Windows SSH server
## Architecture
```
HOMELAB-COMMAND (10.0.10.10)
↓ SSH (Port 22)
M6800 (Windows OpenSSH Server)
- Projects in C:\Users\Fred\projects
- Infrastructure scripts
- Development environment
```
## Prerequisites
- Windows 10/11 on both machines
- Both machines on same network (10.0.10.0/24)
- Administrator access on M6800 for SSH server setup
## Setup Steps
### 1. Install OpenSSH Server on M6800
```powershell
# Check if OpenSSH Server is installed
Get-WindowsCapability -Online | Where-Object Name -like 'OpenSSH.Server*'
# Install if not present
Add-WindowsCapability -Online -Name OpenSSH.Server~~~~0.0.1.0
# Start the service
Start-Service sshd
# Enable automatic startup
Set-Service -Name sshd -StartupType 'Automatic'
# Verify service is running
Get-Service sshd
```
### 2. Configure Windows Firewall on M6800
```powershell
# Allow SSH through Windows Firewall
New-NetFirewallRule -Name sshd -DisplayName 'OpenSSH Server (sshd)' -Enabled True -Direction Inbound -Protocol TCP -Action Allow -LocalPort 22
# Or use the automated script
# C:\Users\Fred\projects\infrastructure\scripts\enable-ssh-firewall.ps1
```
### 3. Generate SSH Keys on HOMELAB-COMMAND
```powershell
# Generate ED25519 key pair (recommended for better security)
ssh-keygen -t ed25519 -f $env:USERPROFILE\.ssh\id_ed25519 -C "homelab-command-to-m6800"
# Or generate RSA key (if ED25519 not supported)
ssh-keygen -t rsa -b 4096 -f $env:USERPROFILE\.ssh\id_rsa -C "homelab-command-to-m6800"
# Keys will be created in:
# - Private key: C:\Users\Fred\.ssh\id_ed25519
# - Public key: C:\Users\Fred\.ssh\id_ed25519.pub
```
### 4. Copy Public Key to M6800
**Manual Method:**
```powershell
# On HOMELAB-COMMAND - Display your public key
Get-Content $env:USERPROFILE\.ssh\id_ed25519.pub
# On M6800 - Create .ssh directory if needed
$sshDir = "$env:USERPROFILE\.ssh"
if (!(Test-Path $sshDir)) {
New-Item -Path $sshDir -ItemType Directory -Force
}
# Create authorized_keys file and paste the public key
# Note: For administrators, use administrators_authorized_keys instead
$authKeysFile = "$env:ProgramData\ssh\administrators_authorized_keys"
Set-Content -Path $authKeysFile -Value "YOUR_PUBLIC_KEY_HERE"
# Set correct permissions (critical!)
icacls.exe "$authKeysFile" /inheritance:r /grant "Administrators:F" /grant "SYSTEM:F"
```
**Automated Method (if SSH password auth is enabled):**
```powershell
# From HOMELAB-COMMAND
# NOTE: 'mkdir -p' and 'cat' require the remote account's default shell to be a
# Unix-style shell (e.g. Git Bash/WSL); with Windows' default cmd.exe shell,
# use the manual method above instead
type $env:USERPROFILE\.ssh\id_ed25519.pub | ssh fred@M6800 "mkdir -p C:\ProgramData\ssh; cat >> C:\ProgramData\ssh\administrators_authorized_keys"
# Fix permissions on M6800
ssh fred@M6800 "icacls.exe C:\ProgramData\ssh\administrators_authorized_keys /inheritance:r /grant Administrators:F /grant SYSTEM:F"
```
### 5. Configure SSH Client on HOMELAB-COMMAND
Create or edit `C:\Users\Fred\.ssh\config`:
```
Host m6800
HostName M6800
User Fred
IdentityFile C:\Users\Fred\.ssh\id_ed25519
ServerAliveInterval 60
ServerAliveCountMax 3
Host homelab-command
HostName 10.0.10.10
User Fred
IdentityFile C:\Users\Fred\.ssh\id_ed25519
```
### 6. Test the Connection
```powershell
# Test SSH connection
ssh m6800
# Test with verbose output (for troubleshooting)
ssh -v m6800
# Test specific command
ssh m6800 "hostname"
```
## Troubleshooting
### Connection Refused
**Symptoms:**
```
ssh: connect to host M6800 port 22: Connection refused
```
**Solutions:**
1. Check SSH service is running on M6800:
```powershell
Get-Service sshd
Start-Service sshd # If stopped
```
2. Verify firewall rule exists:
```powershell
Get-NetFirewallRule -Name sshd
```
3. Test network connectivity:
```powershell
Test-NetConnection -ComputerName M6800 -Port 22
```
### Permission Denied (publickey)
**Symptoms:**
```
Permission denied (publickey,keyboard-interactive)
```
**Solutions:**
1. Verify public key is in correct location:
- For administrators: `C:\ProgramData\ssh\administrators_authorized_keys`
- For regular users: `C:\Users\Fred\.ssh\authorized_keys`
2. Check file permissions on M6800:
```powershell
icacls C:\ProgramData\ssh\administrators_authorized_keys
```
Should show only Administrators and SYSTEM with Full control.
3. Verify SSH key is being offered:
```powershell
ssh -v m6800 2>&1 | Select-String "Offering"
```
4. Restart SSH service after permission changes:
```powershell
Restart-Service sshd
```
### Name Resolution Issues
**Symptoms:**
```
Could not resolve hostname M6800
```
**Solutions:**
1. Use IP address instead of hostname in SSH config
2. Add entry to hosts file:
```powershell
Add-Content -Path C:\Windows\System32\drivers\etc\hosts -Value "10.0.10.XX M6800"
```
3. Configure static DNS entry in router/DHCP server
### Slow Connection/Hangs
**Solutions:**
1. Add to SSH config to disable GSSAPI authentication:
```
GSSAPIAuthentication no
```
2. Increase verbosity to identify hang point:
```powershell
ssh -vvv m6800
```
## Security Best Practices
### 1. Disable Password Authentication (After Key Setup Works)
On M6800, edit `C:\ProgramData\ssh\sshd_config`:
```
PasswordAuthentication no
PubkeyAuthentication yes
```
Restart SSH service:
```powershell
Restart-Service sshd
```
### 2. Change Default SSH Port (Optional)
Edit `C:\ProgramData\ssh\sshd_config`:
```
# Use any port above 1024 (sshd_config does not allow trailing comments on a directive line)
Port 2222
```
Update firewall rule:
```powershell
New-NetFirewallRule -Name sshd-custom -DisplayName 'OpenSSH Server (Custom Port)' -Enabled True -Direction Inbound -Protocol TCP -Action Allow -LocalPort 2222
```
### 3. Restrict SSH Access by IP
```powershell
# Allow SSH only from specific IP
New-NetFirewallRule -Name sshd-restricted -DisplayName 'OpenSSH Server (Restricted)' -Enabled True -Direction Inbound -Protocol TCP -Action Allow -LocalPort 22 -RemoteAddress 10.0.10.10
```
### 4. Key Management
- Use passphrase-protected keys for sensitive environments
- Rotate keys periodically (every 6-12 months)
- Remove old keys from authorized_keys file
- Keep private keys secure - never share them
### 5. Monitoring
Enable SSH logging on M6800:
```powershell
# View SSH logs
Get-WinEvent -LogName "OpenSSH/Operational" | Select-Object -First 20
# Monitor failed login attempts
Get-WinEvent -LogName "OpenSSH/Operational" | Where-Object {$_.Message -like "*failed*"}
```
## Automated Scripts
### enable-ssh-firewall.ps1
Location: `C:\Users\Fred\projects\infrastructure\scripts\enable-ssh-firewall.ps1`
Automatically configures firewall rules for SSH server.
### setup-ssh-server.ps1
Automated script for complete SSH server setup on Windows:
```powershell
# Install OpenSSH Server
Add-WindowsCapability -Online -Name OpenSSH.Server~~~~0.0.1.0
# Configure service
Start-Service sshd
Set-Service -Name sshd -StartupType 'Automatic'
# Configure firewall
New-NetFirewallRule -Name sshd -DisplayName 'OpenSSH Server (sshd)' -Enabled True -Direction Inbound -Protocol TCP -Action Allow -LocalPort 22
# Create admin authorized_keys file
$authKeysFile = "$env:ProgramData\ssh\administrators_authorized_keys"
New-Item -Path $authKeysFile -ItemType File -Force
icacls.exe "$authKeysFile" /inheritance:r /grant "Administrators:F" /grant "SYSTEM:F"
```
### test-homelab-ssh.sh
Location: `C:\Users\Fred\projects\infrastructure\scripts\test-homelab-ssh.sh`
Tests SSH connectivity between homelab machines.
## Common Use Cases
### 1. Remote File Transfer (SCP)
```powershell
# Copy file to M6800
scp localfile.txt m6800:C:/Users/Fred/
# Copy file from M6800
scp m6800:C:/Users/Fred/remotefile.txt ./
# Copy directory recursively
scp -r localdir/ m6800:C:/Users/Fred/remotedir/
```
### 2. Remote Command Execution
```powershell
# Single command
ssh m6800 "powershell -Command Get-Process"
# Multiple commands
ssh m6800 "powershell -Command 'Get-Date; hostname; Get-Service sshd'"
# Run script
ssh m6800 "powershell -ExecutionPolicy Bypass -File C:/scripts/script.ps1"
```
### 3. Port Forwarding
```powershell
# Local port forwarding - Access M6800 service on local port 8080
ssh -L 8080:localhost:80 m6800
# Remote port forwarding - Expose local service to M6800
ssh -R 9090:localhost:8080 m6800
# Dynamic port forwarding (SOCKS proxy)
ssh -D 1080 m6800
```
### 4. SSH Tunneling for Home Assistant
```powershell
# Forward Home Assistant port through SSH tunnel
ssh -L 8123:localhost:8123 m6800
# Access in browser: http://localhost:8123
```
## Integration with Claude Code
Claude Code can use SSH to access remote development environments:
```bash
# From Claude Code on HOMELAB-COMMAND
# Access M6800 projects directory
ssh m6800 "cd C:/Users/Fred/projects && dir"
# Edit files remotely
ssh m6800 "powershell -Command 'Get-Content C:/Users/Fred/projects/file.txt'"
```
## Maintenance Tasks
### Weekly
- Review SSH logs for failed attempts
- Check authorized_keys file for unauthorized entries
### Monthly
- Test SSH connectivity from all client machines
- Verify firewall rules are correct
- Update OpenSSH if new version available
### Quarterly
- Review and remove old SSH keys
- Audit SSH configuration for security best practices
- Test backup SSH access methods
## References
- [Microsoft OpenSSH Documentation](https://learn.microsoft.com/en-us/windows-server/administration/openssh/openssh_overview)
- [OpenSSH Manual Pages](https://www.openssh.com/manual.html)
- Infrastructure scripts: `C:\Users\Fred\projects\infrastructure\scripts\`
## Recent Work Log
### 2025-12-13
- Configured SSH server on M6800
- Set up SSH key authentication from HOMELAB-COMMAND
- Created firewall rules for SSH access
- Tested bidirectional connectivity
- Documented troubleshooting steps for permission issues

Binary file not shown.

After

Width:  |  Height:  |  Size: 427 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 481 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 88 KiB

View File

@@ -0,0 +1,28 @@
# authentik blueprint: provisions an OAuth2/OIDC provider and its companion
# application so Home Assistant (https://bob.nianticbooks.com) can log in via SSO.
# NOTE(review): indentation appears to have been stripped by the diff viewer —
# confirm against the original file before applying; as rendered this is not valid YAML.
version: 1
metadata:
name: Home Assistant OAuth2 Integration
entries:
# --- OAuth2 provider --------------------------------------------------------
- model: authentik_providers_oauth2.oauth2provider
id: homeassistant-provider
identifiers:
name: Home Assistant
attrs:
# Reuse authentik's built-in implicit-consent authorization flow.
authorization_flow: !Find [authentik_flows.flow, [slug, default-provider-authorization-implicit-consent]]
invalidation_flow: !Find [authentik_flows.flow, [slug, default-provider-invalidation-flow]]
client_type: confidential
# client_id/client_secret are derived from environment variables at import
# time, so each deployment gets unique credentials.
client_id: !Format [homeassistant-%s, !Env RANDOM_ID]
client_secret: !Format [%s, !Env RANDOM_SECRET]
# Home Assistant's OAuth callback endpoint.
redirect_uris: |
https://bob.nianticbooks.com/auth/external/callback
signing_key: !Find [authentik_crypto.certificatekeypair, [name, authentik Self-signed Certificate]]
# Use a hashed user id as the OIDC subject rather than the raw username.
sub_mode: hashed_user_id
include_claims_in_id_token: true
# --- Application tile linked to the provider above --------------------------
- model: authentik_core.application
id: homeassistant-app
identifiers:
slug: home-assistant
attrs:
name: Home Assistant
# !KeyOf references the provider entry defined earlier in this blueprint.
provider: !KeyOf homeassistant-provider
launch_url: https://bob.nianticbooks.com

View File

@@ -0,0 +1,23 @@
<!DOCTYPE HTML>
<html lang="en">
<head>
<meta charset="utf-8">
<title>Directory listing for /infrastructure/claude-shared/</title>
</head>
<body>
<h1>Directory listing for /infrastructure/claude-shared/</h1>
<hr>
<ul>
<li><a href=".assistant/">.assistant/</a></li>
<li><a href=".git">.git</a></li>
<li><a href="commands/">commands/</a></li>
<li><a href="GAMING-RIG-SETUP.md">GAMING-RIG-SETUP.md</a></li>
<li><a href="QUICK-START.md">QUICK-START.md</a></li>
<li><a href="README.md">README.md</a></li>
<li><a href="SETUP-GUIDE.md">SETUP-GUIDE.md</a></li>
<li><a href="setup-symlinks.ps1">setup-symlinks.ps1</a></li>
<li><a href="setup-symlinks.sh">setup-symlinks.sh</a></li>
</ul>
<hr>
</body>
</html>

View File

@@ -0,0 +1,50 @@
"MAC Address","IP Address",Hostname,"Local DNS Record","Lease Type",Name,"Expiration Time"
"02:f5:e9:54:36:28","10.0.10.194",homeassistant,,Dynamic,"homeassistant 36:28","2025-11-15T08:19:09.000-06:00"
"10:d5:61:3b:ce:32","10.0.10.76",wlan0,,Dynamic,"wlan0 ce:32","2025-11-15T05:19:04.000-06:00"
"32:5a:63:c9:e9:d7","10.0.10.129",Watch,,Dynamic,"Watch e9:d7","2025-11-15T09:17:22.000-06:00"
"44:61:32:90:e0:a3","10.0.10.102","My-ecobee",,Dynamic,"My-ecobee e0:a3","2025-11-15T13:50:38.000-06:00"
"62:03:ac:1f:89:40","10.0.10.202",iPhone,,Dynamic,"iPhone 89:40","2025-11-15T16:18:00.000-06:00"
"64:5d:86:15:de:20","10.0.10.157",KobePC,,Dynamic,"KobePC de:20","2025-11-15T16:02:18.000-06:00"
"64:da:ed:1c:b5:6d","10.0.10.227",eero,,Dynamic,"eero b5:6d","2025-11-15T15:37:17.000-06:00"
"64:da:ed:29:12:ad","10.0.10.101",eero,,Dynamic,"eero 12:ad","2025-11-15T06:48:00.000-06:00"
"64:da:ed:29:2e:8d","10.0.10.216",eero,,Dynamic,"eero 2e:8d","2025-11-15T16:01:22.000-06:00"
"68:57:2d:b4:dd:25","10.0.10.170","TY_WR",,Dynamic,"TY_WR dd:25","2025-11-15T09:12:44.000-06:00"
"6c:c8:40:05:5c:68","10.0.10.173","esphome-web-055c68",,Dynamic,"esphome-web-055c68 5c:68","2025-11-15T08:04:29.000-06:00"
"6e:7a:50:fa:de:94","10.0.10.238",,,Dynamic,"Docker de:94","2025-11-15T07:57:17.000-06:00"
"70:66:2a:65:36:bc","10.0.10.174",,,Dynamic,"Sony PlayStation 5 36:bc","2025-11-15T09:43:22.000-06:00"
"70:89:76:ba:0f:d4","10.0.10.81",wlan0,,Dynamic,"wlan0 0f:d4","2025-11-15T10:39:16.000-06:00"
"70:89:76:bc:f4:a4","10.0.10.151",wlan0,,Dynamic,"wlan0 f4:a4","2025-11-15T10:32:53.000-06:00"
"7c:f6:66:44:68:f6","10.0.10.201",wlan0,,Dynamic,"wlan0 68:f6","2025-11-15T14:21:51.000-06:00"
"7c:f6:66:45:44:44","10.0.10.160",wlan0,,Dynamic,"wlan0 44:44","2025-11-15T16:18:47.000-06:00"
"7e:e4:a4:b8:c2:f3","10.0.10.55",iPhone,,Dynamic,"iPhone c2:f3","2025-11-15T15:55:10.000-06:00"
"80:00:6e:f2:13:52","10.0.10.105","Freds-Mac-Pro",,Dynamic,"Jill's MacPro","2025-11-15T06:52:49.000-06:00"
"82:cc:cf:ec:5e:da","10.0.10.171",iPad,,Dynamic,"iPad 5e:da","2025-11-15T06:51:23.000-06:00"
"84:d6:c5:4a:70:32","10.0.10.62",,,Dynamic,"Solaredge SE7K 70:32","2025-11-15T16:47:36.000-06:00"
"84:f3:eb:c1:dd:aa","10.0.10.90","ESP_C1DDAA",,Dynamic,"ESP_C1DDAA dd:aa","2025-11-15T14:50:31.000-06:00"
"88:a9:a7:99:c3:64","10.0.10.189",AD5M,"AD5M.nianticbooks.home",Fixed,ad5m,"1969-12-31T18:00:00.000-06:00"
"88:e7:12:04:fc:06","10.0.10.195","MICRW_88_E7_12_04_FC_06",,Dynamic,"MICRW_88_E7_12_04_FC_06 fc:06","2025-11-15T05:58:46.000-06:00"
"90:de:80:80:e7:04","10.0.10.92","HOMELAB-COMMAND","HOMELAB-COMMAND.nianticbooks.home",Fixed,"Dad's PC","1969-12-31T18:00:00.000-06:00"
"a0:ad:9f:30:8c:af","10.0.10.213","Kevin-PC",,Dynamic,"Kevin-PC 8c:af","2025-11-15T12:23:50.000-06:00"
"a8:2c:3e:bc:e2:bf","10.0.10.235",,,Dynamic,"Jill's Monitor","2025-11-15T16:27:13.000-06:00"
"ac:41:6a:69:3a:8e","10.0.10.154",,,Dynamic,"Blink XT Security Camera 3a:8e","2025-11-15T08:10:01.000-06:00"
"ac:fd:ce:e6:9f:d8","10.0.10.241","Fred-M6800",,Dynamic,"Fred-M6800 9f:d8","2025-11-15T16:26:57.000-06:00"
"b4:b5:2f:ea:8c:30","10.0.10.53",ilob4b52fea8c30,"ilo.nianticbooks.home",Fixed,"HP iLO","1969-12-31T18:00:00.000-06:00"
"b8:09:8a:ca:6c:53","10.0.10.144","Freds-iMac",,Dynamic,"Freds-iMac 6c:53","2025-11-15T16:26:40.000-06:00"
"bc:24:11:0f:78:84","10.0.10.79","pve-scripts-local",,Dynamic,"pve-scripts-local 78:84","2025-11-15T14:29:45.000-06:00"
"bc:24:11:4a:42:07","10.0.10.104",dockge,,Dynamic,"dockge 42:07","2025-11-15T12:41:26.000-06:00"
"bc:24:11:57:dd:94","10.0.10.112",authelia,,Dynamic,"authelia dd:94","2025-11-15T14:54:51.000-06:00"
"bc:24:11:8f:eb:eb","10.0.10.113",esphome,,Dynamic,"esphome eb:eb","2025-11-15T13:17:30.000-06:00"
"bc:24:11:98:31:65","10.0.10.178",openmediavault,,Dynamic,"openmediavault 31:65","2025-11-15T11:19:16.000-06:00"
"bc:24:11:a8:ff:0b","10.0.10.108",docker,,Dynamic,"docker ff:0b","2025-11-15T13:52:10.000-06:00"
"bc:24:11:f9:12:5b","10.0.10.71",spoolman,,Dynamic,"spoolman 12:5b","2025-11-15T15:31:55.000-06:00"
"c4:3c:b0:fe:53:94","10.0.10.168",,,Dynamic,"Samsung Galaxy S8 Plus 53:94","2025-11-15T12:54:01.000-06:00"
"c6:f7:66:6e:fc:23","10.0.10.188",iPhone,,Dynamic,"iPhone fc:23","2025-11-15T09:27:57.000-06:00"
"cc:ba:97:21:4c:f8","10.0.10.167",,,Dynamic,"Bambu Lab A1 4c:f8","2025-11-15T15:14:51.000-06:00"
"ce:cb:0f:e1:86:8b","10.0.10.155",Watch,,Dynamic,"Watch 86:8b","2025-11-15T15:56:41.000-06:00"
"d4:a6:51:98:45:62","10.0.10.57",wlan0,,Dynamic,"wlan0 45:62","2025-11-15T07:56:44.000-06:00"
"dc:a4:ca:ea:cc:be","10.0.10.78","freds-airport-extreme",,Dynamic,"freds-airport-extreme cc:be","2025-11-15T13:35:48.000-06:00"
"de:35:17:a0:28:42","10.0.10.94",iPhone,,Dynamic,"iPhone 28:42","2025-11-15T05:00:04.000-06:00"
"e4:54:e8:50:90:af","10.0.10.2",,"proxmox.nianticbooks.home",Fixed,"In house Proxmox","1969-12-31T18:00:00.000-06:00"
"e8:4c:4a:12:03:32","10.0.10.176",,,Dynamic,"Blink Add-On Sync Module 2 03:32","2025-11-15T08:43:00.000-06:00"
"f8:e4:e3:f3:a7:69","10.0.10.253",debian,,Dynamic,"debian a7:69","2025-11-15T06:51:17.000-06:00"
"fa:2e:aa:f1:66:a3","10.0.10.61",iPhone,,Dynamic,"iPhone 66:a3","2025-11-15T15:53:32.000-06:00"
1 MAC Address IP Address Hostname Local DNS Record Lease Type Name Expiration Time
2 02:f5:e9:54:36:28 10.0.10.194 homeassistant Dynamic homeassistant 36:28 2025-11-15T08:19:09.000-06:00
3 10:d5:61:3b:ce:32 10.0.10.76 wlan0 Dynamic wlan0 ce:32 2025-11-15T05:19:04.000-06:00
4 32:5a:63:c9:e9:d7 10.0.10.129 Watch Dynamic Watch e9:d7 2025-11-15T09:17:22.000-06:00
5 44:61:32:90:e0:a3 10.0.10.102 My-ecobee Dynamic My-ecobee e0:a3 2025-11-15T13:50:38.000-06:00
6 62:03:ac:1f:89:40 10.0.10.202 iPhone Dynamic iPhone 89:40 2025-11-15T16:18:00.000-06:00
7 64:5d:86:15:de:20 10.0.10.157 KobePC Dynamic KobePC de:20 2025-11-15T16:02:18.000-06:00
8 64:da:ed:1c:b5:6d 10.0.10.227 eero Dynamic eero b5:6d 2025-11-15T15:37:17.000-06:00
9 64:da:ed:29:12:ad 10.0.10.101 eero Dynamic eero 12:ad 2025-11-15T06:48:00.000-06:00
10 64:da:ed:29:2e:8d 10.0.10.216 eero Dynamic eero 2e:8d 2025-11-15T16:01:22.000-06:00
11 68:57:2d:b4:dd:25 10.0.10.170 TY_WR Dynamic TY_WR dd:25 2025-11-15T09:12:44.000-06:00
12 6c:c8:40:05:5c:68 10.0.10.173 esphome-web-055c68 Dynamic esphome-web-055c68 5c:68 2025-11-15T08:04:29.000-06:00
13 6e:7a:50:fa:de:94 10.0.10.238 Dynamic Docker de:94 2025-11-15T07:57:17.000-06:00
14 70:66:2a:65:36:bc 10.0.10.174 Dynamic Sony PlayStation 5 36:bc 2025-11-15T09:43:22.000-06:00
15 70:89:76:ba:0f:d4 10.0.10.81 wlan0 Dynamic wlan0 0f:d4 2025-11-15T10:39:16.000-06:00
16 70:89:76:bc:f4:a4 10.0.10.151 wlan0 Dynamic wlan0 f4:a4 2025-11-15T10:32:53.000-06:00
17 7c:f6:66:44:68:f6 10.0.10.201 wlan0 Dynamic wlan0 68:f6 2025-11-15T14:21:51.000-06:00
18 7c:f6:66:45:44:44 10.0.10.160 wlan0 Dynamic wlan0 44:44 2025-11-15T16:18:47.000-06:00
19 7e:e4:a4:b8:c2:f3 10.0.10.55 iPhone Dynamic iPhone c2:f3 2025-11-15T15:55:10.000-06:00
20 80:00:6e:f2:13:52 10.0.10.105 Freds-Mac-Pro Dynamic Jill's MacPro 2025-11-15T06:52:49.000-06:00
21 82:cc:cf:ec:5e:da 10.0.10.171 iPad Dynamic iPad 5e:da 2025-11-15T06:51:23.000-06:00
22 84:d6:c5:4a:70:32 10.0.10.62 Dynamic Solaredge SE7K 70:32 2025-11-15T16:47:36.000-06:00
23 84:f3:eb:c1:dd:aa 10.0.10.90 ESP_C1DDAA Dynamic ESP_C1DDAA dd:aa 2025-11-15T14:50:31.000-06:00
24 88:a9:a7:99:c3:64 10.0.10.189 AD5M AD5M.nianticbooks.home Fixed ad5m 1969-12-31T18:00:00.000-06:00
25 88:e7:12:04:fc:06 10.0.10.195 MICRW_88_E7_12_04_FC_06 Dynamic MICRW_88_E7_12_04_FC_06 fc:06 2025-11-15T05:58:46.000-06:00
26 90:de:80:80:e7:04 10.0.10.92 HOMELAB-COMMAND HOMELAB-COMMAND.nianticbooks.home Fixed Dad's PC 1969-12-31T18:00:00.000-06:00
27 a0:ad:9f:30:8c:af 10.0.10.213 Kevin-PC Dynamic Kevin-PC 8c:af 2025-11-15T12:23:50.000-06:00
28 a8:2c:3e:bc:e2:bf 10.0.10.235 Dynamic Jill's Monitor 2025-11-15T16:27:13.000-06:00
29 ac:41:6a:69:3a:8e 10.0.10.154 Dynamic Blink XT Security Camera 3a:8e 2025-11-15T08:10:01.000-06:00
30 ac:fd:ce:e6:9f:d8 10.0.10.241 Fred-M6800 Dynamic Fred-M6800 9f:d8 2025-11-15T16:26:57.000-06:00
31 b4:b5:2f:ea:8c:30 10.0.10.53 ilob4b52fea8c30 ilo.nianticbooks.home Fixed HP iLO 1969-12-31T18:00:00.000-06:00
32 b8:09:8a:ca:6c:53 10.0.10.144 Freds-iMac Dynamic Freds-iMac 6c:53 2025-11-15T16:26:40.000-06:00
33 bc:24:11:0f:78:84 10.0.10.79 pve-scripts-local Dynamic pve-scripts-local 78:84 2025-11-15T14:29:45.000-06:00
34 bc:24:11:4a:42:07 10.0.10.104 dockge Dynamic dockge 42:07 2025-11-15T12:41:26.000-06:00
35 bc:24:11:57:dd:94 10.0.10.112 authelia Dynamic authelia dd:94 2025-11-15T14:54:51.000-06:00
36 bc:24:11:8f:eb:eb 10.0.10.113 esphome Dynamic esphome eb:eb 2025-11-15T13:17:30.000-06:00
37 bc:24:11:98:31:65 10.0.10.178 openmediavault Dynamic openmediavault 31:65 2025-11-15T11:19:16.000-06:00
38 bc:24:11:a8:ff:0b 10.0.10.108 docker Dynamic docker ff:0b 2025-11-15T13:52:10.000-06:00
39 bc:24:11:f9:12:5b 10.0.10.71 spoolman Dynamic spoolman 12:5b 2025-11-15T15:31:55.000-06:00
40 c4:3c:b0:fe:53:94 10.0.10.168 Dynamic Samsung Galaxy S8 Plus 53:94 2025-11-15T12:54:01.000-06:00
41 c6:f7:66:6e:fc:23 10.0.10.188 iPhone Dynamic iPhone fc:23 2025-11-15T09:27:57.000-06:00
42 cc:ba:97:21:4c:f8 10.0.10.167 Dynamic Bambu Lab A1 4c:f8 2025-11-15T15:14:51.000-06:00
43 ce:cb:0f:e1:86:8b 10.0.10.155 Watch Dynamic Watch 86:8b 2025-11-15T15:56:41.000-06:00
44 d4:a6:51:98:45:62 10.0.10.57 wlan0 Dynamic wlan0 45:62 2025-11-15T07:56:44.000-06:00
45 dc:a4:ca:ea:cc:be 10.0.10.78 freds-airport-extreme Dynamic freds-airport-extreme cc:be 2025-11-15T13:35:48.000-06:00
46 de:35:17:a0:28:42 10.0.10.94 iPhone Dynamic iPhone 28:42 2025-11-15T05:00:04.000-06:00
47 e4:54:e8:50:90:af 10.0.10.2 proxmox.nianticbooks.home Fixed In house Proxmox 1969-12-31T18:00:00.000-06:00
48 e8:4c:4a:12:03:32 10.0.10.176 Dynamic Blink Add-On Sync Module 2 03:32 2025-11-15T08:43:00.000-06:00
49 f8:e4:e3:f3:a7:69 10.0.10.253 debian Dynamic debian a7:69 2025-11-15T06:51:17.000-06:00
50 fa:2e:aa:f1:66:a3 10.0.10.61 iPhone Dynamic iPhone 66:a3 2025-11-15T15:53:32.000-06:00

View File

@@ -0,0 +1,55 @@
"MAC Address","IP Address",Hostname,"Local DNS Record","Lease Type",Name,"Expiration Time"
"02:b7:f9:f1:b9:67","10.0.10.64","pelican-wings",,Dynamic,"pelican-wings b9:67","2025-11-17T22:26:47.000-06:00"
"02:f5:e9:54:36:28","10.0.10.194",homeassistant,,Dynamic,"homeassistant 36:28","2025-11-17T15:28:37.000-06:00"
"10:d5:61:3b:ce:32","10.0.10.76",wlan0,,Dynamic,"wlan0 ce:32","2025-11-17T20:15:44.000-06:00"
"32:5a:63:c9:e9:d7","10.0.10.129",Watch,,Dynamic,"Watch e9:d7","2025-11-17T18:00:17.000-06:00"
"44:61:32:90:e0:a3","10.0.10.102","My-ecobee",,Dynamic,"My-ecobee e0:a3","2025-11-17T20:15:23.000-06:00"
"62:03:ac:1f:89:40","10.0.10.202",iPhone,,Dynamic,"iPhone 89:40","2025-11-17T12:15:23.000-06:00"
"64:5d:86:15:de:20","10.0.10.157",KobePC,,Dynamic,"KobePC de:20","2025-11-17T21:29:52.000-06:00"
"64:da:ed:1c:b5:6d","10.0.10.227",eero,,Dynamic,"eero b5:6d","2025-11-17T22:49:11.000-06:00"
"64:da:ed:29:12:ad","10.0.10.101",eero,,Dynamic,"eero 12:ad","2025-11-17T18:23:32.000-06:00"
"64:da:ed:29:2e:8d","10.0.10.216",eero,,Dynamic,"eero 2e:8d","2025-11-17T17:27:16.000-06:00"
"68:57:2d:b4:dd:25","10.0.10.170","TY_WR",,Dynamic,"TY_WR dd:25","2025-11-17T21:11:34.000-06:00"
"6c:c8:40:05:5c:68","10.0.10.173","esphome-web-055c68",,Dynamic,"esphome-web-055c68 5c:68","2025-11-17T15:24:13.000-06:00"
"70:66:2a:65:36:bc","10.0.10.174",,,Dynamic,"Sony PlayStation 5 36:bc","2025-11-17T13:09:26.000-06:00"
"70:89:76:ba:0f:d4","10.0.10.81",wlan0,,Dynamic,"wlan0 0f:d4","2025-11-17T17:44:47.000-06:00"
"70:89:76:bc:f4:a4","10.0.10.151",wlan0,,Dynamic,"wlan0 f4:a4","2025-11-17T17:39:55.000-06:00"
"7c:f6:66:44:68:f6","10.0.10.201",wlan0,,Dynamic,"wlan0 68:f6","2025-11-17T21:32:53.000-06:00"
"7c:f6:66:45:44:44","10.0.10.160",wlan0,,Dynamic,"wlan0 44:44","2025-11-17T21:32:15.000-06:00"
"7e:e4:a4:b8:c2:f3","10.0.10.55",iPhone,,Dynamic,"iPhone c2:f3","2025-11-17T15:33:13.000-06:00"
"80:00:6e:f2:13:52","10.0.10.105","Freds-Mac-Pro",,Dynamic,"Jill's MacPro","2025-11-17T17:30:26.000-06:00"
"84:d6:c5:4a:70:32","10.0.10.62",,,Dynamic,"Solaredge SE7K 70:32","2025-11-17T18:46:47.000-06:00"
"84:f3:eb:c1:dd:aa","10.0.10.90","ESP_C1DDAA",,Dynamic,"ESP_C1DDAA dd:aa","2025-11-17T22:16:20.000-06:00"
"88:a9:a7:99:c3:64","10.0.10.30",AD5M,"AD5M.nianticbooks.home",Fixed,ad5m,"1969-12-31T18:00:00.000-06:00"
"88:e7:12:04:fc:06","10.0.10.195","MICRW_88_E7_12_04_FC_06",,Dynamic,"MICRW_88_E7_12_04_FC_06 fc:06","2025-11-17T15:01:03.000-06:00"
"90:de:80:80:e7:04","10.0.10.10","HOMELAB-COMMAND","HOMELAB-COMMAND.nianticbooks.home",Fixed,"Dad's PC","1969-12-31T18:00:00.000-06:00"
"a0:ad:9f:30:8c:af","10.0.10.213","Kevin-PC",,Dynamic,"Kevin-PC 8c:af","2025-11-17T19:06:01.000-06:00"
"a8:2c:3e:bc:e2:bf","10.0.10.235",,,Dynamic,"Jill's Monitor","2025-11-17T15:30:12.000-06:00"
"ac:41:6a:69:3a:8e","10.0.10.154",,,Dynamic,"Blink XT Security Camera 3a:8e","2025-11-17T18:16:05.000-06:00"
"b4:b5:2f:ea:8c:30","10.0.10.13","ilo.nianticbooks.home","ilo.nianticbooks.home",Fixed,"HP iLO","1969-12-31T18:00:00.000-06:00"
"b8:09:8a:ca:6c:53","10.0.10.144","Freds-iMac",,Dynamic,"Freds-iMac 6c:53","2025-11-17T13:19:12.000-06:00"
"bc:24:11:0f:78:84","10.0.10.79","pve-scripts-local",,Dynamic,"pve-scripts-local 78:84","2025-11-17T15:46:18.000-06:00"
"bc:24:11:44:6f:49","10.0.10.117",esphome,,Dynamic,"esphome 6f:49","2025-11-17T22:27:22.000-06:00"
"bc:24:11:4a:42:07","10.0.10.104",dockge,,Dynamic,"dockge 42:07","2025-11-17T22:03:43.000-06:00"
"bc:24:11:4a:80:e5","10.0.10.247","twingate-connector",,Dynamic,"twingate-connector 80:e5","2025-11-17T22:28:07.000-06:00"
"bc:24:11:57:dd:94","10.0.10.112",authelia,,Dynamic,"authelia dd:94","2025-11-17T13:35:27.000-06:00"
"bc:24:11:6e:22:c3","10.0.10.77",ollama,,Dynamic,"ollama 22:c3","2025-11-17T22:25:02.000-06:00"
"bc:24:11:6f:8b:2c","10.0.10.236","pelican-panel",,Dynamic,"pelican-panel 8b:2c","2025-11-17T22:25:48.000-06:00"
"bc:24:11:74:bb:f8","10.0.10.212","pelican-wings",,Dynamic,"pelican-wings bb:f8","2025-11-17T22:26:32.000-06:00"
"bc:24:11:86:f0:c8","10.0.10.164","bar-assistant",,Dynamic,"bar-assistant f0:c8","2025-11-17T22:24:17.000-06:00"
"bc:24:11:8f:eb:eb","10.0.10.113",esphome,,Dynamic,"esphome eb:eb","2025-11-17T12:56:37.000-06:00"
"bc:24:11:98:31:65","10.0.10.178",openmediavault,,Dynamic,"openmediavault 31:65","2025-11-17T19:01:17.000-06:00"
"bc:24:11:a8:ff:0b","10.0.10.108",docker,,Dynamic,"docker ff:0b","2025-11-17T15:38:40.000-06:00"
"bc:24:11:d3:34:1c","10.0.10.192","twingate-connector",,Dynamic,"twingate-connector 34:1c","2025-11-17T22:28:52.000-06:00"
"bc:24:11:f9:12:5b","10.0.10.71",spoolman,,Dynamic,"spoolman 12:5b","2025-11-17T14:48:23.000-06:00"
"c4:3c:b0:fe:53:94","10.0.10.168",,,Dynamic,"Samsung Galaxy S8 Plus 53:94","2025-11-17T12:54:02.000-06:00"
"c6:f7:66:6e:fc:23","10.0.10.188",iPhone,,Dynamic,"iPhone fc:23","2025-11-17T14:31:23.000-06:00"
"cc:ba:97:21:4c:f8","10.0.10.167",,,Dynamic,"Bambu Lab A1 4c:f8","2025-11-17T22:08:36.000-06:00"
"ce:cb:0f:e1:86:8b","10.0.10.155",Watch,,Dynamic,"Watch 86:8b","2025-11-17T12:43:20.000-06:00"
"d0:c9:07:89:89:6c","10.0.10.80",,,Dynamic,"Govee H5082 89:6c","2025-11-17T19:45:10.000-06:00"
"d4:a6:51:98:45:62","10.0.10.57",wlan0,,Dynamic,"wlan0 45:62","2025-11-17T19:28:46.000-06:00"
"dc:a4:ca:ea:cc:be","10.0.10.78","freds-airport-extreme",,Dynamic,"freds-airport-extreme cc:be","2025-11-17T22:06:03.000-06:00"
"de:35:17:a0:28:42","10.0.10.94",iPhone,,Dynamic,"iPhone 28:42","2025-11-17T21:29:25.000-06:00"
"e4:54:e8:50:90:af","10.0.10.2",,"proxmox.nianticbooks.home",Fixed,"In house Proxmox","1969-12-31T18:00:00.000-06:00"
"e8:4c:4a:12:03:32","10.0.10.176",,,Dynamic,"Blink Add-On Sync Module 2 03:32","2025-11-17T20:43:01.000-06:00"
"fa:2e:aa:f1:66:a3","10.0.10.61",iPhone,,Dynamic,"iPhone 66:a3","2025-11-17T12:14:51.000-06:00"
1 MAC Address IP Address Hostname Local DNS Record Lease Type Name Expiration Time
2 02:b7:f9:f1:b9:67 10.0.10.64 pelican-wings Dynamic pelican-wings b9:67 2025-11-17T22:26:47.000-06:00
3 02:f5:e9:54:36:28 10.0.10.194 homeassistant Dynamic homeassistant 36:28 2025-11-17T15:28:37.000-06:00
4 10:d5:61:3b:ce:32 10.0.10.76 wlan0 Dynamic wlan0 ce:32 2025-11-17T20:15:44.000-06:00
5 32:5a:63:c9:e9:d7 10.0.10.129 Watch Dynamic Watch e9:d7 2025-11-17T18:00:17.000-06:00
6 44:61:32:90:e0:a3 10.0.10.102 My-ecobee Dynamic My-ecobee e0:a3 2025-11-17T20:15:23.000-06:00
7 62:03:ac:1f:89:40 10.0.10.202 iPhone Dynamic iPhone 89:40 2025-11-17T12:15:23.000-06:00
8 64:5d:86:15:de:20 10.0.10.157 KobePC Dynamic KobePC de:20 2025-11-17T21:29:52.000-06:00
9 64:da:ed:1c:b5:6d 10.0.10.227 eero Dynamic eero b5:6d 2025-11-17T22:49:11.000-06:00
10 64:da:ed:29:12:ad 10.0.10.101 eero Dynamic eero 12:ad 2025-11-17T18:23:32.000-06:00
11 64:da:ed:29:2e:8d 10.0.10.216 eero Dynamic eero 2e:8d 2025-11-17T17:27:16.000-06:00
12 68:57:2d:b4:dd:25 10.0.10.170 TY_WR Dynamic TY_WR dd:25 2025-11-17T21:11:34.000-06:00
13 6c:c8:40:05:5c:68 10.0.10.173 esphome-web-055c68 Dynamic esphome-web-055c68 5c:68 2025-11-17T15:24:13.000-06:00
14 70:66:2a:65:36:bc 10.0.10.174 Dynamic Sony PlayStation 5 36:bc 2025-11-17T13:09:26.000-06:00
15 70:89:76:ba:0f:d4 10.0.10.81 wlan0 Dynamic wlan0 0f:d4 2025-11-17T17:44:47.000-06:00
16 70:89:76:bc:f4:a4 10.0.10.151 wlan0 Dynamic wlan0 f4:a4 2025-11-17T17:39:55.000-06:00
17 7c:f6:66:44:68:f6 10.0.10.201 wlan0 Dynamic wlan0 68:f6 2025-11-17T21:32:53.000-06:00
18 7c:f6:66:45:44:44 10.0.10.160 wlan0 Dynamic wlan0 44:44 2025-11-17T21:32:15.000-06:00
19 7e:e4:a4:b8:c2:f3 10.0.10.55 iPhone Dynamic iPhone c2:f3 2025-11-17T15:33:13.000-06:00
20 80:00:6e:f2:13:52 10.0.10.105 Freds-Mac-Pro Dynamic Jill's MacPro 2025-11-17T17:30:26.000-06:00
21 84:d6:c5:4a:70:32 10.0.10.62 Dynamic Solaredge SE7K 70:32 2025-11-17T18:46:47.000-06:00
22 84:f3:eb:c1:dd:aa 10.0.10.90 ESP_C1DDAA Dynamic ESP_C1DDAA dd:aa 2025-11-17T22:16:20.000-06:00
23 88:a9:a7:99:c3:64 10.0.10.30 AD5M AD5M.nianticbooks.home Fixed ad5m 1969-12-31T18:00:00.000-06:00
24 88:e7:12:04:fc:06 10.0.10.195 MICRW_88_E7_12_04_FC_06 Dynamic MICRW_88_E7_12_04_FC_06 fc:06 2025-11-17T15:01:03.000-06:00
25 90:de:80:80:e7:04 10.0.10.10 HOMELAB-COMMAND HOMELAB-COMMAND.nianticbooks.home Fixed Dad's PC 1969-12-31T18:00:00.000-06:00
26 a0:ad:9f:30:8c:af 10.0.10.213 Kevin-PC Dynamic Kevin-PC 8c:af 2025-11-17T19:06:01.000-06:00
27 a8:2c:3e:bc:e2:bf 10.0.10.235 Dynamic Jill's Monitor 2025-11-17T15:30:12.000-06:00
28 ac:41:6a:69:3a:8e 10.0.10.154 Dynamic Blink XT Security Camera 3a:8e 2025-11-17T18:16:05.000-06:00
29 b4:b5:2f:ea:8c:30 10.0.10.13 ilo.nianticbooks.home ilo.nianticbooks.home Fixed HP iLO 1969-12-31T18:00:00.000-06:00
30 b8:09:8a:ca:6c:53 10.0.10.144 Freds-iMac Dynamic Freds-iMac 6c:53 2025-11-17T13:19:12.000-06:00
31 bc:24:11:0f:78:84 10.0.10.79 pve-scripts-local Dynamic pve-scripts-local 78:84 2025-11-17T15:46:18.000-06:00
32 bc:24:11:44:6f:49 10.0.10.117 esphome Dynamic esphome 6f:49 2025-11-17T22:27:22.000-06:00
33 bc:24:11:4a:42:07 10.0.10.104 dockge Dynamic dockge 42:07 2025-11-17T22:03:43.000-06:00
34 bc:24:11:4a:80:e5 10.0.10.247 twingate-connector Dynamic twingate-connector 80:e5 2025-11-17T22:28:07.000-06:00
35 bc:24:11:57:dd:94 10.0.10.112 authelia Dynamic authelia dd:94 2025-11-17T13:35:27.000-06:00
36 bc:24:11:6e:22:c3 10.0.10.77 ollama Dynamic ollama 22:c3 2025-11-17T22:25:02.000-06:00
37 bc:24:11:6f:8b:2c 10.0.10.236 pelican-panel Dynamic pelican-panel 8b:2c 2025-11-17T22:25:48.000-06:00
38 bc:24:11:74:bb:f8 10.0.10.212 pelican-wings Dynamic pelican-wings bb:f8 2025-11-17T22:26:32.000-06:00
39 bc:24:11:86:f0:c8 10.0.10.164 bar-assistant Dynamic bar-assistant f0:c8 2025-11-17T22:24:17.000-06:00
40 bc:24:11:8f:eb:eb 10.0.10.113 esphome Dynamic esphome eb:eb 2025-11-17T12:56:37.000-06:00
41 bc:24:11:98:31:65 10.0.10.178 openmediavault Dynamic openmediavault 31:65 2025-11-17T19:01:17.000-06:00
42 bc:24:11:a8:ff:0b 10.0.10.108 docker Dynamic docker ff:0b 2025-11-17T15:38:40.000-06:00
43 bc:24:11:d3:34:1c 10.0.10.192 twingate-connector Dynamic twingate-connector 34:1c 2025-11-17T22:28:52.000-06:00
44 bc:24:11:f9:12:5b 10.0.10.71 spoolman Dynamic spoolman 12:5b 2025-11-17T14:48:23.000-06:00
45 c4:3c:b0:fe:53:94 10.0.10.168 Dynamic Samsung Galaxy S8 Plus 53:94 2025-11-17T12:54:02.000-06:00
46 c6:f7:66:6e:fc:23 10.0.10.188 iPhone Dynamic iPhone fc:23 2025-11-17T14:31:23.000-06:00
47 cc:ba:97:21:4c:f8 10.0.10.167 Dynamic Bambu Lab A1 4c:f8 2025-11-17T22:08:36.000-06:00
48 ce:cb:0f:e1:86:8b 10.0.10.155 Watch Dynamic Watch 86:8b 2025-11-17T12:43:20.000-06:00
49 d0:c9:07:89:89:6c 10.0.10.80 Dynamic Govee H5082 89:6c 2025-11-17T19:45:10.000-06:00
50 d4:a6:51:98:45:62 10.0.10.57 wlan0 Dynamic wlan0 45:62 2025-11-17T19:28:46.000-06:00
51 dc:a4:ca:ea:cc:be 10.0.10.78 freds-airport-extreme Dynamic freds-airport-extreme cc:be 2025-11-17T22:06:03.000-06:00
52 de:35:17:a0:28:42 10.0.10.94 iPhone Dynamic iPhone 28:42 2025-11-17T21:29:25.000-06:00
53 e4:54:e8:50:90:af 10.0.10.2 proxmox.nianticbooks.home Fixed In house Proxmox 1969-12-31T18:00:00.000-06:00
54 e8:4c:4a:12:03:32 10.0.10.176 Dynamic Blink Add-On Sync Module 2 03:32 2025-11-17T20:43:01.000-06:00
55 fa:2e:aa:f1:66:a3 10.0.10.61 iPhone Dynamic iPhone 66:a3 2025-11-17T12:14:51.000-06:00

View File

@@ -0,0 +1,17 @@
<!DOCTYPE HTML>
<html lang="en">
<head>
<meta charset="utf-8">
<title>Directory listing for /infrastructure/docs/</title>
</head>
<body>
<h1>Directory listing for /infrastructure/docs/</h1>
<hr>
<ul>
<li><a href="archive/">archive/</a></li>
<li><a href="FURNACE-PROJECT.md">FURNACE-PROJECT.md</a></li>
<li><a href="HOME-ASSISTANT-CONFIG-MERGE.md">HOME-ASSISTANT-CONFIG-MERGE.md</a></li>
</ul>
<hr>
</body>
</html>

View File

@@ -0,0 +1,21 @@
<!DOCTYPE HTML>
<html lang="en">
<head>
<meta charset="utf-8">
<title>Directory listing for /infrastructure/esphome/</title>
</head>
<body>
<h1>Directory listing for /infrastructure/esphome/</h1>
<hr>
<ul>
<li><a href=".gitignore">.gitignore</a></li>
<li><a href="FURNACE-HYBRID-DESIGN.md">FURNACE-HYBRID-DESIGN.md</a></li>
<li><a href="FURNACE-SPECS.md">FURNACE-SPECS.md</a></li>
<li><a href="FURNACE-WIRING-DIAGRAMS.md">FURNACE-WIRING-DIAGRAMS.md</a></li>
<li><a href="garage-controller.yaml">garage-controller.yaml</a></li>
<li><a href="Pictures/">Pictures/</a></li>
<li><a href="README.md">README.md</a></li>
</ul>
<hr>
</body>
</html>

View File

@@ -0,0 +1,27 @@
<!DOCTYPE HTML>
<html lang="en">
<head>
<meta charset="utf-8">
<title>Directory listing for /infrastructure/guides/</title>
</head>
<body>
<h1>Directory listing for /infrastructure/guides/</h1>
<hr>
<ul>
<li><a href="AUTHENTIK-QUICK-START.md">AUTHENTIK-QUICK-START.md</a></li>
<li><a href="AUTHENTIK-SSO-GUIDE.md">AUTHENTIK-SSO-GUIDE.md</a></li>
<li><a href="BACKUP-QUICK-START.md">BACKUP-QUICK-START.md</a></li>
<li><a href="GRAFANA-SETUP-COMPLETE.md">GRAFANA-SETUP-COMPLETE.md</a></li>
<li><a href="HOME-ASSISTANT-PROMETHEUS-SETUP.md">HOME-ASSISTANT-PROMETHEUS-SETUP.md</a></li>
<li><a href="HOMELAB-BACKUP-STRATEGY.md">HOMELAB-BACKUP-STRATEGY.md</a></li>
<li><a href="HOMELAB-IMPROVEMENTS-2025.md">HOMELAB-IMPROVEMENTS-2025.md</a></li>
<li><a href="MONITORING-SETUP-COMPLETE.md">MONITORING-SETUP-COMPLETE.md</a></li>
<li><a href="RUSTDESK-DEPLOYMENT-COMPLETE.md">RUSTDESK-DEPLOYMENT-COMPLETE.md</a></li>
<li><a href="SESSION-SUMMARY-2025-12-25.md">SESSION-SUMMARY-2025-12-25.md</a></li>
<li><a href="SESSION-SUMMARY-2025-12-26.md">SESSION-SUMMARY-2025-12-26.md</a></li>
<li><a href="TWINGATE-RESOURCES-SETUP.md">TWINGATE-RESOURCES-SETUP.md</a></li>
<li><a href="WEBAUTHN-ENROLLMENT-GUIDE.md">WEBAUTHN-ENROLLMENT-GUIDE.md</a></li>
</ul>
<hr>
</body>
</html>

View File

@@ -0,0 +1,82 @@
<!DOCTYPE HTML>
<html lang="en">
<head>
<meta charset="utf-8">
<title>Directory listing for /infrastructure/home-assistant/</title>
</head>
<body>
<h1>Directory listing for /infrastructure/home-assistant/</h1>
<hr>
<ul>
<li><a href=".gitignore">.gitignore</a></li>
<li><a href="001157.png">001157.png</a></li>
<li><a href="2025-12-08%20004316.png">2025-12-08 004316.png</a></li>
<li><a href="231422.png">231422.png</a></li>
<li><a href="232641.png">232641.png</a></li>
<li><a href="232821.png">232821.png</a></li>
<li><a href="233029.png">233029.png</a></li>
<li><a href="233607.png">233607.png</a></li>
<li><a href="233732.png">233732.png</a></li>
<li><a href="234206.png">234206.png</a></li>
<li><a href="AUTOMATIONS.md">AUTOMATIONS.md</a></li>
<li><a href="automations.yaml">automations.yaml</a></li>
<li><a href="BRAINSTORM.md">BRAINSTORM.md</a></li>
<li><a href="claude-shared/">claude-shared/</a></li>
<li><a href="CLAUDE.md">CLAUDE.md</a></li>
<li><a href="configuration.yaml">configuration.yaml</a></li>
<li><a href="configuration.yaml.bak">configuration.yaml.bak</a></li>
<li><a href="dhcp-export-all-2025-11-14T22-55-18.871Z.csv">dhcp-export-all-2025-11-14T22-55-18.871Z.csv</a></li>
<li><a href="dhcp-export-all-2025-11-17T04-56-03.591Z.csv">dhcp-export-all-2025-11-17T04-56-03.591Z.csv</a></li>
<li><a href="DISASTER-RECOVERY.md">DISASTER-RECOVERY.md</a></li>
<li><a href="DNS-OVER-TLS-SETUP.md">DNS-OVER-TLS-SETUP.md</a></li>
<li><a href="docs/">docs/</a></li>
<li><a href="esphome/">esphome/</a></li>
<li><a href="fullchain.pem">fullchain.pem</a></li>
<li><a href="headache-tracking-config.yaml">headache-tracking-config.yaml</a></li>
<li><a href="home-assistant.log.1">home-assistant.log.1</a></li>
<li><a href="home-assistant.log.fault">home-assistant.log.fault</a></li>
<li><a href="home-assistant.log.old">home-assistant.log.old</a></li>
<li><a href="IMPROVEMENTS.md">IMPROVEMENTS.md</a></li>
<li><a href="infrastructure-audit.md">infrastructure-audit.md</a></li>
<li><a href="IP-ALLOCATION.md">IP-ALLOCATION.md</a></li>
<li><a href="MIGRATION-CHECKLIST.md">MIGRATION-CHECKLIST.md</a></li>
<li><a href="MONITORING.md">MONITORING.md</a></li>
<li><a href="MORNING-REMINDER.md">MORNING-REMINDER.md</a></li>
<li><a href="MQTT-SETUP.md">MQTT-SETUP.md</a></li>
<li><a href="ONE-LINER-UPDATE.txt">ONE-LINER-UPDATE.txt</a></li>
<li><a href="Pasted%20image%20%282%29.png">Pasted image (2).png</a></li>
<li><a href="Pasted%20image.png">Pasted image.png</a></li>
<li><a href="privkey.pem">privkey.pem</a></li>
<li><a href="README.md">README.md</a></li>
<li><a href="RUNBOOK.md">RUNBOOK.md</a></li>
<li><a href="scenes.yaml">scenes.yaml</a></li>
<li><a href="Screenshot%202025-11-17%20225014.png">Screenshot 2025-11-17 225014.png</a></li>
<li><a href="Screenshot%202025-11-17%20230122.png">Screenshot 2025-11-17 230122.png</a></li>
<li><a href="Screenshot%202025-12-07%20214448.png">Screenshot 2025-12-07 214448.png</a></li>
<li><a href="Screenshot%202025-12-07%20215218.png">Screenshot 2025-12-07 215218.png</a></li>
<li><a href="Screenshot%202025-12-07%20215408.png">Screenshot 2025-12-07 215408.png</a></li>
<li><a href="Screenshot%202025-12-07%20215931.png">Screenshot 2025-12-07 215931.png</a></li>
<li><a href="Screenshot%202025-12-07%20220103.png">Screenshot 2025-12-07 220103.png</a></li>
<li><a href="Screenshot%202025-12-07%20220610.png">Screenshot 2025-12-07 220610.png</a></li>
<li><a href="Screenshot%202025-12-07%20220940.png">Screenshot 2025-12-07 220940.png</a></li>
<li><a href="Screenshot%202025-12-07%20223617.png">Screenshot 2025-12-07 223617.png</a></li>
<li><a href="Screenshot%202025-12-07%20224659.png">Screenshot 2025-12-07 224659.png</a></li>
<li><a href="Screenshot%202025-12-07%20224941.png">Screenshot 2025-12-07 224941.png</a></li>
<li><a href="Screenshot%202025-12-07%20225202.png">Screenshot 2025-12-07 225202.png</a></li>
<li><a href="Screenshot%202025-12-07%20225444.png">Screenshot 2025-12-07 225444.png</a></li>
<li><a href="Screenshot%202025-12-07%202307.png">Screenshot 2025-12-07 2307.png</a></li>
<li><a href="Screenshot%202025-12-07%20231138.png">Screenshot 2025-12-07 231138.png</a></li>
<li><a href="Screenshot%202025-12-07%20232154.png">Screenshot 2025-12-07 232154.png</a></li>
<li><a href="scripts/">scripts/</a></li>
<li><a href="scripts.yaml">scripts.yaml</a></li>
<li><a href="SERVICES.md">SERVICES.md</a></li>
<li><a href="switches.yaml">switches.yaml</a></li>
<li><a href="TROUBLESHOOTING-2025-12-07.md">TROUBLESHOOTING-2025-12-07.md</a></li>
<li><a href="TROUBLESHOOTING-GENERAL.md">TROUBLESHOOTING-GENERAL.md</a></li>
<li><a href="update-config-on-server.sh">update-config-on-server.sh</a></li>
<li><a href="voice-assistant/">voice-assistant/</a></li>
<li><a href="wireguard-setup-progress.md">wireguard-setup-progress.md</a></li>
</ul>
<hr>
</body>
</html>

View File

@@ -0,0 +1,282 @@
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="UTF-8">
<meta name="viewport" content="width=device-width, initial-scale=1.0">
<title>Homelab Dashboard</title>
<style>
* {
margin: 0;
padding: 0;
box-sizing: border-box;
}
body {
font-family: -apple-system, BlinkMacSystemFont, 'Segoe UI', Arial, sans-serif;
background: linear-gradient(135deg, #1e3c72 0%, #2a5298 100%);
min-height: 100vh;
padding: 20px;
color: #333;
}
.container {
max-width: 1200px;
margin: 0 auto;
}
h1 {
color: white;
text-align: center;
margin-bottom: 10px;
font-size: 2.5em;
text-shadow: 2px 2px 4px rgba(0,0,0,0.3);
}
.subtitle {
color: rgba(255,255,255,0.9);
text-align: center;
margin-bottom: 30px;
font-size: 1.1em;
}
/* White content card wrapping each dashboard section. */
.section {
background: white;
border-radius: 12px;
padding: 25px;
margin-bottom: 20px;
box-shadow: 0 4px 6px rgba(0,0,0,0.1);
}
/* Section heading with an underline divider. */
.section h2 {
color: #2a5298;
margin-bottom: 15px;
font-size: 1.5em;
border-bottom: 2px solid #e0e0e0;
padding-bottom: 10px;
}
/* Responsive card grid: as many 280px-min columns as fit. */
.resource-grid {
display: grid;
grid-template-columns: repeat(auto-fill, minmax(280px, 1fr));
gap: 15px;
margin-top: 15px;
}
/* Clickable service card (an <a> styled as a tile); base purple gradient
   is overridden by the .priority-N classes below. */
.resource-card {
background: linear-gradient(135deg, #667eea 0%, #764ba2 100%);
padding: 20px;
border-radius: 8px;
text-decoration: none;
color: white;
transition: transform 0.2s, box-shadow 0.2s;
display: block;
}
/* Lift the card slightly on hover. */
.resource-card:hover {
transform: translateY(-3px);
box-shadow: 0 6px 12px rgba(0,0,0,0.2);
}
.resource-card h3 {
font-size: 1.2em;
margin-bottom: 8px;
}
/* Monospace URL chip inside a card. */
.resource-card .url {
font-size: 0.85em;
opacity: 0.9;
font-family: monospace;
background: rgba(255,255,255,0.2);
padding: 4px 8px;
border-radius: 4px;
display: inline-block;
margin-bottom: 8px;
}
.resource-card .description {
font-size: 0.9em;
opacity: 0.95;
line-height: 1.4;
}
/* Per-priority gradient overrides: 1 = management (pink/red),
   2 = apps (blue/cyan), 3 = storage (green/teal). */
.priority-1 {
background: linear-gradient(135deg, #f093fb 0%, #f5576c 100%);
}
.priority-2 {
background: linear-gradient(135deg, #4facfe 0%, #00f2fe 100%);
}
.priority-3 {
background: linear-gradient(135deg, #43e97b 0%, #38f9d7 100%);
}
/* Warm gradient backdrop for the Development Tools section. */
.tools-section {
background: linear-gradient(135deg, #fa709a 0%, #fee140 100%);
}
/* Clickable tool row (uses onclick rather than an anchor). */
.tool-card {
background: white;
color: #333;
padding: 15px;
border-radius: 8px;
margin-bottom: 10px;
cursor: pointer;
transition: transform 0.2s;
}
/* Nudge the row right on hover. */
.tool-card:hover {
transform: translateX(5px);
}
.tool-card h3 {
color: #fa709a;
margin-bottom: 5px;
}
/* Monospace command hint below a tool row. */
.tool-card .command {
font-family: monospace;
background: #f5f5f5;
padding: 8px;
border-radius: 4px;
font-size: 0.85em;
margin-top: 8px;
color: #666;
}
/* Green pulsing dot next to the page title. */
.status-indicator {
display: inline-block;
width: 10px;
height: 10px;
border-radius: 50%;
background: #43e97b;
margin-right: 5px;
animation: pulse 2s infinite;
}
/* Fade the status dot in and out. */
@keyframes pulse {
0%, 100% { opacity: 1; }
50% { opacity: 0.5; }
}
/* Mobile: smaller title, single-column card grid. */
@media (max-width: 768px) {
h1 { font-size: 1.8em; }
.resource-grid {
grid-template-columns: 1fr;
}
}
</style>
</head>
<body>
<div class="container">
<h1><span class="status-indicator"></span>Homelab Dashboard</h1>
<p class="subtitle">Quick access to all your infrastructure resources</p>
<!-- Priority 1: Management & Monitoring -->
<div class="section">
<h2>🎯 Management & Monitoring</h2>
<div class="resource-grid">
<a href="https://10.0.10.3:8006" class="resource-card priority-1">
<h3>Proxmox - Main (DL380p)</h3>
<span class="url">https://10.0.10.3:8006</span>
<p class="description">Primary Proxmox host - 32 cores, 96GB RAM. Login with fred@authentik</p>
</a>
<a href="https://10.0.10.2:8006" class="resource-card priority-1">
<h3>Proxmox - Router (i5)</h3>
<span class="url">https://10.0.10.2:8006</span>
<p class="description">Secondary Proxmox host at office. Login with fred@authentik</p>
</a>
<a href="https://10.0.10.4:8006" class="resource-card priority-1">
<h3>Proxmox - Storage</h3>
<span class="url">https://10.0.10.4:8006</span>
<p class="description">Storage-focused Proxmox host. Login with fred@authentik</p>
</a>
<a href="http://10.0.10.25:3000" class="resource-card priority-1">
<h3>Grafana Monitoring</h3>
<span class="url">http://10.0.10.25:3000</span>
<p class="description">Infrastructure monitoring and metrics. Login with fred@nianticbooks.com</p>
</a>
<a href="http://10.0.10.21:9000" class="resource-card priority-1">
<h3>Authentik SSO</h3>
<span class="url">http://10.0.10.21:9000</span>
<p class="description">User authentication and SSO management. Login: akadmin</p>
</a>
</div>
</div>
<!-- Priority 2: Home Automation & Apps -->
<div class="section">
<h2>🏠 Home Automation & Apps</h2>
<div class="resource-grid">
<a href="http://10.0.10.24:8123" class="resource-card priority-2">
<h3>Home Assistant</h3>
<span class="url">http://10.0.10.24:8123</span>
<p class="description">Smart home control and automation</p>
</a>
<a href="http://10.0.10.22:5678" class="resource-card priority-2">
<h3>n8n Workflows</h3>
<span class="url">http://10.0.10.22:5678</span>
<p class="description">Automation workflows and integrations</p>
</a>
</div>
</div>
<!-- Priority 3: Storage & Infrastructure -->
<div class="section">
<h2>💾 Storage & Infrastructure</h2>
<div class="resource-grid">
<a href="http://10.0.10.5" class="resource-card priority-3">
<h3>OpenMediaVault</h3>
<span class="url">http://10.0.10.5</span>
<p class="description">12TB storage management and backup monitoring. Login: admin</p>
</a>
<a href="http://10.0.10.27:5001" class="resource-card priority-3">
<h3>Dockge</h3>
<span class="url">http://10.0.10.27:5001</span>
<p class="description">Docker Compose stack management</p>
</a>
</div>
</div>
<!-- Development Tools -->
<div class="section tools-section">
<h2>🛠️ Development Tools</h2>
<div class="tool-card" onclick="launchVSCode()">
<h3>🚀 Launch Claude Code with Infrastructure</h3>
<p class="description">Opens VS Code Insiders with Claude Code and claude-shared context</p>
<div class="command">Click to launch (Windows only)</div>
</div>
<div class="tool-card" onclick="window.open('vscode-insiders://file/C:/Users/Fred/projects', '_blank')">
<h3>📂 Open Projects Folder</h3>
<p class="description">Opens your projects directory in VS Code Insiders</p>
<div class="command">vscode-insiders://file/C:/Users/Fred/projects</div>
</div>
</div>
</div>
<script>
// Launch VS Code Insiders with the shared homelab workspace.
// Called from the "Development Tools" card's inline onclick handler,
// so this must stay a named global function.
function launchVSCode() {
window.open(
'vscode-insiders://file/C:/Users/Fred/projects/claude-shared/homelab.code-workspace',
'_blank'
);
}
// Keyboard shortcut: a bare "h" (no Ctrl/Cmd modifier) reloads the page.
document.addEventListener('keydown', (e) => {
const isPlainH = e.key === 'h' && !e.ctrlKey && !e.metaKey;
if (isPlainH) {
location.reload();
}
});
// Simple status check (optional - can be enhanced later)
console.log('Homelab Dashboard loaded successfully');
console.log('Press "h" to refresh dashboard');
</script>
</body>
</html>

76
infrastructure/index.html Normal file
View File

@@ -0,0 +1,76 @@
<!DOCTYPE HTML>
<html lang="en">
<head>
<meta charset="utf-8">
<title>Directory listing for /infrastructure/</title>
</head>
<body>
<h1>Directory listing for /infrastructure/</h1>
<hr>
<ul>
<li><a href=".claude/">.claude/</a></li>
<li><a href=".env.example">.env.example</a></li>
<li><a href=".gitignore">.gitignore</a></li>
<li><a href=".gitmodules">.gitmodules</a></li>
<li><a href=".mcp.json">.mcp.json</a></li>
<li><a href="2025-12-10%20231147.png">2025-12-10 231147.png</a></li>
<li><a href="2025-12-10%20231412.png">2025-12-10 231412.png</a></li>
<li><a href="3D-PRINTING-QUICK-START.md">3D-PRINTING-QUICK-START.md</a></li>
<li><a href="3D-PRINTING-SETUP.md">3D-PRINTING-SETUP.md</a></li>
<li><a href="AGENT-REFERENCE.md">AGENT-REFERENCE.md</a></li>
<li><a href="authentik-homeassistant-blueprint.yaml">authentik-homeassistant-blueprint.yaml</a></li>
<li><a href="BLUEBUBBLES-SETUP.md">BLUEBUBBLES-SETUP.md</a></li>
<li><a href="BRAINSTORM.md">BRAINSTORM.md</a></li>
<li><a href="CA-DEPLOYMENT-SUMMARY.md">CA-DEPLOYMENT-SUMMARY.md</a></li>
<li><a href="CA-WORK-IN-PROGRESS.md">CA-WORK-IN-PROGRESS.md</a></li>
<li><a href="Caddy-Internal-Root-CA.crt">Caddy-Internal-Root-CA.crt</a></li>
<li><a href="claude-shared/">claude-shared/</a></li>
<li><a href="CLAUDE.md">CLAUDE.md</a></li>
<li><a href="dhcp-export-all-2025-11-14T22-55-18.871Z.csv">dhcp-export-all-2025-11-14T22-55-18.871Z.csv</a></li>
<li><a href="dhcp-export-all-2025-11-17T04-56-03.591Z.csv">dhcp-export-all-2025-11-17T04-56-03.591Z.csv</a></li>
<li><a href="DISASTER-RECOVERY.md">DISASTER-RECOVERY.md</a></li>
<li><a href="DNS-OVER-TLS-SETUP.md">DNS-OVER-TLS-SETUP.md</a></li>
<li><a href="docs/">docs/</a></li>
<li><a href="esphome/">esphome/</a></li>
<li><a href="guides/">guides/</a></li>
<li><a href="home-assistant/">home-assistant/</a></li>
<li><a href="homelab-dashboard/">homelab-dashboard/</a></li>
<li><a href="Homelab-Root-CA.crt">Homelab-Root-CA.crt</a></li>
<li><a href="IMPROVEMENTS.md">IMPROVEMENTS.md</a></li>
<li><a href="infrastructure-audit.md">infrastructure-audit.md</a></li>
<li><a href="INFRASTRUCTURE-TODO.md">INFRASTRUCTURE-TODO.md</a></li>
<li><a href="install-ca-cert-admin.ps1">install-ca-cert-admin.ps1</a></li>
<li><a href="IP-ALLOCATION.md">IP-ALLOCATION.md</a></li>
<li><a href="LOCAL-CA-SETUP.md">LOCAL-CA-SETUP.md</a></li>
<li><a href="LOCAL-HTTPS-QUICKSTART.md">LOCAL-HTTPS-QUICKSTART.md</a></li>
<li><a href="mc_server/">mc_server/</a></li>
<li><a href="MIGRATION-CHECKLIST.md">MIGRATION-CHECKLIST.md</a></li>
<li><a href="MONITORING.md">MONITORING.md</a></li>
<li><a href="MORNING-REMINDER.md">MORNING-REMINDER.md</a></li>
<li><a href="MQTT-SETUP.md">MQTT-SETUP.md</a></li>
<li><a href="n8n-workflows/">n8n-workflows/</a></li>
<li><a href="nul">nul</a></li>
<li><a href="OPENCLAW-AUTOMATION-PROMPTS.md">OPENCLAW-AUTOMATION-PROMPTS.md</a></li>
<li><a href="OPENCLAW-QUICKSTART.md">OPENCLAW-QUICKSTART.md</a></li>
<li><a href="OPENCLAW-SETUP.md">OPENCLAW-SETUP.md</a></li>
<li><a href="prometheus-config.yml">prometheus-config.yml</a></li>
<li><a href="PROXMOX-RECOVERY-GUIDE.md">PROXMOX-RECOVERY-GUIDE.md</a></li>
<li><a href="pterodactyl/">pterodactyl/</a></li>
<li><a href="README.md">README.md</a></li>
<li><a href="RUNBOOK.md">RUNBOOK.md</a></li>
<li><a href="Screenshot%202025-11-17%20225014.png">Screenshot 2025-11-17 225014.png</a></li>
<li><a href="Screenshot%202025-11-17%20230122.png">Screenshot 2025-11-17 230122.png</a></li>
<li><a href="Screenshot%202025-12-27%20092411.png">Screenshot 2025-12-27 092411.png</a></li>
<li><a href="scripts/">scripts/</a></li>
<li><a href="SERVICES.md">SERVICES.md</a></li>
<li><a href="SSH%20pub%20key%20ssh%20ed25519.txt">SSH pub key ssh ed25519.txt</a></li>
<li><a href="SSH-SETUP-GUIDE.md">SSH-SETUP-GUIDE.md</a></li>
<li><a href="temp_scan.ps1">temp_scan.ps1</a></li>
<li><a href="TODO.md">TODO.md</a></li>
<li><a href="UNDOCUMENTED-CONTAINERS.md">UNDOCUMENTED-CONTAINERS.md</a></li>
<li><a href="vehicle-maintenance-tracker/">vehicle-maintenance-tracker/</a></li>
<li><a href="voice-assistant/">voice-assistant/</a></li>
</ul>
<hr>
</body>
</html>

View File

@@ -0,0 +1,290 @@
# Infrastructure Audit
**Last Updated:** 2026-01-25
**Status:** Active - Source of Truth
This document provides a comprehensive inventory of all infrastructure components. For IP allocations, see `IP-ALLOCATION.md`.
---
## 1. VPS Configuration
| Property | Value |
|----------|-------|
| Provider | Hudson Valley Host |
| Public IP | 66.63.182.168 |
| Hostname | vps.nianticbooks.com |
| OS | Ubuntu 24.04 x86_64 |
| Specs | 2 vCPUs, 4GB RAM, 100GB storage |
### VPS Services
| Service | Port | Status |
|---------|------|--------|
| Caddy Reverse Proxy | 80, 443 | Active |
| WireGuard VPN Server | 51820/UDP | Active |
| RustDesk Relay (hbbr) | 21117 | Active |
### Caddy Routes (via WireGuard to home lab)
| Domain | Backend | Status |
|--------|---------|--------|
| freddesk.nianticbooks.com | 10.0.10.3:8006 | Active |
| ad5m.nianticbooks.com | 10.0.10.30:80 | Active |
| bob.nianticbooks.com | 10.0.10.24:8123 | Active |
| auth.nianticbooks.com | 10.0.10.21:9000 | Active |
| cocktails.nianticbooks.com | 10.0.10.40 | Active |
---
## 2. WireGuard Tunnel
| Property | Value |
|----------|-------|
| Status | Active |
| Gaming VPS Endpoint | 51.222.12.162:51820 |
| Gaming VPS Tunnel IP | 10.0.9.1 |
| UCG Ultra Tunnel IP | 10.0.9.2 |
| VPS Proxy Tunnel IP | 10.0.9.3 |
| Home Lab Subnet | 10.0.10.0/24 |
| Keepalive | 25 seconds |
---
## 3. Proxmox Cluster
### main-pve (DL380p) - Production Workloads
| Property | Value |
|----------|-------|
| IP Address | 10.0.10.3 (static) |
| iLO Management | 10.0.10.13 |
| Location | Remote |
| CPU | 32 cores |
| RAM | 96 GB |
| Role | Primary production host |
**Running Containers (14 total):**
| CT ID | Name | IP | Service |
|-------|------|-----|---------|
| 102 | postgresql | 10.0.10.20 | Shared PostgreSQL database |
| 103 | bar-assistant | 10.0.10.40 | Cocktail recipe manager |
| 105 | pterodactyl-panel | 10.0.10.45 | Game server management panel |
| 106 | n8n | 10.0.10.22 | Workflow automation |
| 107 | pterodactyl-wings | 10.0.10.46 | Game server node |
| 115 | ca-server | 10.0.10.15 | Step-CA certificate authority |
| 121 | authentik | 10.0.10.21 | SSO/Identity provider |
| 123 | rustdesk | 10.0.10.23 | RustDesk ID server (hbbs) |
| 125 | prometheus | 10.0.10.25 | Monitoring (Prometheus + Grafana) |
| 127 | dockge | 10.0.10.27 | Docker Compose mgmt + Media Stack (6 services) |
| 128 | uptime-kuma | 10.0.10.26 | Uptime monitoring |
| 130 | minecraft-forge | 10.0.10.41 | Minecraft Forge server |
| 131 | minecraft-stoneblock4 | 10.0.10.42 | Minecraft Stoneblock 4 |
| 135 | vehicle-tracker | 10.0.10.35 | Vehicle Maintenance Tracker (Planned) |
### pve-router (i5) - Local/Light Workloads
| Property | Value |
|----------|-------|
| IP Address | 10.0.10.2 (static) |
| DNS | proxmox.nianticbooks.home |
| Location | Office |
| CPU | 8 cores |
| RAM | 8 GB |
| Role | Local development, Home Assistant |
**Running VMs (1 total):**
| VM ID | Name | IP | Service |
|-------|------|-----|---------|
| 104 | haos16.2 | 10.0.10.24 | Home Assistant OS |
**Running Containers (1 total):**
| CT ID | Name | IP | Service |
|-------|------|-----|---------|
| 101 | twingate-connector | 10.0.10.179 | Zero-trust remote access |
### pve-storage - Storage Host
| Property | Value |
|----------|-------|
| IP Address | 10.0.10.4 (static) |
| Role | Storage host (3.5" drive support) |
**Running VMs (1 total):**
| VM ID | Name | IP | Service |
|-------|------|-----|---------|
| 400 | OMV | 10.0.10.5 | OpenMediaVault (12TB) |
---
## 4. Network Configuration
| Property | Value |
|----------|-------|
| Subnet | 10.0.10.0/24 |
| Gateway | 10.0.10.1 (UCG Ultra) |
| DHCP Range | 10.0.10.50-254 |
| Static Range | 10.0.10.1-49 |
**Note:** All infrastructure IPs (.1-.49) use static configuration on devices, not DHCP reservations.
See `IP-ALLOCATION.md` for complete IP assignments.
---
## 5. Key Services Summary
### Authentication & Security
| Service | IP | Port | Purpose |
|---------|-----|------|---------|
| Authentik SSO | 10.0.10.21 | 9000 | OAuth2/OIDC, WebAuthn |
| Step-CA | 10.0.10.15 | 8443 | Internal certificate authority |
| Twingate | 10.0.10.179 | - | Zero-trust remote access |
### Databases
| Service | IP | Port | Purpose |
|---------|-----|------|---------|
| PostgreSQL | 10.0.10.20 | 5432 | Shared DB (Authentik, n8n, RustDesk, Grafana) |
### Monitoring
| Service | IP | Port | Purpose |
|---------|-----|------|---------|
| Prometheus | 10.0.10.25 | 9090 | Metrics collection |
| Grafana | 10.0.10.25 | 3000 | Dashboards |
| Uptime Kuma | 10.0.10.26 | 3001 | Uptime monitoring |
### Automation
| Service | IP | Port | Purpose |
|---------|-----|------|---------|
| n8n | 10.0.10.22 | 5678 | Workflow automation |
| Home Assistant | 10.0.10.24 | 8123 | Smart home |
### Gaming
| Service | IP | Port | Purpose |
|---------|-----|------|---------|
| Pterodactyl Panel | 10.0.10.45 | 80 | Game server management |
| Pterodactyl Wings | 10.0.10.46 | 8080 | Game server node |
| Minecraft Forge | 10.0.10.41 | 25565 | CFMRPGU modpack |
| Minecraft SB4 | 10.0.10.42 | 25566 | Stoneblock 4 modpack |
### Remote Access
| Service | IP | Port | Purpose |
|---------|-----|------|---------|
| RustDesk ID (hbbs) | 10.0.10.23 | 21116 | Remote desktop ID server |
| RustDesk Relay (hbbr) | VPS | 21117 | Remote desktop relay |
### Storage
| Service | IP | Purpose |
|---------|-----|---------|
| OpenMediaVault | 10.0.10.5 | 12TB NFS/SMB storage (media library for Arr stack) |
| Dockge | 10.0.10.27 | Docker stack management |
### Media Automation (Arr Stack)
| Service | IP | Port | Purpose |
|---------|-----|------|---------|
| Sonarr | 10.0.10.27 | 8989 | TV show monitoring & automation |
| Radarr | 10.0.10.27 | 7878 | Movie monitoring & automation |
| Prowlarr | 10.0.10.27 | 9696 | Indexer management for *arr apps |
| Bazarr | 10.0.10.27 | 6767 | Subtitle download automation |
| Deluge | 10.0.10.27 | 8112 | BitTorrent download client |
| Calibre-Web | 10.0.10.27 | 8083 | eBook library management |
| Caddy Internal Proxy | 10.0.10.27 | 443 | HTTPS reverse proxy (Caddy Internal PKI) |
**Storage Paths:**
- `/media/tv` - Sonarr TV library
- `/media/movies` - Radarr movie library
- `/media/downloads` - Deluge download directory
- `/media/books` - Calibre library
**Note:** All services run as Docker containers on CT 127 (Dockge), accessible via HTTPS at `https://<service>.nianticbooks.home`
### Utility
| Service | IP | Port | Purpose |
|---------|-----|------|---------|
| Bar Assistant | 10.0.10.40 | 80 | Cocktail recipe manager |
| Vikunja | 10.0.10.27 | 3456 | Task management (no longer actively used) |
---
## 6. Backup System
### Tier 1 - Local (OMV NFS)
| Property | Value |
|----------|-------|
| Storage | 10.0.10.5:/export/backups |
| Available | 7.3 TB |
| Mount Point | /mnt/omv-backups (all Proxmox hosts) |
**Automated Backups:**
| Time | What | Retention |
|------|------|-----------|
| 2:00 AM | PostgreSQL (all databases) | 7 daily, 4 weekly, 3 monthly |
| 2:30 AM | Proxmox VMs/containers | 7 daily, 4 weekly, 3 monthly |
---
## 7. Physical Devices
### HOMELAB-COMMAND (10.0.10.10)
| Property | Value |
|----------|-------|
| Type | Gaming PC |
| GPU | RTX 5060 |
| Services | Wyoming (Whisper STT, Piper TTS), Ollama LLM |
| OS | Windows 11 |
| Role | Claude Code host, voice assistant hub |
### HP iLO (10.0.10.13)
| Property | Value |
|----------|-------|
| Type | Server management |
| Purpose | DL380p (main-pve) remote management |
### 3D Printers
| Device | IP | Status |
|--------|-----|--------|
| Flashforge AD5M | 10.0.10.30 | Active |
| Bambu Lab A1 | 10.0.10.31 | Active |
---
## 8. Audit History
| Date | Action | Notes |
|------|--------|-------|
| 2026-01-25 | Deployed Media Stack | Sonarr, Radarr, Prowlarr, Bazarr, Deluge, Calibre-Web on CT 127 via Docker |
| 2026-01-25 | Deployed Caddy Internal Proxy | HTTPS reverse proxy for internal services on CT 127 |
| 2026-01-25 | Deployed CA certificates | Homelab root CA distributed to all LXC containers and Proxmox hosts |
| 2026-01-25 | Deprecated Vikunja | No longer actively used (Claude Code replaced n8n workflow use case) |
| 2026-01-18 | Deployed Vikunja | Task management on Dockge (10.0.10.27:3456), tasks.nianticbooks.com |
| 2026-01-13 | Full network audit | Compared UCG DHCP export vs docs, verified all services |
| 2026-01-13 | Removed CT 100 | pve-scripts-local - unused, IP conflict with bar-assistant |
| 2025-12-29 | Initial audit | Infrastructure audit template completed |
---
## 9. Outstanding Items
- [ ] Fix Home Assistant public domain (Caddy HTTPS backend config)
- [x] Move Bambu A1 to static IP 10.0.10.31 (done 2026-01-13)
- [ ] Identify unknown Raspberry Pi devices (.81, .171, .246)
- [ ] Document ESP devices purpose (.90, .207)
- [ ] Cleanup deprecated VMs (Spoolman .71, Authelia .112)

View File

@@ -0,0 +1,36 @@
# Run this script as Administrator to install CA cert system-wide
# Right-click this file and select "Run with PowerShell"
#
# Installs the homelab root CA into the LocalMachine\Root store
# ("Trusted Root Certification Authorities") so anything using the
# Windows certificate store trusts the internal HTTPS services.
param(
    # Path to the root CA certificate; default matches this repo's checkout.
    [string]$certPath = "C:\Users\Fred\projects\infrastructure\Homelab-Root-CA.crt"
)
Write-Host "Installing Homelab CA Certificate (System-Wide)..." -ForegroundColor Cyan
Write-Host ""
try {
    # Fail early with a clear message instead of an opaque import error.
    if (-not (Test-Path -Path $certPath)) {
        throw "Certificate file not found: $certPath"
    }
    # Load via the constructor; the X509Certificate2.Import() instance
    # method used previously is marked obsolete in newer .NET.
    $cert = New-Object System.Security.Cryptography.X509Certificates.X509Certificate2($certPath)
    # Opening LocalMachine\Root ReadWrite requires an elevated session.
    $store = New-Object System.Security.Cryptography.X509Certificates.X509Store('Root','LocalMachine')
    $store.Open('ReadWrite')
    $store.Add($cert)
    $store.Close()
    Write-Host "SUCCESS! CA certificate installed system-wide!" -ForegroundColor Green
    Write-Host ""
    Write-Host "Certificate Details:" -ForegroundColor Cyan
    Write-Host "  Subject: $($cert.Subject)" -ForegroundColor White
    Write-Host "  Issuer: $($cert.Issuer)" -ForegroundColor White
    Write-Host "  Valid Until: $($cert.NotAfter)" -ForegroundColor White
    Write-Host ""
    Write-Host "Close and reopen your browser, then visit:" -ForegroundColor Yellow
    Write-Host "  https://10.0.10.24:8123" -ForegroundColor White
} catch {
    Write-Host "ERROR: $($_.Exception.Message)" -ForegroundColor Red
    Write-Host ""
    Write-Host "Make sure you ran this script as Administrator!" -ForegroundColor Yellow
}
Write-Host ""
Write-Host "Press any key to exit..."
$null = $Host.UI.RawUI.ReadKey('NoEcho,IncludeKeyDown')

View File

@@ -0,0 +1,28 @@
<!DOCTYPE HTML>
<html lang="en">
<head>
<meta charset="utf-8">
<title>Directory listing for /infrastructure/mc_server/</title>
</head>
<body>
<h1>Directory listing for /infrastructure/mc_server/</h1>
<hr>
<ul>
<li><a href="backups/">backups/</a></li>
<li><a href="configs/">configs/</a></li>
<li><a href="DEPLOYMENT-COMPLETE.md">DEPLOYMENT-COMPLETE.md</a></li>
<li><a href="docs/">docs/</a></li>
<li><a href="MIGRATION-SUMMARY.md">MIGRATION-SUMMARY.md</a></li>
<li><a href="modpack/">modpack/</a></li>
<li><a href="QUICK-START-CRAFTY.md">QUICK-START-CRAFTY.md</a></li>
<li><a href="QUICK-START.md">QUICK-START.md</a></li>
<li><a href="README.md">README.md</a></li>
<li><a href="Screenshot%202026-01-10%20114648.png">Screenshot 2026-01-10 114648.png</a></li>
<li><a href="Screenshot%202026-01-11%20124035.png">Screenshot 2026-01-11 124035.png</a></li>
<li><a href="Screenshot%202026-01-11%20130343.png">Screenshot 2026-01-11 130343.png</a></li>
<li><a href="Screenshot%202026-01-11%20130731.png">Screenshot 2026-01-11 130731.png</a></li>
<li><a href="scripts/">scripts/</a></li>
</ul>
<hr>
</body>
</html>

View File

@@ -0,0 +1,40 @@
<!DOCTYPE HTML>
<html lang="en">
<head>
<meta charset="utf-8">
<title>Directory listing for /infrastructure/n8n-workflows/</title>
</head>
<body>
<h1>Directory listing for /infrastructure/n8n-workflows/</h1>
<hr>
<ul>
<li><a href=".claude/">.claude/</a></li>
<li><a href="alertmanager-homelab-config.yml">alertmanager-homelab-config.yml</a></li>
<li><a href="CLAUDE.md">CLAUDE.md</a></li>
<li><a href="credentials/">credentials/</a></li>
<li><a href="c%EF%80%BAtempworkflow-check.json">ctempworkflow-check.json</a></li>
<li><a href="c%EF%80%BAUsersFredprojectsinfrastructuren8n-workflowsweek1current-workflow.json">cUsersFredprojectsinfrastructuren8n-workflowsweek1current-workflow.json</a></li>
<li><a href="c%EF%80%BAUsersFredprojectsinfrastructuren8n-workflowsweek1working-workflow.json">cUsersFredprojectsinfrastructuren8n-workflowsweek1working-workflow.json</a></li>
<li><a href="deploy-prometheus-alerting.ps1">deploy-prometheus-alerting.ps1</a></li>
<li><a href="deploy_week1.py">deploy_week1.py</a></li>
<li><a href="DEPLOYMENT-GUIDE.md">DEPLOYMENT-GUIDE.md</a></li>
<li><a href="DEPLOYMENT-SUMMARY.md">DEPLOYMENT-SUMMARY.md</a></li>
<li><a href="HUGGINGFACE-QUICKSTART.md">HUGGINGFACE-QUICKSTART.md</a></li>
<li><a href="IMPORT-TROUBLESHOOTING.md">IMPORT-TROUBLESHOOTING.md</a></li>
<li><a href="MANUAL-OLLAMA-SETUP.md">MANUAL-OLLAMA-SETUP.md</a></li>
<li><a href="nul">nul</a></li>
<li><a href="OLLAMA-LOCAL-SETUP.md">OLLAMA-LOCAL-SETUP.md</a></li>
<li><a href="prometheus-alert-rules.yml">prometheus-alert-rules.yml</a></li>
<li><a href="prometheus-homelab-monitoring.json">prometheus-homelab-monitoring.json</a></li>
<li><a href="PROMETHEUS-SETUP.md">PROMETHEUS-SETUP.md</a></li>
<li><a href="QUICK-REFERENCE.md">QUICK-REFERENCE.md</a></li>
<li><a href="README.md">README.md</a></li>
<li><a href="test-prometheus-webhook.ps1">test-prometheus-webhook.ps1</a></li>
<li><a href="VIKUNJA-IMPLEMENTATION-PLAN.md">VIKUNJA-IMPLEMENTATION-PLAN.md</a></li>
<li><a href="week1/">week1/</a></li>
<li><a href="WEEK1-DEPLOYMENT-CHECKLIST.md">WEEK1-DEPLOYMENT-CHECKLIST.md</a></li>
<li><a href="Working%20in%20VS%20Code%20with%20n8n%20Workflows.md">Working in VS Code with n8n Workflows.md</a></li>
</ul>
<hr>
</body>
</html>

View File

@@ -0,0 +1,240 @@
# Prometheus Configuration for Fred's Homelab
# Last Updated: 2025-12-25
#
# Installation Instructions:
# 1. Install node_exporter on each host you want to monitor
# 2. Copy this file to 10.0.10.25:/etc/prometheus/prometheus.yml
# 3. Restart Prometheus: systemctl restart prometheus
# 4. Verify targets: http://10.0.10.25:9090/targets
global:
scrape_interval: 15s
evaluation_interval: 15s
external_labels:
environment: 'homelab'
datacenter: 'home'
# Alertmanager configuration (optional - configure later)
alerting:
alertmanagers:
- static_configs:
- targets:
# - alertmanager:9093
# Load alerting rules
rule_files:
# - "/etc/prometheus/alerts/*.yml"
scrape_configs:
# ====================================
# Prometheus Self-Monitoring
# ====================================
- job_name: 'prometheus'
static_configs:
- targets: ['localhost:9090']
labels:
app: 'prometheus'
instance: 'prometheus-main'
# ====================================
# Proxmox Hosts (Node Exporters)
# ====================================
# Install: apt install prometheus-node-exporter
- job_name: 'proxmox-nodes'
static_configs:
- targets: ['10.0.10.2:9100'] # pve-router (i5)
labels:
hostname: 'pve-router'
role: 'proxmox-host'
location: 'office'
- targets: ['10.0.10.3:9100'] # main-pve (DL380p)
labels:
hostname: 'main-pve'
role: 'proxmox-host'
location: 'remote'
- targets: ['10.0.10.4:9100'] # backup-pve
labels:
hostname: 'backup-pve'
role: 'proxmox-host'
location: 'storage'
# ====================================
# Proxmox VE API Metrics (Proxmox Exporter)
# ====================================
# Optional: Install https://github.com/prometheus-pve/prometheus-pve-exporter
# This gives you VM/CT specific metrics
# - job_name: 'proxmox-api'
# static_configs:
# - targets: ['10.0.10.25:9221'] # Run pve_exporter on prometheus host
# labels:
# exporter: 'pve_exporter'
# ====================================
# VPS (Node Exporter)
# ====================================
# Install on VPS: apt install prometheus-node-exporter
- job_name: 'vps'
static_configs:
- targets: ['66.63.182.168:9100'] # VPS external IP
labels:
hostname: 'vps-hv'
role: 'vps'
provider: 'hudson-valley-host'
# ====================================
# Gaming PC / HOMELAB-COMMAND
# ====================================
# Windows: Use windows_exporter
# https://github.com/prometheus-community/windows_exporter
- job_name: 'gaming-pc'
static_configs:
- targets: ['10.0.10.10:9182'] # windows_exporter default port
labels:
hostname: 'HOMELAB-COMMAND'
role: 'workstation'
os: 'windows-11'
# ====================================
# Databases
# ====================================
# PostgreSQL Exporter
# Install: https://github.com/prometheus-community/postgres_exporter
- job_name: 'postgresql'
static_configs:
- targets: ['10.0.10.20:9187']
labels:
hostname: 'postgresql'
role: 'database'
app: 'postgres'
# ====================================
# Application Services
# ====================================
# Authentik (built-in metrics)
- job_name: 'authentik'
metrics_path: '/application/o/prometheus-outpost/metrics/'
static_configs:
- targets: ['10.0.10.21:9000']
labels:
app: 'authentik'
role: 'sso'
# n8n (if metrics enabled)
# - job_name: 'n8n'
# static_configs:
# - targets: ['10.0.10.22:5678']
# labels:
# app: 'n8n'
# Home Assistant (via integration)
# https://www.home-assistant.io/integrations/prometheus/
- job_name: 'homeassistant'
metrics_path: '/api/prometheus'
bearer_token: 'YOUR_LONG_LIVED_ACCESS_TOKEN' # Create in HA: Profile -> Long-Lived Access Tokens
static_configs:
- targets: ['10.0.10.24:8123']
labels:
app: 'homeassistant'
role: 'automation'
# Caddy (metrics built-in, needs admin API enabled)
# - job_name: 'caddy'
# static_configs:
# - targets: ['66.63.182.168:2019'] # Caddy admin API
# labels:
# app: 'caddy'
# role: 'reverse-proxy'
# ====================================
# Network Devices
# ====================================
# UCG Ultra (SNMP Exporter)
# https://github.com/prometheus/snmp_exporter
# - job_name: 'unifi-gateway'
# static_configs:
# - targets: ['10.0.10.1']
# labels:
# device: 'ucg-ultra'
# role: 'gateway'
# ====================================
# Storage
# ====================================
# OpenMediaVault Node Exporter
- job_name: 'storage'
static_configs:
- targets: ['10.0.10.5:9100']
labels:
hostname: 'openmediavault'
role: 'storage'
capacity: '12tb'
# ====================================
# Blackbox Exporter (Endpoint Monitoring)
# ====================================
# Optional: Monitor HTTP endpoints, SSL certs, DNS, etc.
# https://github.com/prometheus/blackbox_exporter
# - job_name: 'blackbox-http'
# metrics_path: /probe
# params:
# module: [http_2xx]
# static_configs:
# - targets:
# - https://auth.nianticbooks.com
# - https://freddesk.nianticbooks.com
# - https://bob.nianticbooks.com
# relabel_configs:
# - source_labels: [__address__]
# target_label: __param_target
# - source_labels: [__param_target]
# target_label: instance
# - target_label: __address__
# replacement: localhost:9115 # Blackbox exporter address
# ====================================
# Installation Quick Reference
# ====================================
#
# Node Exporter (Debian/Ubuntu):
# apt update && apt install prometheus-node-exporter -y
# systemctl enable prometheus-node-exporter
# systemctl start prometheus-node-exporter
# # Verify: curl http://localhost:9100/metrics
#
# Node Exporter (Manual install):
# wget https://github.com/prometheus/node_exporter/releases/download/v1.7.0/node_exporter-1.7.0.linux-amd64.tar.gz
# tar xvfz node_exporter-*.tar.gz
# sudo mv node_exporter-*/node_exporter /usr/local/bin/
# sudo useradd -rs /bin/false node_exporter
#
# # Create systemd service: /etc/systemd/system/node_exporter.service
# sudo systemctl daemon-reload
# sudo systemctl enable node_exporter
# sudo systemctl start node_exporter
#
# Windows Exporter:
# Download: https://github.com/prometheus-community/windows_exporter/releases
# Install MSI package
# Verify: http://localhost:9182/metrics
#
# PostgreSQL Exporter:
# apt install prometheus-postgres-exporter -y
# # Configure connection in /etc/default/prometheus-postgres-exporter
# # DATA_SOURCE_NAME="postgresql://user:pass@localhost:5432/postgres?sslmode=disable"
#
# Home Assistant Prometheus Integration:
# 1. Settings -> Devices & Services -> Add Integration
# 2. Search "Prometheus"
# 3. Configure
# 4. Create long-lived access token: Profile -> Security -> Long-Lived Access Tokens

View File

@@ -0,0 +1,24 @@
<!DOCTYPE HTML>
<html lang="en">
<head>
<meta charset="utf-8">
<title>Directory listing for /infrastructure/pterodactyl/</title>
</head>
<body>
<h1>Directory listing for /infrastructure/pterodactyl/</h1>
<hr>
<ul>
<li><a href="GEMINI.md">GEMINI.md</a></li>
<li><a href="hytale/">hytale/</a></li>
<li><a href="keys/">keys/</a></li>
<li><a href="NEOFORGE-QUICK-DEPLOY.md">NEOFORGE-QUICK-DEPLOY.md</a></li>
<li><a href="notes/">notes/</a></li>
<li><a href="Screenshot%202026-01-11%20193016.png">Screenshot 2026-01-11 193016.png</a></li>
<li><a href="Screenshot%202026-01-13%20123742.png">Screenshot 2026-01-13 123742.png</a></li>
<li><a href="scripts/">scripts/</a></li>
<li><a href="stoneblock4/">stoneblock4/</a></li>
<li><a href="web_page/">web_page/</a></li>
</ul>
<hr>
</body>
</html>

View File

@@ -0,0 +1,37 @@
<!DOCTYPE HTML>
<html lang="en">
<head>
<meta charset="utf-8">
<title>Directory listing for /infrastructure/scripts/</title>
</head>
<body>
<h1>Directory listing for /infrastructure/scripts/</h1>
<hr>
<ul>
<li><a href="backup-postgresql.sh">backup-postgresql.sh</a></li>
<li><a href="backup-proxmox-to-omv.sh">backup-proxmox-to-omv.sh</a></li>
<li><a href="backup-proxmox.sh">backup-proxmox.sh</a></li>
<li><a href="backup-vps-configs.sh">backup-vps-configs.sh</a></li>
<li><a href="backup-vps.sh">backup-vps.sh</a></li>
<li><a href="cert-check.sh">cert-check.sh</a></li>
<li><a href="clone-all-repos.ps1">clone-all-repos.ps1</a></li>
<li><a href="deploy-ca-certificates.sh">deploy-ca-certificates.sh</a></li>
<li><a href="enable-ssh-firewall.ps1">enable-ssh-firewall.ps1</a></li>
<li><a href="health-check.sh">health-check.sh</a></li>
<li><a href="install-bluebubbles.sh">install-bluebubbles.sh</a></li>
<li><a href="issue-service-certs.sh">issue-service-certs.sh</a></li>
<li><a href="README.md">README.md</a></li>
<li><a href="resource-report.sh">resource-report.sh</a></li>
<li><a href="setup-homelab-command.ps1">setup-homelab-command.ps1</a></li>
<li><a href="setup-internal-caddy.sh">setup-internal-caddy.sh</a></li>
<li><a href="setup-local-ca.sh">setup-local-ca.sh</a></li>
<li><a href="setup-ssh-server.ps1">setup-ssh-server.ps1</a></li>
<li><a href="sync-ha-config.ps1">sync-ha-config.ps1</a></li>
<li><a href="sync-ha-config.sh">sync-ha-config.sh</a></li>
<li><a href="test-homelab-ssh.sh">test-homelab-ssh.sh</a></li>
<li><a href="trust-ca-client.sh">trust-ca-client.sh</a></li>
<li><a href="tunnel-monitor.sh">tunnel-monitor.sh</a></li>
</ul>
<hr>
</body>
</html>

View File

@@ -0,0 +1,40 @@
# Quick network scan of infrastructure IPs
# Pings each documented static IP once and prints an UP/DOWN summary table.
# NOTE(review): Test-Connection's -TimeoutSeconds parameter exists only in
# PowerShell 7+; on Windows PowerShell 5.1 remove it (default timeout applies).
$ips = @(
    "10.0.10.1",   # UCG Ultra
    "10.0.10.2",   # pve-router
    "10.0.10.3",   # main-pve
    "10.0.10.4",   # pve-storage
    "10.0.10.5",   # openmediavault
    "10.0.10.10",  # HOMELAB-COMMAND
    "10.0.10.13",  # HP iLO
    "10.0.10.15",  # CA Server
    "10.0.10.20",  # PostgreSQL
    "10.0.10.21",  # Authentik
    "10.0.10.22",  # n8n
    "10.0.10.23",  # RustDesk
    "10.0.10.24",  # Home Assistant
    "10.0.10.25",  # Monitoring (Prometheus/Grafana)
    "10.0.10.26",  # Uptime Kuma
    "10.0.10.27",  # Dockge
    "10.0.10.28",  # ESPHome (deprecated)
    "10.0.10.30",  # ad5m 3D printer
    "10.0.10.40",  # Bar Assistant
    "10.0.10.41",  # Minecraft
    "10.0.10.88"   # Web Power Switch
)
Write-Host "Scanning documented infrastructure IPs..."
# Collect results via the pipeline instead of the O(n^2) '$results += ...'
# pattern, which reallocates and copies the whole array on every append.
$results = foreach ($ip in $ips) {
    $online = Test-Connection -ComputerName $ip -Count 1 -Quiet -TimeoutSeconds 1
    $status = if ($online) { "UP" } else { "DOWN" }
    Write-Host "$ip - $status"
    [PSCustomObject]@{
        IP     = $ip
        Status = $status
    }
}
Write-Host "`nSummary:"
$results | Format-Table -AutoSize