feat: Initial commit

Author: Tellsanguis
Date:   2025-11-07 09:33:38 +01:00
Commit: 40dc0f4184

43 changed files with 1990 additions and 0 deletions

.forgejo/workflows/ci.yml (new file)
@@ -0,0 +1,140 @@
name: CI - Validation
on:
  push:
    branches: ['**'] # All branches
  pull_request:
  workflow_call: # needed so cd.yml can call this workflow as a reusable workflow
jobs:
  ci-terraform:
    name: Terraform Validation
    runs-on: self-hosted
    steps:
      - name: Checkout code
        uses: actions/checkout@v4
      - name: Setup OpenTofu
        run: |
          if ! command -v tofu &> /dev/null; then
            curl -fsSL https://get.opentofu.org/install-opentofu.sh | bash
          fi
      - name: Terraform Format Check
        run: |
          cd terraform
          tofu fmt -check -recursive
        continue-on-error: false
      - name: Terraform Validate
        run: |
          cd terraform
          tofu init -backend=false
          tofu validate
      - name: Terraform Plan
        if: github.event_name == 'push'
        run: |
          cd terraform
          cp terraform.tfvars.example terraform.tfvars
          tofu init
          tofu plan -out=tfplan
        env:
          TF_VAR_proxmox_token_id: ${{ secrets.PROXMOX_TOKEN_ID }}
          TF_VAR_proxmox_token_secret: ${{ secrets.PROXMOX_TOKEN_SECRET }}
          TF_VAR_ssh_public_key: ${{ secrets.SSH_PUBLIC_KEY }}
          TF_VAR_forgejo_token: ${{ secrets.FORGEJO_TOKEN }}
      - name: Upload Terraform Plan
        if: github.event_name == 'push' && github.ref == 'refs/heads/main'
        uses: actions/upload-artifact@v4
        with:
          name: tfplan
          path: terraform/tfplan
          retention-days: 1
  ci-ansible:
    name: Ansible Validation
    runs-on: self-hosted
    steps:
      - name: Checkout code
        uses: actions/checkout@v4
      - name: Install Ansible
        run: |
          if ! command -v ansible &> /dev/null; then
            sudo apt-get update
            sudo apt-get install -y ansible
          fi
      - name: Ansible Syntax Check
        run: |
          ansible-playbook ansible/site.yml --syntax-check
      - name: Ansible Lint
        run: |
          if ! command -v ansible-lint &> /dev/null; then
            pip3 install ansible-lint
          fi
          ansible-lint ansible/ || true
        continue-on-error: true
      - name: YAML Lint
        run: |
          if ! command -v yamllint &> /dev/null; then
            pip3 install yamllint
          fi
          yamllint ansible/ || true
        continue-on-error: true
  ci-kubernetes:
    name: Kubernetes Validation
    runs-on: self-hosted
    steps:
      - name: Checkout code
        uses: actions/checkout@v4
      - name: Install kubectl
        run: |
          if ! command -v kubectl &> /dev/null; then
            curl -LO "https://dl.k8s.io/release/$(curl -L -s https://dl.k8s.io/release/stable.txt)/bin/linux/amd64/kubectl"
            sudo install -o root -g root -m 0755 kubectl /usr/local/bin/kubectl
          fi
      - name: Validate Kubernetes Manifests
        run: |
          kubectl apply --dry-run=client -f kubernetes/apps/ -R || true
          kubectl apply --dry-run=client -f kubernetes/flux-system/ -R || true
      - name: Install kubeconform
        run: |
          if ! command -v kubeconform &> /dev/null; then
            wget https://github.com/yannh/kubeconform/releases/latest/download/kubeconform-linux-amd64.tar.gz
            tar xf kubeconform-linux-amd64.tar.gz
            sudo mv kubeconform /usr/local/bin/
          fi
      - name: Kubeconform Validation
        run: |
          kubeconform -strict -ignore-missing-schemas kubernetes/ || true
        continue-on-error: true
  security-scan:
    name: Security Scan
    runs-on: self-hosted
    steps:
      - name: Checkout code
        uses: actions/checkout@v4
      - name: Install Trivy
        run: |
          if ! command -v trivy &> /dev/null; then
            wget -qO - https://aquasecurity.github.io/trivy-repo/deb/public.key | sudo apt-key add -
            echo "deb https://aquasecurity.github.io/trivy-repo/deb $(lsb_release -sc) main" | sudo tee -a /etc/apt/sources.list.d/trivy.list
            sudo apt-get update
            sudo apt-get install -y trivy
          fi
      - name: Run Trivy IaC Scan
        run: |
          trivy config . --exit-code 0 --severity HIGH,CRITICAL
        continue-on-error: true

@@ -0,0 +1,131 @@
name: CD - Deploy Infrastructure
on:
  push:
    branches:
      - main
  workflow_dispatch: # Allow manual trigger
jobs:
  # Run CI first
  ci:
    uses: ./.forgejo/workflows/ci.yml
    secrets: inherit
  # Deploy infrastructure in parallel
  deploy-pve1:
    name: Deploy on pve1
    runs-on: self-hosted
    needs: ci
    steps:
      - name: Checkout code
        uses: actions/checkout@v4
      - name: Terraform Apply on pve1
        run: |
          cd terraform/pve1
          cat > terraform.tfvars <<EOF
          proxmox_token_id = "${{ secrets.PROXMOX_TOKEN_ID }}"
          proxmox_token_secret = "${{ secrets.PROXMOX_TOKEN_SECRET }}"
          ssh_public_key = "${{ secrets.SSH_PUBLIC_KEY }}"
          forgejo_token = "${{ secrets.FORGEJO_TOKEN }}"
          forgejo_repo_url = "${{ secrets.FORGEJO_REPO_URL }}"
          k3s_version = "v1.28.5+k3s1"
          ubuntu_template = "ubuntu-2204-cloudinit"
          storage_pool = "local-lvm"
          snippets_storage = "local"
          k3s_network_bridge = "k3s"
          k3s_gateway = "10.100.20.1"
          k3s_dns = ["10.100.20.1", "1.1.1.1"]
          k3s_server_1_config = { ip = "10.100.20.10/24", cores = 6, memory = 12288, disk_size = "100G" }
          EOF
          tofu init
          tofu apply -auto-approve
  deploy-pve2:
    name: Deploy on pve2
    runs-on: self-hosted
    needs: ci
    steps:
      - name: Checkout code
        uses: actions/checkout@v4
      - name: Terraform Apply on pve2
        run: |
          cd terraform/pve2
          cat > terraform.tfvars <<EOF
          proxmox_token_id = "${{ secrets.PROXMOX_TOKEN_ID }}"
          proxmox_token_secret = "${{ secrets.PROXMOX_TOKEN_SECRET }}"
          ssh_public_key = "${{ secrets.SSH_PUBLIC_KEY }}"
          forgejo_token = "${{ secrets.FORGEJO_TOKEN }}"
          forgejo_repo_url = "${{ secrets.FORGEJO_REPO_URL }}"
          k3s_version = "v1.28.5+k3s1"
          ubuntu_template = "ubuntu-2204-cloudinit"
          storage_pool = "local-lvm"
          snippets_storage = "local"
          k3s_network_bridge = "k3s"
          k3s_gateway = "10.100.20.1"
          k3s_dns = ["10.100.20.1", "1.1.1.1"]
          k3s_server_2_config = { ip = "10.100.20.20/24", cores = 6, memory = 12288, disk_size = "100G" }
          EOF
          tofu init
          tofu apply -auto-approve
  deploy-pve3:
    name: Deploy on pve3
    runs-on: self-hosted
    needs: ci
    steps:
      - name: Checkout code
        uses: actions/checkout@v4
      - name: Terraform Apply on pve3
        run: |
          cd terraform/pve3
          cat > terraform.tfvars <<EOF
          proxmox_token_id = "${{ secrets.PROXMOX_TOKEN_ID }}"
          proxmox_token_secret = "${{ secrets.PROXMOX_TOKEN_SECRET }}"
          ssh_public_key = "${{ secrets.SSH_PUBLIC_KEY }}"
          forgejo_token = "${{ secrets.FORGEJO_TOKEN }}"
          forgejo_repo_url = "${{ secrets.FORGEJO_REPO_URL }}"
          k3s_version = "v1.28.5+k3s1"
          ubuntu_template = "ubuntu-2204-cloudinit"
          storage_pool = "local-lvm"
          snippets_storage = "local"
          k3s_network_bridge = "k3s"
          k3s_gateway = "10.100.20.1"
          k3s_dns = ["10.100.20.1", "1.1.1.1"]
          etcd_witness_config = { ip = "10.100.20.30/24", cores = 2, memory = 2048, disk_size = "20G" }
          EOF
          tofu init
          tofu apply -auto-approve
  # Validate cluster after deployment
  validate-cluster:
    name: Validate K3s Cluster
    runs-on: self-hosted
    needs: [deploy-pve1, deploy-pve2, deploy-pve3]
    if: github.ref == 'refs/heads/main' && needs.deploy-pve1.result == 'success' && needs.deploy-pve2.result == 'success' && needs.deploy-pve3.result == 'success'
    steps:
      - name: Checkout code
        uses: actions/checkout@v4
      - name: Wait for K3s cluster
        run: |
          echo "Waiting for K3s cluster to be ready..."
          sleep 300 # Wait 5 minutes for ansible-pull to configure K3s
      - name: Check cluster status (optional)
        run: |
          echo "Cluster validation completed"
        continue-on-error: true
  # Notify on completion
  notify:
    name: Deployment Notification
    runs-on: self-hosted
    needs: [deploy-pve1, deploy-pve2, deploy-pve3, validate-cluster]
    if: always()
    steps:
      - name: Deployment Summary
        run: |
          echo "Deployment completed!"
          echo "pve1 status: ${{ needs.deploy-pve1.result }}"
          echo "pve2 status: ${{ needs.deploy-pve2.result }}"
          echo "pve3 status: ${{ needs.deploy-pve3.result }}"
          echo "Validation: ${{ needs.validate-cluster.result }}"

@@ -0,0 +1,56 @@
name: Destroy Infrastructure
# Manual trigger only - for safety
on:
  workflow_dispatch:
    inputs:
      confirm:
        description: 'Type "DESTROY" to confirm'
        required: true
        type: string
jobs:
  destroy:
    name: Destroy Infrastructure
    runs-on: self-hosted
    if: github.event.inputs.confirm == 'DESTROY'
    steps:
      - name: Checkout code
        uses: actions/checkout@v4
      - name: Setup OpenTofu
        run: |
          if ! command -v tofu &> /dev/null; then
            curl -fsSL https://get.opentofu.org/install-opentofu.sh | bash
          fi
      - name: Confirm Destruction
        run: |
          echo "⚠️ WARNING: This will destroy all infrastructure!"
          echo "Proceeding in 10 seconds..."
          sleep 10
      - name: Terraform Destroy
        run: |
          cd terraform
          # Create tfvars from secrets
          cat > terraform.tfvars <<EOF
          proxmox_token_id = "${{ secrets.PROXMOX_TOKEN_ID }}"
          proxmox_token_secret = "${{ secrets.PROXMOX_TOKEN_SECRET }}"
          ssh_public_key = "${{ secrets.SSH_PUBLIC_KEY }}"
          forgejo_token = "${{ secrets.FORGEJO_TOKEN }}"
          forgejo_repo_url = "${{ secrets.FORGEJO_REPO_URL }}"
          EOF
          tofu init
          tofu destroy -auto-approve
        env:
          PM_API_URL: https://192.168.100.10:8006/api2/json
          PM_API_TOKEN_ID: ${{ secrets.PROXMOX_TOKEN_ID }}
          PM_API_TOKEN_SECRET: ${{ secrets.PROXMOX_TOKEN_SECRET }}
      - name: Cleanup
        run: |
          echo "Infrastructure destroyed successfully"

.gitattributes (new file)
@@ -0,0 +1,20 @@
# Auto detect text files and perform LF normalization
* text=auto
# Shell scripts
*.sh text eol=lf
# YAML files
*.yml text eol=lf
*.yaml text eol=lf
# Terraform files
*.tf text eol=lf
*.tfvars text eol=lf
# Markdown
*.md text eol=lf
# Config files
*.conf text eol=lf
*.cfg text eol=lf

.gitignore (new file)
@@ -0,0 +1,42 @@
# Terraform
.terraform/
.terraform.lock.hcl
terraform.tfstate
terraform.tfstate.backup
*.tfplan
*.tfvars
!terraform.tfvars.example
.generated/
# Ansible
*.retry
.ansible_cache/
.vault_pass
# Kubernetes
kubeconfig
*.kubeconfig
# Secrets
*.pem
*.key
*-key.json
secrets/
# OS
.DS_Store
Thumbs.db
# IDE
.vscode/
.idea/
*.swp
*.swo
*~
# Logs
*.log
# Temporary files
tmp/
temp/

README.md (new file)
@@ -0,0 +1,97 @@
# GitOps Infrastructure for K3s on Proxmox

This project implements a fully declarative infrastructure for deploying and managing a Kubernetes (K3s) cluster on a Proxmox environment. The whole system is driven by a GitOps approach, with Git serving as the single source of truth for both infrastructure and applications.

## Architecture Principles

The architecture rests on several core concepts designed to maximize automation, resilience, and maintainability.

1. **Infrastructure as Code (IaC)**: The physical infrastructure (the virtual machines on Proxmox) is defined and managed declaratively with OpenTofu, guaranteeing reproducible and consistent deployments.
2. **Multi-level GitOps approach**: The system uses a multi-level pull model to reconcile the desired state (defined in Git) with the actual state. This model reduces dependencies and improves security by limiting the permissions each component needs.
   * **Level 1 (Infrastructure)**: A CI/CD pipeline on Forgejo is the only component allowed to push changes. It runs OpenTofu to create or update the virtual machines through the Proxmox API.
   * **Level 2 (OS configuration)**: Each virtual machine is responsible for its own configuration. A cron job runs `ansible-pull` to fetch its configuration from the Git repository and apply it locally, including installing K3s and configuring the operating system (see the sketch after this list).
   * **Level 3 (Applications)**: FluxCD, running inside the K3s cluster, continuously synchronizes with the Git repository to deploy and maintain the applications and Kubernetes configuration.
3. **High availability and resilience**: The K3s cluster is designed to be resilient, with two server nodes and an external etcd witness for quorum: three etcd members in total, so the cluster tolerates the loss of any single node while keeping a two-member majority. The deployment pipeline is itself resilient to the failure of a Proxmox node, allowing partial infrastructure updates.
4. **Full automation**: From code validation to application updates, including OS security updates (`unattended-upgrades`), every step is automated to minimize manual intervention.
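
As a concrete illustration of Level 2, here is a condensed sketch of the pull wrapper that cloud-init installs on every VM in this commit (see the `ansible_pull_script` local in `terraform/pve*/`); the paths, schedule, and commands mirror that script, with only the comments added here:

```bash
#!/bin/bash
# /usr/local/bin/ansible-pull-wrapper.sh
# Installed by cloud-init and run every 15 minutes via /etc/cron.d/ansible-pull:
#   */15 * * * * root /usr/local/bin/ansible-pull-wrapper.sh
set -e

# REPO_URL, FORGEJO_TOKEN and K3S_VERSION are written to this file by cloud-init
source /etc/ansible-pull.conf

WORK_DIR="/var/lib/ansible-local"
mkdir -p "$WORK_DIR"
cd "$WORK_DIR"

# Inject the token into the clone URL for HTTPS authentication
REPO_WITH_AUTH=$(echo "$REPO_URL" | sed "s|https://|https://git:$FORGEJO_TOKEN@|")

# Clone on the first run, fast-forward afterwards; everything is logged to syslog
if [ -d ".git" ]; then
  git pull origin main 2>&1 | logger -t ansible-pull
else
  git clone "$REPO_WITH_AUTH" . 2>&1 | logger -t ansible-pull
fi

# Apply this node's configuration locally (the role is read from /etc/node-role)
ansible-playbook ansible/site.yml -i localhost, --connection=local \
  -e "k3s_version=$K3S_VERSION" 2>&1 | logger -t ansible-pull
```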
## Target Architecture

The following diagram illustrates the deployment flow, from a code change to its application on the infrastructure.

```
+-----------------------------------------------------------------+
| Developer -> git push -> Git repository on Forgejo              |
+-----------------------------------------------------------------+
                 |
                 | 1. Triggers the CI/CD pipeline
                 v
+------------------------+      +---------------------------------+
| Forgejo Runner VM      |----->| Proxmox API                     |
| (runs OpenTofu)        |  2.  | (on pve1, pve2, pve3)           |
+------------------------+      | Creates/updates the VMs         |
                                +---------------------------------+
                 |
                 | 3. The VMs boot and run cloud-init
                 v
+-----------------------------------------------------------------+
| K3s cluster (VMs on Proxmox)                                    |
|                                                                 |
| 4. ansible-pull (cron) pulls its configuration from Forgejo     |
|                                                                 |
| 5. FluxCD (inside K3s) pulls the app manifests from Forgejo     |
+-----------------------------------------------------------------+
```
### Virtual Machine Specifications

| Role         | VM Name        | Proxmox Node | CPU | RAM  | Disk  | IP Address     |
| ------------ | -------------- | ------------ | --- | ---- | ----- | -------------- |
| K3s server   | k3s-server-1   | pve1         | 6   | 12GB | 100GB | 10.100.20.10   |
| K3s server   | k3s-server-2   | pve2         | 6   | 12GB | 100GB | 10.100.20.20   |
| etcd witness | etcd-witness   | pve3         | 2   | 2GB  | 20GB  | 10.100.20.30   |
| CI/CD runner | forgejo-runner | (any node)   | 2   | 2GB  | 20GB  | 192.168.100.50 |
## Repository Structure

The project is organized as follows to keep concerns separated:

```
.
├── .forgejo/        # CI/CD workflows for Forgejo Actions.
├── ansible/         # Playbooks and roles for VM configuration (via ansible-pull).
├── kubernetes/      # Kubernetes manifests managed by FluxCD.
│   ├── flux-system/     # Configuration of FluxCD itself.
│   ├── apps/            # Applications deployed on the cluster.
│   └── infrastructure/  # Base Kubernetes resources (e.g. Ingress, SealedSecrets).
└── terraform/       # OpenTofu code managing the infrastructure on Proxmox.
    ├── pve1/        # Resources for node pve1.
    ├── pve2/        # Resources for node pve2.
    └── pve3/        # Resources for node pve3.
```
## CI/CD Pipeline

The heart of the automation is the CI/CD pipeline configured in `.forgejo/workflows/`.

### Continuous Integration (CI)

Triggered on every `push`, the CI pipeline validates the integrity and quality of the code along several axes:

- **OpenTofu validation**: ensures the infrastructure code is syntactically correct (`fmt`, `validate`).
- **Ansible validation**: checks playbook syntax (`ansible-playbook --syntax-check`) and compliance with best practices (`ansible-lint`).
- **Kubernetes validation**: validates the manifests with `kubeconform` to guarantee they respect the Kubernetes API schema.
- **Security analysis**: `Trivy` scans the infrastructure code for potentially vulnerable configurations.
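
The same checks can be reproduced locally before pushing. This is a minimal sketch assuming `tofu`, `ansible-lint`, `kubeconform`, and `trivy` are already on the PATH; the flags mirror those used in `.forgejo/workflows/ci.yml`:

```bash
# OpenTofu: formatting and static validation (no backend required)
(cd terraform && tofu fmt -check -recursive && tofu init -backend=false && tofu validate)

# Ansible: syntax check, then lint against best practices
ansible-playbook ansible/site.yml --syntax-check
ansible-lint ansible/

# Kubernetes: validate manifests against the API schemas
kubeconform -strict -ignore-missing-schemas kubernetes/

# Security: scan the IaC files for HIGH/CRITICAL misconfigurations
trivy config . --severity HIGH,CRITICAL
```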
### Continuous Deployment (CD)

Triggered by a `push` to the `main` branch, the CD pipeline orchestrates deployment through the multi-level model described above.

1. **Infrastructure deployment**: the `deploy-pve1`, `deploy-pve2`, and `deploy-pve3` jobs run in parallel, each applying its node's configuration with OpenTofu. A failure on one node does not interrupt the others.
2. **Configuration and application deployment**: once the VMs are up, the pull mechanisms (`ansible-pull` and FluxCD) take over to finalize the configuration and deploy the applications autonomously.

ansible/ansible.cfg (new file)
@@ -0,0 +1,16 @@
[defaults]
inventory = localhost,
host_key_checking = False
retry_files_enabled = False
stdout_callback = yaml
callback_whitelist = profile_tasks, timer
gathering = smart
fact_caching = jsonfile
fact_caching_connection = /tmp/ansible_fact_cache
fact_caching_timeout = 3600
[privilege_escalation]
become = True
become_method = sudo
become_user = root
become_ask_pass = False

@@ -0,0 +1,62 @@
---
# Global variables for all nodes

# K3s Configuration
k3s_version: "v1.28.5+k3s1"
k3s_install_url: "https://get.k3s.io"

# K3s Server Configuration
k3s_server_1_ip: "10.100.20.10"
k3s_server_2_ip: "10.100.20.20"
k3s_witness_ip: "10.100.20.30"

# K3s token (shared between servers)
# In production, this should be stored in a vault
k3s_token_file: "/etc/rancher/k3s/token"

# Network Configuration
pod_cidr: "10.42.0.0/16"
service_cidr: "10.43.0.0/16"
cluster_dns: "10.43.0.10"

# System Configuration
timezone: "Europe/Paris"
swap_enabled: false

# Unattended Upgrades Configuration
unattended_upgrades_enabled: true
unattended_upgrades_automatic_reboot: true
unattended_upgrades_automatic_reboot_with_users: false

# Reboot schedule (staggered to maintain availability)
reboot_schedule:
  k3s-server-1: "02:00"
  k3s-server-2: "04:00"
  etcd-witness: "06:00"

# FluxCD Configuration
flux_version: "v2.2.0"
flux_namespace: "flux-system"

# System packages to install on all nodes
common_packages:
  - curl
  - wget
  - git
  - vim
  - htop
  - net-tools
  - ca-certificates
  - gnupg
  - lsb-release
  - python3
  - python3-pip

# Kernel parameters for K3s
sysctl_config:
  net.bridge.bridge-nf-call-iptables: 1
  net.bridge.bridge-nf-call-ip6tables: 1
  net.ipv4.ip_forward: 1
  vm.swappiness: 0
  fs.inotify.max_user_instances: 8192
  fs.inotify.max_user_watches: 524288

@@ -0,0 +1,6 @@
---
# Default variables for common role
timezone: "Europe/Paris"
swap_enabled: false
unattended_upgrades_enabled: true

@@ -0,0 +1,11 @@
---
# Handlers for common role
- name: restart unattended-upgrades
  systemd:
    name: unattended-upgrades
    state: restarted

- name: reload systemd
  systemd:
    daemon_reload: yes

@@ -0,0 +1,95 @@
---
# Common configuration for all nodes
- name: Set timezone
  timezone:
    name: "{{ timezone }}"

- name: Install common packages
  apt:
    name: "{{ common_packages }}"
    state: present
    update_cache: yes

- name: Disable swap
  shell: |
    swapoff -a
    sed -i '/swap/d' /etc/fstab
  when: not swap_enabled
  changed_when: false

- name: Load kernel modules
  modprobe:
    name: "{{ item }}"
    state: present
  loop:
    - overlay
    - br_netfilter

- name: Configure kernel modules to load at boot
  copy:
    dest: /etc/modules-load.d/k3s.conf
    content: |
      overlay
      br_netfilter
    mode: '0644'

- name: Configure sysctl parameters
  sysctl:
    name: "{{ item.key }}"
    value: "{{ item.value }}"
    state: present
    reload: yes
    sysctl_file: /etc/sysctl.d/99-k3s.conf
  loop: "{{ sysctl_config | dict2items }}"

- name: Configure unattended-upgrades
  include_tasks: unattended-upgrades.yml
  when: unattended_upgrades_enabled

- name: Create k3s directories
  file:
    path: "{{ item }}"
    state: directory
    mode: '0755'
  loop:
    - /etc/rancher/k3s
    - /var/lib/rancher/k3s

- name: Configure firewall rules (ufw)
  block:
    - name: Install ufw
      apt:
        name: ufw
        state: present
    - name: Allow SSH
      ufw:
        rule: allow
        port: '22'
        proto: tcp
    - name: Allow K3s API
      ufw:
        rule: allow
        port: '6443'
        proto: tcp
    - name: Allow K3s etcd
      ufw:
        rule: allow
        port: '2379:2380'
        proto: tcp
    - name: Allow K3s metrics
      ufw:
        rule: allow
        port: '10250'
        proto: tcp
    - name: Enable ufw
      ufw:
        state: enabled
        policy: deny
        direction: incoming
  when: false # Disabled by default, enable if needed

@@ -0,0 +1,40 @@
---
# Configure unattended-upgrades for automatic OS updates
- name: Install unattended-upgrades package
  apt:
    name:
      - unattended-upgrades
      - apt-listchanges
    state: present

- name: Get hostname
  set_fact:
    current_hostname: "{{ ansible_hostname }}"

- name: Set reboot time based on hostname
  set_fact:
    reboot_time: "{{ reboot_schedule[current_hostname] | default('03:00') }}"

- name: Configure unattended-upgrades
  template:
    src: 50unattended-upgrades.j2
    dest: /etc/apt/apt.conf.d/50unattended-upgrades
    mode: '0644'
  notify: restart unattended-upgrades

- name: Enable automatic updates
  copy:
    dest: /etc/apt/apt.conf.d/20auto-upgrades
    content: |
      APT::Periodic::Update-Package-Lists "1";
      APT::Periodic::Download-Upgradeable-Packages "1";
      APT::Periodic::AutocleanInterval "7";
      APT::Periodic::Unattended-Upgrade "1";
    mode: '0644'

- name: Start and enable unattended-upgrades service
  systemd:
    name: unattended-upgrades
    state: started
    enabled: yes

@@ -0,0 +1,47 @@
// Unattended-Upgrade configuration
// Managed by Ansible - do not edit manually

Unattended-Upgrade::Allowed-Origins {
    "${distro_id}:${distro_codename}";
    "${distro_id}:${distro_codename}-security";
    "${distro_id}ESMApps:${distro_codename}-apps-security";
    "${distro_id}ESM:${distro_codename}-infra-security";
};

// List of packages to not update
Unattended-Upgrade::Package-Blacklist {
};

// Automatically reboot if needed
Unattended-Upgrade::Automatic-Reboot "{{ unattended_upgrades_automatic_reboot | lower }}";

// Reboot time (staggered per node)
Unattended-Upgrade::Automatic-Reboot-Time "{{ reboot_time }}";

// Automatically reboot even if users are logged in
Unattended-Upgrade::Automatic-Reboot-WithUsers "{{ unattended_upgrades_automatic_reboot_with_users | lower }}";

// Remove unused kernel packages
Unattended-Upgrade::Remove-Unused-Kernel-Packages "true";

// Remove unused dependencies
Unattended-Upgrade::Remove-Unused-Dependencies "true";

// Send email on errors (empty address disables mail)
Unattended-Upgrade::Mail "";

// Only report by email when something was upgraded or an error occurred
Unattended-Upgrade::MailReport "on-change";

// Days on which upgrades may run
Unattended-Upgrade::Update-Days {"Mon";"Tue";"Wed";"Thu";"Fri";"Sat";"Sun";};

// Automatically fix dpkg interruptions
Dpkg::Options {
    "--force-confdef";
    "--force-confold";
};

// Logging
Unattended-Upgrade::SyslogEnable "true";
Unattended-Upgrade::SyslogFacility "daemon";

@@ -0,0 +1,6 @@
---
# Default variables for etcd-witness role
k3s_version: "v1.28.5+k3s1"
k3s_install_url: "https://get.k3s.io"
k3s_token_file: "/etc/rancher/k3s/token"

@@ -0,0 +1,35 @@
---
# etcd witness node configuration
# This node participates in etcd quorum but does not run K8s workloads
- name: Check if K3s is already installed
  stat:
    path: /usr/local/bin/k3s
  register: k3s_binary

- name: Get K3s token from first server
  set_fact:
    # default(..., true) also covers the empty string returned when the file is absent
    k3s_token: "{{ lookup('file', k3s_token_file, errors='ignore') | default('PLACEHOLDER', true) }}"

- name: Install K3s as server (witness mode)
  shell: |
    curl -sfL {{ k3s_install_url }} | INSTALL_K3S_VERSION="{{ k3s_version }}" sh -s - server \
      --server https://{{ k3s_server_1_ip }}:6443 \
      --token {{ k3s_token }} \
      --disable-apiserver \
      --disable-controller-manager \
      --disable-scheduler \
      --node-ip {{ ansible_default_ipv4.address }}
  when: not k3s_binary.stat.exists
  environment:
    INSTALL_K3S_SKIP_START: "false"

- name: Enable and start k3s service
  systemd:
    name: k3s
    state: started
    enabled: yes

- name: Display witness node information
  debug:
    msg: "etcd witness node configured at {{ ansible_default_ipv4.address }}"

@@ -0,0 +1,7 @@
---
# Default variables for k3s-server role
k3s_version: "v1.28.5+k3s1"
k3s_install_url: "https://get.k3s.io"
flux_version: "v2.2.0"
flux_namespace: "flux-system"

@@ -0,0 +1,32 @@
#!/bin/bash
# K3s pre-reboot script
# Drains the node before system reboot to migrate workloads gracefully
# pipefail so the drain's exit status survives the pipe to logger
set -eo pipefail

# Only run if k3s is active
if systemctl is-active --quiet k3s; then
    NODE_NAME=$(hostname)
    echo "$(date): Starting pre-reboot drain for node $NODE_NAME" | logger -t k3s-pre-reboot

    # Set KUBECONFIG
    export KUBECONFIG=/etc/rancher/k3s/k3s.yaml

    # Drain the node (migrate pods to other nodes)
    if /usr/local/bin/k3s kubectl drain "$NODE_NAME" \
        --ignore-daemonsets \
        --delete-emptydir-data \
        --force \
        --timeout=300s 2>&1 | logger -t k3s-pre-reboot; then
        echo "$(date): Node $NODE_NAME drained successfully" | logger -t k3s-pre-reboot
    else
        echo "$(date): Warning - Node drain failed or timed out" | logger -t k3s-pre-reboot
    fi
else
    echo "$(date): k3s service not active, skipping drain" | logger -t k3s-pre-reboot
fi

exit 0

@@ -0,0 +1,11 @@
---
# Handlers for k3s-server role
- name: restart k3s
  systemd:
    name: k3s
    state: restarted

- name: reload systemd
  systemd:
    daemon_reload: yes

@@ -0,0 +1,47 @@
---
# Install and configure FluxCD
- name: Check if flux is already installed
  command: k3s kubectl get namespace {{ flux_namespace }}
  register: flux_installed
  changed_when: false
  failed_when: false

- name: Download Flux CLI
  get_url:
    url: "https://github.com/fluxcd/flux2/releases/download/{{ flux_version }}/flux_{{ flux_version | replace('v', '') }}_linux_amd64.tar.gz"
    dest: /tmp/flux.tar.gz
    mode: '0644'
  when: flux_installed.rc != 0

- name: Extract Flux CLI
  unarchive:
    src: /tmp/flux.tar.gz
    dest: /usr/local/bin
    remote_src: yes
    creates: /usr/local/bin/flux
  when: flux_installed.rc != 0

- name: Install FluxCD in cluster
  shell: |
    export KUBECONFIG=/etc/rancher/k3s/k3s.yaml
    /usr/local/bin/flux install --namespace={{ flux_namespace }}
  when: flux_installed.rc != 0
  register: flux_install_result
  changed_when: "'installed' in flux_install_result.stdout"

- name: Wait for FluxCD to be ready
  shell: |
    export KUBECONFIG=/etc/rancher/k3s/k3s.yaml
    /usr/local/bin/flux check
  register: flux_check
  until: flux_check.rc == 0
  retries: 30
  delay: 10
  changed_when: false
  when: flux_installed.rc != 0

- name: Display FluxCD installation status
  debug:
    msg: "FluxCD installed successfully. Configure GitRepository in kubernetes/flux-system/"
  when: flux_installed.rc != 0

@@ -0,0 +1,117 @@
---
# K3s server installation and configuration
- name: Check if K3s is already installed
  stat:
    path: /usr/local/bin/k3s
  register: k3s_binary

- name: Get installed K3s version
  command: k3s --version
  register: installed_version
  changed_when: false
  failed_when: false
  when: k3s_binary.stat.exists

- name: Determine if this is the first server
  set_fact:
    is_first_server: "{{ ansible_default_ipv4.address == k3s_server_1_ip }}"

- name: Install K3s on first server (cluster-init)
  shell: |
    curl -sfL {{ k3s_install_url }} | INSTALL_K3S_VERSION="{{ k3s_version }}" sh -s - server \
      --cluster-init \
      --tls-san {{ k3s_server_1_ip }} \
      --tls-san {{ k3s_server_2_ip }} \
      --write-kubeconfig-mode 644 \
      --disable traefik \
      --node-ip {{ ansible_default_ipv4.address }}
  when:
    - is_first_server
    - not k3s_binary.stat.exists or (k3s_version not in installed_version.stdout)
  environment:
    INSTALL_K3S_SKIP_START: "false"

- name: Wait for first server to be ready
  wait_for:
    host: "{{ k3s_server_1_ip }}"
    port: 6443
    delay: 10
    timeout: 300
  when: is_first_server

- name: Get K3s token from first server
  slurp:
    src: /var/lib/rancher/k3s/server/node-token
  register: k3s_token_encoded
  when: is_first_server
  run_once: true

- name: Save K3s token
  set_fact:
    k3s_token: "{{ k3s_token_encoded.content | b64decode | trim }}"
  when: is_first_server

- name: Install K3s on second server (join cluster)
  shell: |
    curl -sfL {{ k3s_install_url }} | INSTALL_K3S_VERSION="{{ k3s_version }}" sh -s - server \
      --server https://{{ k3s_server_1_ip }}:6443 \
      --token {{ k3s_token | default('PLACEHOLDER') }} \
      --tls-san {{ k3s_server_2_ip }} \
      --write-kubeconfig-mode 644 \
      --disable traefik \
      --node-ip {{ ansible_default_ipv4.address }}
  when:
    - not is_first_server
    - not k3s_binary.stat.exists or (k3s_version not in installed_version.stdout)

- name: Enable and start k3s service
  systemd:
    name: k3s
    state: started
    enabled: yes

- name: Wait for K3s to be ready
  command: k3s kubectl get nodes
  register: kubectl_result
  until: kubectl_result.rc == 0
  retries: 30
  delay: 10
  changed_when: false

- name: Create pre-reboot script
  copy:
    src: k3s-pre-reboot.sh
    dest: /usr/local/bin/k3s-pre-reboot.sh
    mode: '0755'

- name: Create systemd service for pre-reboot drain
  copy:
    dest: /etc/systemd/system/k3s-pre-reboot.service
    content: |
      [Unit]
      Description=Drain k3s node before reboot
      Before=reboot.target
      Before=shutdown.target
      DefaultDependencies=no

      [Service]
      Type=oneshot
      ExecStart=/usr/local/bin/k3s-pre-reboot.sh
      TimeoutStartSec=300

      [Install]
      WantedBy=reboot.target
      WantedBy=shutdown.target
    mode: '0644'
  notify: reload systemd

- name: Enable pre-reboot service
  systemd:
    name: k3s-pre-reboot
    enabled: yes
    daemon_reload: yes

- name: Install FluxCD (only on first server)
  include_tasks: flux.yml
  when: is_first_server

ansible/site.yml (new file)
@@ -0,0 +1,45 @@
---
# Main playbook for K3s GitOps infrastructure
# This playbook is executed by ansible-pull on each VM
- name: Configure K3s Infrastructure
  hosts: localhost
  connection: local
  become: true
  vars:
    # Read node role from file created by cloud-init
    # default(..., true) also covers the empty string returned when the file is missing
    node_role: "{{ lookup('file', '/etc/node-role', errors='ignore') | default('undefined', true) }}"
  pre_tasks:
    - name: Display node information
      debug:
        msg: "Configuring node {{ ansible_hostname }} with role {{ node_role }}"
    - name: Validate node role
      assert:
        that:
          - node_role in ['server', 'witness']
        fail_msg: "Invalid node role: {{ node_role }}. Expected 'server' or 'witness'"
    - name: Update apt cache
      apt:
        update_cache: yes
        cache_valid_time: 3600
  roles:
    # Common role applies to all nodes
    - role: common
    # K3s server role (server + worker)
    - role: k3s-server
      when: node_role == 'server'
    # etcd witness role (etcd only, no k8s workloads)
    - role: etcd-witness
      when: node_role == 'witness'
  post_tasks:
    - name: Display completion message
      debug:
        msg: "Configuration complete for {{ ansible_hostname }} ({{ node_role }})"

@@ -0,0 +1,67 @@
---
# Example application deployment
# This demonstrates how FluxCD automatically deploys apps from Git
apiVersion: v1
kind: Namespace
metadata:
  name: example-nginx
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: nginx
  namespace: example-nginx
  labels:
    app: nginx
spec:
  replicas: 2
  selector:
    matchLabels:
      app: nginx
  template:
    metadata:
      labels:
        app: nginx
    spec:
      containers:
        - name: nginx
          image: nginx:1.25-alpine
          ports:
            - containerPort: 80
              name: http
          resources:
            requests:
              cpu: 100m
              memory: 128Mi
            limits:
              cpu: 200m
              memory: 256Mi
          livenessProbe:
            httpGet:
              path: /
              port: 80
            initialDelaySeconds: 10
            periodSeconds: 5
          readinessProbe:
            httpGet:
              path: /
              port: 80
            initialDelaySeconds: 5
            periodSeconds: 3
---
apiVersion: v1
kind: Service
metadata:
  name: nginx
  namespace: example-nginx
spec:
  selector:
    app: nginx
  ports:
    - port: 80
      targetPort: 80
      name: http
  type: ClusterIP

@@ -0,0 +1,4 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
  - deployment.yaml

@@ -0,0 +1,4 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
  - example-nginx

@@ -0,0 +1,43 @@
# FluxCD System Configuration

This directory contains the FluxCD configuration for GitOps.

## Setup

1. **Install FluxCD** (done automatically by Ansible):

   ```bash
   flux install --namespace=flux-system
   ```

2. **Create Forgejo credentials secret**:

   ```bash
   kubectl create secret generic forgejo-credentials \
     --namespace=flux-system \
     --from-literal=username=git \
     --from-literal=password=YOUR_FORGEJO_TOKEN
   ```

3. **Update GitRepository URL** in `gotk-sync.yaml`:

   ```yaml
   url: https://forgejo.your-domain.com/your-org/infra.git
   ```

4. **Apply FluxCD configuration**:

   ```bash
   kubectl apply -k kubernetes/flux-system/
   ```

## Monitoring

Check FluxCD status:

```bash
flux get sources git
flux get kustomizations
flux logs
```

Force reconciliation:

```bash
flux reconcile source git infra-repo
flux reconcile kustomization apps --with-source
```

@@ -0,0 +1,59 @@
---
# GitRepository resource - tells FluxCD where to find the Git repo
apiVersion: source.toolkit.fluxcd.io/v1
kind: GitRepository
metadata:
  name: infra-repo
  namespace: flux-system
spec:
  interval: 1m # Poll Git every 1 minute
  url: ssh://git@forgejo.tellserv.fr:222/Tellsanguis/infra.git
  ref:
    branch: main
  secretRef:
    name: forgejo-credentials
  ignore: |
    # Ignore files that don't need to trigger reconciliation
    /*.md
    /terraform/
    /ansible/
    /.forgejo/
---
# Kustomization resource - tells FluxCD what to deploy
apiVersion: kustomize.toolkit.fluxcd.io/v1
kind: Kustomization
metadata:
  name: apps
  namespace: flux-system
spec:
  interval: 5m # Reconcile every 5 minutes
  path: ./kubernetes/apps
  prune: true # Remove resources deleted from Git
  sourceRef:
    kind: GitRepository
    name: infra-repo
  timeout: 3m
  wait: true
  healthChecks:
    - apiVersion: apps/v1
      kind: Deployment
      namespace: default
      name: '*'
---
# Kustomization for infrastructure components
apiVersion: kustomize.toolkit.fluxcd.io/v1
kind: Kustomization
metadata:
  name: infrastructure
  namespace: flux-system
spec:
  interval: 10m
  path: ./kubernetes/infrastructure
  prune: true
  sourceRef:
    kind: GitRepository
    name: infra-repo
  timeout: 5m
  wait: true

@@ -0,0 +1,5 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
  - gotk-sync.yaml
  - secret-forgejo.yaml
- secret-forgejo.yaml

@@ -0,0 +1,22 @@
---
# Secret for Forgejo authentication
# IMPORTANT: This file should contain a sealed secret or be created manually
# Never commit actual credentials to Git!
apiVersion: v1
kind: Secret
metadata:
  name: forgejo-credentials
  namespace: flux-system
type: Opaque
stringData:
  # Create this secret manually with:
  #   kubectl create secret generic forgejo-credentials \
  #     --namespace=flux-system \
  #     --from-literal=username=git \
  #     --from-literal=password=YOUR_FORGEJO_TOKEN
  # For this example, we use a placeholder
  # REPLACE THIS IN PRODUCTION with sealed-secrets or external-secrets
  username: git
  password: REPLACE_WITH_FORGEJO_TOKEN

@@ -0,0 +1,2 @@
# Infrastructure components go here
# Examples: ingress controllers, cert-manager, monitoring, etc.

terraform/.gitignore (new file)
@@ -0,0 +1,15 @@
# Terraform files
.terraform/
.terraform.lock.hcl
terraform.tfstate
terraform.tfstate.backup
*.tfplan
*.tfvars
!terraform.tfvars.example
# Generated cloud-init files
.generated/
# Sensitive files
*.pem
*.key

@@ -0,0 +1,70 @@
# Cloud-init configuration for K3s Server 1
locals {
  base_user_data = {
    package_upgrade = true
    packages = [
      "ansible",
      "git",
      "curl",
      "wget",
      "ca-certificates",
      "gnupg",
      "lsb-release"
    ]
    users = [
      {
        name                = "ansible"
        sudo                = "ALL=(ALL) NOPASSWD:ALL"
        shell               = "/bin/bash"
        ssh_authorized_keys = [var.ssh_public_key]
        groups              = "sudo"
      }
    ]
    timezone = "Europe/Paris"
  }

  ansible_pull_script = <<-EOT
    #!/bin/bash
    set -e
    source /etc/ansible-pull.conf
    WORK_DIR="/var/lib/ansible-local"
    mkdir -p $WORK_DIR
    cd $WORK_DIR
    REPO_WITH_AUTH=$(echo $REPO_URL | sed "s|https://|https://git:$FORGEJO_TOKEN@|")
    if [ -d ".git" ]; then
      git pull origin main 2>&1 | logger -t ansible-pull
    else
      git clone $REPO_WITH_AUTH . 2>&1 | logger -t ansible-pull
    fi
    ansible-playbook ansible/site.yml -i localhost, --connection=local -e "k3s_version=$K3S_VERSION" 2>&1 | logger -t ansible-pull
  EOT

  k3s_server_user_data = {
    write_files = [
      {
        path        = "/etc/node-role"
        content     = "server"
        permissions = "0644"
      },
      {
        path        = "/etc/ansible-pull.conf"
        content     = "REPO_URL=${var.forgejo_repo_url}\nFORGEJO_TOKEN=${var.forgejo_token}\nK3S_VERSION=${var.k3s_version}"
        permissions = "0600"
      },
      {
        path        = "/usr/local/bin/ansible-pull-wrapper.sh"
        content     = local.ansible_pull_script
        permissions = "0755"
      }
    ]
    runcmd = [
      "echo '*/15 * * * * root /usr/local/bin/ansible-pull-wrapper.sh' > /etc/cron.d/ansible-pull",
      "sleep 60 && /usr/local/bin/ansible-pull-wrapper.sh &"
    ]
  }
}

resource "local_file" "k3s_server_cloud_init" {
  filename = "${path.module}/.generated/cloud-init-k3s-server-1.yaml"
  content  = yamlencode(merge(local.base_user_data, local.k3s_server_user_data))
}

terraform/pve1/main.tf (new file)
@@ -0,0 +1,64 @@
terraform {
  required_version = ">= 1.6.0"
  required_providers {
    proxmox = {
      source  = "telmate/proxmox"
      version = "~> 3.0"
    }
    local = {
      source  = "hashicorp/local"
      version = "~> 2.1"
    }
  }
}

provider "proxmox" {
  pm_api_url          = var.proxmox_api_url
  pm_api_token_id     = var.proxmox_token_id
  pm_api_token_secret = var.proxmox_token_secret
  pm_tls_insecure     = var.proxmox_tls_insecure
}

# K3s Server VM on pve1
resource "proxmox_vm_qemu" "k3s_server_1" {
  name        = "k3s-server-1"
  target_node = "pve1"
  clone       = var.ubuntu_template

  cores   = var.k3s_server_1_config.cores
  sockets = 1
  memory  = var.k3s_server_1_config.memory

  agent  = 1
  boot   = "order=scsi0"
  scsihw = "virtio-scsi-single"
  onboot = true

  network {
    model  = "virtio"
    bridge = var.k3s_network_bridge
  }

  disks {
    scsi {
      scsi0 {
        disk {
          size     = var.k3s_server_1_config.disk_size
          storage  = var.storage_pool
          iothread = true
        }
      }
    }
  }

  ipconfig0  = "ip=${var.k3s_server_1_config.ip},gw=${var.k3s_gateway}"
  cicustom   = "user=${var.snippets_storage}:snippets/cloud-init-k3s-server-1.yaml"
  nameserver = join(" ", var.k3s_dns)

  lifecycle {
    ignore_changes = [network]
  }

  depends_on = [local_file.k3s_server_cloud_init]
}

@@ -0,0 +1,8 @@
output "k3s_server_1" {
description = "K3s Server 1 VM information"
value = {
name = proxmox_vm_qemu.k3s_server_1.name
ip = var.k3s_server_1_config.ip
node = proxmox_vm_qemu.k3s_server_1.target_node
}
}

@@ -0,0 +1,84 @@
variable "proxmox_api_url" {
description = "Proxmox API URL"
type = string
default = "https://192.168.100.10:8006/api2/json"
}
variable "proxmox_token_id" {
description = "Proxmox API Token ID"
type = string
sensitive = true
}
variable "proxmox_token_secret" {
description = "Proxmox API Token Secret"
type = string
sensitive = true
}
variable "proxmox_tls_insecure" {
description = "Skip TLS verification for Proxmox API"
type = bool
default = true
}
variable "ssh_public_key" {
description = "SSH public key for admin access"
type = string
}
variable "forgejo_token" {
description = "Forgejo token for ansible-pull authentication"
type = string
sensitive = true
}
variable "forgejo_repo_url" {
description = "Forgejo repository URL (without credentials)"
type = string
}
variable "k3s_version" {
description = "K3s version to install"
type = string
}
variable "ubuntu_template" {
description = "Ubuntu cloud-init template name"
type = string
}
variable "storage_pool" {
description = "Proxmox storage pool for VM disks"
type = string
}
variable "snippets_storage" {
description = "Proxmox storage for cloud-init snippets"
type = string
}
variable "k3s_network_bridge" {
description = "SDN bridge for K3s VMs"
type = string
}
variable "k3s_gateway" {
description = "Gateway for K3s network"
type = string
}
variable "k3s_dns" {
description = "DNS servers for K3s network"
type = list(string)
}
variable "k3s_server_1_config" {
description = "K3s server-1 VM configuration"
type = object({
ip = string
cores = number
memory = number
disk_size = string
})
}

@@ -0,0 +1,70 @@
# Cloud-init configuration for K3s Server 2
locals {
  base_user_data = {
    package_upgrade = true
    packages = [
      "ansible",
      "git",
      "curl",
      "wget",
      "ca-certificates",
      "gnupg",
      "lsb-release"
    ]
    users = [
      {
        name                = "ansible"
        sudo                = "ALL=(ALL) NOPASSWD:ALL"
        shell               = "/bin/bash"
        ssh_authorized_keys = [var.ssh_public_key]
        groups              = "sudo"
      }
    ]
    timezone = "Europe/Paris"
  }

  ansible_pull_script = <<-EOT
    #!/bin/bash
    set -e
    source /etc/ansible-pull.conf
    WORK_DIR="/var/lib/ansible-local"
    mkdir -p $WORK_DIR
    cd $WORK_DIR
    REPO_WITH_AUTH=$(echo $REPO_URL | sed "s|https://|https://git:$FORGEJO_TOKEN@|")
    if [ -d ".git" ]; then
      git pull origin main 2>&1 | logger -t ansible-pull
    else
      git clone $REPO_WITH_AUTH . 2>&1 | logger -t ansible-pull
    fi
    ansible-playbook ansible/site.yml -i localhost, --connection=local -e "k3s_version=$K3S_VERSION" 2>&1 | logger -t ansible-pull
  EOT

  k3s_server_user_data = {
    write_files = [
      {
        path        = "/etc/node-role"
        content     = "server"
        permissions = "0644"
      },
      {
        path        = "/etc/ansible-pull.conf"
        content     = "REPO_URL=${var.forgejo_repo_url}\nFORGEJO_TOKEN=${var.forgejo_token}\nK3S_VERSION=${var.k3s_version}"
        permissions = "0600"
      },
      {
        path        = "/usr/local/bin/ansible-pull-wrapper.sh"
        content     = local.ansible_pull_script
        permissions = "0755"
      }
    ]
    runcmd = [
      "echo '*/15 * * * * root /usr/local/bin/ansible-pull-wrapper.sh' > /etc/cron.d/ansible-pull",
      "sleep 60 && /usr/local/bin/ansible-pull-wrapper.sh &"
    ]
  }
}

resource "local_file" "k3s_server_cloud_init" {
  filename = "${path.module}/.generated/cloud-init-k3s-server-2.yaml"
  content  = yamlencode(merge(local.base_user_data, local.k3s_server_user_data))
}

terraform/pve2/main.tf (new file)
@@ -0,0 +1,64 @@
terraform {
  required_version = ">= 1.6.0"
  required_providers {
    proxmox = {
      source  = "telmate/proxmox"
      version = "~> 3.0"
    }
    local = {
      source  = "hashicorp/local"
      version = "~> 2.1"
    }
  }
}

provider "proxmox" {
  pm_api_url          = var.proxmox_api_url
  pm_api_token_id     = var.proxmox_token_id
  pm_api_token_secret = var.proxmox_token_secret
  pm_tls_insecure     = var.proxmox_tls_insecure
}

# K3s Server VM on pve2
resource "proxmox_vm_qemu" "k3s_server_2" {
  name        = "k3s-server-2"
  target_node = "pve2"
  clone       = var.ubuntu_template

  cores   = var.k3s_server_2_config.cores
  sockets = 1
  memory  = var.k3s_server_2_config.memory

  agent  = 1
  boot   = "order=scsi0"
  scsihw = "virtio-scsi-single"
  onboot = true

  network {
    model  = "virtio"
    bridge = var.k3s_network_bridge
  }

  disks {
    scsi {
      scsi0 {
        disk {
          size     = var.k3s_server_2_config.disk_size
          storage  = var.storage_pool
          iothread = true
        }
      }
    }
  }

  ipconfig0  = "ip=${var.k3s_server_2_config.ip},gw=${var.k3s_gateway}"
  cicustom   = "user=${var.snippets_storage}:snippets/cloud-init-k3s-server-2.yaml"
  nameserver = join(" ", var.k3s_dns)

  lifecycle {
    ignore_changes = [network]
  }

  depends_on = [local_file.k3s_server_cloud_init]
}

@@ -0,0 +1,8 @@
output "k3s_server_2" {
description = "K3s Server 2 VM information"
value = {
name = proxmox_vm_qemu.k3s_server_2.name
ip = var.k3s_server_2_config.ip
node = proxmox_vm_qemu.k3s_server_2.target_node
}
}

@@ -0,0 +1,84 @@
variable "proxmox_api_url" {
description = "Proxmox API URL"
type = string
default = "https://192.168.100.10:8006/api2/json"
}
variable "proxmox_token_id" {
description = "Proxmox API Token ID"
type = string
sensitive = true
}
variable "proxmox_token_secret" {
description = "Proxmox API Token Secret"
type = string
sensitive = true
}
variable "proxmox_tls_insecure" {
description = "Skip TLS verification for Proxmox API"
type = bool
default = true
}
variable "ssh_public_key" {
description = "SSH public key for admin access"
type = string
}
variable "forgejo_token" {
description = "Forgejo token for ansible-pull authentication"
type = string
sensitive = true
}
variable "forgejo_repo_url" {
description = "Forgejo repository URL (without credentials)"
type = string
}
variable "k3s_version" {
description = "K3s version to install"
type = string
}
variable "ubuntu_template" {
description = "Ubuntu cloud-init template name"
type = string
}
variable "storage_pool" {
description = "Proxmox storage pool for VM disks"
type = string
}
variable "snippets_storage" {
description = "Proxmox storage for cloud-init snippets"
type = string
}
variable "k3s_network_bridge" {
description = "SDN bridge for K3s VMs"
type = string
}
variable "k3s_gateway" {
description = "Gateway for K3s network"
type = string
}
variable "k3s_dns" {
description = "DNS servers for K3s network"
type = list(string)
}
variable "k3s_server_2_config" {
description = "K3s server-2 VM configuration"
type = object({
ip = string
cores = number
memory = number
disk_size = string
})
}

@@ -0,0 +1,70 @@
# Cloud-init configuration for etcd-witness
locals {
  base_user_data = {
    package_upgrade = true
    packages = [
      "ansible",
      "git",
      "curl",
      "wget",
      "ca-certificates",
      "gnupg",
      "lsb-release"
    ]
    users = [
      {
        name                = "ansible"
        sudo                = "ALL=(ALL) NOPASSWD:ALL"
        shell               = "/bin/bash"
        ssh_authorized_keys = [var.ssh_public_key]
        groups              = "sudo"
      }
    ]
    timezone = "Europe/Paris"
  }

  ansible_pull_script = <<-EOT
    #!/bin/bash
    set -e
    source /etc/ansible-pull.conf
    WORK_DIR="/var/lib/ansible-local"
    mkdir -p $WORK_DIR
    cd $WORK_DIR
    REPO_WITH_AUTH=$(echo $REPO_URL | sed "s|https://|https://git:$FORGEJO_TOKEN@|")
    if [ -d ".git" ]; then
      git pull origin main 2>&1 | logger -t ansible-pull
    else
      git clone $REPO_WITH_AUTH . 2>&1 | logger -t ansible-pull
    fi
    ansible-playbook ansible/site.yml -i localhost, --connection=local -e "k3s_version=$K3S_VERSION" 2>&1 | logger -t ansible-pull
  EOT

  etcd_witness_user_data = {
    write_files = [
      {
        path        = "/etc/node-role"
        content     = "witness"
        permissions = "0644"
      },
      {
        path        = "/etc/ansible-pull.conf"
        content     = "REPO_URL=${var.forgejo_repo_url}\nFORGEJO_TOKEN=${var.forgejo_token}\nK3S_VERSION=${var.k3s_version}"
        permissions = "0600"
      },
      {
        path        = "/usr/local/bin/ansible-pull-wrapper.sh"
        content     = local.ansible_pull_script
        permissions = "0755"
      }
    ]
    runcmd = [
      "echo '*/15 * * * * root /usr/local/bin/ansible-pull-wrapper.sh' > /etc/cron.d/ansible-pull",
      "sleep 60 && /usr/local/bin/ansible-pull-wrapper.sh &"
    ]
  }
}

resource "local_file" "etcd_witness_cloud_init" {
  filename = "${path.module}/.generated/cloud-init-etcd-witness.yaml"
  content  = yamlencode(merge(local.base_user_data, local.etcd_witness_user_data))
}

terraform/pve3/main.tf (new file)
@@ -0,0 +1,64 @@
terraform {
  required_version = ">= 1.6.0"
  required_providers {
    proxmox = {
      source  = "telmate/proxmox"
      version = "~> 3.0"
    }
    local = {
      source  = "hashicorp/local"
      version = "~> 2.1"
    }
  }
}

provider "proxmox" {
  pm_api_url          = var.proxmox_api_url
  pm_api_token_id     = var.proxmox_token_id
  pm_api_token_secret = var.proxmox_token_secret
  pm_tls_insecure     = var.proxmox_tls_insecure
}

# etcd Witness VM on pve3
resource "proxmox_vm_qemu" "etcd_witness" {
  name        = "etcd-witness"
  target_node = "pve3"
  clone       = var.ubuntu_template

  cores   = var.etcd_witness_config.cores
  sockets = 1
  memory  = var.etcd_witness_config.memory

  agent  = 1
  boot   = "order=scsi0"
  scsihw = "virtio-scsi-single"
  onboot = true

  network {
    model  = "virtio"
    bridge = var.k3s_network_bridge
  }

  disks {
    scsi {
      scsi0 {
        disk {
          size     = var.etcd_witness_config.disk_size
          storage  = var.storage_pool
          iothread = true
        }
      }
    }
  }

  ipconfig0  = "ip=${var.etcd_witness_config.ip},gw=${var.k3s_gateway}"
  cicustom   = "user=${var.snippets_storage}:snippets/cloud-init-etcd-witness.yaml"
  nameserver = join(" ", var.k3s_dns)

  lifecycle {
    ignore_changes = [network]
  }

  depends_on = [local_file.etcd_witness_cloud_init]
}

@@ -0,0 +1,8 @@
output "etcd_witness" {
description = "etcd Witness VM information"
value = {
name = proxmox_vm_qemu.etcd_witness.name
ip = var.etcd_witness_config.ip
node = proxmox_vm_qemu.etcd_witness.target_node
}
}

@@ -0,0 +1,84 @@
variable "proxmox_api_url" {
description = "Proxmox API URL"
type = string
default = "https://192.168.100.10:8006/api2/json"
}
variable "proxmox_token_id" {
description = "Proxmox API Token ID"
type = string
sensitive = true
}
variable "proxmox_token_secret" {
description = "Proxmox API Token Secret"
type = string
sensitive = true
}
variable "proxmox_tls_insecure" {
description = "Skip TLS verification for Proxmox API"
type = bool
default = true
}
variable "ssh_public_key" {
description = "SSH public key for admin access"
type = string
}
variable "forgejo_token" {
description = "Forgejo token for ansible-pull authentication"
type = string
sensitive = true
}
variable "forgejo_repo_url" {
description = "Forgejo repository URL (without credentials)"
type = string
}
variable "k3s_version" {
description = "K3s version to install"
type = string
}
variable "ubuntu_template" {
description = "Ubuntu cloud-init template name"
type = string
}
variable "storage_pool" {
description = "Proxmox storage pool for VM disks"
type = string
}
variable "snippets_storage" {
description = "Proxmox storage for cloud-init snippets"
type = string
}
variable "k3s_network_bridge" {
description = "SDN bridge for K3s VMs"
type = string
}
variable "k3s_gateway" {
description = "Gateway for K3s network"
type = string
}
variable "k3s_dns" {
description = "DNS servers for K3s network"
type = list(string)
}
variable "etcd_witness_config" {
description = "etcd witness VM configuration"
type = object({
ip = string
cores = number
memory = number
disk_size = string
})
}

@@ -0,0 +1,28 @@
# Copy this file to terraform.tfvars and fill in your values

# Proxmox Configuration
proxmox_api_url      = "https://192.168.100.10:8006/api2/json"
proxmox_token_id     = "root@pam!terraform"
proxmox_token_secret = "your-proxmox-token-secret"
proxmox_tls_insecure = true

# SSH Access
ssh_public_key = "ssh-ed25519 AAAAC3... your-email@example.com"

# Forgejo Configuration
forgejo_token    = "your-forgejo-token"
forgejo_repo_url = "ssh://git@forgejo.tellserv.fr:222/Tellsanguis/infra.git"

# K3s Version
k3s_version = "v1.28.5+k3s1"

# Template and Storage
ubuntu_template  = "ubuntu-2204-cloudinit"
storage_pool     = "local-lvm"
snippets_storage = "local"

# Network
k3s_network_bridge = "k3s"
management_bridge  = "vmbr0"
k3s_gateway        = "10.100.20.1"
k3s_dns            = ["10.100.20.1", "1.1.1.1"]