From fd01ea59eee3831cc20c37e3ec9f7bc5c22ca5a0 Mon Sep 17 00:00:00 2001 From: Tellsanguis Date: Sun, 23 Nov 2025 19:40:17 +0100 Subject: [PATCH] Commit initial : infrastructure Ansible pour homeserver MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit - Playbooks Ansible avec rôles (common, cockpit, docker, services) - 30+ stacks Docker Compose avec reverse proxy Traefik - Ansible Vault pour gestion secrets - Intégration CrowdSec pour détection intrusions - Versions images Docker fixées pour reproductibilité --- .gitignore | 126 +++++ LICENSE | 21 + README.md | 148 +++++ inventory/hosts.yml.example | 9 + playbook.yml | 15 + roles/cockpit/handlers/main.yml | 4 + roles/cockpit/tasks/main.yml | 52 ++ roles/common/handlers/main.yml | 4 + roles/common/tasks/main.yml | 104 ++++ roles/docker/tasks/main.yml | 67 +++ roles/services/tasks/main.yml | 222 ++++++++ stacks/audiobookshelf/compose.yml | 36 ++ stacks/autoheal/compose.yml | 15 + stacks/beszel/compose.yml | 41 ++ stacks/bin/compose.yml | 53 ++ stacks/blog/compose.yml | 39 ++ stacks/clipcascade/compose.yml | 26 + stacks/crowdsec/command_api.txt | 8 + stacks/crowdsec/compose.yml | 37 ++ stacks/crowdsec/config/acquis.yaml | 4 + stacks/crowdsec/config/config.yaml | 49 ++ stacks/crowdsec/config/console.yaml | 4 + stacks/crowdsec/config/console/context.yaml | 0 stacks/crowdsec/config/dev.yaml | 47 ++ .../crowdsec/config/notifications/email.yaml | 55 ++ .../crowdsec/config/notifications/http.yaml | 43 ++ .../config/notifications/sentinel.yaml | 21 + .../crowdsec/config/notifications/slack.yaml | 42 ++ .../crowdsec/config/notifications/splunk.yaml | 28 + .../config/parsers/s02-enrich/whitelist.yaml | 6 + stacks/crowdsec/config/patterns/aws | 11 + stacks/crowdsec/config/patterns/bacula | 50 ++ stacks/crowdsec/config/patterns/bro | 13 + .../crowdsec/config/patterns/cowrie_honeypot | 1 + stacks/crowdsec/config/patterns/exim | 12 + stacks/crowdsec/config/patterns/firewalls | 86 +++ 
stacks/crowdsec/config/patterns/haproxy | 39 ++ stacks/crowdsec/config/patterns/java | 20 + stacks/crowdsec/config/patterns/junos | 8 + stacks/crowdsec/config/patterns/linux-syslog | 16 + stacks/crowdsec/config/patterns/mcollective | 4 + stacks/crowdsec/config/patterns/modsecurity | 18 + stacks/crowdsec/config/patterns/mongodb | 7 + stacks/crowdsec/config/patterns/mysql | 1 + stacks/crowdsec/config/patterns/nagios | 124 +++++ stacks/crowdsec/config/patterns/nginx | 19 + stacks/crowdsec/config/patterns/paths | 14 + stacks/crowdsec/config/patterns/postgresql | 2 + stacks/crowdsec/config/patterns/rails | 18 + stacks/crowdsec/config/patterns/redis | 21 + stacks/crowdsec/config/patterns/ruby | 2 + stacks/crowdsec/config/patterns/smb | 1 + stacks/crowdsec/config/patterns/ssh | 61 ++ stacks/crowdsec/config/patterns/tcpdump | 1 + stacks/crowdsec/config/profiles.yaml | 29 + stacks/crowdsec/config/simulation.yaml | 3 + stacks/crowdsec/config/user.yaml | 38 ++ stacks/dockge/compose.yml | 25 + stacks/etesync/compose.yml | 39 ++ stacks/feedropolis/compose.yml | 42 ++ stacks/freshrss/compose.yml | 28 + stacks/glance/assets/user.css | 0 stacks/glance/compose.yml | 66 +++ stacks/glance/config/glance.yml | 12 + stacks/glance/config/home.yml | 184 ++++++ stacks/glance/config/includes/containers.yml | 141 +++++ stacks/glance/config/secrets/plex-token.txt | 1 + stacks/glance/container-builder/Dockerfile | 9 + .../generate_containers_block.py | 124 +++++ .../container-builder/icon_overrides.json | 32 ++ stacks/glance/rss-builder/Dockerfile | 7 + stacks/glance/rss-builder/generate_rss.py | 37 ++ stacks/glance/rss/index.xml | 44 ++ stacks/glance/updates/updates.md | 20 + stacks/gotify/compose.yml | 34 ++ stacks/headscale/compose.yml | 52 ++ stacks/headscale/conf/config.yaml | 72 +++ stacks/joal/compose.yml | 35 ++ stacks/kavita/Dockerfile | 39 ++ stacks/kavita/compose.yml | 46 ++ stacks/kavita/kavita_script.py | 526 ++++++++++++++++++ stacks/kopia/compose.yml | 46 ++ 
stacks/larabouillere/compose.yml | 34 ++ stacks/loggifly/compose.yml | 23 + stacks/mobilizon/compose.yml | 44 ++ stacks/paperless/compose.yml | 57 ++ stacks/photoprism/compose.yml | 91 +++ stacks/pingvin/compose.yml | 35 ++ stacks/plex/compose.yml | 47 ++ stacks/qbittorrent/compose.yml | 42 ++ stacks/searxng/compose.yaml | 59 ++ stacks/searxng/searxng/limiter.toml | 3 + stacks/searxng/searxng/settings.yml | 18 + stacks/stirlingpdf/compose.yml | 29 + stacks/tinyauth/compose.yml | 28 + stacks/traefik/compose.yml | 62 +++ stacks/traefik/dynamic-private/cockpit.yml | 25 + .../traefik/dynamic-private/middlewares.yml | 23 + stacks/traefik/dynamic-private/proxmox.yml | 25 + stacks/traefik/dynamic-public/middlewares.yml | 16 + stacks/traefik/traefik-private.yml | 53 ++ stacks/traefik/traefik-public.yml | 58 ++ stacks/uptime-kuma/compose.yml | 27 + stacks/vaultwarden/compose.yml | 36 ++ stacks/vikunja/compose.yml | 30 + stacks/watchtower/compose.yml | 25 + stacks/webdav/compose.yaml | 59 ++ stacks/yamtrack/compose.yml | 51 ++ templates/env/crowdsec.env.j2 | 1 + templates/env/etesync.env.j2 | 2 + templates/env/feedropolis.env.j2 | 2 + templates/env/glance.env.j2 | 3 + templates/env/joal.env.j2 | 1 + templates/env/mobilizon.env.j2 | 51 ++ templates/env/photoprism.env.j2 | 6 + templates/env/plex.env.j2 | 2 + templates/env/searxng.env.j2 | 1 + templates/env/tinyauth.env.j2 | 4 + templates/env/traefik.env.j2 | 1 + templates/env/vaultwarden.env.j2 | 8 + templates/env/vikunja.env.j2 | 1 + templates/env/watchtower.env.j2 | 2 + templates/env/webdav.env.j2 | 2 + templates/env/yamtrack.env.j2 | 3 + vars/secrets.yml.example | 72 +++ 125 files changed, 4768 insertions(+) create mode 100644 .gitignore create mode 100644 LICENSE create mode 100644 README.md create mode 100644 inventory/hosts.yml.example create mode 100644 playbook.yml create mode 100644 roles/cockpit/handlers/main.yml create mode 100644 roles/cockpit/tasks/main.yml create mode 100644 roles/common/handlers/main.yml create 
mode 100644 roles/common/tasks/main.yml create mode 100644 roles/docker/tasks/main.yml create mode 100644 roles/services/tasks/main.yml create mode 100644 stacks/audiobookshelf/compose.yml create mode 100644 stacks/autoheal/compose.yml create mode 100644 stacks/beszel/compose.yml create mode 100644 stacks/bin/compose.yml create mode 100644 stacks/blog/compose.yml create mode 100644 stacks/clipcascade/compose.yml create mode 100644 stacks/crowdsec/command_api.txt create mode 100644 stacks/crowdsec/compose.yml create mode 100644 stacks/crowdsec/config/acquis.yaml create mode 100644 stacks/crowdsec/config/config.yaml create mode 100644 stacks/crowdsec/config/console.yaml create mode 100644 stacks/crowdsec/config/console/context.yaml create mode 100644 stacks/crowdsec/config/dev.yaml create mode 100644 stacks/crowdsec/config/notifications/email.yaml create mode 100644 stacks/crowdsec/config/notifications/http.yaml create mode 100644 stacks/crowdsec/config/notifications/sentinel.yaml create mode 100644 stacks/crowdsec/config/notifications/slack.yaml create mode 100644 stacks/crowdsec/config/notifications/splunk.yaml create mode 100644 stacks/crowdsec/config/parsers/s02-enrich/whitelist.yaml create mode 100644 stacks/crowdsec/config/patterns/aws create mode 100644 stacks/crowdsec/config/patterns/bacula create mode 100644 stacks/crowdsec/config/patterns/bro create mode 100644 stacks/crowdsec/config/patterns/cowrie_honeypot create mode 100644 stacks/crowdsec/config/patterns/exim create mode 100644 stacks/crowdsec/config/patterns/firewalls create mode 100644 stacks/crowdsec/config/patterns/haproxy create mode 100644 stacks/crowdsec/config/patterns/java create mode 100644 stacks/crowdsec/config/patterns/junos create mode 100644 stacks/crowdsec/config/patterns/linux-syslog create mode 100644 stacks/crowdsec/config/patterns/mcollective create mode 100644 stacks/crowdsec/config/patterns/modsecurity create mode 100644 stacks/crowdsec/config/patterns/mongodb create mode 100644 
stacks/crowdsec/config/patterns/mysql create mode 100644 stacks/crowdsec/config/patterns/nagios create mode 100644 stacks/crowdsec/config/patterns/nginx create mode 100644 stacks/crowdsec/config/patterns/paths create mode 100644 stacks/crowdsec/config/patterns/postgresql create mode 100644 stacks/crowdsec/config/patterns/rails create mode 100644 stacks/crowdsec/config/patterns/redis create mode 100644 stacks/crowdsec/config/patterns/ruby create mode 100644 stacks/crowdsec/config/patterns/smb create mode 100644 stacks/crowdsec/config/patterns/ssh create mode 100644 stacks/crowdsec/config/patterns/tcpdump create mode 100644 stacks/crowdsec/config/profiles.yaml create mode 100644 stacks/crowdsec/config/simulation.yaml create mode 100644 stacks/crowdsec/config/user.yaml create mode 100644 stacks/dockge/compose.yml create mode 100644 stacks/etesync/compose.yml create mode 100644 stacks/feedropolis/compose.yml create mode 100644 stacks/freshrss/compose.yml create mode 100644 stacks/glance/assets/user.css create mode 100644 stacks/glance/compose.yml create mode 100644 stacks/glance/config/glance.yml create mode 100644 stacks/glance/config/home.yml create mode 100644 stacks/glance/config/includes/containers.yml create mode 100644 stacks/glance/config/secrets/plex-token.txt create mode 100644 stacks/glance/container-builder/Dockerfile create mode 100644 stacks/glance/container-builder/generate_containers_block.py create mode 100644 stacks/glance/container-builder/icon_overrides.json create mode 100644 stacks/glance/rss-builder/Dockerfile create mode 100644 stacks/glance/rss-builder/generate_rss.py create mode 100644 stacks/glance/rss/index.xml create mode 100644 stacks/glance/updates/updates.md create mode 100644 stacks/gotify/compose.yml create mode 100644 stacks/headscale/compose.yml create mode 100644 stacks/headscale/conf/config.yaml create mode 100644 stacks/joal/compose.yml create mode 100644 stacks/kavita/Dockerfile create mode 100644 stacks/kavita/compose.yml create 
mode 100644 stacks/kavita/kavita_script.py create mode 100644 stacks/kopia/compose.yml create mode 100644 stacks/larabouillere/compose.yml create mode 100644 stacks/loggifly/compose.yml create mode 100644 stacks/mobilizon/compose.yml create mode 100644 stacks/paperless/compose.yml create mode 100644 stacks/photoprism/compose.yml create mode 100644 stacks/pingvin/compose.yml create mode 100644 stacks/plex/compose.yml create mode 100644 stacks/qbittorrent/compose.yml create mode 100644 stacks/searxng/compose.yaml create mode 100644 stacks/searxng/searxng/limiter.toml create mode 100644 stacks/searxng/searxng/settings.yml create mode 100644 stacks/stirlingpdf/compose.yml create mode 100644 stacks/tinyauth/compose.yml create mode 100644 stacks/traefik/compose.yml create mode 100644 stacks/traefik/dynamic-private/cockpit.yml create mode 100644 stacks/traefik/dynamic-private/middlewares.yml create mode 100644 stacks/traefik/dynamic-private/proxmox.yml create mode 100644 stacks/traefik/dynamic-public/middlewares.yml create mode 100644 stacks/traefik/traefik-private.yml create mode 100644 stacks/traefik/traefik-public.yml create mode 100644 stacks/uptime-kuma/compose.yml create mode 100644 stacks/vaultwarden/compose.yml create mode 100644 stacks/vikunja/compose.yml create mode 100644 stacks/watchtower/compose.yml create mode 100644 stacks/webdav/compose.yaml create mode 100644 stacks/yamtrack/compose.yml create mode 100644 templates/env/crowdsec.env.j2 create mode 100644 templates/env/etesync.env.j2 create mode 100644 templates/env/feedropolis.env.j2 create mode 100644 templates/env/glance.env.j2 create mode 100644 templates/env/joal.env.j2 create mode 100644 templates/env/mobilizon.env.j2 create mode 100644 templates/env/photoprism.env.j2 create mode 100644 templates/env/plex.env.j2 create mode 100644 templates/env/searxng.env.j2 create mode 100644 templates/env/tinyauth.env.j2 create mode 100644 templates/env/traefik.env.j2 create mode 100644 
templates/env/vaultwarden.env.j2 create mode 100644 templates/env/vikunja.env.j2 create mode 100644 templates/env/watchtower.env.j2 create mode 100644 templates/env/webdav.env.j2 create mode 100644 templates/env/yamtrack.env.j2 create mode 100644 vars/secrets.yml.example diff --git a/.gitignore b/.gitignore new file mode 100644 index 0000000..b28d742 --- /dev/null +++ b/.gitignore @@ -0,0 +1,126 @@ +# ============================================================================= +# Secrets and credentials +# ============================================================================= +**/.env +**/credentials*.yaml +**/credentials*.yml +**/*_credentials.yaml +**/*_credentials.yml +**/id_ed25519 +**/id_rsa +**/*.pem +**/*.key + +# Ansible Vault password file +.vault_pass + +# Secrets file (encrypt before committing or keep out of repo) +vars/secrets.yml + +# Inventory file +inventory/hosts.yml + + +# ============================================================================= +# Databases +# ============================================================================= +**/*.db +**/*.db-shm +**/*.db-wal +**/*.mv.db +**/*.sqlite +**/*.sqlite3 + +# ============================================================================= +# Logs +# ============================================================================= +**/*.log + +# ============================================================================= +# Let's Encrypt certificates (generated at runtime) +# ============================================================================= +stacks/traefik/letsencrypt-private/ +stacks/traefik/letsencrypt-public/ + +# ============================================================================= +# Service data directories (runtime data, not configuration) +# ============================================================================= + +# Beszel +stacks/beszel/beszel_data/ +stacks/beszel/beszel_socket/ + +# Blog (Ghost) +stacks/blog/ghost/content/ + +# Clipcascade 
+stacks/clipcascade/cc_users/ + +# CrowdSec - data and downloaded hub content +stacks/crowdsec/data/ +stacks/crowdsec/config/hub/ +stacks/crowdsec/config/local_api_credentials.yaml +stacks/crowdsec/config/online_api_credentials.yaml + +# Dockge +stacks/dockge/data/ + +# Etesync +stacks/etesync/data/ + +# Gotify +stacks/gotify/data/ + +# Headscale +stacks/headscale/data/ + +# Kavita +stacks/kavita/data/ + +# Kopia +stacks/kopia/cache/ +stacks/kopia/config/ +stacks/kopia/logs/ + +# Larabouillere (Ghost) +stacks/larabouillere/ghost/content/ + +# Mobilizon +stacks/mobilizon/db/ +stacks/mobilizon/tzdata/ +stacks/mobilizon/uploads/ + +# Photoprism +stacks/photoprism/database/ + +# Plex +stacks/plex/config/ + +# Uptime Kuma +stacks/uptime-kuma/data/ + +# Vaultwarden +stacks/vaultwarden/vw-data/ + +# Yamtrack +stacks/yamtrack/db/ + +# ============================================================================= +# Temporary and cache files +# ============================================================================= +**/__pycache__/ +**/*.pyc +**/.DS_Store +**/Thumbs.db +**/*.tmp +**/*.temp +**/*.swp +**/*.swo +**/node_modules/ + +# ============================================================================= +# IDE and editor files +# ============================================================================= +.idea/ +.vscode/ +*.sublime-* diff --git a/LICENSE b/LICENSE new file mode 100644 index 0000000..844ff64 --- /dev/null +++ b/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2025 + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this 
permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/README.md b/README.md new file mode 100644 index 0000000..f971876 --- /dev/null +++ b/README.md @@ -0,0 +1,148 @@ +# Homeserver Infrastructure + +Infrastructure as Code (IaC) pour provisionner un homeserver Ubuntu avec Ansible et Docker Compose. + +## Prérequis + +- Ansible 2.15+ +- Accès SSH au serveur cible +- Python 3.x sur la machine de contrôle + +## Architecture + +``` +┌─────────────────────────────────────────────────────────────────┐ +│ Internet │ +└─────────────────────────┬───────────────────────────────────────┘ + │ + ┌───────────▼───────────┐ + │ Traefik Public │ ← Reverse proxy (192.168.1.2) + │ + CrowdSec Bouncer │ SSL via Cloudflare DNS + └───────────┬───────────┘ + │ + ┌─────────────────────┼─────────────────────┐ + │ │ │ + ▼ ▼ ▼ +┌────────┐ ┌──────────┐ ┌──────────┐ +│Services│ │ Services │ │ Services │ +│Publics │ │ Protégés │ │ Locaux │ +└────────┘ │(TinyAuth)│ └──────────┘ + └──────────┘ │ + │ + ┌───────────────▼───────────┐ + │ Traefik Private │ + │ (192.168.1.3) │ + └───────────────────────────┘ +``` + +### Rôles Ansible + +| Rôle | Description | +|------|-------------| +| `common` | Paquets de base, dnsmasq (DNS local `*.local.tellserv.fr`), firewalld | +| `cockpit` | Interface web d'administration serveur | +| `docker` | Docker CE, réseau `traefik_network` | +| `services` | Synchronisation et déploiement des stacks Docker | + +### Services déployés + +**Infrastructure :** 
Traefik, CrowdSec, Watchtower, Autoheal, Beszel, Uptime-Kuma, Dockge, Kopia + +**Productivité :** Vaultwarden, Vikunja, Paperless-ngx, Kavita, FreshRSS, SearXNG + +**Media :** Plex, Photoprism, Audiobookshelf, qBittorrent + +**Autres :** Gotify, Glance, Ghost (blog), Mobilizon, EteSync + +## Installation + +### 1. Configuration de l'inventaire + +Modifiez `inventory/hosts.yml` avec l'IP et l'utilisateur SSH de votre serveur : + +```yaml +all: + children: + homeserver: + hosts: + 192.168.x.x: + ansible_user: votre_utilisateur + ansible_ssh_private_key_file: ~/.ssh/votre_cle +``` + +### 2. Configuration des secrets + +```bash +# Copier le fichier d'exemple +cp vars/secrets.yml.example vars/secrets.yml + +# Éditer avec vos vraies valeurs +nano vars/secrets.yml + +# Chiffrer avec Ansible Vault +ansible-vault encrypt vars/secrets.yml +``` + +### 3. Exécution + +```bash +# Provisionnement complet +ansible-playbook -i inventory/hosts.yml playbook.yml --ask-vault-pass + +# Générer uniquement les fichiers .env (sans déployer) +ansible-playbook -i inventory/hosts.yml playbook.yml --tags env --ask-vault-pass + +# Déployer un service spécifique +ansible-playbook -i inventory/hosts.yml playbook.yml --tags traefik --ask-vault-pass +``` + +## Structure du projet + +``` +. +├── inventory/ +│ └── hosts.yml # Inventaire des serveurs +├── roles/ +│ ├── common/ # Configuration système de base +│ ├── cockpit/ # Interface web admin +│ ├── docker/ # Installation Docker +│ └── services/ # Déploiement des stacks +├── stacks/ # Docker Compose projects +│ ├── traefik/ +│ ├── vaultwarden/ +│ └── ... +├── templates/ +│ └── env/ # Templates .env.j2 (secrets) +├── vars/ +│ ├── secrets.yml # Secrets chiffrés (Vault) +│ └── secrets.yml.example # Template des secrets +└── playbook.yml # Playbook principal +``` + +## Ajout d'un nouveau service + +1. Créer `stacks//compose.yml` +2. Ajouter les labels Traefik pour le routage +3. Si secrets nécessaires : créer `templates/env/.env.j2` +4. 
Ajouter le service dans `roles/services/tasks/main.yml` + +## Sécurité + +- **Secrets** : Tous les secrets sont gérés via Ansible Vault +- **CrowdSec** : Protection IPS avec blocage automatique +- **TinyAuth** : Authentification OAuth pour les services sensibles +- **Firewalld** : Pare-feu configuré automatiquement +- **TLS** : Certificats Let's Encrypt via Cloudflare DNS challenge + +## CI/CD + +Le CI/CD n'est pas implémenté dans ce dépôt. Le projet migre vers [Forgejo](https://forgejo.tellserv.fr/Tellsanguis/Homelab). + +Une approche CI/CD possible : +- **Forgejo Runner** pour l'exécution des pipelines +- **Secrets Forgejo** pour l'IP serveur et l'utilisateur SSH (génération de `inventory/hosts.yml`) +- **Renovate Bot** pour les mises à jour d'images Docker via PR automatiques (alternative à Watchtower) + +## License + +MIT License - voir [LICENSE](LICENSE) diff --git a/inventory/hosts.yml.example b/inventory/hosts.yml.example new file mode 100644 index 0000000..0172cdb --- /dev/null +++ b/inventory/hosts.yml.example @@ -0,0 +1,9 @@ +# Copy to hosts.yml and fill with your server details +all: + children: + homeserver: + hosts: + 192.168.x.x: + ansible_user: your_username + ansible_become: true + ansible_ssh_private_key_file: ~/.ssh/your_key diff --git a/playbook.yml b/playbook.yml new file mode 100644 index 0000000..90ce142 --- /dev/null +++ b/playbook.yml @@ -0,0 +1,15 @@ +- name: Provision Homelab + hosts: homeserver + become: true + + vars: + cloudflare_dns: "1.1.1.1" + + vars_files: + - vars/secrets.yml + + roles: + - common + - cockpit + - docker + - services \ No newline at end of file diff --git a/roles/cockpit/handlers/main.yml b/roles/cockpit/handlers/main.yml new file mode 100644 index 0000000..4b67e7a --- /dev/null +++ b/roles/cockpit/handlers/main.yml @@ -0,0 +1,4 @@ +- name: Redémarrer Cockpit + service: + name: cockpit + state: restarted \ No newline at end of file diff --git a/roles/cockpit/tasks/main.yml b/roles/cockpit/tasks/main.yml new file 
mode 100644 index 0000000..b2251ec --- /dev/null +++ b/roles/cockpit/tasks/main.yml @@ -0,0 +1,52 @@ +- name: Ajouter la source noble-backports + copy: + dest: /etc/apt/sources.list.d/noble-backports.sources + content: | + Types: deb + URIs: http://archive.ubuntu.com/ubuntu + Suites: noble-backports + Components: main universe + Architectures: amd64 + +- name: Installer les outils nécessaires à add-apt-repository + apt: + name: software-properties-common + state: present + update_cache: yes + +- name: Ajouter le PPA pitti/cockpit-files + apt_repository: + repo: ppa:pitti/cockpit-files + state: present + +- name: Mettre à jour la liste des paquets après ajout des sources + apt: + update_cache: yes + cache_valid_time: 3600 + +- name: Installer Cockpit depuis noble-backports + apt: + name: cockpit + state: latest + default_release: noble-backports + +- name: Installer cockpit-files depuis le PPA + apt: + name: cockpit-files + state: latest + +- name: Activer Cockpit + service: + name: cockpit + state: started + enabled: true + +- name: Créer le fichier cockpit.conf pour Traefik + copy: + dest: /etc/cockpit/cockpit.conf + content: | + [WebService] + Origins = https://cockpit.local.tellserv.fr + ProtocolHeader = X-Forwarded-Proto + AllowUnencrypted = true + notify: Redémarrer Cockpit \ No newline at end of file diff --git a/roles/common/handlers/main.yml b/roles/common/handlers/main.yml new file mode 100644 index 0000000..1c6379e --- /dev/null +++ b/roles/common/handlers/main.yml @@ -0,0 +1,4 @@ +- name: Restart dnsmasq + service: + name: dnsmasq + state: restarted \ No newline at end of file diff --git a/roles/common/tasks/main.yml b/roles/common/tasks/main.yml new file mode 100644 index 0000000..af84eb8 --- /dev/null +++ b/roles/common/tasks/main.yml @@ -0,0 +1,104 @@ +- name: Installer paquets utilitaires + apt: + name: + - git + - curl + - htop + - firewalld + - mergerfs + - udev + - util-linux + - dnsmasq + state: latest + +- name: Désactiver systemd-resolved + 
systemd: + name: systemd-resolved + enabled: no + state: stopped + +- name: Supprimer le lien symbolique resolv.conf géré par systemd-resolved + file: + path: /etc/resolv.conf + state: absent + +- name: Créer un nouveau resolv.conf classique pointant sur dnsmasq + copy: + dest: /etc/resolv.conf + owner: root + group: root + mode: '0644' + content: | + nameserver 127.0.0.1 + +- name: Configurer dnsmasq pour résolution locale *.local.tellserv.fr et relay pour tellserv.fr (port 53) + copy: + dest: /etc/dnsmasq.d/tellserv.conf + owner: root + group: root + mode: "0644" + content: | + # Résolution locale pour *.local.tellserv.fr + address=/.local.tellserv.fr/{{ ansible_default_ipv4.address }} + # Serveur DNS en amont par défaut (tout autre domaine) + server=1.1.1.1 + # Écoute sur le port 53 + listen-address=127.0.0.1,{{ ansible_default_ipv4.address }},100.64.0.2 + port=53 + # Ne pas échouer si une interface manque + bind-dynamic + notify: Restart dnsmasq + +- name: Démarrer et activer dnsmasq + service: + name: dnsmasq + state: started + enabled: true + +- name: Configurer et démarrer firewalld + service: + name: firewalld + state: started + enabled: true + +- name: Ouvrir SSH (22/tcp) + firewalld: + port: 22/tcp + permanent: yes + state: enabled + +- name: Ouvrir HTTP (80/tcp) et HTTPS (443/tcp) + firewalld: + port: "{{ item }}" + permanent: yes + state: enabled + loop: + - 80/tcp + - 443/tcp + +- name: Ouvrir port DNSMasq (53/udp) pour la résolution locale + firewalld: + port: 53/udp + permanent: yes + state: enabled + +- name: Ouvrir port DNSMasq (53/tcp) pour la résolution locale + firewalld: + port: 53/tcp + permanent: yes + state: enabled + +- name: Ouvrir port Minecraft (25565/tcp) + firewalld: + port: 25565/tcp + permanent: yes + state: enabled + +- name: Reload firewalld to apply changes + command: firewall-cmd --reload + +- name: Créer le répertoire de stockage MergerFS + file: + path: /mnt/storage + state: directory + mode: '0755' \ No newline at end of file 
diff --git a/roles/docker/tasks/main.yml b/roles/docker/tasks/main.yml new file mode 100644 index 0000000..b4382f3 --- /dev/null +++ b/roles/docker/tasks/main.yml @@ -0,0 +1,67 @@ +- name: Mettre à jour cache apt + apt: + update_cache: yes +- name: Installer les prérequis + apt: + name: + - ca-certificates + - curl + state: present +- name: Créer le répertoire keyrings + file: + path: /etc/apt/keyrings + state: directory + mode: '0755' +- name: Télécharger la clé GPG Docker + get_url: + url: https://download.docker.com/linux/ubuntu/gpg + dest: /etc/apt/keyrings/docker.asc + mode: '0644' +- name: Configurer le dépôt Docker + shell: | + echo "deb [arch=$(dpkg --print-architecture) signed-by=/etc/apt/keyrings/docker.asc] https://download.docker.com/linux/ubuntu \ + $(. /etc/os-release && echo "${UBUNTU_CODENAME:-$VERSION_CODENAME}") stable" | \ + tee /etc/apt/sources.list.d/docker.list > /dev/null + args: + executable: /bin/bash + creates: /etc/apt/sources.list.d/docker.list +- name: Mettre à jour cache apt après ajout du dépôt + apt: + update_cache: yes +- name: Installer Docker Engine et plugins + apt: + name: + - docker-ce + - docker-ce-cli + - containerd.io + - docker-buildx-plugin + - docker-compose-plugin + state: latest +- name: Activer et démarrer le service Docker + systemd: + name: docker + enabled: true + state: started +- name: Ajouter l'utilisateur au groupe docker (optionnel) + user: + name: "{{ ansible_user }}" + groups: docker + append: yes + +- name: Créer le réseau Docker pour Traefik + shell: docker network create traefik_network || true + become: yes + +- name: Créer le répertoire pour les logs Traefik + file: + path: /var/log/traefik + state: directory + mode: '0755' + become: yes + +- name: Créer le répertoire pour les certificats Let's Encrypt + file: + path: /etc/letsencrypt/traefik + state: directory + mode: '0755' + become: yes \ No newline at end of file diff --git a/roles/services/tasks/main.yml b/roles/services/tasks/main.yml new file mode 
100644 index 0000000..692f33d --- /dev/null +++ b/roles/services/tasks/main.yml @@ -0,0 +1,222 @@ +# ============================================================================= +# Generate .env files from templates (secrets from Vault) +# ============================================================================= + +- name: Générer les fichiers .env depuis les templates + ansible.builtin.template: + src: "{{ playbook_dir }}/templates/env/{{ item }}.env.j2" + dest: "{{ playbook_dir }}/stacks/{{ item }}/.env" + mode: '0600' + loop: + - traefik + - tinyauth + - vaultwarden + - crowdsec + - photoprism + - vikunja + - mobilizon + - etesync + - plex + - yamtrack + - joal + - feedropolis + - webdav + - searxng + - glance + - watchtower + delegate_to: localhost + become: no + tags: + - env + - secrets + +# ============================================================================= +# Sync stacks to server +# ============================================================================= + +- name: Synchroniser le dossier stacks depuis la machine de gestion + ansible.builtin.copy: + src: "{{ playbook_dir }}/stacks/" + dest: /opt/stacks/ + mode: preserve + become: yes + tags: + - sync + - deploy + +# ============================================================================= +# Deploy all stacks +# ============================================================================= + +- name: Chercher tous les fichiers compose.yml + ansible.builtin.find: + paths: /opt/stacks + patterns: "compose.yml,compose.yaml" + recurse: yes + register: compose_files + tags: + - deploy + +- name: Vérifier si les containers existent déjà + ansible.builtin.shell: docker ps -a --format {% raw %}"{{.Names}}"{% endraw %} + register: existing_containers + changed_when: false + tags: + - deploy + +- name: Arrêter et supprimer les conteneurs existants si nécessaire + ansible.builtin.command: + cmd: docker compose down + chdir: "{{ item.path | dirname }}" + loop: "{{ compose_files.files }}" + 
loop_control: + label: "{{ item.path | dirname | basename }}" + when: item.path | dirname | basename in existing_containers.stdout_lines + ignore_errors: yes + tags: + - deploy + +- name: Mettre à jour les images + ansible.builtin.command: + cmd: docker compose pull + chdir: "{{ item.path | dirname }}" + loop: "{{ compose_files.files }}" + loop_control: + label: "{{ item.path | dirname | basename }}" + tags: + - deploy + - pull + +- name: Déployer chaque stack via docker compose + ansible.builtin.command: + cmd: docker compose up -d --build + chdir: "{{ item.path | dirname }}" + loop: "{{ compose_files.files }}" + loop_control: + label: "{{ item.path | dirname | basename }}" + tags: + - deploy + +# ============================================================================= +# Individual stack deployment tasks (use with --tags ) +# ============================================================================= + +- name: Déployer Traefik + ansible.builtin.command: + cmd: docker compose up -d + chdir: /opt/stacks/traefik + tags: [traefik, never] + +- name: Déployer CrowdSec + ansible.builtin.command: + cmd: docker compose up -d + chdir: /opt/stacks/crowdsec + tags: [crowdsec, never] + +- name: Déployer Vaultwarden + ansible.builtin.command: + cmd: docker compose up -d + chdir: /opt/stacks/vaultwarden + tags: [vaultwarden, never] + +- name: Déployer TinyAuth + ansible.builtin.command: + cmd: docker compose up -d + chdir: /opt/stacks/tinyauth + tags: [tinyauth, never] + +- name: Déployer Photoprism + ansible.builtin.command: + cmd: docker compose up -d + chdir: /opt/stacks/photoprism + tags: [photoprism, never] + +- name: Déployer Vikunja + ansible.builtin.command: + cmd: docker compose up -d + chdir: /opt/stacks/vikunja + tags: [vikunja, never] + +- name: Déployer Mobilizon + ansible.builtin.command: + cmd: docker compose up -d + chdir: /opt/stacks/mobilizon + tags: [mobilizon, never] + +- name: Déployer Plex + ansible.builtin.command: + cmd: docker compose up -d + 
chdir: /opt/stacks/plex + tags: [plex, never] + +- name: Déployer Kavita + ansible.builtin.command: + cmd: docker compose up -d + chdir: /opt/stacks/kavita + tags: [kavita, never] + +- name: Déployer Glance + ansible.builtin.command: + cmd: docker compose up -d + chdir: /opt/stacks/glance + tags: [glance, never] + +- name: Déployer Uptime-Kuma + ansible.builtin.command: + cmd: docker compose up -d + chdir: /opt/stacks/uptime-kuma + tags: [uptime-kuma, never] + +- name: Déployer Gotify + ansible.builtin.command: + cmd: docker compose up -d + chdir: /opt/stacks/gotify + tags: [gotify, never] + +- name: Déployer Paperless + ansible.builtin.command: + cmd: docker compose up -d + chdir: /opt/stacks/paperless + tags: [paperless, never] + +- name: Déployer FreshRSS + ansible.builtin.command: + cmd: docker compose up -d + chdir: /opt/stacks/freshrss + tags: [freshrss, never] + +- name: Déployer SearXNG + ansible.builtin.command: + cmd: docker compose up -d + chdir: /opt/stacks/searxng + tags: [searxng, never] + +- name: Déployer Headscale + ansible.builtin.command: + cmd: docker compose up -d + chdir: /opt/stacks/headscale + tags: [headscale, never] + +- name: Déployer Kopia + ansible.builtin.command: + cmd: docker compose up -d + chdir: /opt/stacks/kopia + tags: [kopia, never] + +- name: Déployer Blog + ansible.builtin.command: + cmd: docker compose up -d + chdir: /opt/stacks/blog + tags: [blog, never] + +- name: Déployer Larabouillere + ansible.builtin.command: + cmd: docker compose up -d + chdir: /opt/stacks/larabouillere + tags: [larabouillere, never] + +- name: Déployer Watchtower + ansible.builtin.command: + cmd: docker compose up -d + chdir: /opt/stacks/watchtower + tags: [watchtower, never] diff --git a/stacks/audiobookshelf/compose.yml b/stacks/audiobookshelf/compose.yml new file mode 100644 index 0000000..1e1d8b3 --- /dev/null +++ b/stacks/audiobookshelf/compose.yml @@ -0,0 +1,36 @@ +services: + audiobookshelf: + container_name: audiobookshelf + image: 
ghcr.io/advplyr/audiobookshelf:2.24.0 + volumes: + - /mnt/storage/audiobookshelf/audiobooks:/audiobooks + - /mnt/storage/audiobookshelf/podcasts:/podcasts + - /mnt/storage/audiobookshelf/config:/config + - /mnt/storage/audiobookshelf/metadata:/metadata + networks: + - traefik_network + labels: + - "traefik.enable=true" + # Local + - "traefik.http.routers.${COMPOSE_PROJECT_NAME}-local.rule=Host(`${COMPOSE_PROJECT_NAME}.local.tellserv.fr`)" + - "traefik.http.routers.${COMPOSE_PROJECT_NAME}-local.entryPoints=local" + - "traefik.http.routers.${COMPOSE_PROJECT_NAME}-local.tls.certresolver=cloudflare-local" + - "traefik.http.routers.${COMPOSE_PROJECT_NAME}-local.tls=true" + # Production + - "traefik.http.routers.${COMPOSE_PROJECT_NAME}-prod.rule=Host(`${COMPOSE_PROJECT_NAME}.tellserv.fr`)" + - "traefik.http.routers.${COMPOSE_PROJECT_NAME}-prod.entryPoints=websecure" + - "traefik.http.routers.${COMPOSE_PROJECT_NAME}-prod.tls=true" + - "traefik.http.routers.${COMPOSE_PROJECT_NAME}-prod.tls.certResolver=cloudflare" + #- "traefik.http.routers.${COMPOSE_PROJECT_NAME}-prod.middlewares=tinyauth" + - "traefik.http.services.${COMPOSE_PROJECT_NAME}.loadbalancer.server.port=80" + + # Flame Dashboard Integration + + # Watchtower Auto-Update + - "com.centurylinklabs.watchtower.enable=true" + + restart: unless-stopped + +networks: + traefik_network: + external: true diff --git a/stacks/autoheal/compose.yml b/stacks/autoheal/compose.yml new file mode 100644 index 0000000..62b1329 --- /dev/null +++ b/stacks/autoheal/compose.yml @@ -0,0 +1,15 @@ +services: + autoheal: + container_name: autoheal + image: willfarrell/autoheal:1.2.0 + restart: always + environment: + AUTOHEAL_CONTAINER_LABEL: all + AUTOHEAL_INTERVAL: 5 + AUTOHEAL_START_PERIOD: 0 + AUTOHEAL_DEFAULT_STOP_TIMEOUT: 10 + AUTOHEAL_ONLY_MONITOR_RUNNING: false + DOCKER_SOCK: /var/run/docker.sock + volumes: + - /var/run/docker.sock:/var/run/docker.sock + - /etc/localtime:/etc/localtime:ro diff --git a/stacks/beszel/compose.yml 
b/stacks/beszel/compose.yml new file mode 100644 index 0000000..ac0e180 --- /dev/null +++ b/stacks/beszel/compose.yml @@ -0,0 +1,41 @@ +services: + beszel: + image: henrygd/beszel:0.8.0 + container_name: beszel + restart: unless-stopped + volumes: + - ./beszel_data:/beszel_data + - ./beszel_socket:/beszel_socket + networks: + - traefik_network + labels: + - traefik.enable=true + # Accès local via Traefik + - traefik.http.routers.${COMPOSE_PROJECT_NAME}-local.rule=Host(`${COMPOSE_PROJECT_NAME}.local.tellserv.fr`) + - traefik.http.routers.${COMPOSE_PROJECT_NAME}-local.entryPoints=local + - "traefik.http.routers.${COMPOSE_PROJECT_NAME}-local.tls.certresolver=cloudflare-local" + - traefik.http.routers.${COMPOSE_PROJECT_NAME}-local.tls=true + # Accès public via Traefik + - traefik.http.routers.${COMPOSE_PROJECT_NAME}-prod.rule=Host(`${COMPOSE_PROJECT_NAME}.tellserv.fr`) + - traefik.http.routers.${COMPOSE_PROJECT_NAME}-prod.entryPoints=websecure + - traefik.http.routers.${COMPOSE_PROJECT_NAME}-prod.tls=true + - traefik.http.routers.${COMPOSE_PROJECT_NAME}-prod.tls.certResolver=cloudflare + # Redirection vers le port du dashboard Beszel + - traefik.http.services.${COMPOSE_PROJECT_NAME}.loadbalancer.server.port=8090 + # --- FLAME Dashboard Integration --- + # --- Watchtower Auto-Update --- + - com.centurylinklabs.watchtower.enable=true + beszel-agent: + image: henrygd/beszel-agent:0.8.0 + container_name: beszel-agent + restart: unless-stopped + network_mode: host + volumes: + - ./beszel_socket:/beszel_socket + - /var/run/docker.sock:/var/run/docker.sock:ro + environment: + LISTEN: /beszel_socket/beszel.sock + KEY: "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIEr/dv9DsMLknTDqlcMZDpvUDu1FdCq7z4RG+wyWDLjh" +networks: + traefik_network: + external: true diff --git a/stacks/bin/compose.yml b/stacks/bin/compose.yml new file mode 100644 index 0000000..5ae430a --- /dev/null +++ b/stacks/bin/compose.yml @@ -0,0 +1,53 @@ +services: + microbin: + image: danielszabo99/microbin:2.4.1 + 
container_name: ${COMPOSE_PROJECT_NAME} + restart: unless-stopped + environment: + MICROBIN_EDITABLE: ${MICROBIN_EDITABLE} + MICROBIN_HIDE_FOOTER: ${MICROBIN_HIDE_FOOTER} + MICROBIN_HIDE_HEADER: ${MICROBIN_HIDE_HEADER} + MICROBIN_HIDE_LOGO: ${MICROBIN_HIDE_LOGO} + MICROBIN_NO_LISTING: ${MICROBIN_NO_LISTING} + MICROBIN_HIGHLIGHTSYNTAX: ${MICROBIN_HIGHLIGHTSYNTAX} + MICROBIN_BIND: 0.0.0.0 + MICROBIN_PRIVATE: ${MICROBIN_PRIVATE} + MICROBIN_DATA_DIR: microbin_data + MICROBIN_JSON_DB: ${MICROBIN_JSON_DB} + MICROBIN_TITLE: ${MICROBIN_TITLE} + MICROBIN_THREADS: ${MICROBIN_THREADS} + MICROBIN_GC_DAYS: ${MICROBIN_GC_DAYS} + MICROBIN_ENABLE_BURN_AFTER: ${MICROBIN_ENABLE_BURN_AFTER} + MICROBIN_DEFAULT_BURN_AFTER: ${MICROBIN_DEFAULT_BURN_AFTER} + MICROBIN_WIDE: ${MICROBIN_WIDE} + MICROBIN_QR: ${MICROBIN_QR} + MICROBIN_ENCRYPTION_CLIENT_SIDE: ${MICROBIN_ENCRYPTION_CLIENT_SIDE} + MICROBIN_ENCRYPTION_SERVER_SIDE: ${MICROBIN_ENCRYPTION_SERVER_SIDE} + MICROBIN_MAX_FILE_SIZE_ENCRYPTED_MB: ${MICROBIN_MAX_FILE_SIZE_ENCRYPTED_MB} + MICROBIN_MAX_FILE_SIZE_UNENCRYPTED_MB: ${MICROBIN_MAX_FILE_SIZE_UNENCRYPTED_MB} + MICROBIN_DISABLE_UPDATE_CHECKING: true + MICROBIN_DISABLE_TELEMETRY: true + volumes: + - /mnt/storage/microbin-data:/app/microbin_data + networks: + - traefik_network + labels: + - traefik.enable=true + # --- Local + - traefik.http.routers.${COMPOSE_PROJECT_NAME}-local.rule=Host(`${COMPOSE_PROJECT_NAME}.local.tellserv.fr`) + - traefik.http.routers.${COMPOSE_PROJECT_NAME}-local.entryPoints=local + - "traefik.http.routers.${COMPOSE_PROJECT_NAME}-local.tls.certresolver=cloudflare-local" + - traefik.http.routers.${COMPOSE_PROJECT_NAME}-local.tls=true + # --- Prod avec TinyAuth + - traefik.http.routers.${COMPOSE_PROJECT_NAME}-prod.rule=Host(`${COMPOSE_PROJECT_NAME}.tellserv.fr`) + - traefik.http.routers.${COMPOSE_PROJECT_NAME}-prod.entryPoints=websecure + - traefik.http.routers.${COMPOSE_PROJECT_NAME}-prod.tls=true + - 
traefik.http.routers.${COMPOSE_PROJECT_NAME}-prod.tls.certResolver=cloudflare + - traefik.http.routers.${COMPOSE_PROJECT_NAME}-prod.middlewares=tinyauth + - traefik.http.services.${COMPOSE_PROJECT_NAME}.loadbalancer.server.port=8080 + # --- Flame Dashboard Integration + # --- Watchtower Auto-Update + - com.centurylinklabs.watchtower.enable=true +networks: + traefik_network: + external: true diff --git a/stacks/blog/compose.yml b/stacks/blog/compose.yml new file mode 100644 index 0000000..1000231 --- /dev/null +++ b/stacks/blog/compose.yml @@ -0,0 +1,39 @@ +services: + ghost: + image: ghost:5 + container_name: ${COMPOSE_PROJECT_NAME}_ghost + restart: unless-stopped + environment: + url: https://${COMPOSE_PROJECT_NAME}.tellserv.fr + database__client: sqlite3 + database__connection__filename: /var/lib/ghost/content/data/ghost.db + volumes: + - ./ghost/content:/var/lib/ghost/content + networks: + - traefik_network + labels: + - "traefik.enable=true" + + # --- Local + - "traefik.http.routers.${COMPOSE_PROJECT_NAME}-local.rule=Host(`${COMPOSE_PROJECT_NAME}.local.tellserv.fr`)" + - "traefik.http.routers.${COMPOSE_PROJECT_NAME}-local.entryPoints=local" + - "traefik.http.routers.${COMPOSE_PROJECT_NAME}-local.tls.certresolver=cloudflare-local" + - "traefik.http.routers.${COMPOSE_PROJECT_NAME}-local.tls=true" + + # --- Production + - "traefik.http.routers.${COMPOSE_PROJECT_NAME}-prod.rule=Host(`${COMPOSE_PROJECT_NAME}.tellserv.fr`)" + - "traefik.http.routers.${COMPOSE_PROJECT_NAME}-prod.entryPoints=websecure" + - "traefik.http.routers.${COMPOSE_PROJECT_NAME}-prod.tls=true" + - "traefik.http.routers.${COMPOSE_PROJECT_NAME}-prod.tls.certResolver=cloudflare" + #- "traefik.http.routers.${COMPOSE_PROJECT_NAME}-prod.middlewares=tinyauth" + + - "traefik.http.services.${COMPOSE_PROJECT_NAME}.loadbalancer.server.port=2368" + + # --- Flame Dashboard Integration + + # --- Watchtower Auto-Update + - "com.centurylinklabs.watchtower.enable=true" + +networks: + traefik_network: + external: 
true diff --git a/stacks/clipcascade/compose.yml b/stacks/clipcascade/compose.yml new file mode 100644 index 0000000..3951bd7 --- /dev/null +++ b/stacks/clipcascade/compose.yml @@ -0,0 +1,26 @@ +services: + clipcascade: + image: sathvikrao/clipcascade:0.7.0 + restart: unless-stopped + volumes: + - ./cc_users:/database + environment: + - CC_MAX_MESSAGE_SIZE_IN_MiB=100 + - CC_P2P_ENABLED=false + networks: + - traefik_network + labels: + - traefik.enable=true + - traefik.http.routers.${COMPOSE_PROJECT_NAME}-local.rule=Host(`${COMPOSE_PROJECT_NAME}.local.tellserv.fr`) + - traefik.http.routers.${COMPOSE_PROJECT_NAME}-local.entryPoints=local + - "traefik.http.routers.${COMPOSE_PROJECT_NAME}-local.tls.certresolver=cloudflare-local" + - traefik.http.routers.${COMPOSE_PROJECT_NAME}-local.tls=true + - traefik.http.routers.${COMPOSE_PROJECT_NAME}-prod.rule=Host(`${COMPOSE_PROJECT_NAME}.tellserv.fr`) + - traefik.http.routers.${COMPOSE_PROJECT_NAME}-prod.entryPoints=websecure + - traefik.http.routers.${COMPOSE_PROJECT_NAME}-prod.tls=true + - traefik.http.routers.${COMPOSE_PROJECT_NAME}-prod.tls.certResolver=cloudflare + - traefik.http.services.${COMPOSE_PROJECT_NAME}.loadbalancer.server.port=8080 + - com.centurylinklabs.watchtower.enable=true +networks: + traefik_network: + external: true diff --git a/stacks/crowdsec/command_api.txt b/stacks/crowdsec/command_api.txt new file mode 100644 index 0000000..2bc54a3 --- /dev/null +++ b/stacks/crowdsec/command_api.txt @@ -0,0 +1,8 @@ +cd /opt/stacks/crowdsec +docker compose up -d +alias cscli='docker exec -it crowdsec cscli' +Pour persistance : +echo "alias cscli='docker exec -it crowdsec cscli'" >> ~/.bashrc +source ~/.bashrc + +cscli bouncers add traefik-bouncer \ No newline at end of file diff --git a/stacks/crowdsec/compose.yml b/stacks/crowdsec/compose.yml new file mode 100644 index 0000000..390110a --- /dev/null +++ b/stacks/crowdsec/compose.yml @@ -0,0 +1,37 @@ +services: + crowdsec: + image: crowdsecurity/crowdsec:v1.6.5 + 
container_name: crowdsec + restart: unless-stopped + environment: + - COLLECTIONS=crowdsecurity/traefik crowdsecurity/http-cve + - CUSTOM_HOSTNAME=crowdsec + - TZ=Europe/Paris + volumes: + - ./data:/var/lib/crowdsec/data/ + - /var/log/traefik:/var/log/traefik:ro + - ./config:/etc/crowdsec + - ./config/profiles.yaml:/etc/crowdsec/profiles.yaml:ro + - ./config/acquis.yaml:/etc/crowdsec/acquis.yaml:ro + - ./config/notifications/http.yaml:/etc/crowdsec/notifications/http.yaml:ro + networks: + - traefik_network + + crowdsec-bouncer: + image: fbonalair/traefik-crowdsec-bouncer:0.6.0 + container_name: crowdsec-bouncer + restart: unless-stopped + environment: + CROWDSEC_BOUNCER_API_KEY: ${CROWDSEC_BOUNCER_API_KEY} + CROWDSEC_AGENT_HOST: crowdsec:8080 + GIN_MODE: release + GIN_TRUSTED_PROXIES: 172.18.0.0/16 + TZ: Europe/Paris + expose: + - 8080 + networks: + - traefik_network + +networks: + traefik_network: + external: true diff --git a/stacks/crowdsec/config/acquis.yaml b/stacks/crowdsec/config/acquis.yaml new file mode 100644 index 0000000..60872da --- /dev/null +++ b/stacks/crowdsec/config/acquis.yaml @@ -0,0 +1,4 @@ +filenames: + - /var/log/traefik/access.log +labels: + type: traefik \ No newline at end of file diff --git a/stacks/crowdsec/config/config.yaml b/stacks/crowdsec/config/config.yaml new file mode 100644 index 0000000..21d255c --- /dev/null +++ b/stacks/crowdsec/config/config.yaml @@ -0,0 +1,49 @@ +common: + daemonize: false + log_media: stdout + log_level: info + log_dir: /var/log/ +config_paths: + config_dir: /etc/crowdsec/ + data_dir: /var/lib/crowdsec/data/ + simulation_path: /etc/crowdsec/simulation.yaml + hub_dir: /etc/crowdsec/hub/ + index_path: /etc/crowdsec/hub/.index.json + notification_dir: /etc/crowdsec/notifications/ + plugin_dir: /usr/local/lib/crowdsec/plugins/ +crowdsec_service: + acquisition_path: /etc/crowdsec/acquis.yaml + acquisition_dir: /etc/crowdsec/acquis.d + parser_routines: 1 +plugin_config: + user: nobody + group: nobody +cscli: + 
output: human +db_config: + log_level: info + type: sqlite + db_path: /var/lib/crowdsec/data/crowdsec.db + flush: + max_items: 5000 + max_age: 7d + use_wal: false +api: + client: + insecure_skip_verify: false + credentials_path: /etc/crowdsec/local_api_credentials.yaml + server: + log_level: info + listen_uri: 0.0.0.0:8080 + profiles_path: /etc/crowdsec/profiles.yaml + trusted_ips: # IP ranges, or IPs which can have admin API access + - 127.0.0.1 + - ::1 + online_client: # Central API credentials (to push signals and receive bad IPs) + credentials_path: /etc/crowdsec//online_api_credentials.yaml + enable: true +prometheus: + enabled: true + level: full + listen_addr: 0.0.0.0 + listen_port: 6060 diff --git a/stacks/crowdsec/config/console.yaml b/stacks/crowdsec/config/console.yaml new file mode 100644 index 0000000..aa0cc30 --- /dev/null +++ b/stacks/crowdsec/config/console.yaml @@ -0,0 +1,4 @@ +share_manual_decisions: false +share_custom: true +share_tainted: true +share_context: false \ No newline at end of file diff --git a/stacks/crowdsec/config/console/context.yaml b/stacks/crowdsec/config/console/context.yaml new file mode 100644 index 0000000..e69de29 diff --git a/stacks/crowdsec/config/dev.yaml b/stacks/crowdsec/config/dev.yaml new file mode 100644 index 0000000..ca1f35f --- /dev/null +++ b/stacks/crowdsec/config/dev.yaml @@ -0,0 +1,47 @@ +common: + daemonize: true + log_media: stdout + log_level: info +config_paths: + config_dir: ./config + data_dir: ./data/ + notification_dir: ./config/notifications/ + plugin_dir: ./plugins/ + #simulation_path: /etc/crowdsec/config/simulation.yaml + #hub_dir: /etc/crowdsec/hub/ + #index_path: ./config/hub/.index.json +crowdsec_service: + acquisition_path: ./config/acquis.yaml + parser_routines: 1 +plugin_config: + user: $USER # plugin process would be ran on behalf of this user + group: $USER # plugin process would be ran on behalf of this group +cscli: + output: human +db_config: + type: sqlite + db_path: 
./data/crowdsec.db + user: root + password: crowdsec + db_name: crowdsec + host: "172.17.0.2" + port: 3306 + flush: + #max_items: 10000 + #max_age: 168h +api: + client: + credentials_path: ./config/local_api_credentials.yaml + server: + console_path: ./config/console.yaml + #insecure_skip_verify: true + listen_uri: 127.0.0.1:8081 + profiles_path: ./config/profiles.yaml + tls: + #cert_file: ./cert.pem + #key_file: ./key.pem + online_client: # Central API + credentials_path: ./config/online_api_credentials.yaml +prometheus: + enabled: true + level: full diff --git a/stacks/crowdsec/config/notifications/email.yaml b/stacks/crowdsec/config/notifications/email.yaml new file mode 100644 index 0000000..512633c --- /dev/null +++ b/stacks/crowdsec/config/notifications/email.yaml @@ -0,0 +1,55 @@ +type: email # Don't change +name: email_default # Must match the registered plugin in the profile + +# One of "trace", "debug", "info", "warn", "error", "off" +log_level: info + +# group_wait: # Time to wait collecting alerts before relaying a message to this plugin, eg "30s" +# group_threshold: # Amount of alerts that triggers a message before has expired, eg "10" +# max_retry: # Number of attempts to relay messages to plugins in case of error +timeout: 20s # Time to wait for response from the plugin before considering the attempt a failure, eg "10s" + +#------------------------- +# plugin-specific options + +# The following template receives a list of models.Alert objects +# The output goes in the email message body +format: | + + {{range . -}} + {{$alert := . -}} + {{range .Decisions -}} +

{{.Value}} will get {{.Type}} for next {{.Duration}} for triggering {{.Scenario}} on machine {{$alert.MachineID}}.

CrowdSec CTI

+ {{end -}} + {{end -}} + + +smtp_host: # example: smtp.gmail.com +smtp_username: # Replace with your actual username +smtp_password: # Replace with your actual password +smtp_port: # Common values are any of [25, 465, 587, 2525] +auth_type: # Valid choices are "none", "crammd5", "login", "plain" +sender_name: "CrowdSec" +sender_email: # example: foo@gmail.com +email_subject: "CrowdSec Notification" +receiver_emails: +# - email1@gmail.com +# - email2@gmail.com + +# One of "ssltls", "starttls", "none" +encryption_type: "ssltls" + +# If you need to set the HELO hostname: +# helo_host: "localhost" + +# If the email server is hitting the default timeouts (10 seconds), you can increase them here +# +# connect_timeout: 10s +# send_timeout: 10s + +--- + +# type: email +# name: email_second_notification +# ... + diff --git a/stacks/crowdsec/config/notifications/http.yaml b/stacks/crowdsec/config/notifications/http.yaml new file mode 100644 index 0000000..221409c --- /dev/null +++ b/stacks/crowdsec/config/notifications/http.yaml @@ -0,0 +1,43 @@ +type: http # Don't change +name: http_default # Must match the registered plugin in the profile + +# One of "trace", "debug", "info", "warn", "error", "off" +log_level: info + +# group_wait: # Time to wait collecting alerts before relaying a message to this plugin, eg "30s" +# group_threshold: # Amount of alerts that triggers a message before has expired, eg "10" +# max_retry: # Number of attempts to relay messages to plugins in case of error +# timeout: # Time to wait for response from the plugin before considering the attempt a failure, eg "10s" + +#------------------------- +# plugin-specific options + +# The following template receives a list of models.Alert objects +# The output goes in the http request body +format: | + {{ range . -}} + {{ $alert := . 
-}} + { + "extras": { + "client::display": { + "contentType": "text/markdown" + } + }, + "priority": 3, + {{range .Decisions -}} + "title": "{{.Type }} {{ .Value }} for {{.Duration}}", + "message": "{{.Scenario}} \n\n[crowdsec cti](https://app.crowdsec.net/cti/{{.Value -}}) \n\n[shodan](https://shodan.io/host/{{.Value -}})" + {{end -}} + } + {{ end -}} + +# The plugin will make requests to this url, eg: https://www.example.com/ +url: https://gotify.local.tellserv.fr/message + +# Any of the http verbs: "POST", "GET", "PUT"... +method: POST + +headers: + X-Gotify-Key: AeZtF1pTuEaMbF0 + Content-Type: application/json +# skip_tls_verification: # true or false. Default is false diff --git a/stacks/crowdsec/config/notifications/sentinel.yaml b/stacks/crowdsec/config/notifications/sentinel.yaml new file mode 100644 index 0000000..8451c3f --- /dev/null +++ b/stacks/crowdsec/config/notifications/sentinel.yaml @@ -0,0 +1,21 @@ +type: sentinel # Don't change +name: sentinel_default # Must match the registered plugin in the profile + +# One of "trace", "debug", "info", "warn", "error", "off" +log_level: info +# group_wait: # Time to wait collecting alerts before relaying a message to this plugin, eg "30s" +# group_threshold: # Amount of alerts that triggers a message before has expired, eg "10" +# max_retry: # Number of attempts to relay messages to plugins in case of error +# timeout: # Time to wait for response from the plugin before considering the attempt a failure, eg "10s" + +#------------------------- +# plugin-specific options + +# The following template receives a list of models.Alert objects +# The output goes in the http request body +format: | + {{.|toJson}} + +customer_id: XXX-XXX +shared_key: XXXXXXX +log_type: crowdsec \ No newline at end of file diff --git a/stacks/crowdsec/config/notifications/slack.yaml b/stacks/crowdsec/config/notifications/slack.yaml new file mode 100644 index 0000000..677d4b7 --- /dev/null +++ 
b/stacks/crowdsec/config/notifications/slack.yaml @@ -0,0 +1,42 @@ +type: slack # Don't change +name: slack_default # Must match the registered plugin in the profile + +# One of "trace", "debug", "info", "warn", "error", "off" +log_level: info + +# group_wait: # Time to wait collecting alerts before relaying a message to this plugin, eg "30s" +# group_threshold: # Amount of alerts that triggers a message before has expired, eg "10" +# max_retry: # Number of attempts to relay messages to plugins in case of error +# timeout: # Time to wait for response from the plugin before considering the attempt a failure, eg "10s" + +#------------------------- +# plugin-specific options + +# The following template receives a list of models.Alert objects +# The output goes in the slack message +format: | + {{range . -}} + {{$alert := . -}} + {{range .Decisions -}} + {{if $alert.Source.Cn -}} + :flag-{{$alert.Source.Cn}}: will get {{.Type}} for next {{.Duration}} for triggering {{.Scenario}} on machine '{{$alert.MachineID}}'. {{end}} + {{if not $alert.Source.Cn -}} + :pirate_flag: will get {{.Type}} for next {{.Duration}} for triggering {{.Scenario}} on machine '{{$alert.MachineID}}'. {{end}} + {{end -}} + {{end -}} + + +webhook: + +# API request data as defined by the Slack webhook API. +#channel: +#username: +#icon_emoji: +#icon_url: + +--- + +# type: slack +# name: slack_second_notification +# ... 
+ diff --git a/stacks/crowdsec/config/notifications/splunk.yaml b/stacks/crowdsec/config/notifications/splunk.yaml new file mode 100644 index 0000000..43ed00b --- /dev/null +++ b/stacks/crowdsec/config/notifications/splunk.yaml @@ -0,0 +1,28 @@ +type: splunk # Don't change +name: splunk_default # Must match the registered plugin in the profile + +# One of "trace", "debug", "info", "warn", "error", "off" +log_level: info + +# group_wait: # Time to wait collecting alerts before relaying a message to this plugin, eg "30s" +# group_threshold: # Amount of alerts that triggers a message before has expired, eg "10" +# max_retry: # Number of attempts to relay messages to plugins in case of error +# timeout: # Time to wait for response from the plugin before considering the attempt a failure, eg "10s" + +#------------------------- +# plugin-specific options + +# The following template receives a list of models.Alert objects +# The output goes in the splunk notification +format: | + {{.|toJson}} + +url: +token: + +--- + +# type: splunk +# name: splunk_second_notification +# ... 
+ diff --git a/stacks/crowdsec/config/parsers/s02-enrich/whitelist.yaml b/stacks/crowdsec/config/parsers/s02-enrich/whitelist.yaml new file mode 100644 index 0000000..695d701 --- /dev/null +++ b/stacks/crowdsec/config/parsers/s02-enrich/whitelist.yaml @@ -0,0 +1,6 @@ +name: whitelist +description: "Whitelist my server public IP" +whitelist: + reason: "Public IP of my server" + ip: + - 93.19.127.133 diff --git a/stacks/crowdsec/config/patterns/aws b/stacks/crowdsec/config/patterns/aws new file mode 100644 index 0000000..5816ce1 --- /dev/null +++ b/stacks/crowdsec/config/patterns/aws @@ -0,0 +1,11 @@ +S3_REQUEST_LINE (?:%{WORD:verb} %{NOTSPACE:request}(?: HTTP/%{NUMBER:httpversion})?|%{DATA:rawrequest}) + +S3_ACCESS_LOG %{WORD:owner} %{NOTSPACE:bucket} \[%{HTTPDATE:timestamp}\] %{IP:clientip} %{NOTSPACE:requester} %{NOTSPACE:request_id} %{NOTSPACE:operation} %{NOTSPACE:key} (?:"%{S3_REQUEST_LINE}"|-) (?:%{INT:response:int}|-) (?:-|%{NOTSPACE:error_code}) (?:%{INT:bytes:int}|-) (?:%{INT:object_size:int}|-) (?:%{INT:request_time_ms:int}|-) (?:%{INT:turnaround_time_ms:int}|-) (?:%{QS:referrer}|-) (?:"?%{QS:agent}"?|-) (?:-|%{NOTSPACE:version_id}) + +ELB_URIPATHPARAM %{URIPATH:path}(?:%{URIPARAM:params})? + +ELB_URI %{URIPROTO:proto}://(?:%{USER}(?::[^@]*)?@)?(?:%{URIHOST:urihost})?(?:%{ELB_URIPATHPARAM})? 
+ +ELB_REQUEST_LINE (?:%{WORD:verb} %{ELB_URI:request}(?: HTTP/%{NUMBER:httpversion})?|%{DATA:rawrequest}) + +ELB_ACCESS_LOG %{TIMESTAMP_ISO8601:timestamp} %{NOTSPACE:elb} %{IP:clientip}:%{INT:clientport:int} (?:(%{IP:backendip}:?:%{INT:backendport:int})|-) %{NUMBER:request_processing_time:float} %{NUMBER:backend_processing_time:float} %{NUMBER:response_processing_time:float} %{INT:response:int} %{INT:backend_response:int} %{INT:received_bytes:int} %{INT:bytes:int} "%{ELB_REQUEST_LINE}" \ No newline at end of file diff --git a/stacks/crowdsec/config/patterns/bacula b/stacks/crowdsec/config/patterns/bacula new file mode 100644 index 0000000..96ff0e0 --- /dev/null +++ b/stacks/crowdsec/config/patterns/bacula @@ -0,0 +1,50 @@ +BACULA_TIMESTAMP %{MONTHDAY}-%{MONTH} %{HOUR}:%{MINUTE} +BACULA_HOST [a-zA-Z0-9-]+ +BACULA_VOLUME %{USER} +BACULA_DEVICE %{USER} +BACULA_DEVICEPATH %{UNIXPATH} +BACULA_CAPACITY %{INT}{1,3}(,%{INT}{3})* +BACULA_VERSION %{USER} +BACULA_JOB %{USER} + +BACULA_LOG_MAX_CAPACITY User defined maximum volume capacity %{BACULA_CAPACITY} exceeded on device \"%{BACULA_DEVICE:device}\" \(%{BACULA_DEVICEPATH}\) +BACULA_LOG_END_VOLUME End of medium on Volume \"%{BACULA_VOLUME:volume}\" Bytes=%{BACULA_CAPACITY} Blocks=%{BACULA_CAPACITY} at %{MONTHDAY}-%{MONTH}-%{YEAR} %{HOUR}:%{MINUTE}. +BACULA_LOG_NEW_VOLUME Created new Volume \"%{BACULA_VOLUME:volume}\" in catalog. +BACULA_LOG_NEW_LABEL Labeled new Volume \"%{BACULA_VOLUME:volume}\" on device \"%{BACULA_DEVICE:device}\" \(%{BACULA_DEVICEPATH}\). +BACULA_LOG_WROTE_LABEL Wrote label to prelabeled Volume \"%{BACULA_VOLUME:volume}\" on device \"%{BACULA_DEVICE}\" \(%{BACULA_DEVICEPATH}\) +BACULA_LOG_NEW_MOUNT New volume \"%{BACULA_VOLUME:volume}\" mounted on device \"%{BACULA_DEVICE:device}\" \(%{BACULA_DEVICEPATH}\) at %{MONTHDAY}-%{MONTH}-%{YEAR} %{HOUR}:%{MINUTE}. 
+BACULA_LOG_NOOPEN \s+Cannot open %{DATA}: ERR=%{GREEDYDATA:berror} +BACULA_LOG_NOOPENDIR \s+Could not open directory %{DATA}: ERR=%{GREEDYDATA:berror} +BACULA_LOG_NOSTAT \s+Could not stat %{DATA}: ERR=%{GREEDYDATA:berror} +BACULA_LOG_NOJOBS There are no more Jobs associated with Volume \"%{BACULA_VOLUME:volume}\". Marking it purged. +BACULA_LOG_ALL_RECORDS_PRUNED All records pruned from Volume \"%{BACULA_VOLUME:volume}\"; marking it \"Purged\" +BACULA_LOG_BEGIN_PRUNE_JOBS Begin pruning Jobs older than %{INT} month %{INT} days . +BACULA_LOG_BEGIN_PRUNE_FILES Begin pruning Files. +BACULA_LOG_PRUNED_JOBS Pruned %{INT} Jobs* for client %{BACULA_HOST:client} from catalog. +BACULA_LOG_PRUNED_FILES Pruned Files from %{INT} Jobs* for client %{BACULA_HOST:client} from catalog. +BACULA_LOG_ENDPRUNE End auto prune. +BACULA_LOG_STARTJOB Start Backup JobId %{INT}, Job=%{BACULA_JOB:job} +BACULA_LOG_STARTRESTORE Start Restore Job %{BACULA_JOB:job} +BACULA_LOG_USEDEVICE Using Device \"%{BACULA_DEVICE:device}\" +BACULA_LOG_DIFF_FS \s+%{UNIXPATH} is a different filesystem. Will not descend from %{UNIXPATH} into it. +BACULA_LOG_JOBEND Job write elapsed time = %{DATA:elapsed}, Transfer rate = %{NUMBER} (K|M|G)? Bytes/second +BACULA_LOG_NOPRUNE_JOBS No Jobs found to prune. +BACULA_LOG_NOPRUNE_FILES No Files found to prune. +BACULA_LOG_VOLUME_PREVWRITTEN Volume \"%{BACULA_VOLUME:volume}\" previously written, moving to end of data. +BACULA_LOG_READYAPPEND Ready to append to end of Volume \"%{BACULA_VOLUME:volume}\" size=%{INT} +BACULA_LOG_CANCELLING Cancelling duplicate JobId=%{INT}. +BACULA_LOG_MARKCANCEL JobId %{INT}, Job %{BACULA_JOB:job} marked to be canceled. +BACULA_LOG_CLIENT_RBJ shell command: run ClientRunBeforeJob \"%{GREEDYDATA:runjob}\" +BACULA_LOG_VSS (Generate )?VSS (Writer)? +BACULA_LOG_MAXSTART Fatal error: Job canceled because max start delay time exceeded. +BACULA_LOG_DUPLICATE Fatal error: JobId %{INT:duplicate} already running. Duplicate job not allowed. 
+BACULA_LOG_NOJOBSTAT Fatal error: No Job status returned from FD. +BACULA_LOG_FATAL_CONN Fatal error: bsock.c:133 Unable to connect to (Client: %{BACULA_HOST:client}|Storage daemon) on %{HOSTNAME}:%{POSINT}. ERR=%{GREEDYDATA:berror} +BACULA_LOG_NO_CONNECT Warning: bsock.c:127 Could not connect to (Client: %{BACULA_HOST:client}|Storage daemon) on %{HOSTNAME}:%{POSINT}. ERR=%{GREEDYDATA:berror} +BACULA_LOG_NO_AUTH Fatal error: Unable to authenticate with File daemon at %{HOSTNAME}. Possible causes: +BACULA_LOG_NOSUIT No prior or suitable Full backup found in catalog. Doing FULL backup. +BACULA_LOG_NOPRIOR No prior Full backup Job record found. + +BACULA_LOG_JOB (Error: )?Bacula %{BACULA_HOST} %{BACULA_VERSION} \(%{BACULA_VERSION}\): + +BACULA_LOGLINE %{BACULA_TIMESTAMP:bts} %{BACULA_HOST:hostname} JobId %{INT:jobid}: (%{BACULA_LOG_MAX_CAPACITY}|%{BACULA_LOG_END_VOLUME}|%{BACULA_LOG_NEW_VOLUME}|%{BACULA_LOG_NEW_LABEL}|%{BACULA_LOG_WROTE_LABEL}|%{BACULA_LOG_NEW_MOUNT}|%{BACULA_LOG_NOOPEN}|%{BACULA_LOG_NOOPENDIR}|%{BACULA_LOG_NOSTAT}|%{BACULA_LOG_NOJOBS}|%{BACULA_LOG_ALL_RECORDS_PRUNED}|%{BACULA_LOG_BEGIN_PRUNE_JOBS}|%{BACULA_LOG_BEGIN_PRUNE_FILES}|%{BACULA_LOG_PRUNED_JOBS}|%{BACULA_LOG_PRUNED_FILES}|%{BACULA_LOG_ENDPRUNE}|%{BACULA_LOG_STARTJOB}|%{BACULA_LOG_STARTRESTORE}|%{BACULA_LOG_USEDEVICE}|%{BACULA_LOG_DIFF_FS}|%{BACULA_LOG_JOBEND}|%{BACULA_LOG_NOPRUNE_JOBS}|%{BACULA_LOG_NOPRUNE_FILES}|%{BACULA_LOG_VOLUME_PREVWRITTEN}|%{BACULA_LOG_READYAPPEND}|%{BACULA_LOG_CANCELLING}|%{BACULA_LOG_MARKCANCEL}|%{BACULA_LOG_CLIENT_RBJ}|%{BACULA_LOG_VSS}|%{BACULA_LOG_MAXSTART}|%{BACULA_LOG_DUPLICATE}|%{BACULA_LOG_NOJOBSTAT}|%{BACULA_LOG_FATAL_CONN}|%{BACULA_LOG_NO_CONNECT}|%{BACULA_LOG_NO_AUTH}|%{BACULA_LOG_NOSUIT}|%{BACULA_LOG_JOB}|%{BACULA_LOG_NOPRIOR}) \ No newline at end of file diff --git a/stacks/crowdsec/config/patterns/bro b/stacks/crowdsec/config/patterns/bro new file mode 100644 index 0000000..e8d3749 --- /dev/null +++ b/stacks/crowdsec/config/patterns/bro @@ -0,0 +1,13 @@ 
+# https://www.bro.org/sphinx/script-reference/log-files.html + +# http.log +BRO_HTTP %{NUMBER:ts}\t%{NOTSPACE:uid}\t%{IP:orig_h}\t%{INT:orig_p}\t%{IP:resp_h}\t%{INT:resp_p}\t%{INT:trans_depth}\t%{GREEDYDATA:method}\t%{GREEDYDATA:domain}\t%{GREEDYDATA:uri}\t%{GREEDYDATA:referrer}\t%{GREEDYDATA:user_agent}\t%{NUMBER:request_body_len}\t%{NUMBER:response_body_len}\t%{GREEDYDATA:status_code}\t%{GREEDYDATA:status_msg}\t%{GREEDYDATA:info_code}\t%{GREEDYDATA:info_msg}\t%{GREEDYDATA:filename}\t%{GREEDYDATA:bro_tags}\t%{GREEDYDATA:username}\t%{GREEDYDATA:password}\t%{GREEDYDATA:proxied}\t%{GREEDYDATA:orig_fuids}\t%{GREEDYDATA:orig_mime_types}\t%{GREEDYDATA:resp_fuids}\t%{GREEDYDATA:resp_mime_types} + +# dns.log +BRO_DNS %{NUMBER:ts}\t%{NOTSPACE:uid}\t%{IP:orig_h}\t%{INT:orig_p}\t%{IP:resp_h}\t%{INT:resp_p}\t%{WORD:proto}\t%{INT:trans_id}\t%{GREEDYDATA:query}\t%{GREEDYDATA:qclass}\t%{GREEDYDATA:qclass_name}\t%{GREEDYDATA:qtype}\t%{GREEDYDATA:qtype_name}\t%{GREEDYDATA:rcode}\t%{GREEDYDATA:rcode_name}\t%{GREEDYDATA:AA}\t%{GREEDYDATA:TC}\t%{GREEDYDATA:RD}\t%{GREEDYDATA:RA}\t%{GREEDYDATA:Z}\t%{GREEDYDATA:answers}\t%{GREEDYDATA:TTLs}\t%{GREEDYDATA:rejected} + +# conn.log +BRO_CONN %{NUMBER:ts}\t%{NOTSPACE:uid}\t%{IP:orig_h}\t%{INT:orig_p}\t%{IP:resp_h}\t%{INT:resp_p}\t%{WORD:proto}\t%{GREEDYDATA:service}\t%{NUMBER:duration}\t%{NUMBER:orig_bytes}\t%{NUMBER:resp_bytes}\t%{GREEDYDATA:conn_state}\t%{GREEDYDATA:local_orig}\t%{GREEDYDATA:missed_bytes}\t%{GREEDYDATA:history}\t%{GREEDYDATA:orig_pkts}\t%{GREEDYDATA:orig_ip_bytes}\t%{GREEDYDATA:resp_pkts}\t%{GREEDYDATA:resp_ip_bytes}\t%{GREEDYDATA:tunnel_parents} + +# files.log +BRO_FILES 
%{NUMBER:ts}\t%{NOTSPACE:fuid}\t%{IP:tx_hosts}\t%{IP:rx_hosts}\t%{NOTSPACE:conn_uids}\t%{GREEDYDATA:source}\t%{GREEDYDATA:depth}\t%{GREEDYDATA:analyzers}\t%{GREEDYDATA:mime_type}\t%{GREEDYDATA:filename}\t%{GREEDYDATA:duration}\t%{GREEDYDATA:local_orig}\t%{GREEDYDATA:is_orig}\t%{GREEDYDATA:seen_bytes}\t%{GREEDYDATA:total_bytes}\t%{GREEDYDATA:missing_bytes}\t%{GREEDYDATA:overflow_bytes}\t%{GREEDYDATA:timedout}\t%{GREEDYDATA:parent_fuid}\t%{GREEDYDATA:md5}\t%{GREEDYDATA:sha1}\t%{GREEDYDATA:sha256}\t%{GREEDYDATA:extracted} \ No newline at end of file diff --git a/stacks/crowdsec/config/patterns/cowrie_honeypot b/stacks/crowdsec/config/patterns/cowrie_honeypot new file mode 100644 index 0000000..eda0c9e --- /dev/null +++ b/stacks/crowdsec/config/patterns/cowrie_honeypot @@ -0,0 +1 @@ +COWRIE_NEW_CO New connection: %{IPV4:source_ip}:[0-9]+ \(%{IPV4:dest_ip}:%{INT:dest_port}\) \[session: %{DATA:telnet_session}\]$ \ No newline at end of file diff --git a/stacks/crowdsec/config/patterns/exim b/stacks/crowdsec/config/patterns/exim new file mode 100644 index 0000000..f135561 --- /dev/null +++ b/stacks/crowdsec/config/patterns/exim @@ -0,0 +1,12 @@ +EXIM_MSGID [0-9A-Za-z]{6}-[0-9A-Za-z]{6}-[0-9A-Za-z]{2} +EXIM_FLAGS (<=|[-=>*]>|[*]{2}|==) +EXIM_DATE %{YEAR:exim_year}-%{MONTHNUM:exim_month}-%{MONTHDAY:exim_day} %{TIME:exim_time} +EXIM_PID \[%{POSINT}\] +EXIM_QT ((\d+y)?(\d+w)?(\d+d)?(\d+h)?(\d+m)?(\d+s)?) 
+EXIM_EXCLUDE_TERMS (Message is frozen|(Start|End) queue run| Warning: | retry time not reached | no (IP address|host name) found for (IP address|host) | unexpected disconnection while reading SMTP command | no immediate delivery: |another process is handling this message) +EXIM_REMOTE_HOST (H=(%{NOTSPACE:remote_hostname} )?(\(%{NOTSPACE:remote_heloname}\) )?\[%{IP:remote_host}\]) +EXIM_INTERFACE (I=\[%{IP:exim_interface}\](:%{NUMBER:exim_interface_port})) +EXIM_PROTOCOL (P=%{NOTSPACE:protocol}) +EXIM_MSG_SIZE (S=%{NUMBER:exim_msg_size}) +EXIM_HEADER_ID (id=%{NOTSPACE:exim_header_id}) +EXIM_SUBJECT (T=%{QS:exim_subject}) \ No newline at end of file diff --git a/stacks/crowdsec/config/patterns/firewalls b/stacks/crowdsec/config/patterns/firewalls new file mode 100644 index 0000000..fafa7ec --- /dev/null +++ b/stacks/crowdsec/config/patterns/firewalls @@ -0,0 +1,86 @@ +# NetScreen firewall logs +NETSCREENSESSIONLOG %{SYSLOGTIMESTAMP:date} %{IPORHOST:device} %{IPORHOST}: NetScreen device_id=%{WORD:device_id}%{DATA}: start_time=%{QUOTEDSTRING:start_time} duration=%{INT:duration} policy_id=%{INT:policy_id} service=%{DATA:service} proto=%{INT:proto} src zone=%{WORD:src_zone} dst zone=%{WORD:dst_zone} action=%{WORD:action} sent=%{INT:sent} rcvd=%{INT:rcvd} src=%{IPORHOST:src_ip} dst=%{IPORHOST:dst_ip} src_port=%{INT:src_port} dst_port=%{INT:dst_port} src-xlated ip=%{IPORHOST:src_xlated_ip} port=%{INT:src_xlated_port} dst-xlated ip=%{IPORHOST:dst_xlated_ip} port=%{INT:dst_xlated_port} session_id=%{INT:session_id} reason=%{GREEDYDATA:reason} + +#== Cisco ASA == +CISCOTAG [A-Z0-9]+-%{INT}-(?:[A-Z0-9_]+) +CISCOTIMESTAMP %{MONTH} +%{MONTHDAY}(?: %{YEAR})? %{TIME} +CISCO_TAGGED_SYSLOG ^<%{POSINT:syslog_pri}>%{CISCOTIMESTAMP:timestamp}( %{SYSLOGHOST:sysloghost})? 
?: %%{CISCOTAG:ciscotag}: +# Common Particles +CISCO_ACTION Built|Teardown|Deny|Denied|denied|requested|permitted|denied by ACL|discarded|est-allowed|Dropping|created|deleted +CISCO_REASON Duplicate TCP SYN|Failed to locate egress interface|Invalid transport field|No matching connection|DNS Response|DNS Query|(?:%{WORD}\s*)* +CISCO_DIRECTION Inbound|inbound|Outbound|outbound +CISCO_INTERVAL first hit|%{INT}-second interval +CISCO_XLATE_TYPE static|dynamic +# ASA-1-104001 +CISCOFW104001 \((?:Primary|Secondary)\) Switching to ACTIVE - %{GREEDYDATA:switch_reason} +# ASA-1-104002 +CISCOFW104002 \((?:Primary|Secondary)\) Switching to STANDBY - %{GREEDYDATA:switch_reason} +# ASA-1-104003 +CISCOFW104003 \((?:Primary|Secondary)\) Switching to FAILED\. +# ASA-1-104004 +CISCOFW104004 \((?:Primary|Secondary)\) Switching to OK\. +# ASA-1-105003 +CISCOFW105003 \((?:Primary|Secondary)\) Monitoring on [Ii]nterface %{GREEDYDATA:interface_name} waiting +# ASA-1-105004 +CISCOFW105004 \((?:Primary|Secondary)\) Monitoring on [Ii]nterface %{GREEDYDATA:interface_name} normal +# ASA-1-105005 +CISCOFW105005 \((?:Primary|Secondary)\) Lost Failover communications with mate on [Ii]nterface %{GREEDYDATA:interface_name} +# ASA-1-105008 +CISCOFW105008 \((?:Primary|Secondary)\) Testing [Ii]nterface %{GREEDYDATA:interface_name} +# ASA-1-105009 +CISCOFW105009 \((?:Primary|Secondary)\) Testing on [Ii]nterface %{GREEDYDATA:interface_name} (?:Passed|Failed) +# ASA-2-106001 +CISCOFW106001 %{CISCO_DIRECTION:direction} %{WORD:protocol} connection %{CISCO_ACTION:action} from %{IP:src_ip}/%{INT:src_port} to %{IP:dst_ip}/%{INT:dst_port} flags %{GREEDYDATA:tcp_flags} on interface %{GREEDYDATA:interface} +# ASA-2-106006, ASA-2-106007, ASA-2-106010 +CISCOFW106006_106007_106010 %{CISCO_ACTION:action} %{CISCO_DIRECTION:direction} %{WORD:protocol} (?:from|src) %{IP:src_ip}/%{INT:src_port}(\(%{DATA:src_fwuser}\))? (?:to|dst) %{IP:dst_ip}/%{INT:dst_port}(\(%{DATA:dst_fwuser}\))? 
(?:on interface %{DATA:interface}|due to %{CISCO_REASON:reason}) +# ASA-3-106014 +CISCOFW106014 %{CISCO_ACTION:action} %{CISCO_DIRECTION:direction} %{WORD:protocol} src %{DATA:src_interface}:%{IP:src_ip}(\(%{DATA:src_fwuser}\))? dst %{DATA:dst_interface}:%{IP:dst_ip}(\(%{DATA:dst_fwuser}\))? \(type %{INT:icmp_type}, code %{INT:icmp_code}\) +# ASA-6-106015 +CISCOFW106015 %{CISCO_ACTION:action} %{WORD:protocol} \(%{DATA:policy_id}\) from %{IP:src_ip}/%{INT:src_port} to %{IP:dst_ip}/%{INT:dst_port} flags %{DATA:tcp_flags} on interface %{GREEDYDATA:interface} +# ASA-1-106021 +CISCOFW106021 %{CISCO_ACTION:action} %{WORD:protocol} reverse path check from %{IP:src_ip} to %{IP:dst_ip} on interface %{GREEDYDATA:interface} +# ASA-4-106023 +CISCOFW106023 %{CISCO_ACTION:action}( protocol)? %{WORD:protocol} src %{DATA:src_interface}:%{DATA:src_ip}(/%{INT:src_port})?(\(%{DATA:src_fwuser}\))? dst %{DATA:dst_interface}:%{DATA:dst_ip}(/%{INT:dst_port})?(\(%{DATA:dst_fwuser}\))?( \(type %{INT:icmp_type}, code %{INT:icmp_code}\))? by access-group "?%{DATA:policy_id}"? \[%{DATA:hashcode1}, %{DATA:hashcode2}\] +# ASA-4-106100, ASA-4-106102, ASA-4-106103 +CISCOFW106100_2_3 access-list %{NOTSPACE:policy_id} %{CISCO_ACTION:action} %{WORD:protocol} for user '%{DATA:src_fwuser}' %{DATA:src_interface}/%{IP:src_ip}\(%{INT:src_port}\) -> %{DATA:dst_interface}/%{IP:dst_ip}\(%{INT:dst_port}\) hit-cnt %{INT:hit_count} %{CISCO_INTERVAL:interval} \[%{DATA:hashcode1}, %{DATA:hashcode2}\] +# ASA-5-106100 +CISCOFW106100 access-list %{NOTSPACE:policy_id} %{CISCO_ACTION:action} %{WORD:protocol} %{DATA:src_interface}/%{IP:src_ip}\(%{INT:src_port}\)(\(%{DATA:src_fwuser}\))? -> %{DATA:dst_interface}/%{IP:dst_ip}\(%{INT:dst_port}\)(\(%{DATA:src_fwuser}\))? 
hit-cnt %{INT:hit_count} %{CISCO_INTERVAL:interval} \[%{DATA:hashcode1}, %{DATA:hashcode2}\] +# ASA-6-110002 +CISCOFW110002 %{CISCO_REASON:reason} for %{WORD:protocol} from %{DATA:src_interface}:%{IP:src_ip}/%{INT:src_port} to %{IP:dst_ip}/%{INT:dst_port} +# ASA-6-302010 +CISCOFW302010 %{INT:connection_count} in use, %{INT:connection_count_max} most used +# ASA-6-302013, ASA-6-302014, ASA-6-302015, ASA-6-302016 +CISCOFW302013_302014_302015_302016 %{CISCO_ACTION:action}(?: %{CISCO_DIRECTION:direction})? %{WORD:protocol} connection %{INT:connection_id} for %{DATA:src_interface}:%{IP:src_ip}/%{INT:src_port}( \(%{IP:src_mapped_ip}/%{INT:src_mapped_port}\))?(\(%{DATA:src_fwuser}\))? to %{DATA:dst_interface}:%{IP:dst_ip}/%{INT:dst_port}( \(%{IP:dst_mapped_ip}/%{INT:dst_mapped_port}\))?(\(%{DATA:dst_fwuser}\))?( duration %{TIME:duration} bytes %{INT:bytes})?(?: %{CISCO_REASON:reason})?( \(%{DATA:user}\))? +# ASA-6-302020, ASA-6-302021 +CISCOFW302020_302021 %{CISCO_ACTION:action}(?: %{CISCO_DIRECTION:direction})? %{WORD:protocol} connection for faddr %{IP:dst_ip}/%{INT:icmp_seq_num}(?:\(%{DATA:fwuser}\))? gaddr %{IP:src_xlated_ip}/%{INT:icmp_code_xlated} laddr %{IP:src_ip}/%{INT:icmp_code}( \(%{DATA:user}\))? +# ASA-6-305011 +CISCOFW305011 %{CISCO_ACTION:action} %{CISCO_XLATE_TYPE:xlate_type} %{WORD:protocol} translation from %{DATA:src_interface}:%{IP:src_ip}(/%{INT:src_port})?(\(%{DATA:src_fwuser}\))? to %{DATA:src_xlated_interface}:%{IP:src_xlated_ip}/%{DATA:src_xlated_port} +# ASA-3-313001, ASA-3-313004, ASA-3-313008 +CISCOFW313001_313004_313008 %{CISCO_ACTION:action} %{WORD:protocol} type=%{INT:icmp_type}, code=%{INT:icmp_code} from %{IP:src_ip} on interface %{DATA:interface}( to %{IP:dst_ip})? +# ASA-4-313005 +CISCOFW313005 %{CISCO_REASON:reason} for %{WORD:protocol} error message: %{WORD:err_protocol} src %{DATA:err_src_interface}:%{IP:err_src_ip}(\(%{DATA:err_src_fwuser}\))? dst %{DATA:err_dst_interface}:%{IP:err_dst_ip}(\(%{DATA:err_dst_fwuser}\))? 
\(type %{INT:err_icmp_type}, code %{INT:err_icmp_code}\) on %{DATA:interface} interface\. Original IP payload: %{WORD:protocol} src %{IP:orig_src_ip}/%{INT:orig_src_port}(\(%{DATA:orig_src_fwuser}\))? dst %{IP:orig_dst_ip}/%{INT:orig_dst_port}(\(%{DATA:orig_dst_fwuser}\))? +# ASA-5-321001 +CISCOFW321001 Resource '%{WORD:resource_name}' limit of %{POSINT:resource_limit} reached for system +# ASA-4-402117 +CISCOFW402117 %{WORD:protocol}: Received a non-IPSec packet \(protocol= %{WORD:orig_protocol}\) from %{IP:src_ip} to %{IP:dst_ip} +# ASA-4-402119 +CISCOFW402119 %{WORD:protocol}: Received an %{WORD:orig_protocol} packet \(SPI= %{DATA:spi}, sequence number= %{DATA:seq_num}\) from %{IP:src_ip} \(user= %{DATA:user}\) to %{IP:dst_ip} that failed anti-replay checking +# ASA-4-419001 +CISCOFW419001 %{CISCO_ACTION:action} %{WORD:protocol} packet from %{DATA:src_interface}:%{IP:src_ip}/%{INT:src_port} to %{DATA:dst_interface}:%{IP:dst_ip}/%{INT:dst_port}, reason: %{GREEDYDATA:reason} +# ASA-4-419002 +CISCOFW419002 %{CISCO_REASON:reason} from %{DATA:src_interface}:%{IP:src_ip}/%{INT:src_port} to %{DATA:dst_interface}:%{IP:dst_ip}/%{INT:dst_port} with different initial sequence number +# ASA-4-500004 +CISCOFW500004 %{CISCO_REASON:reason} for protocol=%{WORD:protocol}, from %{IP:src_ip}/%{INT:src_port} to %{IP:dst_ip}/%{INT:dst_port} +# ASA-6-602303, ASA-6-602304 +CISCOFW602303_602304 %{WORD:protocol}: An %{CISCO_DIRECTION:direction} %{GREEDYDATA:tunnel_type} SA \(SPI= %{DATA:spi}\) between %{IP:src_ip} and %{IP:dst_ip} \(user= %{DATA:user}\) has been %{CISCO_ACTION:action} +# ASA-7-710001, ASA-7-710002, ASA-7-710003, ASA-7-710005, ASA-7-710006 +CISCOFW710001_710002_710003_710005_710006 %{WORD:protocol} (?:request|access) %{CISCO_ACTION:action} from %{IP:src_ip}/%{INT:src_port} to %{DATA:dst_interface}:%{IP:dst_ip}/%{INT:dst_port} +# ASA-6-713172 +CISCOFW713172 Group = %{GREEDYDATA:group}, IP = %{IP:src_ip}, Automatic NAT Detection Status:\s+Remote 
end\s*%{DATA:is_remote_natted}\s*behind a NAT device\s+This\s+end\s*%{DATA:is_local_natted}\s*behind a NAT device +# ASA-4-733100 +CISCOFW733100 \[\s*%{DATA:drop_type}\s*\] drop %{DATA:drop_rate_id} exceeded. Current burst rate is %{INT:drop_rate_current_burst} per second, max configured rate is %{INT:drop_rate_max_burst}; Current average rate is %{INT:drop_rate_current_avg} per second, max configured rate is %{INT:drop_rate_max_avg}; Cumulative total count is %{INT:drop_total_count} +#== End Cisco ASA == + +# Shorewall firewall logs +SHOREWALL (%{SYSLOGTIMESTAMP:timestamp}) (%{WORD:nf_host}) kernel:.*Shorewall:(%{WORD:nf_action1})?:(%{WORD:nf_action2})?.*IN=(%{USERNAME:nf_in_interface})?.*(OUT= *MAC=(%{COMMONMAC:nf_dst_mac}):(%{COMMONMAC:nf_src_mac})?|OUT=%{USERNAME:nf_out_interface}).*SRC=(%{IPV4:nf_src_ip}).*DST=(%{IPV4:nf_dst_ip}).*LEN=(%{WORD:nf_len}).*?TOS=(%{WORD:nf_tos}).*?PREC=(%{WORD:nf_prec}).*?TTL=(%{INT:nf_ttl}).*?ID=(%{INT:nf_id}).*?PROTO=(%{WORD:nf_protocol}).*?SPT=(%{INT:nf_src_port}?.*DPT=%{INT:nf_dst_port}?.*) +#== End Shorewall \ No newline at end of file diff --git a/stacks/crowdsec/config/patterns/haproxy b/stacks/crowdsec/config/patterns/haproxy new file mode 100644 index 0000000..c71bc31 --- /dev/null +++ b/stacks/crowdsec/config/patterns/haproxy @@ -0,0 +1,39 @@ +## These patterns were tested w/ haproxy-1.4.15 + +## Documentation of the haproxy log formats can be found at the following links: +## http://code.google.com/p/haproxy-docs/wiki/HTTPLogFormat +## http://code.google.com/p/haproxy-docs/wiki/TCPLogFormat + +HAPROXYTIME %{HOUR:haproxy_hour}:%{MINUTE:haproxy_minute}(?::%{SECOND:haproxy_second}) +HAPROXYDATE %{MONTHDAY:haproxy_monthday}/%{MONTH:haproxy_month}/%{YEAR:haproxy_year}:%{HAPROXYTIME:haproxy_time}.%{INT:haproxy_milliseconds} + +# Override these default patterns to parse out what is captured in your haproxy.cfg +HAPROXYCAPTUREDREQUESTHEADERS %{DATA:captured_request_headers} +HAPROXYCAPTUREDRESPONSEHEADERS 
%{DATA:captured_response_headers} + +# Example: +# These haproxy config lines will add data to the logs that are captured +# by the patterns below. Place them in your custom patterns directory to +# override the defaults. +# +# capture request header Host len 40 +# capture request header X-Forwarded-For len 50 +# capture request header Accept-Language len 50 +# capture request header Referer len 200 +# capture request header User-Agent len 200 +# +# capture response header Content-Type len 30 +# capture response header Content-Encoding len 10 +# capture response header Cache-Control len 200 +# capture response header Last-Modified len 200 +# +# HAPROXYCAPTUREDREQUESTHEADERS %{DATA:request_header_host}\|%{DATA:request_header_x_forwarded_for}\|%{DATA:request_header_accept_language}\|%{DATA:request_header_referer}\|%{DATA:request_header_user_agent} +# HAPROXYCAPTUREDRESPONSEHEADERS %{DATA:response_header_content_type}\|%{DATA:response_header_content_encoding}\|%{DATA:response_header_cache_control}\|%{DATA:response_header_last_modified} + +# parse a haproxy 'httplog' line +HAPROXYHTTPBASE %{IP:client_ip}:%{INT:client_port} \[%{HAPROXYDATE:accept_date}\] %{NOTSPACE:frontend_name} %{NOTSPACE:backend_name}/%{NOTSPACE:server_name} %{INT:time_request}/%{INT:time_queue}/%{INT:time_backend_connect}/%{INT:time_backend_response}/%{NOTSPACE:time_duration} %{INT:http_status_code} %{NOTSPACE:bytes_read} %{DATA:captured_request_cookie} %{DATA:captured_response_cookie} %{NOTSPACE:termination_state} %{INT:actconn}/%{INT:feconn}/%{INT:beconn}/%{INT:srvconn}/%{NOTSPACE:retries} %{INT:srv_queue}/%{INT:backend_queue} (\{%{HAPROXYCAPTUREDREQUESTHEADERS}\})?( )?(\{%{HAPROXYCAPTUREDRESPONSEHEADERS}\})?( )?"(|(%{WORD:http_verb} (%{URIPROTO:http_proto}://)?(?:%{USER:http_user}(?::[^@]*)?@)?(?:%{URIHOST:http_host})?(?:%{URIPATHPARAM:http_request})?( HTTP/%{NUMBER:http_version})?))?" 
+ +HAPROXYHTTP (?:%{SYSLOGTIMESTAMP:syslog_timestamp}|%{TIMESTAMP_ISO8601:timestamp8601}) %{IPORHOST:syslog_server} %{SYSLOGPROG}: %{HAPROXYHTTPBASE} + +# parse a haproxy 'tcplog' line +HAPROXYTCP (?:%{SYSLOGTIMESTAMP:syslog_timestamp}|%{TIMESTAMP_ISO8601:timestamp8601}) %{IPORHOST:syslog_server} %{SYSLOGPROG}: %{IP:client_ip}:%{INT:client_port} \[%{HAPROXYDATE:accept_date}\] %{NOTSPACE:frontend_name} %{NOTSPACE:backend_name}/%{NOTSPACE:server_name} %{INT:time_queue}/%{INT:time_backend_connect}/%{NOTSPACE:time_duration} %{NOTSPACE:bytes_read} %{NOTSPACE:termination_state} %{INT:actconn}/%{INT:feconn}/%{INT:beconn}/%{INT:srvconn}/%{NOTSPACE:retries} %{INT:srv_queue}/%{INT:backend_queue} \ No newline at end of file diff --git a/stacks/crowdsec/config/patterns/java b/stacks/crowdsec/config/patterns/java new file mode 100644 index 0000000..d0ad391 --- /dev/null +++ b/stacks/crowdsec/config/patterns/java @@ -0,0 +1,20 @@ +JAVACLASS (?:[a-zA-Z$_][a-zA-Z$_0-9]*\.)*[a-zA-Z$_][a-zA-Z$_0-9]* +#Space is an allowed character to match special cases like 'Native Method' or 'Unknown Source' +JAVAFILE (?:[A-Za-z0-9_. 
-]+) +#Allow special method +JAVAMETHOD (?:()|[a-zA-Z$_][a-zA-Z$_0-9]*) +#Line number is optional in special cases 'Native method' or 'Unknown source' +JAVASTACKTRACEPART %{SPACE}at %{JAVACLASS:class}\.%{JAVAMETHOD:method}\(%{JAVAFILE:file}(?::%{NUMBER:line})?\) +# Java Logs +JAVATHREAD (?:[A-Z]{2}-Processor[\d]+) +##JAVACLASS (?:[a-zA-Z0-9-]+\.)+[A-Za-z0-9$]+ +##JAVAFILE (?:[A-Za-z0-9_.-]+) +##JAVASTACKTRACEPART at %{JAVACLASS:class}\.%{WORD:method}\(%{JAVAFILE:file}:%{NUMBER:line}\) +JAVALOGMESSAGE (.*) +# MMM dd, yyyy HH:mm:ss eg: Jan 9, 2014 7:13:13 AM +CATALINA_DATESTAMP %{MONTH} %{MONTHDAY}, 20%{YEAR} %{HOUR}:?%{MINUTE}(?::?%{SECOND}) (?:AM|PM) +# yyyy-MM-dd HH:mm:ss,SSS ZZZ eg: 2014-01-09 17:32:25,527 -0800 +TOMCAT_DATESTAMP 20%{YEAR}-%{MONTHNUM}-%{MONTHDAY} %{HOUR}:?%{MINUTE}(?::?%{SECOND}) %{ISO8601_TIMEZONE} +CATALINALOG %{CATALINA_DATESTAMP:timestamp} %{JAVACLASS:class} %{JAVALOGMESSAGE:logmessage} +# 2014-01-09 20:03:28,269 -0800 | ERROR | com.example.service.ExampleService - something completely unexpected happened...
+TOMCATLOG %{TOMCAT_DATESTAMP:timestamp} \| %{LOGLEVEL:level} \| %{JAVACLASS:class} - %{JAVALOGMESSAGE:logmessage} \ No newline at end of file diff --git a/stacks/crowdsec/config/patterns/junos b/stacks/crowdsec/config/patterns/junos new file mode 100644 index 0000000..2da91cc --- /dev/null +++ b/stacks/crowdsec/config/patterns/junos @@ -0,0 +1,8 @@ +# JUNOS 11.4 RT_FLOW patterns +RT_FLOW_EVENT (RT_FLOW_SESSION_CREATE|RT_FLOW_SESSION_CLOSE|RT_FLOW_SESSION_DENY) + +RT_FLOW1 %{RT_FLOW_EVENT:event}: %{GREEDYDATA:close-reason}: %{IP:src-ip}/%{INT:src-port}->%{IP:dst-ip}/%{INT:dst-port} %{DATA:service} %{IP:nat-src-ip}/%{INT:nat-src-port}->%{IP:nat-dst-ip}/%{INT:nat-dst-port} %{DATA:src-nat-rule-name} %{DATA:dst-nat-rule-name} %{INT:protocol-id} %{DATA:policy-name} %{DATA:from-zone} %{DATA:to-zone} %{INT:session-id} \d+\(%{DATA:sent}\) \d+\(%{DATA:received}\) %{INT:elapsed-time} .* + +RT_FLOW2 %{RT_FLOW_EVENT:event}: session created %{IP:src-ip}/%{INT:src-port}->%{IP:dst-ip}/%{INT:dst-port} %{DATA:service} %{IP:nat-src-ip}/%{INT:nat-src-port}->%{IP:nat-dst-ip}/%{INT:nat-dst-port} %{DATA:src-nat-rule-name} %{DATA:dst-nat-rule-name} %{INT:protocol-id} %{DATA:policy-name} %{DATA:from-zone} %{DATA:to-zone} %{INT:session-id} .* + +RT_FLOW3 %{RT_FLOW_EVENT:event}: session denied %{IP:src-ip}/%{INT:src-port}->%{IP:dst-ip}/%{INT:dst-port} %{DATA:service} %{INT:protocol-id}\(\d\) %{DATA:policy-name} %{DATA:from-zone} %{DATA:to-zone} .* diff --git a/stacks/crowdsec/config/patterns/linux-syslog b/stacks/crowdsec/config/patterns/linux-syslog new file mode 100644 index 0000000..0911964 --- /dev/null +++ b/stacks/crowdsec/config/patterns/linux-syslog @@ -0,0 +1,16 @@ +SYSLOG5424PRINTASCII [!-~]+ + +SYSLOGBASE2 (?:%{SYSLOGTIMESTAMP:timestamp}|%{TIMESTAMP_ISO8601:timestamp8601}) (?:%{SYSLOGFACILITY} )?%{SYSLOGHOST:logsource}+(?: %{SYSLOGPROG}:|) +SYSLOGPAMSESSION %{SYSLOGBASE} %{GREEDYDATA:message}%{WORD:pam_module}\(%{DATA:pam_caller}\): session %{WORD:pam_session_state} for user 
%{USERNAME:username}(?: by %{GREEDYDATA:pam_by})? + +CRON_ACTION [A-Z ]+ +CRONLOG %{SYSLOGBASE} \(%{USER:user}\) %{CRON_ACTION:action} \(%{DATA:message}\) + +SYSLOGLINE %{SYSLOGBASE2} %{GREEDYDATA:message} + +# IETF 5424 syslog(8) format (see http://www.rfc-editor.org/info/rfc5424) +SYSLOG5424PRI <%{NONNEGINT:syslog5424_pri}> +SYSLOG5424SD \[%{DATA}\]+ +SYSLOG5424BASE %{SYSLOG5424PRI}%{NONNEGINT:syslog5424_ver} +(?:%{TIMESTAMP_ISO8601:syslog5424_ts}|-) +(?:%{HOSTNAME:syslog5424_host}|-) +(-|%{SYSLOG5424PRINTASCII:syslog5424_app}) +(-|%{SYSLOG5424PRINTASCII:syslog5424_proc}) +(-|%{SYSLOG5424PRINTASCII:syslog5424_msgid}) +(?:%{SYSLOG5424SD:syslog5424_sd}|-|) + +SYSLOG5424LINE %{SYSLOG5424BASE} +%{GREEDYDATA:syslog5424_msg} \ No newline at end of file diff --git a/stacks/crowdsec/config/patterns/mcollective b/stacks/crowdsec/config/patterns/mcollective new file mode 100644 index 0000000..0389cc3 --- /dev/null +++ b/stacks/crowdsec/config/patterns/mcollective @@ -0,0 +1,4 @@ +# Remember, these can be multi-line events. +MCOLLECTIVE ., \[%{TIMESTAMP_ISO8601:timestamp} #%{POSINT:pid}\]%{SPACE}%{LOGLEVEL:event_level} + +MCOLLECTIVEAUDIT %{TIMESTAMP_ISO8601:timestamp}: \ No newline at end of file diff --git a/stacks/crowdsec/config/patterns/modsecurity b/stacks/crowdsec/config/patterns/modsecurity new file mode 100644 index 0000000..0c614dc --- /dev/null +++ b/stacks/crowdsec/config/patterns/modsecurity @@ -0,0 +1,18 @@ +APACHEERRORTIME %{DAY} %{MONTH} %{MONTHDAY} %{TIME} %{YEAR} +APACHEERRORPREFIX \[%{APACHEERRORTIME:timestamp}\] \[%{NOTSPACE:apacheseverity}\] (\[pid %{INT}:tid %{INT}\] )?\[client %{IPORHOST:sourcehost}(:%{INT:source_port})?\] (\[client %{IPORHOST}\])? +GENERICAPACHEERROR %{APACHEERRORPREFIX} %{GREEDYDATA:message} +MODSECPREFIX %{APACHEERRORPREFIX} ModSecurity: %{NOTSPACE:modsecseverity}\. 
%{GREEDYDATA:modsecmessage} +MODSECRULEFILE \[file %{QUOTEDSTRING:rulefile}\] +MODSECRULELINE \[line %{QUOTEDSTRING:ruleline}\] +MODSECMATCHOFFSET \[offset %{QUOTEDSTRING:matchoffset}\] +MODSECRULEID \[id %{QUOTEDSTRING:ruleid}\] +MODSECRULEREV \[rev %{QUOTEDSTRING:rulerev}\] +MODSECRULEMSG \[msg %{QUOTEDSTRING:rulemessage}\] +MODSECRULEDATA \[data %{QUOTEDSTRING:ruledata}\] +MODSECRULESEVERITY \[severity ["']%{WORD:ruleseverity}["']\] +MODSECRULEVERS \[ver "[^"]+"\] +MODSECRULETAGS (?:\[tag %{QUOTEDSTRING:ruletag0}\] )?(?:\[tag %{QUOTEDSTRING:ruletag1}\] )?(?:\[tag %{QUOTEDSTRING:ruletag2}\] )?(?:\[tag %{QUOTEDSTRING:ruletag3}\] )?(?:\[tag %{QUOTEDSTRING:ruletag4}\] )?(?:\[tag %{QUOTEDSTRING:ruletag5}\] )?(?:\[tag %{QUOTEDSTRING:ruletag6}\] )?(?:\[tag %{QUOTEDSTRING:ruletag7}\] )?(?:\[tag %{QUOTEDSTRING:ruletag8}\] )?(?:\[tag %{QUOTEDSTRING:ruletag9}\] )?(?:\[tag %{QUOTEDSTRING}\] )* +MODSECHOSTNAME \[hostname ['"]%{DATA:targethost}["']\] +MODSECURI \[uri ["']%{DATA:targeturi}["']\] +MODSECUID \[unique_id %{QUOTEDSTRING:uniqueid}\] +MODSECAPACHEERROR %{MODSECPREFIX} %{MODSECRULEFILE} %{MODSECRULELINE} (?:%{MODSECMATCHOFFSET} )?(?:%{MODSECRULEID} )?(?:%{MODSECRULEREV} )?(?:%{MODSECRULEMSG} )?(?:%{MODSECRULEDATA} )?(?:%{MODSECRULESEVERITY} )?(?:%{MODSECRULEVERS} )?%{MODSECRULETAGS}%{MODSECHOSTNAME} %{MODSECURI} %{MODSECUID} \ No newline at end of file diff --git a/stacks/crowdsec/config/patterns/mongodb b/stacks/crowdsec/config/patterns/mongodb new file mode 100644 index 0000000..126a2a5 --- /dev/null +++ b/stacks/crowdsec/config/patterns/mongodb @@ -0,0 +1,7 @@ +MONGO_LOG %{SYSLOGTIMESTAMP:timestamp} \[%{WORD:component}\] %{GREEDYDATA:message} +MONGO_QUERY \{ \{ .* \} ntoreturn: \} +MONGO_WORDDASH \b[\w-]+\b +MONGO_SLOWQUERY %{WORD} %{MONGO_WORDDASH:database}\.%{MONGO_WORDDASH:collection} %{WORD}: %{MONGO_QUERY:query} %{WORD}:%{NONNEGINT:ntoreturn} %{WORD}:%{NONNEGINT:ntoskip} %{WORD}:%{NONNEGINT:nscanned}.*nreturned:%{NONNEGINT:nreturned}..+ %{POSINT:duration}ms 
+MONGO3_SEVERITY \w +MONGO3_COMPONENT %{WORD}|- +MONGO3_LOG %{TIMESTAMP_ISO8601:timestamp} %{MONGO3_SEVERITY:severity} %{MONGO3_COMPONENT:component}%{SPACE}(?:\[%{DATA:context}\])? %{GREEDYDATA:message} \ No newline at end of file diff --git a/stacks/crowdsec/config/patterns/mysql b/stacks/crowdsec/config/patterns/mysql new file mode 100644 index 0000000..141a0c0 --- /dev/null +++ b/stacks/crowdsec/config/patterns/mysql @@ -0,0 +1 @@ +MYSQL_AUTH_FAIL %{TIMESTAMP_ISO8601:time} %{NUMBER} \[Note\] Access denied for user '%{DATA:user}'@'%{IP:source_ip}' \(using password: %{WORD:using_password}\) diff --git a/stacks/crowdsec/config/patterns/nagios b/stacks/crowdsec/config/patterns/nagios new file mode 100644 index 0000000..5dcba0b --- /dev/null +++ b/stacks/crowdsec/config/patterns/nagios @@ -0,0 +1,124 @@ +################################################################################## +################################################################################## +# Chop Nagios log files to smithereens! +# +# A set of GROK filters to process logfiles generated by Nagios. +# While it does not, this set intends to cover all possible Nagios logs. 
+# +# Some more work needs to be done to cover all External Commands: +# http://old.nagios.org/developerinfo/externalcommands/commandlist.php +# +# If you need some support on these rules please contact: +# Jelle Smet http://smetj.net +# +################################################################################# +################################################################################# + +NAGIOSTIME \[%{NUMBER:nagios_epoch}\] + +############################################### +######## Begin nagios log types +############################################### +NAGIOS_TYPE_CURRENT_SERVICE_STATE CURRENT SERVICE STATE +NAGIOS_TYPE_CURRENT_HOST_STATE CURRENT HOST STATE + +NAGIOS_TYPE_SERVICE_NOTIFICATION SERVICE NOTIFICATION +NAGIOS_TYPE_HOST_NOTIFICATION HOST NOTIFICATION + +NAGIOS_TYPE_SERVICE_ALERT SERVICE ALERT +NAGIOS_TYPE_HOST_ALERT HOST ALERT + +NAGIOS_TYPE_SERVICE_FLAPPING_ALERT SERVICE FLAPPING ALERT +NAGIOS_TYPE_HOST_FLAPPING_ALERT HOST FLAPPING ALERT + +NAGIOS_TYPE_SERVICE_DOWNTIME_ALERT SERVICE DOWNTIME ALERT +NAGIOS_TYPE_HOST_DOWNTIME_ALERT HOST DOWNTIME ALERT + +NAGIOS_TYPE_PASSIVE_SERVICE_CHECK PASSIVE SERVICE CHECK +NAGIOS_TYPE_PASSIVE_HOST_CHECK PASSIVE HOST CHECK + +NAGIOS_TYPE_SERVICE_EVENT_HANDLER SERVICE EVENT HANDLER +NAGIOS_TYPE_HOST_EVENT_HANDLER HOST EVENT HANDLER + +NAGIOS_TYPE_EXTERNAL_COMMAND EXTERNAL COMMAND +NAGIOS_TYPE_TIMEPERIOD_TRANSITION TIMEPERIOD TRANSITION +############################################### +######## End nagios log types +############################################### + +############################################### +######## Begin external check types +############################################### +NAGIOS_EC_DISABLE_SVC_CHECK DISABLE_SVC_CHECK +NAGIOS_EC_ENABLE_SVC_CHECK ENABLE_SVC_CHECK +NAGIOS_EC_DISABLE_HOST_CHECK DISABLE_HOST_CHECK +NAGIOS_EC_ENABLE_HOST_CHECK ENABLE_HOST_CHECK +NAGIOS_EC_PROCESS_SERVICE_CHECK_RESULT PROCESS_SERVICE_CHECK_RESULT +NAGIOS_EC_PROCESS_HOST_CHECK_RESULT 
PROCESS_HOST_CHECK_RESULT +NAGIOS_EC_SCHEDULE_SERVICE_DOWNTIME SCHEDULE_SERVICE_DOWNTIME +NAGIOS_EC_SCHEDULE_HOST_DOWNTIME SCHEDULE_HOST_DOWNTIME +NAGIOS_EC_DISABLE_HOST_SVC_NOTIFICATIONS DISABLE_HOST_SVC_NOTIFICATIONS +NAGIOS_EC_ENABLE_HOST_SVC_NOTIFICATIONS ENABLE_HOST_SVC_NOTIFICATIONS +NAGIOS_EC_DISABLE_HOST_NOTIFICATIONS DISABLE_HOST_NOTIFICATIONS +NAGIOS_EC_ENABLE_HOST_NOTIFICATIONS ENABLE_HOST_NOTIFICATIONS +NAGIOS_EC_DISABLE_SVC_NOTIFICATIONS DISABLE_SVC_NOTIFICATIONS +NAGIOS_EC_ENABLE_SVC_NOTIFICATIONS ENABLE_SVC_NOTIFICATIONS +############################################### +######## End external check types +############################################### +NAGIOS_WARNING Warning:%{SPACE}%{GREEDYDATA:nagios_message} + +NAGIOS_CURRENT_SERVICE_STATE %{NAGIOS_TYPE_CURRENT_SERVICE_STATE:nagios_type}: %{DATA:nagios_hostname};%{DATA:nagios_service};%{DATA:nagios_state};%{DATA:nagios_statetype};%{DATA:nagios_statecode};%{GREEDYDATA:nagios_message} +NAGIOS_CURRENT_HOST_STATE %{NAGIOS_TYPE_CURRENT_HOST_STATE:nagios_type}: %{DATA:nagios_hostname};%{DATA:nagios_state};%{DATA:nagios_statetype};%{DATA:nagios_statecode};%{GREEDYDATA:nagios_message} + +NAGIOS_SERVICE_NOTIFICATION %{NAGIOS_TYPE_SERVICE_NOTIFICATION:nagios_type}: %{DATA:nagios_notifyname};%{DATA:nagios_hostname};%{DATA:nagios_service};%{DATA:nagios_state};%{DATA:nagios_contact};%{GREEDYDATA:nagios_message} +NAGIOS_HOST_NOTIFICATION %{NAGIOS_TYPE_HOST_NOTIFICATION:nagios_type}: %{DATA:nagios_notifyname};%{DATA:nagios_hostname};%{DATA:nagios_state};%{DATA:nagios_contact};%{GREEDYDATA:nagios_message} + +NAGIOS_SERVICE_ALERT %{NAGIOS_TYPE_SERVICE_ALERT:nagios_type}: %{DATA:nagios_hostname};%{DATA:nagios_service};%{DATA:nagios_state};%{DATA:nagios_statelevel};%{NUMBER:nagios_attempt};%{GREEDYDATA:nagios_message} +NAGIOS_HOST_ALERT %{NAGIOS_TYPE_HOST_ALERT:nagios_type}: %{DATA:nagios_hostname};%{DATA:nagios_state};%{DATA:nagios_statelevel};%{NUMBER:nagios_attempt};%{GREEDYDATA:nagios_message} + 
+NAGIOS_SERVICE_FLAPPING_ALERT %{NAGIOS_TYPE_SERVICE_FLAPPING_ALERT:nagios_type}: %{DATA:nagios_hostname};%{DATA:nagios_service};%{DATA:nagios_state};%{GREEDYDATA:nagios_message} +NAGIOS_HOST_FLAPPING_ALERT %{NAGIOS_TYPE_HOST_FLAPPING_ALERT:nagios_type}: %{DATA:nagios_hostname};%{DATA:nagios_state};%{GREEDYDATA:nagios_message} + +NAGIOS_SERVICE_DOWNTIME_ALERT %{NAGIOS_TYPE_SERVICE_DOWNTIME_ALERT:nagios_type}: %{DATA:nagios_hostname};%{DATA:nagios_service};%{DATA:nagios_state};%{GREEDYDATA:nagios_comment} +NAGIOS_HOST_DOWNTIME_ALERT %{NAGIOS_TYPE_HOST_DOWNTIME_ALERT:nagios_type}: %{DATA:nagios_hostname};%{DATA:nagios_state};%{GREEDYDATA:nagios_comment} + +NAGIOS_PASSIVE_SERVICE_CHECK %{NAGIOS_TYPE_PASSIVE_SERVICE_CHECK:nagios_type}: %{DATA:nagios_hostname};%{DATA:nagios_service};%{DATA:nagios_state};%{GREEDYDATA:nagios_comment} +NAGIOS_PASSIVE_HOST_CHECK %{NAGIOS_TYPE_PASSIVE_HOST_CHECK:nagios_type}: %{DATA:nagios_hostname};%{DATA:nagios_state};%{GREEDYDATA:nagios_comment} + +NAGIOS_SERVICE_EVENT_HANDLER %{NAGIOS_TYPE_SERVICE_EVENT_HANDLER:nagios_type}: %{DATA:nagios_hostname};%{DATA:nagios_service};%{DATA:nagios_state};%{DATA:nagios_statelevel};%{DATA:nagios_event_handler_name} +NAGIOS_HOST_EVENT_HANDLER %{NAGIOS_TYPE_HOST_EVENT_HANDLER:nagios_type}: %{DATA:nagios_hostname};%{DATA:nagios_state};%{DATA:nagios_statelevel};%{DATA:nagios_event_handler_name} + +NAGIOS_TIMEPERIOD_TRANSITION %{NAGIOS_TYPE_TIMEPERIOD_TRANSITION:nagios_type}: %{DATA:nagios_service};%{DATA:nagios_unknown1};%{DATA:nagios_unknown2} + +#################### +#### External checks +#################### + +#Disable host & service check +NAGIOS_EC_LINE_DISABLE_SVC_CHECK %{NAGIOS_TYPE_EXTERNAL_COMMAND:nagios_type}: %{NAGIOS_EC_DISABLE_SVC_CHECK:nagios_command};%{DATA:nagios_hostname};%{DATA:nagios_service} +NAGIOS_EC_LINE_DISABLE_HOST_CHECK %{NAGIOS_TYPE_EXTERNAL_COMMAND:nagios_type}: %{NAGIOS_EC_DISABLE_HOST_CHECK:nagios_command};%{DATA:nagios_hostname} + +#Enable host & service check 
+NAGIOS_EC_LINE_ENABLE_SVC_CHECK %{NAGIOS_TYPE_EXTERNAL_COMMAND:nagios_type}: %{NAGIOS_EC_ENABLE_SVC_CHECK:nagios_command};%{DATA:nagios_hostname};%{DATA:nagios_service} +NAGIOS_EC_LINE_ENABLE_HOST_CHECK %{NAGIOS_TYPE_EXTERNAL_COMMAND:nagios_type}: %{NAGIOS_EC_ENABLE_HOST_CHECK:nagios_command};%{DATA:nagios_hostname} + +#Process host & service check +NAGIOS_EC_LINE_PROCESS_SERVICE_CHECK_RESULT %{NAGIOS_TYPE_EXTERNAL_COMMAND:nagios_type}: %{NAGIOS_EC_PROCESS_SERVICE_CHECK_RESULT:nagios_command};%{DATA:nagios_hostname};%{DATA:nagios_service};%{DATA:nagios_state};%{GREEDYDATA:nagios_check_result} +NAGIOS_EC_LINE_PROCESS_HOST_CHECK_RESULT %{NAGIOS_TYPE_EXTERNAL_COMMAND:nagios_type}: %{NAGIOS_EC_PROCESS_HOST_CHECK_RESULT:nagios_command};%{DATA:nagios_hostname};%{DATA:nagios_state};%{GREEDYDATA:nagios_check_result} + +#Disable host & service notifications +NAGIOS_EC_LINE_DISABLE_HOST_SVC_NOTIFICATIONS %{NAGIOS_TYPE_EXTERNAL_COMMAND:nagios_type}: %{NAGIOS_EC_DISABLE_HOST_SVC_NOTIFICATIONS:nagios_command};%{GREEDYDATA:nagios_hostname} +NAGIOS_EC_LINE_DISABLE_HOST_NOTIFICATIONS %{NAGIOS_TYPE_EXTERNAL_COMMAND:nagios_type}: %{NAGIOS_EC_DISABLE_HOST_NOTIFICATIONS:nagios_command};%{GREEDYDATA:nagios_hostname} +NAGIOS_EC_LINE_DISABLE_SVC_NOTIFICATIONS %{NAGIOS_TYPE_EXTERNAL_COMMAND:nagios_type}: %{NAGIOS_EC_DISABLE_SVC_NOTIFICATIONS:nagios_command};%{DATA:nagios_hostname};%{GREEDYDATA:nagios_service} + +#Enable host & service notifications +NAGIOS_EC_LINE_ENABLE_HOST_SVC_NOTIFICATIONS %{NAGIOS_TYPE_EXTERNAL_COMMAND:nagios_type}: %{NAGIOS_EC_ENABLE_HOST_SVC_NOTIFICATIONS:nagios_command};%{GREEDYDATA:nagios_hostname} +NAGIOS_EC_LINE_ENABLE_HOST_NOTIFICATIONS %{NAGIOS_TYPE_EXTERNAL_COMMAND:nagios_type}: %{NAGIOS_EC_ENABLE_HOST_NOTIFICATIONS:nagios_command};%{GREEDYDATA:nagios_hostname} +NAGIOS_EC_LINE_ENABLE_SVC_NOTIFICATIONS %{NAGIOS_TYPE_EXTERNAL_COMMAND:nagios_type}: %{NAGIOS_EC_ENABLE_SVC_NOTIFICATIONS:nagios_command};%{DATA:nagios_hostname};%{GREEDYDATA:nagios_service} + 
+#Schedule host & service downtime +NAGIOS_EC_LINE_SCHEDULE_HOST_DOWNTIME %{NAGIOS_TYPE_EXTERNAL_COMMAND:nagios_type}: %{NAGIOS_EC_SCHEDULE_HOST_DOWNTIME:nagios_command};%{DATA:nagios_hostname};%{NUMBER:nagios_start_time};%{NUMBER:nagios_end_time};%{NUMBER:nagios_fixed};%{NUMBER:nagios_trigger_id};%{NUMBER:nagios_duration};%{DATA:author};%{DATA:comment} + +#End matching line +NAGIOSLOGLINE %{NAGIOSTIME} (?:%{NAGIOS_WARNING}|%{NAGIOS_CURRENT_SERVICE_STATE}|%{NAGIOS_CURRENT_HOST_STATE}|%{NAGIOS_SERVICE_NOTIFICATION}|%{NAGIOS_HOST_NOTIFICATION}|%{NAGIOS_SERVICE_ALERT}|%{NAGIOS_HOST_ALERT}|%{NAGIOS_SERVICE_FLAPPING_ALERT}|%{NAGIOS_HOST_FLAPPING_ALERT}|%{NAGIOS_SERVICE_DOWNTIME_ALERT}|%{NAGIOS_HOST_DOWNTIME_ALERT}|%{NAGIOS_PASSIVE_SERVICE_CHECK}|%{NAGIOS_PASSIVE_HOST_CHECK}|%{NAGIOS_SERVICE_EVENT_HANDLER}|%{NAGIOS_HOST_EVENT_HANDLER}|%{NAGIOS_TIMEPERIOD_TRANSITION}|%{NAGIOS_EC_LINE_DISABLE_SVC_CHECK}|%{NAGIOS_EC_LINE_ENABLE_SVC_CHECK}|%{NAGIOS_EC_LINE_DISABLE_HOST_CHECK}|%{NAGIOS_EC_LINE_ENABLE_HOST_CHECK}|%{NAGIOS_EC_LINE_PROCESS_HOST_CHECK_RESULT}|%{NAGIOS_EC_LINE_PROCESS_SERVICE_CHECK_RESULT}|%{NAGIOS_EC_LINE_SCHEDULE_HOST_DOWNTIME}|%{NAGIOS_EC_LINE_DISABLE_HOST_SVC_NOTIFICATIONS}|%{NAGIOS_EC_LINE_ENABLE_HOST_SVC_NOTIFICATIONS}|%{NAGIOS_EC_LINE_DISABLE_HOST_NOTIFICATIONS}|%{NAGIOS_EC_LINE_ENABLE_HOST_NOTIFICATIONS}|%{NAGIOS_EC_LINE_DISABLE_SVC_NOTIFICATIONS}|%{NAGIOS_EC_LINE_ENABLE_SVC_NOTIFICATIONS}) \ No newline at end of file diff --git a/stacks/crowdsec/config/patterns/nginx b/stacks/crowdsec/config/patterns/nginx new file mode 100644 index 0000000..92982fc --- /dev/null +++ b/stacks/crowdsec/config/patterns/nginx @@ -0,0 +1,19 @@ +NGUSERNAME [a-zA-Z\.\@\-\+_%]+ +NGUSER %{NGUSERNAME} + +# '$remote_addr - $remote_user [$time_local] ' +# '"$request" $status $body_bytes_sent ' +# '"$http_referer" "$http_user_agent"'; + +# 127.0.0.1 - - [28/Jan/2016:14:19:36 +0300] "GET /zero.html HTTP/1.1" 200 398 "-" "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like 
Gecko) Chrome/47.0.2526.111 Safari/537.36" + +NOTDQUOTE [^"]* +DAY2 \d{2} + +#NGINXERRTIME %{YEAR:year}/%{MONTHNUM2:month}/%{DAY2:day} %{HOUR:hour}:%{MINUTE:minute}:%{SECOND:second} +NGINXERRTIME %{YEAR}/%{MONTHNUM2}/%{DAY2} %{HOUR}:%{MINUTE}:%{SECOND} + +NGINXACCESS %{IPORHOST:remote_addr} - %{NGUSER:remote_user} \[%{HTTPDATE:time_local}\] "%{WORD:method} %{URIPATHPARAM:request} HTTP/%{NUMBER:http_version}" %{NUMBER:status} %{NUMBER:body_bytes_sent} "%{NOTDQUOTE:http_referer}" "%{NOTDQUOTE:http_user_agent}" + +# YYYY/MM/DD HH:MM:SS [LEVEL] PID#TID: *CID MESSAGE +NGINXERROR %{NGINXERRTIME:time} \[%{LOGLEVEL:loglevel}\] %{NONNEGINT:pid}#%{NONNEGINT:tid}: (\*%{NONNEGINT:cid} )?%{GREEDYDATA:message} diff --git a/stacks/crowdsec/config/patterns/paths b/stacks/crowdsec/config/patterns/paths new file mode 100644 index 0000000..a4f0194 --- /dev/null +++ b/stacks/crowdsec/config/patterns/paths @@ -0,0 +1,14 @@ + +#DIR ^.*/ +#FILE [^/].*$ + +#URI_SPLIT ^%{GREEDYDATA:request}\?%{GREEDYDATA:http_args}$ +#FULLPATH_SPLITTER %{DIR:prefix_directory}%{FILE:file_name} + + +NAXSI_FMT ^NAXSI_FMT: ip=%{IPORHOST:src_ip}&server=%{IPORHOST:target_ip}&uri=%{PATH:http_path}&learning=\d&vers=%{DATA:naxsi_version}&total_processed=\d+&total_blocked=\d+&block=\d+(&cscore\d=%{WORD:score_label}&score\d=%{INT:score})+&zone0=%{WORD:zone} +#^NAXSI_FMT: ip=%{IPORHOST:src_ip}&server=%{IPORHOST:target_ip}&uri=%{PATH:http_path}&learning=\d&vers=%{DATA:naxsi_version}&total_processed=\d+&total_blocked=\d+&block=\d+(&cscore\d=%{WORD:score_label}&score\d=%{INT:score})+&cscore2 +#^NAXSI_FMT: ip=%{IPORHOST:src_ip}&server=%{IPORHOST:target_ip}&uri=%{PATH:http_path}(&cscore\d=%{WORD:score_label}&score\d=%{INT:score})+&cscore2 +#^NAXSI_FMT: ip=%{IPORHOST:src_ip}&server=%{IPORHOST:target_ip}&uri=%{PATH:http_path}&learning=\d&vers=%{DATA:naxsi_version}&total_processed=\d+&total_blocked=\d+&block=\d+(&cscore\d=%{WORD:score_label}&score\d=%{INT:score})+&cscore2 + +NAXSI_EXLOG ^NAXSI_EXLOG: 
ip=%{IPORHOST:naxsi_src_ip}&server=%{IPORHOST:naxsi_dst_ip}&uri=%{PATH:http_path}&id=%{INT:naxsi_id}&zone=%{WORD:naxsi_zone}&var_name=%{DATA:naxsi_var_name}&content= diff --git a/stacks/crowdsec/config/patterns/postgresql b/stacks/crowdsec/config/patterns/postgresql new file mode 100644 index 0000000..6d2b984 --- /dev/null +++ b/stacks/crowdsec/config/patterns/postgresql @@ -0,0 +1,2 @@ +# Default postgresql pg_log format pattern +POSTGRESQL %{DATESTAMP:timestamp} %{TZ} %{DATA:user_id} %{GREEDYDATA:connection_id} %{POSINT:pid} \ No newline at end of file diff --git a/stacks/crowdsec/config/patterns/rails b/stacks/crowdsec/config/patterns/rails new file mode 100644 index 0000000..04e4c56 --- /dev/null +++ b/stacks/crowdsec/config/patterns/rails @@ -0,0 +1,18 @@ +RUUID \s{32} +# rails controller with action +RAILS_CONSTROLLER [^#]+ +RAIL_ACTION \w+ +RCONTROLLER %{RAILS_CONSTROLLER:controller}#%{RAIL_ACTION:action} + +# this will often be the only line: +RAILS_TIMESTAMP %{YEAR}-%{MONTHNUM}-%{MONTHDAY} %{HOUR}:%{MINUTE}:%{SECOND} %{ISO8601_TIMEZONE} +RAILS3HEAD (?m)Started %{WORD:verb} "%{URIPATHPARAM:request}" for %{IPORHOST:clientip} at %{RAILS_TIMESTAMP:timestamp} +# for some a strange reason, params are stripped of {} - not sure that's a good idea. +RPROCESSING \W*Processing by %{RCONTROLLER} as %{NOTSPACE:format}(?:\W*Parameters: {%{DATA:params}}\W*)? +RAILS3PROFILE (?:\(Views: %{NUMBER:viewms}ms \| ActiveRecord: %{NUMBER:activerecordms}ms|\(ActiveRecord: %{NUMBER:activerecordms}ms)? +RAILS3FOOT Completed %{NUMBER:response}%{DATA} in %{NUMBER:totalms}ms %{RAILS3PROFILE}%{GREEDYDATA} + +RAILS_CONTEXT (?:%{DATA}\n)* + +# putting it all together +RAILS3 %{RAILS3HEAD}(?:%{RPROCESSING})?%{RAILS_CONTEXT:context}(?:%{RAILS3FOOT})? 
\ No newline at end of file diff --git a/stacks/crowdsec/config/patterns/redis b/stacks/crowdsec/config/patterns/redis new file mode 100644 index 0000000..6a005a8 --- /dev/null +++ b/stacks/crowdsec/config/patterns/redis @@ -0,0 +1,21 @@ + +# +# Format 1: +# +# [43569] 27 Aug 12:38:58.471 * RDB: 12 MB of memory used by copy-on-write +# + +# +# Format 2: +# +# 31493:M 17 Sep 09:02:54.807 # Server started, Redis version 3.0.2 +# 31493:M 17 Sep 09:02:54.807 # WARNING overcommit_memory is set to 0! Background save may fail under low memory condition. To fix this issue add 'vm.overcommit_memory = 1' to /etc/sysctl.conf and then reboot or run the command 'sysctl vm$ +# 31493:M 17 Sep 09:02:54.807 # WARNING: The TCP backlog setting of 511 cannot be enforced because /proc/sys/net/core/somaxconn is set to the lower value of 128. +# 31493:M 17 Sep 09:02:54.807 * DB loaded from disk: 0.000 seconds +# 31493:M 17 Sep 09:02:54.807 * The server is now ready to accept connections on port 6379 +# + +REDISTIMESTAMP %{MONTHDAY} %{MONTH} %{TIME} +REDISLOG \[%{POSINT:pid}\] %{REDISTIMESTAMP:time} \*\s +REDISLOG1 %{REDISLOG} +REDISLOG2 %{POSINT:pid}:M %{REDISTIMESTAMP:time} [*#] %{GREEDYDATA:message} \ No newline at end of file diff --git a/stacks/crowdsec/config/patterns/ruby b/stacks/crowdsec/config/patterns/ruby new file mode 100644 index 0000000..845ba0d --- /dev/null +++ b/stacks/crowdsec/config/patterns/ruby @@ -0,0 +1,2 @@ +RUBY_LOGLEVEL DEBUG|FATAL|ERROR|WARN|INFO +RUBY_LOGGER [DFEWI], \[%{TIMESTAMP_ISO8601:timestamp} #%{POSINT:pid}\] *%{RUBY_LOGLEVEL:loglevel} -- +%{DATA:progname}: %{GREEDYDATA:message} \ No newline at end of file diff --git a/stacks/crowdsec/config/patterns/smb b/stacks/crowdsec/config/patterns/smb new file mode 100644 index 0000000..38b1f4d --- /dev/null +++ b/stacks/crowdsec/config/patterns/smb @@ -0,0 +1 @@ +SMB_AUTH_FAIL Auth:%{GREEDYDATA} user \[%{DATA:smb_domain}\]\\\[%{DATA:user}\]%{GREEDYDATA} status \[NT_STATUS_NO_SUCH_USER\]%{GREEDYDATA} remote host 
\[ipv4:%{IP:ip_source} \ No newline at end of file diff --git a/stacks/crowdsec/config/patterns/ssh b/stacks/crowdsec/config/patterns/ssh new file mode 100644 index 0000000..bf9fd1e --- /dev/null +++ b/stacks/crowdsec/config/patterns/ssh @@ -0,0 +1,61 @@ +# sshd grok pattern + +# Start/Stop +SSHD_LISTEN Server listening on %{IP:sshd_listen_ip} port %{NUMBER:sshd_listen_port}. +SSHD_TERMINATE Received signal %{NUMBER:sshd_signal}; terminating. + +# SSH Tunnel +SSHD_TUNN_ERR1 error: connect_to %{IP:sshd_listen_ip} port %{NUMBER:sshd_listen_port}: failed. +SSHD_TUNN_ERR2 error: channel_setup_fwd_listener: cannot listen to port: %{NUMBER:sshd_listen_port} +SSHD_TUNN_ERR3 error: bind: Address already in use +SSHD_TUNN_ERR4 error: channel_setup_fwd_listener_tcpip: cannot listen to port: %{NUMBER:sshd_listen_port} +SSHD_TUNN_TIMEOUT Timeout, client not responding. + +# Normal +SSHD_SUCCESS Accepted %{WORD:sshd_auth_type} for %{USERNAME:sshd_user} from %{IP:sshd_client_ip} port %{NUMBER:sshd_port} %{WORD:sshd_protocol}: %{GREEDYDATA:sshd_cipher} +SSHD_DISCONNECT Received disconnect from %{IP:sshd_client_ip} port %{NUMBER:sshd_port}:%{NUMBER:sshd_disconnect_code}: %{GREEDYDATA:sshd_disconnect_status} +SSHD_CONN_CLOSE Connection closed by %{IP:sshd_client_ip}$ +SSHD_SESSION_OPEN pam_unix\(sshd:session\): session opened for user %{USERNAME:sshd_user} by \(uid=\d+\) +SSHD_SESSION_CLOSE pam_unix\(sshd:session\): session closed for user %{USERNAME:sshd_user} +SSHD_SESSION_FAIL pam_systemd\(sshd:session\): Failed to release session: %{GREEDYDATA:sshd_disconnect_status} +SSHD_LOGOUT_ERR syslogin_perform_logout: logout\(\) returned an error + +# Probe +SSHD_REFUSE_CONN refused connect from %{DATA:sshd_client_hostname} \(%{IPORHOST:sshd_client_ip}\) +SSHD_TCPWRAP_FAIL1 warning: %{DATA:sshd_tcpd_file}, line %{NUMBER}: can't verify hostname: getaddrinfo\(%{DATA:sshd_paranoid_hostname}, %{DATA:sshd_sa_family}\) failed +SSHD_TCPWRAP_FAIL2 warning: %{DATA:sshd_tcpd_file}, line %{NUMBER}: 
host name/address mismatch: %{IPORHOST:sshd_client_ip} != %{HOSTNAME:sshd_paranoid_hostname} +SSHD_TCPWRAP_FAIL3 warning: %{DATA:sshd_tcpd_file}, line %{NUMBER}: host name/name mismatch: %{HOSTNAME:sshd_paranoid_hostname_1} != %{HOSTNAME:sshd_paranoid_hostname_2} +SSHD_TCPWRAP_FAIL4 warning: %{DATA:sshd_tcpd_file}, line %{NUMBER}: host name/name mismatch: reverse lookup results in non-FQDN %{HOSTNAME:sshd_paranoid_hostname} +SSHD_TCPWRAP_FAIL5 warning: can't get client address: Connection reset by peer +SSHD_FAIL Failed %{WORD:sshd_auth_type} for %{USERNAME:sshd_invalid_user} from %{IP:sshd_client_ip} port %{NUMBER:sshd_port} %{WORD:sshd_protocol} +SSHD_USER_FAIL Failed password for invalid user %{USERNAME:sshd_invalid_user} from %{IP:sshd_client_ip} port %{NUMBER:sshd_port} %{WORD:sshd_protocol} +SSHD_INVAL_USER Invalid user\s*%{USERNAME:sshd_invalid_user}? from %{IP:sshd_client_ip} + +# preauth +SSHD_DISC_PREAUTH Disconnected from %{IP:sshd_client_ip} port %{NUMBER:sshd_port}\s*(?:\[%{GREEDYDATA:sshd_privsep}\]|) +SSHD_MAXE_PREAUTH error: maximum authentication attempts exceeded for (?:invalid user |)%{USERNAME:sshd_invalid_user} from %{IP:sshd_client_ip} port %{NUMBER:sshd_port} %{WORD:sshd_protocol}\s*(?:\[%{GREEDYDATA:sshd_privsep}\]|) +SSHD_DISR_PREAUTH Disconnecting: %{GREEDYDATA:sshd_disconnect_status} \[%{GREEDYDATA:sshd_privsep}\] +SSHD_INVA_PREAUTH input_userauth_request: invalid user %{USERNAME:sshd_invalid_user}?\s*(?:\[%{GREEDYDATA:sshd_privsep}\]|) +SSHD_REST_PREAUTH Connection reset by %{IP:sshd_client_ip} port %{NUMBER:sshd_port}\s*(?:\[%{GREEDYDATA:sshd_privsep}\]|) +SSHD_CLOS_PREAUTH Connection closed by %{IP:sshd_client_ip} port %{NUMBER:sshd_port}\s*(?:\[%{GREEDYDATA:sshd_privsep}\]|) +SSHD_FAIL_PREAUTH fatal: Unable to negotiate with %{IP:sshd_client_ip} port %{NUMBER:sshd_port}:\s*%{GREEDYDATA:sshd_disconnect_status}? 
\[%{GREEDYDATA:sshd_privsep}\] +SSHD_FAI2_PREAUTH fatal: %{GREEDYDATA:sshd_fatal_status}: Connection from %{IP:sshd_client_ip} port %{NUMBER:sshd_port}:\s*%{GREEDYDATA:sshd_disconnect_status}? \[%{GREEDYDATA:sshd_privsep}\] +SSHD_BADL_PREAUTH Bad packet length %{NUMBER:sshd_packet_length}. \[%{GREEDYDATA:sshd_privsep}\] + +# Corrupted +SSHD_IDENT_FAIL Did not receive identification string from %{IP:sshd_client_ip} +SSHD_MAPB_FAIL Address %{IP:sshd_client_ip} maps to %{HOSTNAME:sshd_client_hostname}, but this does not map back to the address - POSSIBLE BREAK-IN ATTEMPT! +SSHD_RMAP_FAIL reverse mapping checking getaddrinfo for %{HOSTNAME:sshd_client_hostname} \[%{IP:sshd_client_ip}\] failed - POSSIBLE BREAK-IN ATTEMPT! +SSHD_TOOMANY_AUTH Disconnecting: Too many authentication failures for %{USERNAME:sshd_invalid_user} +SSHD_CORRUPT_MAC Corrupted MAC on input +SSHD_PACKET_CORRUPT Disconnecting: Packet corrupt +SSHD_BAD_VERSION Bad protocol version identification '%{GREEDYDATA}' from %{IP:sshd_client_ip} + +#### +SSHD_INIT %{SSHD_LISTEN}|%{SSHD_TERMINATE} +SSHD_TUNN %{SSHD_TUNN_ERR1}|%{SSHD_TUNN_ERR2}|%{SSHD_TUNN_ERR3}|%{SSHD_TUNN_ERR4}|%{SSHD_TUNN_TIMEOUT} +SSHD_NORMAL_LOG %{SSHD_SUCCESS}|%{SSHD_DISCONNECT}|%{SSHD_CONN_CLOSE}|%{SSHD_SESSION_OPEN}|%{SSHD_SESSION_CLOSE}|%{SSHD_SESSION_FAIL}|%{SSHD_LOGOUT_ERR} +SSHD_PROBE_LOG %{SSHD_REFUSE_CONN}|%{SSHD_TCPWRAP_FAIL1}|%{SSHD_TCPWRAP_FAIL2}|%{SSHD_TCPWRAP_FAIL3}|%{SSHD_TCPWRAP_FAIL4}|%{SSHD_TCPWRAP_FAIL5}|%{SSHD_FAIL}|%{SSHD_USER_FAIL}|%{SSHD_INVAL_USER} +SSHD_PREAUTH %{SSHD_DISC_PREAUTH}|%{SSHD_MAXE_PREAUTH}|%{SSHD_DISR_PREAUTH}|%{SSHD_INVA_PREAUTH}|%{SSHD_REST_PREAUTH}|%{SSHD_FAIL_PREAUTH}|%{SSHD_CLOS_PREAUTH}|%{SSHD_FAI2_PREAUTH}|%{SSHD_BADL_PREAUTH} +SSHD_CORRUPTED %{SSHD_IDENT_FAIL}|%{SSHD_MAPB_FAIL}|%{SSHD_RMAP_FAIL}|%{SSHD_TOOMANY_AUTH}|%{SSHD_CORRUPT_MAC}|%{SSHD_PACKET_CORRUPT}|%{SSHD_BAD_VERSION} +SSHD_LOG %{SSHD_INIT}|%{SSHD_NORMAL_LOG}|%{SSHD_PROBE_LOG}|%{SSHD_CORRUPTED}|%{SSHD_TUNN}|%{SSHD_PREAUTH} diff --git 
a/stacks/crowdsec/config/patterns/tcpdump b/stacks/crowdsec/config/patterns/tcpdump new file mode 100644 index 0000000..8c76105 --- /dev/null +++ b/stacks/crowdsec/config/patterns/tcpdump @@ -0,0 +1 @@ +TCPDUMP_OUTPUT %{GREEDYDATA:timestamp} IP %{IPORHOST:source_ip}\.%{INT:source_port} > %{IPORHOST:dest_ip}\.%{INT:dest_port}: Flags \[%{GREEDYDATA:tcpflags}\], seq diff --git a/stacks/crowdsec/config/profiles.yaml b/stacks/crowdsec/config/profiles.yaml new file mode 100644 index 0000000..268509a --- /dev/null +++ b/stacks/crowdsec/config/profiles.yaml @@ -0,0 +1,29 @@ +name: default_ip_remediation +#debug: true +filters: + - Alert.Remediation == true && Alert.GetScope() == "Ip" +decisions: + - type: ban + duration: 4h +#duration_expr: Sprintf('%dh', (GetDecisionsCount(Alert.GetValue()) + 1) * 4) +notifications: +# - slack_default # Set the webhook in /etc/crowdsec/notifications/slack.yaml before enabling this. +# - splunk_default # Set the splunk url and token in /etc/crowdsec/notifications/splunk.yaml before enabling this. + - http_default # Set the required http parameters in /etc/crowdsec/notifications/http.yaml before enabling this. +# - email_default # Set the required email parameters in /etc/crowdsec/notifications/email.yaml before enabling this. +on_success: break +--- +name: default_range_remediation +#debug: true +filters: + - Alert.Remediation == true && Alert.GetScope() == "Range" +decisions: + - type: ban + duration: 4h +#duration_expr: Sprintf('%dh', (GetDecisionsCount(Alert.GetValue()) + 1) * 4) +notifications: +# - slack_default # Set the webhook in /etc/crowdsec/notifications/slack.yaml before enabling this. +# - splunk_default # Set the splunk url and token in /etc/crowdsec/notifications/splunk.yaml before enabling this. + - http_default # Set the required http parameters in /etc/crowdsec/notifications/http.yaml before enabling this. 
+# - email_default # Set the required email parameters in /etc/crowdsec/notifications/email.yaml before enabling this. +on_success: break \ No newline at end of file diff --git a/stacks/crowdsec/config/simulation.yaml b/stacks/crowdsec/config/simulation.yaml new file mode 100644 index 0000000..dad8502 --- /dev/null +++ b/stacks/crowdsec/config/simulation.yaml @@ -0,0 +1,3 @@ +simulation: false +# exclusions: +# - crowdsecurity/ssh-bf diff --git a/stacks/crowdsec/config/user.yaml b/stacks/crowdsec/config/user.yaml new file mode 100644 index 0000000..a1047dc --- /dev/null +++ b/stacks/crowdsec/config/user.yaml @@ -0,0 +1,38 @@ +common: + daemonize: false + log_media: stdout + log_level: info + log_dir: /var/log/ +config_paths: + config_dir: /etc/crowdsec/ + data_dir: /var/lib/crowdsec/data + #simulation_path: /etc/crowdsec/config/simulation.yaml + #hub_dir: /etc/crowdsec/hub/ + #index_path: ./config/hub/.index.json +crowdsec_service: + #acquisition_path: ./config/acquis.yaml + parser_routines: 1 +cscli: + output: human +db_config: + type: sqlite + db_path: /var/lib/crowdsec/data/crowdsec.db + user: crowdsec + #log_level: info + password: crowdsec + db_name: crowdsec + host: "127.0.0.1" + port: 3306 +api: + client: + insecure_skip_verify: false # default true + credentials_path: /etc/crowdsec/local_api_credentials.yaml + server: + #log_level: info + listen_uri: 127.0.0.1:8080 + profiles_path: /etc/crowdsec/profiles.yaml + online_client: # Central API + credentials_path: /etc/crowdsec/online_api_credentials.yaml +prometheus: + enabled: true + level: full diff --git a/stacks/dockge/compose.yml b/stacks/dockge/compose.yml new file mode 100644 index 0000000..7cd77ea --- /dev/null +++ b/stacks/dockge/compose.yml @@ -0,0 +1,25 @@ +services: + dockge: + image: louislam/dockge:1 + restart: unless-stopped + volumes: + - /var/run/docker.sock:/var/run/docker.sock + - ./data:/app/data + - /opt/stacks:/opt/stacks + environment: + - DOCKGE_STACKS_DIR=/opt/stacks + networks: + - 
traefik_network + labels: + - "traefik.enable=true" + # Local + - "traefik.http.routers.${COMPOSE_PROJECT_NAME}-local.rule=Host(`${COMPOSE_PROJECT_NAME}.local.tellserv.fr`)" + - "traefik.http.routers.${COMPOSE_PROJECT_NAME}-local.entryPoints=local" + - "traefik.http.routers.${COMPOSE_PROJECT_NAME}-local.tls.certresolver=cloudflare-local" + - "traefik.http.routers.${COMPOSE_PROJECT_NAME}-local.tls=true" + # Port interne du service + - "traefik.http.services.${COMPOSE_PROJECT_NAME}.loadbalancer.server.port=5001" + +networks: + traefik_network: + external: true diff --git a/stacks/etesync/compose.yml b/stacks/etesync/compose.yml new file mode 100644 index 0000000..a60c1e4 --- /dev/null +++ b/stacks/etesync/compose.yml @@ -0,0 +1,39 @@ +services: + etebase: + image: victorrds/etesync:alpine + container_name: etebase + user: 0:0 + volumes: + - ./data:/data:rw + environment: + TIME_ZONE: Europe/Paris + SERVER: http-socket + SUPER_USER: ${SUPER_USER} + SUPER_PASS: ${SUPER_PASS} + ALLOWED_HOSTS: "etesync.tellserv.fr,etesync.local.tellserv.fr" + restart: always + networks: + - traefik_network + labels: + - traefik.enable=true + # Local + - traefik.http.routers.${COMPOSE_PROJECT_NAME}-local.rule=Host(`${COMPOSE_PROJECT_NAME}.local.tellserv.fr`) + - traefik.http.routers.${COMPOSE_PROJECT_NAME}-local.entryPoints=local + - "traefik.http.routers.${COMPOSE_PROJECT_NAME}-local.tls.certresolver=cloudflare-local" + - traefik.http.routers.${COMPOSE_PROJECT_NAME}-local.tls=true + # Production + - traefik.http.routers.${COMPOSE_PROJECT_NAME}-prod.rule=Host(`${COMPOSE_PROJECT_NAME}.tellserv.fr`) + - traefik.http.routers.${COMPOSE_PROJECT_NAME}-prod.entryPoints=websecure + - traefik.http.routers.${COMPOSE_PROJECT_NAME}-prod.tls=true + - traefik.http.routers.${COMPOSE_PROJECT_NAME}-prod.tls.certResolver=cloudflare + # - "traefik.http.routers.${COMPOSE_PROJECT_NAME}-prod.middlewares=tinyauth" + + # Port exposé par le service + - 
traefik.http.services.${COMPOSE_PROJECT_NAME}.loadbalancer.server.port=3735 + # --- Flame Dashboard Integration (optionnel) + # --- Watchtower Auto-Update (optionnel) + # - "com.centurylinklabs.watchtower.enable=true" + +networks: + traefik_network: + external: true diff --git a/stacks/feedropolis/compose.yml b/stacks/feedropolis/compose.yml new file mode 100644 index 0000000..5eebaa3 --- /dev/null +++ b/stacks/feedropolis/compose.yml @@ -0,0 +1,42 @@ +services: + app: + image: docker.io/stormworks/feedropolis + container_name: feedropolis-app + privileged: true + environment: + - DATABASE_URL=postgres://${POSTGRES_USER}:${POSTGRES_PASSWORD}@feedropolis-db:5432/feed + - BASE_URL=https://feedropolis.tellserv.fr/ + - DEBUG= + networks: + - traefik_network + - feedropolis_network + restart: unless-stopped + labels: + - traefik.enable=true + - traefik.http.routers.${COMPOSE_PROJECT_NAME}-local.rule=Host(`${COMPOSE_PROJECT_NAME}.local.tellserv.fr`) + - traefik.http.routers.${COMPOSE_PROJECT_NAME}-local.entryPoints=local + - "traefik.http.routers.${COMPOSE_PROJECT_NAME}-local.tls.certresolver=cloudflare-local" + - traefik.http.routers.${COMPOSE_PROJECT_NAME}-local.tls=true + - traefik.http.routers.${COMPOSE_PROJECT_NAME}-prod.rule=Host(`${COMPOSE_PROJECT_NAME}.tellserv.fr`) + - traefik.http.routers.${COMPOSE_PROJECT_NAME}-prod.entryPoints=websecure + - traefik.http.routers.${COMPOSE_PROJECT_NAME}-prod.tls=true + - traefik.http.routers.${COMPOSE_PROJECT_NAME}-prod.tls.certResolver=cloudflare + - traefik.http.routers.${COMPOSE_PROJECT_NAME}-prod.middlewares=tinyauth + - traefik.http.services.${COMPOSE_PROJECT_NAME}.loadbalancer.server.port=3000 + - com.centurylinklabs.watchtower.enable=true + + db: + image: docker.io/library/postgres:12 + container_name: feedropolis-db + environment: + - POSTGRES_USER=${POSTGRES_USER} + - POSTGRES_PASSWORD=${POSTGRES_PASSWORD} + networks: + - feedropolis_network + restart: unless-stopped + +networks: + traefik_network: + external: true + 
feedropolis_network: + driver: bridge diff --git a/stacks/freshrss/compose.yml b/stacks/freshrss/compose.yml new file mode 100644 index 0000000..e80517e --- /dev/null +++ b/stacks/freshrss/compose.yml @@ -0,0 +1,28 @@ +services: + freshrss: + image: freshrss/freshrss:1.26.2 + container_name: freshrss + environment: + - TZ=Europe/Paris + - CRON_MIN=13,43 + volumes: + - /mnt/storage/freshrss/data:/var/www/FreshRSS/data + - /mnt/storage/freshrss/extensions:/var/www/FreshRSS/extensions + networks: + - traefik_network + restart: unless-stopped + labels: + - traefik.enable=true + - traefik.http.routers.freshrss-local.rule=Host(`freshrss.local.tellserv.fr`) + - traefik.http.routers.freshrss-local.entryPoints=local + - traefik.http.routers.${COMPOSE_PROJECT_NAME}-local.tls.certresolver=cloudflare-local + - traefik.http.routers.freshrss-local.tls=true + - traefik.http.routers.freshrss-prod.rule=Host(`freshrss.tellserv.fr`) + - traefik.http.routers.freshrss-prod.entryPoints=websecure + - traefik.http.routers.freshrss-prod.tls=true + - traefik.http.routers.freshrss-prod.tls.certResolver=cloudflare + - traefik.http.services.freshrss.loadbalancer.server.port=80 + - com.centurylinklabs.watchtower.enable=true +networks: + traefik_network: + external: true diff --git a/stacks/glance/assets/user.css b/stacks/glance/assets/user.css new file mode 100644 index 0000000..e69de29 diff --git a/stacks/glance/compose.yml b/stacks/glance/compose.yml new file mode 100644 index 0000000..9fc9f20 --- /dev/null +++ b/stacks/glance/compose.yml @@ -0,0 +1,66 @@ +services: + glance: + container_name: glance + image: glanceapp/glance:v0.7.5 + restart: unless-stopped + volumes: + - ./config:/app/config + - ./assets:/app/assets + - /var/run/docker.sock:/var/run/docker.sock:ro + - /mnt/storage:/mnt/storage:ro + env_file: .env + networks: + - traefik_network + - internal_glance + secrets: + - plex-token + labels: + - "traefik.enable=true" + - 
"traefik.http.routers.${COMPOSE_PROJECT_NAME}-prod.rule=Host(`tellserv.fr`)" + - "traefik.http.routers.${COMPOSE_PROJECT_NAME}-prod.entryPoints=websecure" + - "traefik.http.routers.${COMPOSE_PROJECT_NAME}-prod.tls=true" + - "traefik.http.routers.${COMPOSE_PROJECT_NAME}-prod.tls.certResolver=cloudflare" + - "traefik.http.services.${COMPOSE_PROJECT_NAME}.loadbalancer.server.port=8080" + + glance-containers-builder: + build: ./container-builder + volumes: + - /var/run/docker.sock:/var/run/docker.sock + - ./config/includes:/output + - ./container-builder:/app/config + restart: unless-stopped + networks: + - internal_glance + entrypoint: > + sh -c "pip install docker pyyaml requests beautifulsoup4 && while true; do python3 /app/generate_containers_block.py && sleep 60; done" + + rss: + container_name: glance-rss + image: nginx:alpine + restart: unless-stopped + volumes: + - ./rss:/usr/share/nginx/html:ro + networks: + - internal_glance + + rss-builder: + build: ./rss-builder + container_name: rss-builder + restart: unless-stopped + volumes: + - ./updates:/updates:ro + - ./rss:/rss + networks: + - internal_glance + entrypoint: > + sh -c "while true; do python3 /app/generate_rss.py && sleep 60; done" + +networks: + traefik_network: + external: true + internal_glance: + name: glance_internal + +secrets: + plex-token: + file: ./config/secrets/plex-token.txt diff --git a/stacks/glance/config/glance.yml b/stacks/glance/config/glance.yml new file mode 100644 index 0000000..66ff2f9 --- /dev/null +++ b/stacks/glance/config/glance.yml @@ -0,0 +1,12 @@ +server: + assets-path: /app/assets + +theme: + # Note: assets are cached by the browser, changes to the CSS file + # will not be reflected until the browser cache is cleared (Ctrl+F5) + custom-css-file: /assets/user.css + +pages: + # It's not necessary to create a new file for each page and include it, you can simply + # put its contents here, though multiple pages are easier to manage when separated + - $include: home.yml diff 
--git a/stacks/glance/config/home.yml b/stacks/glance/config/home.yml new file mode 100644 index 0000000..500325f --- /dev/null +++ b/stacks/glance/config/home.yml @@ -0,0 +1,184 @@ +- name: Accueil + columns: + - size: small + widgets: + - type: calendar + + - type: rss + title: Journal du serveur + style: vertical-list + limit: 10 + collapse-after: 5 + feeds: + - url: http://rss/index.xml + + - size: full + widgets: + - type: search + title: Recherche Whoogle + search-engine: https://whoogle.tellserv.fr/search?q={QUERY} + placeholder: Recherche sur Internet… + new-tab: false + autofocus: false + + - type: docker-containers + title: Conteneurs en cours + limit: 10 + hide-by-default: true + $include: includes/containers.yml + + - size: small + widgets: + - type: weather + title: Météo à Domfront + location: Domfront, France + units: metric + hour-format: 24h + + - type: server-stats + title: Ressources serveur + show-network: true + servers: + - type: local + hide-mountpoints-by-default: true + mountpoints: + "/mnt/storage": + name: Stockage principal + hide: false + + - type: custom-api + title: Minecraft + url: https://api.mcstatus.io/v2/status/java/minecraft.tellserv.fr + cache: 30s + template: | +
+
+ {{ if .JSON.Bool "online" }} + + {{ else }} + + + + {{ end }} +
+ +
+ + {{ .JSON.String "host" }} + {{ if .JSON.Bool "online" }} + + {{ else }} + + {{ end }} + + +
    +
  • + {{ if .JSON.Bool "online" }} + {{ .JSON.String "version.name_clean" }} + {{ else }} + Offline + {{ end }} +
  • + {{ if .JSON.Bool "online" }} +
  • +
    + {{ range .JSON.Array "players.list" }}{{ .String "name_clean" }}
    {{ end }} +
    +

    + + + + {{ .JSON.Int "players.online" | formatNumber }}/{{ .JSON.Int "players.max" | formatNumber }} joueurs +

    +
  • + {{ else }} +
  • +

    + + + + 0 joueurs +

    +
  • + {{ end }} +
+
+
+ + - type: custom-api + title: Bibliothèques Plex + cache: 5m + options: + base-url: https://plex.tellserv.fr + api-key: ${secret:plex-token} + template: | + {{ $baseURL := .Options.StringOr "base-url" "" }} + {{ $apiKey := .Options.StringOr "api-key" "" }} + + {{ define "errorMsg" }} +
+
Erreur
+ + + +
+

{{ . }}

+ {{ end }} + + {{ if or (eq $baseURL "") (eq $apiKey "") }} + {{ template "errorMsg" "Paramètres manquants : base-url ou api-key" }} + {{ else }} + {{ $sectionsURL := printf "%s/library/sections" $baseURL }} + {{ $sectionsCall := newRequest $sectionsURL + | withHeader "Accept" "application/json" + | withHeader "X-Plex-Token" $apiKey + | getResponse }} + + {{ if $sectionsCall.JSON.Exists "MediaContainer.Directory" }} + {{ $sections := $sectionsCall.JSON.Array "MediaContainer.Directory" }} + +
+ {{ range $section := $sections }} + {{ $sectionID := $section.String "key" }} + {{ $title := $section.String "title" }} + {{ $type := $section.String "type" }} + + {{ $color := "#666" }} + {{ if eq $type "movie" }} + {{ $color = "#e57373" }} + {{ else if eq $type "show" }} + {{ $color = "#7986cb" }} + {{ else if eq $type "artist" }} + {{ $color = "#81c784" }} + {{ end }} + + {{ $countURL := printf "%s/library/sections/%s/all" $baseURL $sectionID }} + {{ $countCall := newRequest $countURL + | withParameter "X-Plex-Token" $apiKey + | withHeader "Accept" "application/json" + | getResponse }} + + {{ if $countCall.JSON.Exists "MediaContainer.size" }} + {{ $count := $countCall.JSON.Int "MediaContainer.size" }} +
+
{{ $title }}
+
+ {{ $count }} +
+
+ {{ end }} + {{ end }} +
+ {{ else }} + {{ template "errorMsg" "Impossible de récupérer les bibliothèques Plex" }} + {{ end }} + {{ end }} diff --git a/stacks/glance/config/includes/containers.yml b/stacks/glance/config/includes/containers.yml new file mode 100644 index 0000000..fb83797 --- /dev/null +++ b/stacks/glance/config/includes/containers.yml @@ -0,0 +1,141 @@ +containers: + LibreChat: + name: Librechat + url: https://librechat.tellserv.fr + icon: https://raw.githubusercontent.com/selfhst/icons/refs/heads/main/png/librechat.png + hide: false + 6e1a79ad77b9_plex: + name: Plex + url: https://plex.tellserv.fr + icon: https://plex.tellserv.fr/favicon.ico + hide: false + stirlingpdf: + name: Stirlingpdf + url: https://stirlingpdf.tellserv.fr + icon: https://raw.githubusercontent.com/Stirling-Tools/Stirling-PDF/main/docs/stirling.png + hide: false + beszel: + name: Beszel + url: https://beszel.tellserv.fr + icon: https://beszel.tellserv.fr/static/icon.svg + hide: false + audiobookshelf: + name: Audiobookshelf + url: https://audiobookshelf.tellserv.fr + icon: https://audiobookshelf.tellserv.fr/audiobookshelf/favicon.ico + hide: false + whoogle-search: + name: Whoogle + url: https://whoogle.tellserv.fr + icon: https://whoogle.tellserv.fr/static/img/favicon/apple-icon-57x57.png + hide: false + blog_ghost: + name: Blog + url: https://blog.tellserv.fr + icon: https://blog.tellserv.fr/favicon.ico + hide: false + larabouillere_ghost: + name: Larabouillere + url: https://larabouillere.tellserv.fr + icon: https://larabouillere.tellserv.fr/favicon.ico + hide: false + freshrss: + name: Freshrss + url: https://freshrss.tellserv.fr + icon: https://freshrss.tellserv.fr/../favicon.ico + hide: false + webdav: + name: Webdav + url: https://webdav.tellserv.fr + icon: mdi:web + hide: false + gotify: + name: Gotify + url: https://gotify.tellserv.fr + icon: https://gotify.tellserv.fr/static/apple-touch-icon-57x57.png + hide: false + vikunja-vikunja-1: + name: Vikunja + url: https://vikunja.tellserv.fr + 
icon: https://vikunja.tellserv.fr/favicon.ico + hide: false + searxng: + name: Searxng + url: https://searxng.tellserv.fr + icon: https://raw.githubusercontent.com/selfhst/icons/refs/heads/main/png/searxng.png + hide: false + paperless: + name: Paperless + url: https://paperless.tellserv.fr + icon: https://raw.githubusercontent.com/selfhst/icons/refs/heads/main/png/paperless-ngx.png + hide: false + pc-builds: + name: Pc-builds + url: https://pc-builds.tellserv.fr + icon: https://pc-builds.tellserv.fr/favicon.ico + hide: false + clipcascade-clipcascade-1: + name: Clipcascade + url: https://clipcascade.tellserv.fr + icon: https://clipcascade.tellserv.fr/assets/images/logo.png + hide: false + vaultwarden: + name: Vaultwarden + url: https://vaultwarden.tellserv.fr + icon: https://vaultwarden.tellserv.fr/images/apple-touch-icon.png + hide: false + yamtrack: + name: Yamtrack + url: https://yamtrack.tellserv.fr + icon: https://yamtrack.tellserv.fr/static/favicon/apple-touch-icon.png + hide: false + qbittorrent: + name: Qbittorrent + url: https://qbittorrent.tellserv.fr + icon: https://raw.githubusercontent.com/selfhst/icons/refs/heads/main/png/qbittorrent.png + hide: false + photoprism-photoprism-1: + name: Photoprism + url: https://photoprism.tellserv.fr + icon: https://photoprism.tellserv.fr/static/icons/logo/512.png + hide: false + pingvin: + name: Pingvin + url: https://pingvin.tellserv.fr + icon: https://pingvin.tellserv.fr/img/favicon.ico + hide: false + kavita: + name: Kavita + url: https://kavita.tellserv.fr + icon: https://kavita.tellserv.fr/assets/icons/apple-touch-icon.png + hide: false + glance: + name: Glance + url: https://tellserv.fr + icon: https://tellserv.fr/static/79abad6150/app-icon.png + hide: false + mobilizon-mobilizon-1: + name: Mobilizon + url: https://mobilizon.tellserv.fr + icon: https://mobilizon.tellserv.fr/img/icons/apple-touch-icon-152x152.png + hide: false + joal: + name: Joal + url: https://joal.tellserv.fr + icon: 
https://cdn.jsdelivr.net/gh/homarr-labs/dashboard-icons/png/joal.png + hide: false + feedropolis-app: + name: Feedropolis + url: https://feedropolis.tellserv.fr + icon: https://raw.githubusercontent.com/selfhst/icons/refs/heads/main/png/rss-com.png + hide: false + etebase: + name: Etesync + url: https://etesync.tellserv.fr + icon: https://raw.githubusercontent.com/selfhst/icons/refs/heads/main/png/etesync.png + hide: false + bin: + name: Bin + url: https://bin.tellserv.fr + icon: https://microbin.eu/img/logo-square.png + hide: false diff --git a/stacks/glance/config/secrets/plex-token.txt b/stacks/glance/config/secrets/plex-token.txt new file mode 100644 index 0000000..eb93dbe --- /dev/null +++ b/stacks/glance/config/secrets/plex-token.txt @@ -0,0 +1 @@ +REPLACE_WITH_PLEX_TOKEN diff --git a/stacks/glance/container-builder/Dockerfile b/stacks/glance/container-builder/Dockerfile new file mode 100644 index 0000000..5c0aee2 --- /dev/null +++ b/stacks/glance/container-builder/Dockerfile @@ -0,0 +1,9 @@ +FROM python:3.11-slim + +WORKDIR /app + +COPY generate_containers_block.py . 
+ +RUN pip install docker pyyaml requests beautifulsoup4 + +CMD ["python3", "generate_containers_block.py"] diff --git a/stacks/glance/container-builder/generate_containers_block.py b/stacks/glance/container-builder/generate_containers_block.py new file mode 100644 index 0000000..0b86d85 --- /dev/null +++ b/stacks/glance/container-builder/generate_containers_block.py @@ -0,0 +1,124 @@ +#!/usr/bin/env python3 +import docker +import yaml +import os +import json +import requests +from bs4 import BeautifulSoup + +client = docker.from_env() +containers = client.containers.list() +output = {} + +print(f"[•] Détection de {len(containers)} conteneur(s)...\n") + +override_path = "/app/config/icon_overrides.json" +overrides = {} + +# Charger les overrides si présents +if os.path.exists(override_path): + with open(override_path, "r", encoding="utf-8") as f: + overrides = json.load(f) + +def extract_image_name(image): + name = image.split("/")[-1] + return name.split(":")[0] if ":" in name else name + +def find_favicon(url, project_name, image_name): + print(f"[→] Recherche favicon pour {url}") + + # Override manuel ? 
+ if project_name in overrides and overrides[project_name]: + print(f"[✓] Favicon forcé depuis override : {overrides[project_name]}") + return overrides[project_name] + + icon_url = None + try: + r = requests.get(url, timeout=5) + r.raise_for_status() + soup = BeautifulSoup(r.text, 'html.parser') + icons = soup.find_all("link", rel=lambda x: x and 'icon' in x.lower()) + if icons: + href = icons[0].get("href") + if href: + icon_url = href if href.startswith("http") else url.rstrip("/") + "/" + href.lstrip("/") + print(f"[✓] Favicon trouvé via HTML : {icon_url}") + return icon_url + else: + print("[✗] Aucun favicon trouvé dans HTML") + except Exception: + print(f"[✗] Impossible de charger HTML depuis {url}") + + test_favicon = f"{url.rstrip('/')}/favicon.ico" + try: + r = requests.get(test_favicon, timeout=5) + if r.ok: + print(f"[✓] Favicon trouvé via /favicon.ico : {test_favicon}") + return test_favicon + except: + pass + + github_icon = f"https://raw.githubusercontent.com/selfhst/icons/refs/heads/main/png/{project_name}.png" + try: + r = requests.get(github_icon, timeout=5) + if r.ok: + print(f"[✓] Favicon fallback via GitHub (project_name) : {github_icon}") + return github_icon + except: + print(f"[✗] Pas d’icône GitHub pour {project_name}") + + github_icon = f"https://raw.githubusercontent.com/selfhst/icons/refs/heads/main/png/{image_name}.png" + try: + r = requests.get(github_icon, timeout=5) + if r.ok: + print(f"[✓] Favicon fallback via GitHub (image) : {github_icon}") + return github_icon + except: + print(f"[✗] Pas d’icône GitHub pour {image_name}") + + print("[✗] Aucun favicon disponible\n") + return None + +for container in containers: + labels = container.labels + name = container.name + image_name = extract_image_name(container.image.tags[0]) if container.image.tags else container.image.short_id + project_name = labels.get("com.docker.compose.project", name).lower() + + print(f"[•] Analyse du conteneur : {name} (projet : {project_name})") + + domain = 
None + for key, value in labels.items(): + if key.startswith(f"traefik.http.routers.{project_name}-prod.rule") and "Host(`" in value: + domain = value.split("Host(`")[1].split("`)")[0] + break + + if domain: + url = f"https://{domain}" + icon = find_favicon(url, project_name, image_name) + + output[name] = { + "name": project_name.capitalize(), + "url": url, + "icon": icon or "mdi:web", + "hide": False + } + + if project_name not in overrides: + overrides[project_name] = "" + + print(f"[✓] Conteneur ajouté : {project_name} → {url}\n") + else: + print("[!] Aucune règle Traefik -prod trouvée pour ce conteneur.\n") + +# Générer fichiers +os.makedirs("/output", exist_ok=True) + +with open("/output/containers.yml", "w", encoding="utf-8") as f: + yaml.dump({"containers": output}, f, sort_keys=False) + +with open(override_path, "w", encoding="utf-8") as f: + json.dump(overrides, f, indent=2, ensure_ascii=False) + +print("✅ Fichier containers.yml généré : /output/containers.yml") +print(f"✅ Fichier overrides mis à jour : {override_path}") diff --git a/stacks/glance/container-builder/icon_overrides.json b/stacks/glance/container-builder/icon_overrides.json new file mode 100644 index 0000000..06acb16 --- /dev/null +++ b/stacks/glance/container-builder/icon_overrides.json @@ -0,0 +1,32 @@ +{ + "glance": "", + "paperless": "", + "librechat": "https://raw.githubusercontent.com/selfhst/icons/refs/heads/main/png/librechat.png", + "blog": "", + "larabouillere": "", + "yamtrack": "", + "photoprism": "", + "plex": "", + "stirlingpdf": "https://raw.githubusercontent.com/Stirling-Tools/Stirling-PDF/main/docs/stirling.png", + "etesync": "", + "crafty": "", + "bin": "https://microbin.eu/img/logo-square.png", + "beszel": "", + "audiobookshelf": "", + "gotify": "", + "clipcascade": "", + "freshrss": "", + "feedropolis": "https://raw.githubusercontent.com/selfhst/icons/refs/heads/main/png/rss-com.png", + "joal": "https://cdn.jsdelivr.net/gh/homarr-labs/dashboard-icons/png/joal.png", + 
"qbittorrent": "https://raw.githubusercontent.com/selfhst/icons/refs/heads/main/png/qbittorrent.png", + "kavita": "", + "whoogle": "", + "pingvin": "", + "vikunja": "", + "vaultwarden": "", + "mobilizon": "", + "random-draft": "", + "pc-builds": "", + "searxng": "", + "webdav": "" +} \ No newline at end of file diff --git a/stacks/glance/rss-builder/Dockerfile b/stacks/glance/rss-builder/Dockerfile new file mode 100644 index 0000000..9566ada --- /dev/null +++ b/stacks/glance/rss-builder/Dockerfile @@ -0,0 +1,7 @@ +FROM python:3.11-alpine + +WORKDIR /app + +COPY generate_rss.py . + +CMD ["python3", "generate_rss.py"] diff --git a/stacks/glance/rss-builder/generate_rss.py b/stacks/glance/rss-builder/generate_rss.py new file mode 100644 index 0000000..f19ce18 --- /dev/null +++ b/stacks/glance/rss-builder/generate_rss.py @@ -0,0 +1,37 @@ +#!/usr/bin/env python3 +import re +from datetime import datetime +from xml.sax.saxutils import escape + +rss_path = "/rss/index.xml" +md_path = "/updates/updates.md" + +with open(md_path, "r", encoding="utf-8") as f: + content = f.read() + +entries = re.findall(r"## (\d{4}-\d{2}-\d{2}) - (.+?)\n(.+?)(?=\n## |\Z)", content, re.DOTALL) + +rss_items = "" +for date_str, title, description in entries: + pubdate = datetime.strptime(date_str, "%Y-%m-%d").strftime("%a, %d %b %Y 12:00:00 GMT") + rss_items += f""" + {escape(title.strip())} + http://rss/index.xml#{escape(title.strip().replace(" ", "-").lower())} + {pubdate} + {escape(description.strip())} + \n""" + +rss = f""" + + + Updates serveur + http://rss/index.xml + Changelog des services Tellserv +{rss_items} + +""" + +with open(rss_path, "w", encoding="utf-8") as f: + f.write(rss) + +print("Flux RSS généré.") diff --git a/stacks/glance/rss/index.xml b/stacks/glance/rss/index.xml new file mode 100644 index 0000000..0a614c9 --- /dev/null +++ b/stacks/glance/rss/index.xml @@ -0,0 +1,44 @@ + + + + Updates serveur + http://rss/index.xml + Changelog des services Tellserv + + Changement 
d'infra : 2 instances Traefik, une par interface réseau (Interne / Externe). + http://rss/index.xml#changement-d'infra-:-2-instances-traefik,-une-par-interface-réseau-(interne-/-externe). + Sun, 01 Jun 2025 12:00:00 GMT + Une instance Traefik permet d'exposer sur *.tellserv.fr et l'autre sert pour les services externes afin de permettre un meilleur cloisonnement. + + + Glance remplace Flame : Nouveau dashboard, détection auto des services, widget Plex. + http://rss/index.xml#glance-remplace-flame-:-nouveau-dashboard,-détection-auto-des-services,-widget-plex. + Sat, 24 May 2025 12:00:00 GMT + Ajout d’un script pour détecter automatiquement les services exposés, et intégration d’un widget Plex affichant les différentes bibliothèques et leur nombre de médias. + + + Nouvelle collection Plex : Films non vus choisis au hasard. + http://rss/index.xml#nouvelle-collection-plex-:-films-non-vus-choisis-au-hasard. + Fri, 23 May 2025 12:00:00 GMT + Une collection aléatoire a été ajoutée sur Plex, permettant de voir un film non vu encore. + + + Relance des services restants : Minecraft, Photoprism... + http://rss/index.xml#relance-des-services-restants-:-minecraft,-photoprism... + Fri, 09 May 2025 12:00:00 GMT + Les derniers services encore non fonctionnels, comme le serveur Minecraft, sont relancés. + + + Notifications via Gotify : Mise en place d’un système d’alertes centralisé. + http://rss/index.xml#notifications-via-gotify-:-mise-en-place-d’un-système-d’alertes-centralisé. + Wed, 07 May 2025 12:00:00 GMT + Mise en place du système de notifications centralisé avec Gotify. + + + Migration vers Proxmox : Serveur déplacé dans une VM Proxmox. + http://rss/index.xml#migration-vers-proxmox-:-serveur-déplacé-dans-une-vm-proxmox. + Tue, 06 May 2025 12:00:00 GMT + Migration complète du serveur principal dans une machine virtuelle sous Proxmox. 
+ + + diff --git a/stacks/glance/updates/updates.md b/stacks/glance/updates/updates.md new file mode 100644 index 0000000..4e55191 --- /dev/null +++ b/stacks/glance/updates/updates.md @@ -0,0 +1,20 @@ +# Mises à jour du serveur + +## 2025-06-01 - Changement d'infra : 2 instances Traefik, une par interface réseau (Interne / Externe). +Une instance Traefik permet d'exposer sur *.tellserv.fr et l'autre sert pour les services externes afin de permettre un meilleur cloisonnement. + +## 2025-05-24 - Glance remplace Flame : Nouveau dashboard, détection auto des services, widget Plex. +Ajout d’un script pour détecter automatiquement les services exposés, et intégration d’un widget Plex affichant les différentes bibliothèques et leur nombre de médias. + +## 2025-05-23 - Nouvelle collection Plex : Films non vus choisis au hasard. +Une collection aléatoire a été ajoutée sur Plex, permettant de voir un film non vu encore. + +## 2025-05-09 - Relance des services restants : Minecraft, Photoprism... +Les derniers services encore non fonctionnels, comme le serveur Minecraft, sont relancés. + +## 2025-05-07 - Notifications via Gotify : Mise en place d’un système d’alertes centralisé. +Mise en place du système de notifications centralisé avec Gotify. + +## 2025-05-06 - Migration vers Proxmox : Serveur déplacé dans une VM Proxmox. +Migration complète du serveur principal dans une machine virtuelle sous Proxmox. 
+ diff --git a/stacks/gotify/compose.yml b/stacks/gotify/compose.yml new file mode 100644 index 0000000..0ac3568 --- /dev/null +++ b/stacks/gotify/compose.yml @@ -0,0 +1,34 @@ +services: + gotify: + image: gotify/server:2.6.1 + container_name: gotify + environment: + - TZ=Europe/Paris + restart: unless-stopped + volumes: + - ./data:/app/data + networks: + - traefik_network + labels: + - "traefik.enable=true" + # Local + - "traefik.http.routers.${COMPOSE_PROJECT_NAME}-local.rule=Host(`${COMPOSE_PROJECT_NAME}.local.tellserv.fr`)" + - "traefik.http.routers.${COMPOSE_PROJECT_NAME}-local.entryPoints=local" + - "traefik.http.routers.${COMPOSE_PROJECT_NAME}-local.tls.certresolver=cloudflare-local" + - "traefik.http.routers.${COMPOSE_PROJECT_NAME}-local.tls=true" + # Production + - "traefik.http.routers.${COMPOSE_PROJECT_NAME}-prod.rule=Host(`${COMPOSE_PROJECT_NAME}.tellserv.fr`)" + - "traefik.http.routers.${COMPOSE_PROJECT_NAME}-prod.entryPoints=websecure" + - "traefik.http.routers.${COMPOSE_PROJECT_NAME}-prod.tls=true" + - "traefik.http.routers.${COMPOSE_PROJECT_NAME}-prod.tls.certResolver=cloudflare" + # Port interne du service + - "traefik.http.services.${COMPOSE_PROJECT_NAME}.loadbalancer.server.port=80" + + # Flame Dashboard Integration + + # Watchtower Auto-Update + - "com.centurylinklabs.watchtower.enable=true" + +networks: + traefik_network: + external: true diff --git a/stacks/headscale/compose.yml b/stacks/headscale/compose.yml new file mode 100644 index 0000000..63d1895 --- /dev/null +++ b/stacks/headscale/compose.yml @@ -0,0 +1,52 @@ +services: + tailscale: + image: headscale/headscale:0.25 + container_name: tailscale + restart: always + environment: + - TZ=Europe/Paris + volumes: + - ./conf:/etc/headscale + - ./data:/var/lib/headscale + entrypoint: headscale serve + networks: + - traefik_network + labels: + - traefik.enable=true + - traefik.docker.network=traefik_network + # Router vers tailscale.tellserv.fr + - 
traefik.http.routers.tailscale.rule=Host(`tailscale.tellserv.fr`) + - traefik.http.routers.tailscale.entryPoints=websecure + - traefik.http.routers.tailscale.tls=true + - traefik.http.routers.tailscale.tls.certResolver=cloudflare + - traefik.http.services.tailscale.loadbalancer.server.port=8080 + - traefik.http.services.tailscale.loadbalancer.server.scheme=http + # Middleware CORS pour l’admin + - traefik.http.middlewares.headscale-cors.headers.accesscontrolallowmethods=GET,POST,PUT,PATCH,DELETE,OPTIONS + - traefik.http.middlewares.headscale-cors.headers.accesscontrolallowheaders=* + - traefik.http.middlewares.headscale-cors.headers.accesscontrolalloworiginlist=https://headscale.local.tellserv.fr + - traefik.http.middlewares.headscale-cors.headers.accesscontrolmaxage=100 + - traefik.http.middlewares.headscale-cors.headers.addvaryheader=true + - traefik.http.routers.tailscale.middlewares=headscale-cors + # UDP ports + - traefik.udp.services.tailscale-udp-41641.loadbalancer.server.port=41641 + - traefik.udp.services.tailscale-udp-3478.loadbalancer.server.port=3478 + headscale: + image: goodieshq/headscale-admin:0.25 + container_name: headscale + restart: unless-stopped + networks: + - traefik_network + labels: + - traefik.enable=true + - traefik.docker.network=traefik_network + # Router vers headscale.local.tellserv.fr + - traefik.http.routers.headscale.rule=Host(`headscale.local.tellserv.fr`) + - traefik.http.routers.headscale.entryPoints=local + - traefik.http.routers.headscale.tls=true + - traefik.http.routers.headscale.tls.certResolver=cloudflare-local + - traefik.http.services.headscale.loadbalancer.server.port=80 + - traefik.http.services.headscale.loadbalancer.server.scheme=http +networks: + traefik_network: + external: true diff --git a/stacks/headscale/conf/config.yaml b/stacks/headscale/conf/config.yaml new file mode 100644 index 0000000..694ddda --- /dev/null +++ b/stacks/headscale/conf/config.yaml @@ -0,0 +1,72 @@ +server_url: 
https://tailscale.tellserv.fr + +listen_addr: 0.0.0.0:8080 +metrics_listen_addr: 0.0.0.0:9090 +grpc_listen_addr: 0.0.0.0:50443 +grpc_allow_insecure: false + +noise: + private_key_path: /var/lib/headscale/noise_private.key + +prefixes: + v6: fd7a:115c:a1e0::/48 + v4: 100.64.0.0/10 + allocation: sequential + +derp: + server: + enabled: false + region_id: 999 + region_code: "headscale" + region_name: "Headscale Embedded DERP" + stun_listen_addr: "0.0.0.0:3478" + private_key_path: /var/lib/headscale/derp_server_private.key + automatically_add_embedded_derp_region: true + ipv4: 1.2.3.4 + ipv6: 2001:db8::1 + urls: + - https://controlplane.tailscale.com/derpmap/default + paths: [] + auto_update_enabled: true + update_frequency: 24h + +disable_check_updates: false +ephemeral_node_inactivity_timeout: 30m + +database: + type: sqlite + sqlite: + path: /var/lib/headscale/db.sqlite + write_ahead_log: true + +acme_url: https://acme-v02.api.letsencrypt.org/directory +acme_email: "" +tls_letsencrypt_hostname: "" +tls_letsencrypt_cache_dir: /var/lib/headscale/cache +tls_letsencrypt_challenge_type: HTTP-01 +tls_letsencrypt_listen: ":http" +tls_cert_path: "" +tls_key_path: "" + +log: + format: text + level: info + +policy: + path: "" + +dns: + nameservers: + global: + - 100.64.0.2 + search_domains: [] + magic_dns: false + base_domain: example.com + +unix_socket: /var/run/headscale/headscale.sock +unix_socket_permission: "0770" + +logtail: + enabled: false + +randomize_client_port: false diff --git a/stacks/joal/compose.yml b/stacks/joal/compose.yml new file mode 100644 index 0000000..830af02 --- /dev/null +++ b/stacks/joal/compose.yml @@ -0,0 +1,35 @@ +services: + joal: + image: anthonyraymond/joal:3.1.0 + container_name: joal + restart: unless-stopped + volumes: + - /mnt/storage/joal-conf:/data + command: + - --joal-conf=/data + - --spring.main.web-environment=true + - --server.port=8383 + - --joal.ui.path.prefix=joal + - --joal.ui.secret-token=${JOAL_SECRET_TOKEN} + networks: + - 
traefik_network + labels: + - traefik.enable=true + # Local + - traefik.http.routers.${COMPOSE_PROJECT_NAME}-local.rule=Host(`${COMPOSE_PROJECT_NAME}.local.tellserv.fr`) + - traefik.http.routers.${COMPOSE_PROJECT_NAME}-local.entryPoints=local + - "traefik.http.routers.${COMPOSE_PROJECT_NAME}-local.tls.certresolver=cloudflare-local" + - traefik.http.routers.${COMPOSE_PROJECT_NAME}-local.tls=true + # Production + - traefik.http.routers.${COMPOSE_PROJECT_NAME}-prod.rule=Host(`${COMPOSE_PROJECT_NAME}.tellserv.fr`) + - traefik.http.routers.${COMPOSE_PROJECT_NAME}-prod.entryPoints=websecure + - traefik.http.routers.${COMPOSE_PROJECT_NAME}-prod.tls=true + - traefik.http.routers.${COMPOSE_PROJECT_NAME}-prod.tls.certResolver=cloudflare + - traefik.http.routers.${COMPOSE_PROJECT_NAME}-prod.middlewares=tinyauth + - traefik.http.services.${COMPOSE_PROJECT_NAME}.loadbalancer.server.port=8383 + # Flame Dashboard Integration + # Watchtower Auto-Update + - com.centurylinklabs.watchtower.enable=true +networks: + traefik_network: + external: true diff --git a/stacks/kavita/Dockerfile b/stacks/kavita/Dockerfile new file mode 100644 index 0000000..cbbc530 --- /dev/null +++ b/stacks/kavita/Dockerfile @@ -0,0 +1,39 @@ +FROM golang:1.21 + +# Installation des dépendances système +RUN apt-get update && apt-get install -y \ + bash \ + gnupg \ + software-properties-common \ + python3 \ + python3-pip \ + wget \ + git \ + build-essential \ + libmupdf-dev \ + poppler-utils \ + && rm -rf /var/lib/apt/lists/* + +# Compiler cbconvert depuis les sources avec la bonne commande +RUN CGO_ENABLED=1 go install github.com/gen2brain/cbconvert/cmd/cbconvert@latest + +# Installation de F2 via Go (dernière version) +RUN go install github.com/ayoisaiah/f2/v2/cmd/f2@latest + +# Ajout du PATH Go +ENV PATH="/root/go/bin:${PATH}" + +# Création des répertoires de travail +WORKDIR /app +COPY kavita_script.py /app/ + +# Création d'un script wrapper +RUN echo '#!/bin/bash\n\ +python3 /app/kavita_script.py\n' > 
/app/entrypoint.sh && \ +chmod +x /app/entrypoint.sh + +# Création du point de montage pour les volumes +VOLUME ["/mnt/storage/kavita"] + +# Exécution du script +ENTRYPOINT ["/app/entrypoint.sh"] \ No newline at end of file diff --git a/stacks/kavita/compose.yml b/stacks/kavita/compose.yml new file mode 100644 index 0000000..61a1875 --- /dev/null +++ b/stacks/kavita/compose.yml @@ -0,0 +1,46 @@ +services: + kavita: + image: jvmilazz0/kavita:0.8.4 + container_name: kavita + volumes: + - /mnt/storage/kavita:/books + - ./data:/kavita/config + environment: + - TZ=Europe/Paris + networks: + - traefik_network + restart: unless-stopped + labels: + - "traefik.enable=true" + - "traefik.http.routers.${COMPOSE_PROJECT_NAME}-local.rule=Host(`${COMPOSE_PROJECT_NAME}.local.tellserv.fr`)" + - "traefik.http.routers.${COMPOSE_PROJECT_NAME}-local.entryPoints=local" + - "traefik.http.routers.${COMPOSE_PROJECT_NAME}-local.tls.certresolver=cloudflare-local" + - "traefik.http.routers.${COMPOSE_PROJECT_NAME}-local.tls=true" + - "traefik.http.routers.${COMPOSE_PROJECT_NAME}-prod.rule=Host(`${COMPOSE_PROJECT_NAME}.tellserv.fr`)" + - "traefik.http.routers.${COMPOSE_PROJECT_NAME}-prod.entryPoints=websecure" + - "traefik.http.routers.${COMPOSE_PROJECT_NAME}-prod.tls=true" + - "traefik.http.routers.${COMPOSE_PROJECT_NAME}-prod.tls.certResolver=cloudflare" + - "traefik.http.services.${COMPOSE_PROJECT_NAME}.loadbalancer.server.port=5000" + - "com.centurylinklabs.watchtower.enable=true" + + kavita-processor: + build: + context: . 
+ dockerfile: Dockerfile + container_name: kavita-watcher + volumes: + - /mnt/storage/kavita:/mnt/storage/kavita + environment: + - TZ=Europe/Paris + logging: + driver: "json-file" + options: + max-size: "10m" + max-file: "3" + networks: + - traefik_network + restart: unless-stopped + +networks: + traefik_network: + external: true diff --git a/stacks/kavita/kavita_script.py b/stacks/kavita/kavita_script.py new file mode 100644 index 0000000..18d9c00 --- /dev/null +++ b/stacks/kavita/kavita_script.py @@ -0,0 +1,526 @@ +#!/usr/bin/env python3 +import os +import shutil +import subprocess +import logging +import time +import zipfile +import tempfile +from pathlib import Path +from datetime import datetime, timedelta +import threading + +# Configuration du logging +logging.basicConfig( + level=logging.INFO, + format='%(asctime)s - %(levelname)s - %(message)s' +) + +# Chemins de base +BASE_PATH = "/mnt/storage/kavita" +DOWNLOAD_DIR = f"{BASE_PATH}/download" +TO_CONVERT_DIR = f"{BASE_PATH}/to_convert" +CBZ_CONVERT_DIR = f"{BASE_PATH}/cbz_convert" + +# Chemins de destination +MANGA_DEST = f"{BASE_PATH}/scans/Mangas" +COMICS_DEST = f"{BASE_PATH}/scans/Comics" +BD_DEST = f"{BASE_PATH}/scans/BD" + +# Chemins source après conversion +MANGA_SRC = f"{CBZ_CONVERT_DIR}/manga" +COMICS_SRC = f"{CBZ_CONVERT_DIR}/comics" +BD_SRC = f"{CBZ_CONVERT_DIR}/bd" + +# Variables pour la détection de fichiers +detected_files = {} # Dictionnaire pour suivre les fichiers détectés et leur stabilité +folder_files = {} # Dictionnaire pour suivre les fichiers par dossier +files_to_process = [] # Liste des fichiers à traiter dans le cycle courant +processing_lock = threading.Lock() # Verrou pour éviter des traitements concurrents +conversion_in_progress = False # Indicateur pour suivre si une conversion est en cours + +# Extensions à ignorer +IGNORED_EXTENSIONS = ['.parts'] + +def run_command(command, cwd=None): + """Exécute une commande shell et affiche la sortie""" + logging.info(f"Exécution de la 
commande: {command}") + try: + result = subprocess.run( + command, + shell=True, + check=True, + cwd=cwd, + stdout=subprocess.PIPE, + stderr=subprocess.PIPE, + text=True + ) + if result.stdout: + logging.info(result.stdout) + return True + except subprocess.CalledProcessError as e: + logging.error(f"Erreur lors de l'exécution de la commande: {e}") + if e.stdout: + logging.info(e.stdout) + if e.stderr: + logging.error(e.stderr) + return False + +def pdf_to_cbz(pdf_path, output_dir): + """Convertit un PDF en CBZ en utilisant pdftoppm et ZIP""" + try: + # Obtenir le nom de base du fichier (sans extension) + pdf_name = os.path.basename(pdf_path) + base_name = os.path.splitext(pdf_name)[0] + output_cbz = os.path.join(output_dir, f"{base_name}.cbz") + + logging.info(f"Conversion du PDF: {pdf_name} en CBZ") + + # Créer un répertoire temporaire pour les images extraites + with tempfile.TemporaryDirectory() as temp_dir: + # Convertir le PDF en images avec pdftoppm (partie de poppler-utils) + # Traitement page par page avec une qualité réduite pour éviter les problèmes de mémoire + pdftoppm_cmd = f"pdftoppm -jpeg -r 150 '{pdf_path}' '{temp_dir}/page'" + if not run_command(pdftoppm_cmd): + logging.error(f"Échec de l'extraction des images du PDF: {pdf_name}") + return False + + # Vérifier que des images ont été extraites + image_files = sorted([f for f in os.listdir(temp_dir) if f.endswith(('.jpg', '.jpeg'))]) + if not image_files: + logging.error(f"Aucune image extraite du PDF: {pdf_name}") + return False + + logging.info(f"Nombre de pages extraites du PDF: {len(image_files)}") + + # Créer un fichier CBZ (ZIP) contenant les images + with zipfile.ZipFile(output_cbz, 'w') as zipf: + for img_file in image_files: + img_path = os.path.join(temp_dir, img_file) + zipf.write(img_path, arcname=img_file) + + # Vérifier que le CBZ a été créé + if not os.path.exists(output_cbz): + logging.error(f"Échec de la création du CBZ: {output_cbz}") + return False + + logging.info(f"Conversion 
réussie: {pdf_name} -> {output_cbz}") + return True + + except Exception as e: + logging.error(f"Erreur lors de la conversion du PDF {pdf_path}: {e}") + return False + +def convert_non_pdf_files(file_path, output_dir): + """Convertit un fichier non-PDF avec cbconvert""" + file_name = os.path.basename(file_path) + logging.info(f"Conversion du fichier non-PDF: {file_name}") + + cmd = f"cbconvert convert --no-nonimage --outdir '{output_dir}' --quality 85 '{file_path}'" + success = run_command(cmd) + + # Vérifier si la conversion a réussi en cherchant le fichier de sortie + base_name = os.path.splitext(os.path.basename(file_path))[0] + expected_output = os.path.join(output_dir, f"{base_name}.cbz") + + if not os.path.exists(expected_output): + logging.error(f"La conversion a échoué, aucun fichier de sortie trouvé pour: {file_name}") + return False + + logging.info(f"Conversion réussie: {file_name} -> {expected_output}") + return True + +def convert_files(): + """Convertit les fichiers en CBZ""" + global files_to_process + + logging.info("Début de la conversion des fichiers...") + + # Copier la liste des fichiers à traiter pour ce cycle + with processing_lock: + current_files = files_to_process.copy() + logging.info(f"Traitement de {len(current_files)} fichiers dans ce cycle") + + # Si aucun fichier à traiter + if not current_files: + return True + + # Collecter tous les fichiers à traiter + all_files = [] + for file_path in current_files: + if os.path.exists(file_path) and os.path.isfile(file_path): + all_files.append(file_path) + + # Structure pour suivre la progression + file_stats = { + 'total': len(all_files), + 'success': 0, + 'failed': 0 + } + + # Convertir chaque fichier individuellement + converted_files = [] + + for file_path in all_files: + # Déterminer le répertoire de sortie + rel_path = os.path.relpath(file_path, TO_CONVERT_DIR) + output_dir = os.path.join(CBZ_CONVERT_DIR, os.path.dirname(rel_path)) + + # Créer le dossier de sortie si nécessaire + 
os.makedirs(output_dir, exist_ok=True) + + # Convertir le fichier en fonction de son type + success = False + if file_path.lower().endswith('.pdf'): + # Utiliser notre fonction personnalisée pour convertir les PDF + logging.info(f"Utilisation de la méthode personnalisée pour le PDF: {file_path}") + success = pdf_to_cbz(file_path, output_dir) + else: + # Utiliser cbconvert pour les autres formats + success = convert_non_pdf_files(file_path, output_dir) + + if success: + converted_files.append(file_path) + file_stats['success'] += 1 + else: + file_stats['failed'] += 1 + logging.error(f"Échec de la conversion du fichier: {file_path}") + + # Mettre à jour la liste des fichiers à traiter (retirer ceux qui ont été convertis) + with processing_lock: + for file_path in converted_files: + if file_path in files_to_process: + files_to_process.remove(file_path) + + # Rapport de conversion + logging.info(f"Conversion terminée: {file_stats['success']}/{file_stats['total']} fichiers convertis avec succès") + + # Si tous les fichiers ont été convertis avec succès, retourner True + return file_stats['failed'] == 0 + +def clean_processed_files(processed_files): + """Supprime uniquement les fichiers qui ont été traités avec succès""" + logging.info(f"Nettoyage de {len(processed_files)} fichiers traités") + + for file_path in processed_files: + try: + if os.path.exists(file_path): + os.remove(file_path) + logging.info(f"Fichier supprimé après traitement: {file_path}") + + # Supprimer les dossiers parents vides + parent_dir = os.path.dirname(file_path) + while parent_dir.startswith(TO_CONVERT_DIR) and os.path.exists(parent_dir) and not os.listdir(parent_dir): + os.rmdir(parent_dir) + logging.info(f"Dossier vide supprimé: {parent_dir}") + parent_dir = os.path.dirname(parent_dir) + except Exception as e: + logging.error(f"Erreur lors de la suppression du fichier {file_path}: {e}") + +def clean_to_convert_directory(): + """Supprime tous les fichiers du répertoire to_convert""" + 
logging.info(f"Nettoyage du répertoire de conversion: {TO_CONVERT_DIR}") + try: + if os.path.exists(TO_CONVERT_DIR): + for item in os.listdir(TO_CONVERT_DIR): + path = os.path.join(TO_CONVERT_DIR, item) + if os.path.isdir(path): + shutil.rmtree(path) + else: + os.remove(path) + return True + except Exception as e: + logging.error(f"Erreur lors du nettoyage du répertoire: {e}") + return False + +def rename_and_move(source_dir, dest_dir, category): + """Renomme et déplace les fichiers d'une catégorie spécifique""" + logging.info(f"Traitement de la catégorie: {category}") + + # Vérifier si le répertoire source existe + if not os.path.exists(source_dir): + logging.warning(f"Le répertoire source n'existe pas: {source_dir}") + return True # Non critique, continuez + + # Vérifier si le répertoire de destination existe, sinon le créer + if not os.path.exists(dest_dir): + os.makedirs(dest_dir, exist_ok=True) + logging.info(f"Répertoire de destination créé: {dest_dir}") + + # Renommer les fichiers dans chaque sous-dossier + for item in os.listdir(source_dir): + item_path = os.path.join(source_dir, item) + if os.path.isdir(item_path): + # Utiliser f2 pour renommer les fichiers + cmd = 'f2 -r "{{p}} v{%02d}" -e -x' + if not run_command(cmd, cwd=item_path): + logging.error(f"Échec du renommage dans: {item_path}") + + # Déplacer les dossiers vers la destination + for item in os.listdir(source_dir): + item_path = os.path.join(source_dir, item) + if os.path.isdir(item_path): + dest_path = os.path.join(dest_dir, item) + try: + # Si le dossier de destination existe déjà, fusionner + if os.path.exists(dest_path): + for file in os.listdir(item_path): + file_src = os.path.join(item_path, file) + file_dest = os.path.join(dest_path, file) + shutil.move(file_src, file_dest) + os.rmdir(item_path) + else: + # Sinon, déplacer le dossier entier + shutil.move(item_path, dest_dir) + logging.info(f"Déplacé: {item} vers {dest_dir}") + except Exception as e: + logging.error(f"Erreur lors du 
déplacement de {item}: {e}") + + return True + +def process_convert_directory(): + """Traite les fichiers dans le répertoire to_convert""" + global conversion_in_progress, files_to_process + + with processing_lock: + if conversion_in_progress: + return + if not files_to_process: + return + conversion_in_progress = True + current_batch = files_to_process.copy() + + try: + logging.info("Début du traitement des fichiers à convertir...") + + # 1. Convertir les fichiers + conversion_success = convert_files() + + # 2. Si la conversion a réussi, traiter les mangas, comics et BD + if conversion_success: + if rename_and_move(MANGA_SRC, MANGA_DEST, "Manga"): + if rename_and_move(COMICS_SRC, COMICS_DEST, "Comics"): + rename_and_move(BD_SRC, BD_DEST, "BD") + + # 3. Nettoyer uniquement les fichiers traités avec succès + clean_processed_files(current_batch) + + logging.info("Traitement terminé avec succès") + else: + logging.error("Échec de la conversion, certains fichiers n'ont pas été traités") + + except Exception as e: + logging.error(f"Erreur pendant le traitement: {e}") + finally: + with processing_lock: + conversion_in_progress = False + +def should_ignore_file(file_path): + """Détermine si un fichier doit être ignoré""" + # Vérifier si l'extension du fichier est dans la liste des extensions à ignorer + for ext in IGNORED_EXTENSIONS: + if file_path.lower().endswith(ext): + return True + + # Vérifier si le fichier contient .parts dans son chemin (cas des dossiers temporaires) + if '.parts' in file_path: + return True + + return False + +def get_folder_path(file_path): + """Obtient le chemin du dossier parent d'un fichier""" + return os.path.dirname(file_path) + +def update_folder_files(): + """Met à jour le dictionnaire des fichiers par dossier""" + folder_files.clear() + + for file_path in detected_files: + folder = get_folder_path(file_path) + if folder not in folder_files: + folder_files[folder] = [] + folder_files[folder].append(file_path) + +def 
check_folder_stability(folder): + """Vérifie si tous les fichiers d'un dossier sont stables""" + if folder not in folder_files: + return False + + for file_path in folder_files[folder]: + if file_path in detected_files and not detected_files[file_path]['stable']: + return False + + return True + +def move_folder_to_convert(folder): + """Déplace un dossier stable vers to_convert et retourne la liste des fichiers déplacés""" + if not os.path.exists(folder): + return [] + + # Créer le dossier de destination s'il n'existe pas + if not os.path.exists(TO_CONVERT_DIR): + os.makedirs(TO_CONVERT_DIR, exist_ok=True) + + # Créer le sous-dossier de destination en conservant la structure + rel_path = os.path.relpath(folder, DOWNLOAD_DIR) + dest_folder = os.path.join(TO_CONVERT_DIR, rel_path) + os.makedirs(os.path.dirname(dest_folder), exist_ok=True) + + moved_files = [] + try: + # Déplacer tous les fichiers stables + for file_path in folder_files[folder]: + if file_path in detected_files and detected_files[file_path]['stable']: + dest_file = os.path.join(TO_CONVERT_DIR, os.path.relpath(file_path, DOWNLOAD_DIR)) + os.makedirs(os.path.dirname(dest_file), exist_ok=True) + shutil.move(file_path, dest_file) + moved_files.append(dest_file) + logging.info(f"Déplacé le fichier stable vers to_convert: {file_path}") + # Supprimer le fichier de notre suivi + del detected_files[file_path] + + # Supprimer le dossier source s'il est vide + if os.path.exists(folder) and not os.listdir(folder): + os.rmdir(folder) + logging.info(f"Dossier source supprimé car vide: {folder}") + + return moved_files + except Exception as e: + logging.error(f"Erreur lors du déplacement du dossier {folder} vers to_convert: {e}") + return [] + +def process_stable_folders(): + """Traite les dossiers stables en les déplaçant vers to_convert""" + global conversion_in_progress + folders_to_process = [] + + # Si une conversion est en cours, ne pas déplacer de nouveaux fichiers + with processing_lock: + if 
conversion_in_progress: + logging.info("Une conversion est en cours, report du déplacement des dossiers stables") + return + + # Identifier les dossiers stables + for folder in folder_files: + if check_folder_stability(folder): + folders_to_process.append(folder) + + # Si aucun dossier stable, sortir + if not folders_to_process: + return + + # Déplacer les dossiers stables vers to_convert + moved_files = [] + for folder in folders_to_process: + files = move_folder_to_convert(folder) + if files: + moved_files.extend(files) + + # Mettre à jour la liste des fichiers à traiter + with processing_lock: + files_to_process.extend(moved_files) + + logging.info(f"Ajout de {len(moved_files)} fichiers à la liste de traitement") + +def scan_download_directory(): + """Scanne le répertoire de téléchargement pour détecter les nouveaux fichiers et leur stabilité""" + if not os.path.exists(DOWNLOAD_DIR): + os.makedirs(DOWNLOAD_DIR, exist_ok=True) + logging.info(f"Répertoire de téléchargement créé: {DOWNLOAD_DIR}") + return + + current_time = datetime.now() + + # Parcourir récursivement tous les fichiers dans le répertoire de téléchargement + for root, dirs, files in os.walk(DOWNLOAD_DIR): + for file in files: + file_path = os.path.join(root, file) + + # Ignorer les fichiers spécifiés + if should_ignore_file(file_path): + continue + + # Si le fichier n'est pas déjà dans notre liste + if file_path not in detected_files: + file_size = os.path.getsize(file_path) + detected_files[file_path] = { + 'size': file_size, + 'time': current_time, + 'stable': False, + 'stable_count': 0 + } + logging.info(f"Nouveau fichier détecté: {file_path}") + else: + # Vérifier si la taille du fichier a changé + current_size = os.path.getsize(file_path) + if current_size != detected_files[file_path]['size']: + detected_files[file_path] = { + 'size': current_size, + 'time': current_time, + 'stable': False, + 'stable_count': 0 + } + logging.info(f"Fichier modifié: {file_path}") + elif not 
detected_files[file_path]['stable']: + # Incrémenter le compteur de stabilité + detected_files[file_path]['stable_count'] += 1 + + # Marquer comme stable si la taille n'a pas changé pendant plusieurs vérifications + # (ici, après 5 vérifications consécutives, soit environ 150 secondes avec un intervalle de 30s) + if detected_files[file_path]['stable_count'] >= 5: + detected_files[file_path]['stable'] = True + logging.info(f"Fichier stable: {file_path}") + + # Supprimer les entrées pour les fichiers qui n'existent plus + file_paths = set() + for root, dirs, files in os.walk(DOWNLOAD_DIR): + for file in files: + file_path = os.path.join(root, file) + if not should_ignore_file(file_path): + file_paths.add(file_path) + + deleted_files = [path for path in detected_files.keys() if path not in file_paths] + for path in deleted_files: + logging.info(f"Fichier supprimé ou déplacé: {path}") + del detected_files[path] + + # Mettre à jour la liste des fichiers par dossier + update_folder_files() + +def check_to_convert_has_files(): + """Vérifie si des fichiers sont prêts à être traités""" + with processing_lock: + return len(files_to_process) > 0 + +def main(): + """Fonction principale de surveillance""" + logging.info("Démarrage de la surveillance du répertoire de téléchargement...") + logging.info(f"Extensions ignorées: {IGNORED_EXTENSIONS}") + + # Créer les répertoires nécessaires + if not os.path.exists(TO_CONVERT_DIR): + os.makedirs(TO_CONVERT_DIR, exist_ok=True) + + while True: + try: + # 1. Scanner le répertoire de téléchargement + scan_download_directory() + + # 2. Traiter les dossiers stables (déplacer vers to_convert) + process_stable_folders() + + # 3. 
Si des fichiers sont dans to_convert et qu'aucune conversion n'est en cours + if check_to_convert_has_files() and not conversion_in_progress: + # Lancer le processus de conversion + threading.Thread(target=process_convert_directory, daemon=True).start() + + # Attendre 30 secondes avant la prochaine vérification + time.sleep(30) + + except Exception as e: + logging.error(f"Erreur durant la surveillance: {e}") + time.sleep(60) # Attendre en cas d'erreur + +if __name__ == "__main__": + main() \ No newline at end of file diff --git a/stacks/kopia/compose.yml b/stacks/kopia/compose.yml new file mode 100644 index 0000000..4861a4a --- /dev/null +++ b/stacks/kopia/compose.yml @@ -0,0 +1,46 @@ +services: + kopia: + image: kopia/kopia:0.22.0 + container_name: kopia + user: "0:0" + restart: unless-stopped + privileged: true + cap_add: + - SYS_ADMIN + security_opt: + - apparmor:unconfined + devices: + - /dev/fuse:/dev/fuse:rwm + command: + - server + - start + - --insecure + - --disable-csrf-token-checks + - --address=0.0.0.0:51515 + - --server-username=${KOPIA_SERVER_USERNAME} + - --server-password=${KOPIA_SERVER_PASSWORD} + volumes: + - /mnt/storage/kopia/tmp:/tmp:shared + - /mnt/storage/kopia/repository:/repository + - ./config:/app/config + - ./cache:/app/cache + - ./logs:/app/logs + - /:/data:ro + environment: + KOPIA_PASSWORD: ${KOPIA_PASSWORD} + TZ: Europe/Paris + USER: ${USER} + networks: + - traefik_network + labels: + - "traefik.enable=true" + # Accès local uniquement via Traefik + - "traefik.http.routers.${COMPOSE_PROJECT_NAME}-local.rule=Host(`${COMPOSE_PROJECT_NAME}.local.tellserv.fr`)" + - "traefik.http.routers.${COMPOSE_PROJECT_NAME}-local.entryPoints=local" + - "traefik.http.routers.${COMPOSE_PROJECT_NAME}-local.tls.certresolver=cloudflare-local" + - "traefik.http.routers.${COMPOSE_PROJECT_NAME}-local.tls=true" + - "traefik.http.services.${COMPOSE_PROJECT_NAME}.loadbalancer.server.port=51515" + +networks: + traefik_network: + external: true diff --git 
a/stacks/larabouillere/compose.yml b/stacks/larabouillere/compose.yml new file mode 100644 index 0000000..e37bb9f --- /dev/null +++ b/stacks/larabouillere/compose.yml @@ -0,0 +1,34 @@ +services: + ghost: + image: ghost:5 + container_name: ${COMPOSE_PROJECT_NAME}_ghost + restart: unless-stopped + environment: + url: https://${COMPOSE_PROJECT_NAME}.tellserv.fr + database__client: sqlite3 + database__connection__filename: /var/lib/ghost/content/data/ghost.db + volumes: + - ./ghost/content:/var/lib/ghost/content + networks: + - traefik_network + labels: + - traefik.enable=true + # --- Local + - traefik.http.routers.${COMPOSE_PROJECT_NAME}-local.rule=Host(`${COMPOSE_PROJECT_NAME}.local.tellserv.fr`) + - traefik.http.routers.${COMPOSE_PROJECT_NAME}-local.entryPoints=local + - "traefik.http.routers.${COMPOSE_PROJECT_NAME}-local.tls.certresolver=cloudflare-local" + - traefik.http.routers.${COMPOSE_PROJECT_NAME}-local.tls=true + # --- Production + - traefik.http.routers.${COMPOSE_PROJECT_NAME}-prod.rule=Host(`${COMPOSE_PROJECT_NAME}.tellserv.fr`) + - traefik.http.routers.${COMPOSE_PROJECT_NAME}-prod.entryPoints=websecure + - traefik.http.routers.${COMPOSE_PROJECT_NAME}-prod.tls=true + - traefik.http.routers.${COMPOSE_PROJECT_NAME}-prod.tls.certResolver=cloudflare + #- "traefik.http.routers.${COMPOSE_PROJECT_NAME}-prod.middlewares=tinyauth" + + - traefik.http.services.${COMPOSE_PROJECT_NAME}.loadbalancer.server.port=2368 + # --- Flame Dashboard Integration + # --- Watchtower Auto-Update + - com.centurylinklabs.watchtower.enable=true +networks: + traefik_network: + external: true diff --git a/stacks/loggifly/compose.yml b/stacks/loggifly/compose.yml new file mode 100644 index 0000000..cf7fffb --- /dev/null +++ b/stacks/loggifly/compose.yml @@ -0,0 +1,23 @@ +services: + loggifly: + image: ghcr.io/clemcer/loggifly:1.5.0 + container_name: loggifly + restart: unless-stopped + volumes: + - /var/run/docker.sock:/var/run/docker.sock:ro + environment: + # Notifications via Gotify 
(Apprise) + APPRISE_URL: gotify://gotify.local.tellserv.fr/AIgXWOJhf8o8McN + # Conteneurs surveillés + CONTAINERS: vaultwarden,audiobookshelf + # Mots-clés globaux + GLOBAL_KEYWORDS: error,failed login,password + GLOBAL_KEYWORDS_WITH_ATTACHMENT: critical + EXCLUDED_KEYWORDS: expired,icon + # Optionnel : réduction du spam + NOTIFICATION_COOLDOWN: 10 + ACTION_COOLDOWN: 300 + # Watchtower auto-update + labels: + - com.centurylinklabs.watchtower.enable=true +networks: {} diff --git a/stacks/mobilizon/compose.yml b/stacks/mobilizon/compose.yml new file mode 100644 index 0000000..af1c9bd --- /dev/null +++ b/stacks/mobilizon/compose.yml @@ -0,0 +1,44 @@ +services: + mobilizon: + user: "1000:1000" + restart: always + image: docker.io/framasoft/mobilizon + env_file: .env + depends_on: + - db + volumes: + - ./uploads:/var/lib/mobilizon/uploads + - ./tzdata:/var/lib/mobilizon/tzdata + networks: + - traefik_network + - mobilizon_internal + labels: + - "traefik.enable=true" + # Local + - "traefik.http.routers.mobilizon-local.rule=Host(`mobilizon.local.tellserv.fr`)" + - "traefik.http.routers.mobilizon-local.entryPoints=local" + - "traefik.http.routers.${COMPOSE_PROJECT_NAME}-local.tls.certresolver=cloudflare-local" + - "traefik.http.routers.mobilizon-local.tls=true" + # Production + - "traefik.http.routers.mobilizon-prod.rule=Host(`mobilizon.tellserv.fr`)" + - "traefik.http.routers.mobilizon-prod.entryPoints=websecure" + - "traefik.http.routers.mobilizon-prod.tls=true" + - "traefik.http.routers.mobilizon-prod.tls.certResolver=cloudflare" + # Port interne du service + - "traefik.http.services.mobilizon.loadbalancer.server.port=5005" + + db: + image: docker.io/postgis/postgis:15-3.4 + restart: always + env_file: .env + volumes: + - ./db:/var/lib/postgresql/data:z + networks: + - mobilizon_internal + +networks: + mobilizon_internal: + ipam: + driver: default + traefik_network: + external: true diff --git a/stacks/paperless/compose.yml b/stacks/paperless/compose.yml new file mode 
100644 index 0000000..1080f76 --- /dev/null +++ b/stacks/paperless/compose.yml @@ -0,0 +1,57 @@ +services: + paperless: + image: ghcr.io/paperless-ngx/paperless-ngx:2.14.7 + container_name: paperless + environment: + - TZ=Europe/Paris + - PAPERLESS_REDIS=redis://redis:6379 + - PAPERLESS_DBHOST=db + - PAPERLESS_DBNAME=paperless + - PAPERLESS_DBUSER=paperless + - PAPERLESS_DBPASS=paperless + - PAPERLESS_URL=https://paperless.tellserv.fr + volumes: + - /mnt/storage/paperless/data:/usr/src/paperless/data + - /mnt/storage/paperless/media:/usr/src/paperless/media + - /mnt/storage/paperless/export:/usr/src/paperless/export + - /mnt/storage/paperless/consume:/usr/src/paperless/consume + depends_on: + - db + - redis + networks: + - traefik_network + restart: unless-stopped + labels: + - "traefik.enable=true" + - "traefik.http.routers.paperless-prod.rule=Host(`paperless.tellserv.fr`)" + - "traefik.http.routers.paperless-prod.entryPoints=websecure" + - "traefik.http.routers.paperless-prod.tls=true" + - "traefik.http.routers.paperless-prod.tls.certResolver=cloudflare" + - "traefik.http.services.paperless.loadbalancer.server.port=8000" + - "com.centurylinklabs.watchtower.enable=true" + + db: + image: postgres:13 + container_name: paperless-db + environment: + - POSTGRES_DB=paperless + - POSTGRES_USER=paperless + - POSTGRES_PASSWORD=paperless + volumes: + - /mnt/storage/paperless/db:/var/lib/postgresql/data + networks: + - traefik_network + restart: unless-stopped + + redis: + image: redis:6 + container_name: paperless-redis + volumes: + - /mnt/storage/paperless/redis:/data + networks: + - traefik_network + restart: unless-stopped + +networks: + traefik_network: + external: true diff --git a/stacks/photoprism/compose.yml b/stacks/photoprism/compose.yml new file mode 100644 index 0000000..00f96c0 --- /dev/null +++ b/stacks/photoprism/compose.yml @@ -0,0 +1,91 @@ +services: + photoprism: + image: photoprism/photoprism:241021 + stop_grace_period: 10s + depends_on: + - mariadb + 
restart: unless-stopped + security_opt: + - seccomp:unconfined + - apparmor:unconfined + working_dir: "/photoprism" + volumes: + - "/mnt/storage/photos:/photoprism/import" + - "/mnt/storage/photoprism/originals:/photoprism/originals" + - "/mnt/storage/photoprism/storage:/photoprism/storage" + environment: + - PHOTOPRISM_DATABASE_DRIVER=mysql + - PHOTOPRISM_DATABASE_SERVER=mariadb:3306 + - PHOTOPRISM_DATABASE_NAME=photoprism + - PHOTOPRISM_DATABASE_USER=${MARIADB_USER} + - PHOTOPRISM_DATABASE_PASSWORD=${PHOTOPRISM_DATABASE_PASSWORD} + - PHOTOPRISM_ADMIN_USER=${PHOTOPRISM_ADMIN_USER} + - PHOTOPRISM_ADMIN_PASSWORD=${PHOTOPRISM_ADMIN_PASSWORD} + - PHOTOPRISM_AUTH_MODE=password + - PHOTOPRISM_SITE_URL=https://photoprism.tellserv.fr/ + - PHOTOPRISM_DISABLE_TLS=true + - PHOTOPRISM_ORIGINALS_LIMIT=5000 + - PHOTOPRISM_HTTP_COMPRESSION=gzip + - PHOTOPRISM_LOG_LEVEL=info + - PHOTOPRISM_READONLY=false + - PHOTOPRISM_EXPERIMENTAL=false + - PHOTOPRISM_DISABLE_CHOWN=false + - PHOTOPRISM_DISABLE_WEBDAV=false + - PHOTOPRISM_DISABLE_SETTINGS=false + - PHOTOPRISM_DISABLE_TENSORFLOW=false + - PHOTOPRISM_DISABLE_FACES=false + - PHOTOPRISM_DISABLE_CLASSIFICATION=false + - PHOTOPRISM_DISABLE_VECTORS=false + - PHOTOPRISM_DISABLE_RAW=false + - PHOTOPRISM_RAW_PRESETS=false + - PHOTOPRISM_JPEG_QUALITY=85 + - PHOTOPRISM_DETECT_NSFW=false + - PHOTOPRISM_UPLOAD_NSFW=true + - PHOTOPRISM_SITE_CAPTION=AI-Powered Photos App + - PHOTOPRISM_SITE_DESCRIPTION= + - PHOTOPRISM_SITE_AUTHOR= + networks: + - traefik_network + labels: + - "traefik.enable=true" + - "traefik.http.routers.${COMPOSE_PROJECT_NAME}-local.rule=Host(`${COMPOSE_PROJECT_NAME}.local.tellserv.fr`)" + - "traefik.http.routers.${COMPOSE_PROJECT_NAME}-local.entryPoints=local" + - "traefik.http.routers.${COMPOSE_PROJECT_NAME}-local.tls.certresolver=cloudflare-local" + - "traefik.http.routers.${COMPOSE_PROJECT_NAME}-local.tls=true" + - "traefik.http.routers.${COMPOSE_PROJECT_NAME}-prod.rule=Host(`${COMPOSE_PROJECT_NAME}.tellserv.fr`)" + - 
"traefik.http.routers.${COMPOSE_PROJECT_NAME}-prod.entryPoints=websecure" + - "traefik.http.routers.${COMPOSE_PROJECT_NAME}-prod.tls=true" + - "traefik.http.routers.${COMPOSE_PROJECT_NAME}-prod.tls.certResolver=cloudflare" + - "traefik.http.services.${COMPOSE_PROJECT_NAME}.loadbalancer.server.port=2342" + - "com.centurylinklabs.watchtower.enable=true" + + mariadb: + image: mariadb:11 + restart: unless-stopped + stop_grace_period: 5s + security_opt: + - seccomp:unconfined + - apparmor:unconfined + command: > + --innodb-buffer-pool-size=512M + --transaction-isolation=READ-COMMITTED + --character-set-server=utf8mb4 + --collation-server=utf8mb4_unicode_ci + --max-connections=512 + --innodb-rollback-on-timeout=OFF + --innodb-lock-wait-timeout=120 + volumes: + - ./database:/var/lib/mysql + environment: + - MARIADB_AUTO_UPGRADE=1 + - MARIADB_INITDB_SKIP_TZINFO=1 + - MARIADB_DATABASE=photoprism + - MARIADB_USER=${MARIADB_USER} + - MARIADB_PASSWORD=${MARIADB_PASSWORD} + - MARIADB_ROOT_PASSWORD=${MARIADB_ROOT_PASSWORD} + networks: + - traefik_network + +networks: + traefik_network: + external: true diff --git a/stacks/pingvin/compose.yml b/stacks/pingvin/compose.yml new file mode 100644 index 0000000..6268021 --- /dev/null +++ b/stacks/pingvin/compose.yml @@ -0,0 +1,35 @@ +services: + pingvin-share: + image: stonith404/pingvin-share + container_name: pingvin + restart: unless-stopped + environment: + - TRUST_PROXY=true + - TZ=Europe/Paris + volumes: + - "/mnt/storage/pingvin:/opt/app/backend/data" + - "/mnt/storage/pingvin/images:/opt/app/frontend/public/img" + networks: + - traefik_network + labels: + - "traefik.enable=true" + # Local + - "traefik.http.routers.${COMPOSE_PROJECT_NAME}-local.rule=Host(`${COMPOSE_PROJECT_NAME}.local.tellserv.fr`)" + - "traefik.http.routers.${COMPOSE_PROJECT_NAME}-local.entryPoints=local" + - "traefik.http.routers.${COMPOSE_PROJECT_NAME}-local.tls.certresolver=cloudflare-local" + - "traefik.http.routers.${COMPOSE_PROJECT_NAME}-local.tls=true" + 
# Production + - "traefik.http.routers.${COMPOSE_PROJECT_NAME}-prod.rule=Host(`${COMPOSE_PROJECT_NAME}.tellserv.fr`)" + - "traefik.http.routers.${COMPOSE_PROJECT_NAME}-prod.entryPoints=websecure" + - "traefik.http.routers.${COMPOSE_PROJECT_NAME}-prod.tls=true" + - "traefik.http.routers.${COMPOSE_PROJECT_NAME}-prod.tls.certResolver=cloudflare" + - "traefik.http.services.${COMPOSE_PROJECT_NAME}.loadbalancer.server.port=3000" + + # Flame Dashboard Integration + + # Watchtower Auto-Update + - "com.centurylinklabs.watchtower.enable=true" + +networks: + traefik_network: + external: true diff --git a/stacks/plex/compose.yml b/stacks/plex/compose.yml new file mode 100644 index 0000000..7f15afb --- /dev/null +++ b/stacks/plex/compose.yml @@ -0,0 +1,47 @@ +services: + plex: + image: lscr.io/linuxserver/plex:1.41.3 + container_name: plex + environment: + - PUID=1000 + - PGID=1000 + - TZ=Europe/Paris + - VERSION=docker + - PLEX_CLAIM=claim-FRzbiufBZ8swT2iK5Q7W + devices: + - /dev/dri:/dev/dri + volumes: + - /mnt/storage/plex/config:/config + - /mnt/storage/plex/tv:/tv + - /mnt/storage/plex/movies:/movies + networks: + - traefik_network + restart: unless-stopped + labels: + - traefik.enable=true + - traefik.http.routers.${COMPOSE_PROJECT_NAME}-local.rule=Host(`${COMPOSE_PROJECT_NAME}.local.tellserv.fr`) + - traefik.http.routers.${COMPOSE_PROJECT_NAME}-local.entryPoints=local + - traefik.http.routers.${COMPOSE_PROJECT_NAME}-local.tls.certresolver=cloudflare-local + - traefik.http.routers.${COMPOSE_PROJECT_NAME}-local.tls=true + - traefik.http.routers.${COMPOSE_PROJECT_NAME}-prod.rule=Host(`${COMPOSE_PROJECT_NAME}.tellserv.fr`) + - traefik.http.routers.${COMPOSE_PROJECT_NAME}-prod.entryPoints=websecure + - traefik.http.routers.${COMPOSE_PROJECT_NAME}-prod.tls=true + - traefik.http.routers.${COMPOSE_PROJECT_NAME}-prod.tls.certResolver=cloudflare + - traefik.http.services.${COMPOSE_PROJECT_NAME}.loadbalancer.server.port=32400 + - com.centurylinklabs.watchtower.enable=true + 
plexautolanguages: + image: journeyover/plex-auto-languages:main + environment: + - PLEX_URL=https://${COMPOSE_PROJECT_NAME}.tellserv.fr + - PLEX_TOKEN=${PLEX_TOKEN} + - TZ=Europe/Paris + volumes: + - ./config:/config + networks: + - traefik_network + restart: unless-stopped + labels: + - com.centurylinklabs.watchtower.enable=true +networks: + traefik_network: + external: true diff --git a/stacks/qbittorrent/compose.yml b/stacks/qbittorrent/compose.yml new file mode 100644 index 0000000..134855d --- /dev/null +++ b/stacks/qbittorrent/compose.yml @@ -0,0 +1,42 @@ +services: + qbittorrent: + image: trigus42/qbittorrentvpn:qbt4.6.7 + container_name: qbittorrent + environment: + - PUID=1000 + - PGID=1000 + - TZ=Europe/Paris + - VPN_ENABLED=yes + - VPN_TYPE=wireguard + - BIND_INTERFACE=yes + - HEALTH_CHECK_TIMEOUT=30 + - HEALTH_CHECK_INTERVAL=30 + volumes: + - /mnt/storage/deluge:/config + - /mnt/storage/downloads:/downloads + - /mnt/storage/plex/movies:/movies + - /mnt/storage/plex/tv:/tv + - /mnt/storage/kavita/download:/books + - /mnt/storage/audiobookshelf/audiobooks:/audiobooks + cap_add: + - NET_ADMIN + sysctls: + - net.ipv4.conf.all.src_valid_mark=1 + networks: + - traefik_network + restart: unless-stopped + labels: + - traefik.enable=true + - traefik.http.routers.${COMPOSE_PROJECT_NAME}-local.rule=Host(`${COMPOSE_PROJECT_NAME}.local.tellserv.fr`) + - traefik.http.routers.${COMPOSE_PROJECT_NAME}-local.entryPoints=local + - "traefik.http.routers.${COMPOSE_PROJECT_NAME}-local.tls.certresolver=cloudflare-local" + - traefik.http.routers.${COMPOSE_PROJECT_NAME}-local.tls=true + - traefik.http.routers.${COMPOSE_PROJECT_NAME}-prod.rule=Host(`${COMPOSE_PROJECT_NAME}.tellserv.fr`) + - traefik.http.routers.${COMPOSE_PROJECT_NAME}-prod.entryPoints=websecure + - traefik.http.routers.${COMPOSE_PROJECT_NAME}-prod.tls=true + - traefik.http.routers.${COMPOSE_PROJECT_NAME}-prod.tls.certResolver=cloudflare + - 
traefik.http.services.${COMPOSE_PROJECT_NAME}.loadbalancer.server.port=8080 + - com.centurylinklabs.watchtower.enable=true +networks: + traefik_network: + external: true diff --git a/stacks/searxng/compose.yaml b/stacks/searxng/compose.yaml new file mode 100644 index 0000000..b3de58e --- /dev/null +++ b/stacks/searxng/compose.yaml @@ -0,0 +1,59 @@ +services: + # Reverse proxy Traefik : déjà déployé ailleurs chez toi + # (Ce stack n'inclut PAS Traefik ; on ne fait qu'y raccorder SearXNG.) + + redis: + container_name: redis + image: docker.io/valkey/valkey:8-alpine + command: valkey-server --save 30 1 --loglevel warning + restart: unless-stopped + networks: + - searxng + volumes: + - valkey-data2:/data + logging: + driver: json-file + options: + max-size: 1m + max-file: "1" + # NOTE hôte: activer vm.overcommit_memory=1 (sysctl) pour éviter les warnings. + + searxng: + container_name: searxng + image: docker.io/searxng/searxng:2024.11.17-41079cdde + restart: unless-stopped + networks: + - searxng + - traefik_network + # Pas de "ports:" ici : Traefik s'occupe de l'exposition + volumes: + - ./searxng:/etc/searxng:rw + - searxng-data:/var/cache/searxng:rw + environment: + # Base URL utilisée par SearXNG pour générer ses liens + - SEARXNG_BASE_URL=https://searxng.tellserv.fr + # Secret via .env (réf. 
dans settings.yml) + - SEARXNG_SECRET=${SEARXNG_SECRET} + labels: + - traefik.enable=true + # Réseau docker que Traefik doit utiliser pour joindre ce service + - traefik.docker.network=traefik_network + # Routeur HTTPS (prod) + - traefik.http.routers.searxng-prod.rule=Host(`searxng.tellserv.fr`) + - traefik.http.routers.searxng-prod.entrypoints=websecure + - traefik.http.routers.searxng-prod.tls=true + - traefik.http.routers.searxng-prod.tls.certresolver=cloudflare + # Service interne : SearXNG écoute en 8080 + - traefik.http.services.searxng.loadbalancer.server.port=8080 + logging: + driver: json-file + options: + max-size: 1m + max-file: "1" +networks: + searxng: null + traefik_network: + external: true +volumes: + valkey-data2: null + searxng-data: null diff --git a/stacks/searxng/searxng/limiter.toml b/stacks/searxng/searxng/limiter.toml new file mode 100644 index 0000000..beab5d6 --- /dev/null +++ b/stacks/searxng/searxng/limiter.toml @@ -0,0 +1,3 @@ +[botdetection.ip_limit] +# Protection avancée anti-bots (tu peux activer si besoin) +link_token = false diff --git a/stacks/searxng/searxng/settings.yml b/stacks/searxng/searxng/settings.yml new file mode 100644 index 0000000..0154ce8 --- /dev/null +++ b/stacks/searxng/searxng/settings.yml @@ -0,0 +1,18 @@ +use_default_settings: true + +server: + # base_url vient de SEARXNG_BASE_URL (env) + secret_key: "%(ENV_SEARXNG_SECRET)s" + limiter: true # active le rate-limit (recommandé en public) + image_proxy: true + public_instance: false + +# Redis/Valkey pour le limiter +redis: + url: redis://redis:6379/0 + +ui: + default_locale: fr + +search: + language: "fr-FR" diff --git a/stacks/stirlingpdf/compose.yml b/stacks/stirlingpdf/compose.yml new file mode 100644 index 0000000..d73df52 --- /dev/null +++ b/stacks/stirlingpdf/compose.yml @@ -0,0 +1,29 @@ +services: + stirling-pdf: + container_name: stirlingpdf + image: frooodle/s-pdf:0.45.6-fat + volumes: + - 
/mnt/storage/stirlingpdf/trainingData:/usr/share/tesseract-ocr/4.00/tessdata + - /mnt/storage/stirlingpdf/extraConfigs:/configs + environment: + - DOCKER_ENABLE_SECURITY=true + - SECURITY_ENABLE_LOGIN=true + networks: + - traefik_network + restart: unless-stopped + labels: + - "traefik.enable=true" + - "traefik.http.routers.${COMPOSE_PROJECT_NAME}-local.rule=Host(`${COMPOSE_PROJECT_NAME}.local.tellserv.fr`)" + - "traefik.http.routers.${COMPOSE_PROJECT_NAME}-local.entryPoints=local" + - "traefik.http.routers.${COMPOSE_PROJECT_NAME}-local.tls.certresolver=cloudflare-local" + - "traefik.http.routers.${COMPOSE_PROJECT_NAME}-local.tls=true" + - "traefik.http.routers.${COMPOSE_PROJECT_NAME}-prod.rule=Host(`${COMPOSE_PROJECT_NAME}.tellserv.fr`)" + - "traefik.http.routers.${COMPOSE_PROJECT_NAME}-prod.entryPoints=websecure" + - "traefik.http.routers.${COMPOSE_PROJECT_NAME}-prod.tls=true" + - "traefik.http.routers.${COMPOSE_PROJECT_NAME}-prod.tls.certResolver=cloudflare" + - "traefik.http.services.${COMPOSE_PROJECT_NAME}.loadbalancer.server.port=8080" + - "com.centurylinklabs.watchtower.enable=true" + +networks: + traefik_network: + external: true diff --git a/stacks/tinyauth/compose.yml b/stacks/tinyauth/compose.yml new file mode 100644 index 0000000..39f4471 --- /dev/null +++ b/stacks/tinyauth/compose.yml @@ -0,0 +1,28 @@ +services: + tinyauth: + image: ghcr.io/steveiliop56/tinyauth:v3 + container_name: tinyauth + restart: unless-stopped + environment: + - SECRET=${SECRET} + - APP_URL=https://auth.tellserv.fr + - GITHUB_CLIENT_ID=${GITHUB_CLIENT_ID} + - GITHUB_CLIENT_SECRET=${GITHUB_CLIENT_SECRET} + - OAUTH_WHITELIST=${OAUTH_WHITELIST} + networks: + - traefik_network + labels: + - "traefik.enable=true" + - "traefik.http.routers.${COMPOSE_PROJECT_NAME}.rule=Host(`auth.tellserv.fr`)" + - "traefik.http.routers.${COMPOSE_PROJECT_NAME}.entrypoints=websecure" + - "traefik.http.routers.${COMPOSE_PROJECT_NAME}.tls=true" + - 
"traefik.http.routers.${COMPOSE_PROJECT_NAME}.tls.certresolver=cloudflare" + - "traefik.http.services.${COMPOSE_PROJECT_NAME}.loadbalancer.server.port=3000" + - "traefik.http.middlewares.${COMPOSE_PROJECT_NAME}.forwardauth.address=http://${COMPOSE_PROJECT_NAME}:3000/api/auth/traefik" + - "traefik.http.middlewares.${COMPOSE_PROJECT_NAME}.forwardauth.trustForwardHeader=true" + - "traefik.http.middlewares.${COMPOSE_PROJECT_NAME}.forwardauth.authResponseHeaders=X-Forwarded-User" + - "com.centurylinklabs.watchtower.enable=true" + +networks: + traefik_network: + external: true diff --git a/stacks/traefik/compose.yml b/stacks/traefik/compose.yml new file mode 100644 index 0000000..4c42573 --- /dev/null +++ b/stacks/traefik/compose.yml @@ -0,0 +1,62 @@ +services: + traefik-public: + image: traefik:v3 + container_name: traefik-public + restart: unless-stopped + ports: + - "192.168.1.2:80:80" + - "192.168.1.2:443:443" + extra_hosts: + - "host.docker.internal:host-gateway" + networks: + - traefik_network + volumes: + - /var/run/docker.sock:/var/run/docker.sock:ro + - ./traefik-public.yml:/etc/traefik/traefik.yml:ro + - ./dynamic-public:/etc/traefik/dynamic:ro + - ./letsencrypt-public:/letsencrypt + - /var/log/traefik:/var/log/traefik + labels: + - "traefik.enable=true" + - "traefik.http.routers.traefik-dashboard-public.rule=Host(`traefik-public.local.tellserv.fr`)" + - "traefik.http.routers.traefik-dashboard-public.entrypoints=local" + - "traefik.http.routers.traefik-dashboard-public.tls.certresolver=cloudflare-local" + - "traefik.http.routers.traefik-dashboard-public.tls=true" + - "traefik.http.routers.traefik-dashboard-public.service=api@internal" + - "traefik.http.middlewares.crowdsec-bouncer.forwardauth.address=http://crowdsec-bouncer:8080/api/v1/forwardAuth" + - "traefik.http.middlewares.crowdsec-bouncer.forwardauth.trustForwardHeader=true" + environment: + - CF_DNS_API_TOKEN=${CF_DNS_API_TOKEN} + - TZ=Europe/Paris + + traefik-private: + image: traefik:v3 + 
container_name: traefik-private + restart: unless-stopped + ports: + - "192.168.1.3:80:80" + - "192.168.1.3:443:443" + extra_hosts: + - "host.docker.internal:host-gateway" + networks: + - traefik_network + volumes: + - /var/run/docker.sock:/var/run/docker.sock:ro + - ./traefik-private.yml:/etc/traefik/traefik.yml:ro + - ./dynamic-private:/etc/traefik/dynamic:ro + - ./letsencrypt-private:/letsencrypt + - /var/log/traefik-local:/var/log/traefik-local + labels: + - "traefik.enable=true" + - "traefik.http.routers.traefik-dashboard-local.rule=Host(`traefik-private.local.tellserv.fr`)" + - "traefik.http.routers.traefik-dashboard-local.entrypoints=local" + - "traefik.http.routers.traefik-dashboard-local.tls.certresolver=cloudflare-local" + - "traefik.http.routers.traefik-dashboard-local.tls=true" + - "traefik.http.routers.traefik-dashboard-local.service=api@internal" + environment: + - TZ=Europe/Paris + - CF_DNS_API_TOKEN=${CF_DNS_API_TOKEN} + +networks: + traefik_network: + external: true diff --git a/stacks/traefik/dynamic-private/cockpit.yml b/stacks/traefik/dynamic-private/cockpit.yml new file mode 100644 index 0000000..3d05ec0 --- /dev/null +++ b/stacks/traefik/dynamic-private/cockpit.yml @@ -0,0 +1,25 @@ +http: + routers: + cockpit-rtr: + rule: "Host(`cockpit.local.tellserv.fr`)" + entryPoints: + - local + service: cockpit-svc + tls: + certResolver: cloudflare-local + middlewares: + - cockpit-headers + + services: + cockpit-svc: + loadBalancer: + passHostHeader: true + servers: + - url: "http://host.docker.internal:9090" + + middlewares: + cockpit-headers: + headers: + customRequestHeaders: + X-Forwarded-Proto: "https" + X-Forwarded-Port: "443" diff --git a/stacks/traefik/dynamic-private/middlewares.yml b/stacks/traefik/dynamic-private/middlewares.yml new file mode 100644 index 0000000..ee8c7df --- /dev/null +++ b/stacks/traefik/dynamic-private/middlewares.yml @@ -0,0 +1,23 @@ +http: + middlewares: + ratelimit: + rateLimit: + average: 100 + burst: 50 + period: 1s + 
secheaders: + headers: + stsSeconds: 31536000 + forceSTSHeader: true + evasive: + rateLimit: + average: 3 + burst: 5 + period: 1s + localonly: + ipAllowList: # Traefik v3 name (v2's ipWhiteList was removed in v3) + sourceRange: + - "127.0.0.1/32" + - "192.168.1.0/24" + - "100.64.0.0/10" + - "172.18.0.0/16" diff --git a/stacks/traefik/dynamic-private/proxmox.yml b/stacks/traefik/dynamic-private/proxmox.yml new file mode 100644 index 0000000..c0e8c19 --- /dev/null +++ b/stacks/traefik/dynamic-private/proxmox.yml @@ -0,0 +1,25 @@ +http: + routers: + proxmox-rtr: + rule: "Host(`proxmox.local.tellserv.fr`)" + entryPoints: + - local + service: proxmox-svc + tls: + certResolver: cloudflare-local + middlewares: + - proxmox-headers + + services: + proxmox-svc: + loadBalancer: + passHostHeader: true + servers: + - url: "https://192.168.1.29:8006" + + middlewares: + proxmox-headers: + headers: + customRequestHeaders: + X-Forwarded-Proto: "https" + X-Forwarded-Port: "443" diff --git a/stacks/traefik/dynamic-public/middlewares.yml b/stacks/traefik/dynamic-public/middlewares.yml new file mode 100644 index 0000000..a758237 --- /dev/null +++ b/stacks/traefik/dynamic-public/middlewares.yml @@ -0,0 +1,16 @@ +http: + middlewares: + ratelimit: + rateLimit: + average: 100 + burst: 50 + period: 1s + secheaders: + headers: + stsSeconds: 31536000 + forceSTSHeader: true + evasive: + rateLimit: + average: 3 + burst: 5 + period: 1s \ No newline at end of file diff --git a/stacks/traefik/traefik-private.yml b/stacks/traefik/traefik-private.yml new file mode 100644 index 0000000..612dfc9 --- /dev/null +++ b/stacks/traefik/traefik-private.yml @@ -0,0 +1,53 @@ +api: + dashboard: true + insecure: false + +entryPoints: + weblocal: + address: ":80" + http: + redirections: + entryPoint: + to: local + scheme: https + permanent: true + + local: + address: ":443" + http: + middlewares: + - localonly@file + +certificatesResolvers: + cloudflare-local: + acme: + email: "mamaloubene@yahoo.fr" + storage: /letsencrypt/cloudflare_acme.json + caServer:
"https://acme-v02.api.letsencrypt.org/directory" + keyType: EC256 + dnsChallenge: + provider: cloudflare + resolvers: + - "1.1.1.1:53" + - "8.8.8.8:53" + +log: + level: DEBUG + filePath: "/var/log/traefik-local/traefik.log" + +accessLog: + filePath: "/var/log/traefik-local/access.log" + format: "json" + +providers: + docker: + exposedByDefault: false + endpoint: "unix:///var/run/docker.sock" + network: traefik_network + watch: true + file: + directory: "/etc/traefik/dynamic" + watch: true + +serversTransport: + insecureSkipVerify: true diff --git a/stacks/traefik/traefik-public.yml b/stacks/traefik/traefik-public.yml new file mode 100644 index 0000000..625f930 --- /dev/null +++ b/stacks/traefik/traefik-public.yml @@ -0,0 +1,58 @@ +api: + dashboard: true + insecure: false + +entryPoints: + web: + address: ":80" + http: + redirections: + entryPoint: + to: websecure + scheme: https + permanent: true + + websecure: + address: ":443" + http: + middlewares: + - crowdsec-bouncer@docker + - secheaders@file + - ratelimit@file + transport: + respondingTimeouts: + idleTimeout: 300s + +certificatesResolvers: + cloudflare: + acme: + email: "mamaloubene@yahoo.fr" + storage: /letsencrypt/cloudflare_acme.json + caServer: "https://acme-v02.api.letsencrypt.org/directory" + keyType: EC256 + dnsChallenge: + provider: cloudflare + resolvers: + - "1.1.1.1:53" + - "8.8.8.8:53" + +log: + level: DEBUG + filePath: "/var/log/traefik/traefik.log" + +accessLog: + filePath: "/var/log/traefik/access.log" + format: "json" + +providers: + docker: + exposedByDefault: false + endpoint: "unix:///var/run/docker.sock" + network: traefik_network + watch: true + file: + directory: "/etc/traefik/dynamic" + watch: true + +serversTransport: + insecureSkipVerify: true diff --git a/stacks/uptime-kuma/compose.yml b/stacks/uptime-kuma/compose.yml new file mode 100644 index 0000000..e5e8679 --- /dev/null +++ b/stacks/uptime-kuma/compose.yml @@ -0,0 +1,27 @@ +services: + uptime-kuma: + image: 
louislam/uptime-kuma:1.23.15 + container_name: uptime-kuma + restart: unless-stopped + environment: + - TZ=Europe/Paris + volumes: + - ./data:/app/data + - /var/run/docker.sock:/var/run/docker.sock + networks: + - traefik_network + labels: + - "traefik.enable=true" + # Local seulement + - "traefik.http.routers.${COMPOSE_PROJECT_NAME}-local.rule=Host(`${COMPOSE_PROJECT_NAME}.local.tellserv.fr`)" + - "traefik.http.routers.${COMPOSE_PROJECT_NAME}-local.entryPoints=local" + - "traefik.http.routers.${COMPOSE_PROJECT_NAME}-local.tls.certresolver=cloudflare-local" + - "traefik.http.routers.${COMPOSE_PROJECT_NAME}-local.tls=true" + # Port interne du service + - "traefik.http.services.${COMPOSE_PROJECT_NAME}.loadbalancer.server.port=3001" + # Watchtower + - "com.centurylinklabs.watchtower.enable=true" + +networks: + traefik_network: + external: true diff --git a/stacks/vaultwarden/compose.yml b/stacks/vaultwarden/compose.yml new file mode 100644 index 0000000..ac79d69 --- /dev/null +++ b/stacks/vaultwarden/compose.yml @@ -0,0 +1,36 @@ +services: + vaultwarden: + image: vaultwarden/server:1.32.7 + container_name: vaultwarden + restart: unless-stopped + environment: + - TZ=Europe/Paris + - ADMIN_TOKEN=${VAULTWARDEN_ADMIN_TOKEN} + - SIGNUPS_ALLOWED=${SIGNUPS_ALLOWED} + - SMTP_FROM=${SMTP_FROM} + - SMTP_HOST=${SMTP_HOST} + - SMTP_PORT=${SMTP_PORT} + - SMTP_SECURITY=${SMTP_SECURITY} + - SMTP_USERNAME=${SMTP_USERNAME} + - SMTP_PASSWORD=${SMTP_PASSWORD} + - EXPERIMENTAL_CLIENT_FEATURE_FLAGS=ssh-key-vault-item,ssh-agent + volumes: + - ./vw-data:/data + networks: + - traefik_network + labels: + - "traefik.enable=true" + - "traefik.http.routers.vaultwarden-local.rule=Host(`vaultwarden.local.tellserv.fr`)" + - "traefik.http.routers.vaultwarden-local.entryPoints=local" + - "traefik.http.routers.vaultwarden-local.tls.certresolver=cloudflare-local" + - "traefik.http.routers.vaultwarden-local.tls=true" + - 
"traefik.http.routers.vaultwarden-prod.rule=Host(`vaultwarden.tellserv.fr`)" + - "traefik.http.routers.vaultwarden-prod.entryPoints=websecure" + - "traefik.http.routers.vaultwarden-prod.tls=true" + - "traefik.http.routers.vaultwarden-prod.tls.certResolver=cloudflare" + - "traefik.http.services.vaultwarden.loadbalancer.server.port=80" + - "com.centurylinklabs.watchtower.enable=true" + +networks: + traefik_network: + external: true diff --git a/stacks/vikunja/compose.yml b/stacks/vikunja/compose.yml new file mode 100644 index 0000000..edfb466 --- /dev/null +++ b/stacks/vikunja/compose.yml @@ -0,0 +1,30 @@ +services: + vikunja: + image: vikunja/vikunja:0.24.6 + environment: + - VIKUNJA_SERVICE_JWTSECRET=${VIKUNJA_SERVICE_JWTSECRET} + - VIKUNJA_SERVICE_PUBLICURL=https://vikunja.tellserv.fr/ + - VIKUNJA_DATABASE_PATH=/db/vikunja.db + - VIKUNJA_SERVICE_ENABLEREGISTRATION=false + volumes: + - /mnt/storage/vikunja/files:/app/vikunja/files + - /mnt/storage/vikunja/db:/db + networks: + - traefik_network + restart: unless-stopped + labels: + - "traefik.enable=true" + - "traefik.http.routers.${COMPOSE_PROJECT_NAME}-local.rule=Host(`${COMPOSE_PROJECT_NAME}.local.tellserv.fr`)" + - "traefik.http.routers.${COMPOSE_PROJECT_NAME}-local.entryPoints=local" + - "traefik.http.routers.${COMPOSE_PROJECT_NAME}-local.tls.certresolver=cloudflare-local" + - "traefik.http.routers.${COMPOSE_PROJECT_NAME}-local.tls=true" + - "traefik.http.routers.${COMPOSE_PROJECT_NAME}-prod.rule=Host(`${COMPOSE_PROJECT_NAME}.tellserv.fr`)" + - "traefik.http.routers.${COMPOSE_PROJECT_NAME}-prod.entryPoints=websecure" + - "traefik.http.routers.${COMPOSE_PROJECT_NAME}-prod.tls=true" + - "traefik.http.routers.${COMPOSE_PROJECT_NAME}-prod.tls.certResolver=cloudflare" + - "traefik.http.services.${COMPOSE_PROJECT_NAME}.loadbalancer.server.port=3456" + - "com.centurylinklabs.watchtower.enable=true" + +networks: + traefik_network: + external: true diff --git a/stacks/watchtower/compose.yml 
b/stacks/watchtower/compose.yml new file mode 100644 index 0000000..5d2833a --- /dev/null +++ b/stacks/watchtower/compose.yml @@ -0,0 +1,25 @@ +services: + watchtower: + image: containrrr/watchtower:1.7.1 + container_name: watchtower + volumes: + - /var/run/docker.sock:/var/run/docker.sock + environment: + - TZ=Europe/Paris + # Mode monitor-only : ne met pas à jour, notifie seulement + - WATCHTOWER_MONITOR_ONLY=true + - WATCHTOWER_CLEANUP=true + - WATCHTOWER_INCLUDE_RESTARTING=true + - WATCHTOWER_LABEL_ENABLE=true + - WATCHTOWER_POLL_INTERVAL=43200 # 12h + # Notifications via Gotify + - WATCHTOWER_NOTIFICATIONS=gotify + - WATCHTOWER_NOTIFICATION_GOTIFY_URL=${WATCHTOWER_GOTIFY_URL} + - WATCHTOWER_NOTIFICATION_GOTIFY_TOKEN=${WATCHTOWER_GOTIFY_TOKEN} + networks: + - traefik_network + restart: unless-stopped + +networks: + traefik_network: + external: true diff --git a/stacks/webdav/compose.yaml b/stacks/webdav/compose.yaml new file mode 100644 index 0000000..fbf757a --- /dev/null +++ b/stacks/webdav/compose.yaml @@ -0,0 +1,59 @@ +services: + # One-shot: applique l'ACL au dossier host monté (uid/gid 33 = www-data) + acl-init: + image: alpine:3.20 + container_name: ${COMPOSE_PROJECT_NAME:-webdav}-acl-init + command: > + /bin/sh -lc " + apk add --no-cache acl && + setfacl -m u:33:rwx,g:33:rwx -m d:u:33:rwx,d:g:33:rwx /target && + ls -ld /target && + echo 'ACL applied for uid/gid 33 on /target' + " + volumes: + - /mnt/storage/phone_backup:/target + restart: "no" + + webdav: + image: maltokyo/docker-nginx-webdav:latest + container_name: ${COMPOSE_PROJECT_NAME:-webdav} + restart: unless-stopped + environment: + - TZ=Europe/Paris + volumes: + - /mnt/storage/phone_backup:/media/data + depends_on: + acl-init: + condition: service_completed_successfully + networks: + - traefik_network + labels: + - traefik.enable=true + + # --- Router local --- + - traefik.http.routers.${COMPOSE_PROJECT_NAME}-local.rule=Host(`${COMPOSE_PROJECT_NAME}.local.tellserv.fr`) + - 
traefik.http.routers.${COMPOSE_PROJECT_NAME}-local.entryPoints=local + - traefik.http.routers.${COMPOSE_PROJECT_NAME}-local.tls=true + - traefik.http.routers.${COMPOSE_PROJECT_NAME}-local.tls.certresolver=cloudflare-local + - traefik.http.routers.${COMPOSE_PROJECT_NAME}-local.middlewares=${COMPOSE_PROJECT_NAME}-auth + + # --- Router prod --- + - traefik.http.routers.${COMPOSE_PROJECT_NAME}-prod.rule=Host(`${COMPOSE_PROJECT_NAME}.tellserv.fr`) + - traefik.http.routers.${COMPOSE_PROJECT_NAME}-prod.entryPoints=websecure + - traefik.http.routers.${COMPOSE_PROJECT_NAME}-prod.tls=true + - traefik.http.routers.${COMPOSE_PROJECT_NAME}-prod.tls.certResolver=cloudflare + - traefik.http.routers.${COMPOSE_PROJECT_NAME}-prod.middlewares=${COMPOSE_PROJECT_NAME}-auth + + # --- Service backend (l'image écoute sur 80) --- + - traefik.http.services.${COMPOSE_PROJECT_NAME}.loadbalancer.server.port=80 + + # --- BasicAuth via Traefik --- + - traefik.http.middlewares.${COMPOSE_PROJECT_NAME}-auth.basicauth.removeheader=true + - traefik.http.middlewares.${COMPOSE_PROJECT_NAME}-auth.basicauth.users=${BASIC_AUTH_USER}:${BASIC_AUTH_PASS_HASH} + + # Watchtower (optionnel) + - com.centurylinklabs.watchtower.enable=true + +networks: + traefik_network: + external: true diff --git a/stacks/yamtrack/compose.yml b/stacks/yamtrack/compose.yml new file mode 100644 index 0000000..d116102 --- /dev/null +++ b/stacks/yamtrack/compose.yml @@ -0,0 +1,51 @@ +services: + yamtrack: + container_name: yamtrack + image: ghcr.io/fuzzygrim/yamtrack + restart: unless-stopped + depends_on: + - redis + environment: + - SECRET=${SECRET} + - REDIS_URL=redis://redis:6379 + - TMDB_LANG=fr + - TMDB_NSFW=true + - MAL_NSFW=true + - MU_NSFW=true + - IGDB_ID=${IGDB_ID} + - IGDB_SECRET=${IGDB_SECRET} + - IGDB_NSFW=true + - TZ=Europe/Paris + - URLS=https://yamtrack.tellserv.fr,https://yamtrack.local.tellserv.fr + volumes: + - ./db:/yamtrack/db + networks: + - traefik_network + labels: + - traefik.enable=true + - 
traefik.http.routers.${COMPOSE_PROJECT_NAME}-local.rule=Host(`${COMPOSE_PROJECT_NAME}.local.tellserv.fr`) + - traefik.http.routers.${COMPOSE_PROJECT_NAME}-local.entryPoints=local + - "traefik.http.routers.${COMPOSE_PROJECT_NAME}-local.tls.certresolver=cloudflare-local" + - traefik.http.routers.${COMPOSE_PROJECT_NAME}-local.tls=true + - traefik.http.routers.${COMPOSE_PROJECT_NAME}-prod.rule=Host(`${COMPOSE_PROJECT_NAME}.tellserv.fr`) + - traefik.http.routers.${COMPOSE_PROJECT_NAME}-prod.entryPoints=websecure + - traefik.http.routers.${COMPOSE_PROJECT_NAME}-prod.tls=true + - traefik.http.routers.${COMPOSE_PROJECT_NAME}-prod.tls.certResolver=cloudflare + - traefik.http.services.${COMPOSE_PROJECT_NAME}.loadbalancer.server.port=8000 + - com.centurylinklabs.watchtower.enable=true + + redis: + container_name: yamtrack-redis + image: redis:7-alpine + restart: unless-stopped + volumes: + - redis_data:/data + networks: + - traefik_network + +volumes: + redis_data: null + +networks: + traefik_network: + external: true diff --git a/templates/env/crowdsec.env.j2 b/templates/env/crowdsec.env.j2 new file mode 100644 index 0000000..433a666 --- /dev/null +++ b/templates/env/crowdsec.env.j2 @@ -0,0 +1 @@ +CROWDSEC_BOUNCER_API_KEY={{ crowdsec_bouncer_api_key }} diff --git a/templates/env/etesync.env.j2 b/templates/env/etesync.env.j2 new file mode 100644 index 0000000..94487ba --- /dev/null +++ b/templates/env/etesync.env.j2 @@ -0,0 +1,2 @@ +SUPER_USER={{ etesync_super_user }} +SUPER_PASS={{ etesync_super_pass }} diff --git a/templates/env/feedropolis.env.j2 b/templates/env/feedropolis.env.j2 new file mode 100644 index 0000000..0d71986 --- /dev/null +++ b/templates/env/feedropolis.env.j2 @@ -0,0 +1,2 @@ +POSTGRES_USER=feed +POSTGRES_PASSWORD={{ feedropolis_postgres_password }} diff --git a/templates/env/glance.env.j2 b/templates/env/glance.env.j2 new file mode 100644 index 0000000..eb981ac --- /dev/null +++ b/templates/env/glance.env.j2 @@ -0,0 +1,3 @@ +# Variables defined here will 
be available to use anywhere in the config with the syntax ${MY_SECRET_TOKEN} +# Note: making changes to this file requires re-running docker compose up +MY_SECRET_TOKEN={{ glance_secret_token }} diff --git a/templates/env/joal.env.j2 b/templates/env/joal.env.j2 new file mode 100644 index 0000000..81e2768 --- /dev/null +++ b/templates/env/joal.env.j2 @@ -0,0 +1 @@ +JOAL_SECRET_TOKEN={{ joal_secret_token }} diff --git a/templates/env/mobilizon.env.j2 b/templates/env/mobilizon.env.j2 new file mode 100644 index 0000000..169caff --- /dev/null +++ b/templates/env/mobilizon.env.j2 @@ -0,0 +1,51 @@ +###################################################### +# Instance configuration # +###################################################### + +MOBILIZON_INSTANCE_NAME=Mobilizon Tellserv +MOBILIZON_INSTANCE_HOST=mobilizon.tellserv.fr +MOBILIZON_INSTANCE_LISTEN_IP=0.0.0.0 +MOBILIZON_INSTANCE_PORT=5005 +MOBILIZON_INSTANCE_REGISTRATIONS_OPEN=true +MOBILIZON_INSTANCE_EMAIL={{ smtp_from }} +MOBILIZON_REPLY_EMAIL=contact@tellserv.fr +MOBILIZON_LOGLEVEL=info + +###################################################### +# Database settings # +###################################################### + +POSTGRES_USER=mobilizon +POSTGRES_PASSWORD={{ mobilizon_postgres_password }} +POSTGRES_DB=mobilizon + +MOBILIZON_DATABASE_USERNAME=mobilizon +MOBILIZON_DATABASE_PASSWORD={{ mobilizon_postgres_password }} +MOBILIZON_DATABASE_DBNAME=mobilizon +MOBILIZON_DATABASE_HOST=db +MOBILIZON_DATABASE_PORT=5432 +MOBILIZON_DATABASE_SSL=false + +###################################################### +# Secrets # +###################################################### + +MOBILIZON_INSTANCE_SECRET_KEY_BASE={{ mobilizon_secret_key_base }} +MOBILIZON_INSTANCE_SECRET_KEY={{ mobilizon_secret_key }} + +###################################################### +# SMTP # +###################################################### + +MOBILIZON_SMTP_SERVER={{ smtp_host }} +MOBILIZON_SMTP_PORT={{ smtp_port }} 
+MOBILIZON_SMTP_USERNAME={{ smtp_username }} +MOBILIZON_SMTP_PASSWORD={{ smtp_password }} +MOBILIZON_SMTP_SSL=false +MOBILIZON_SMTP_TLS=always + +###################################################### +# Upload paths (mounted volume) # +###################################################### + +MOBILIZON_UPLOADS=/var/lib/mobilizon/uploads diff --git a/templates/env/photoprism.env.j2 b/templates/env/photoprism.env.j2 new file mode 100644 index 0000000..a13ee31 --- /dev/null +++ b/templates/env/photoprism.env.j2 @@ -0,0 +1,6 @@ +PHOTOPRISM_ADMIN_USER={{ photoprism_admin_user }} +PHOTOPRISM_ADMIN_PASSWORD={{ photoprism_admin_password }} +PHOTOPRISM_DATABASE_PASSWORD={{ photoprism_database_password }} +MARIADB_PASSWORD={{ photoprism_database_password }} +MARIADB_ROOT_PASSWORD={{ photoprism_database_password }} +MARIADB_USER=photoprism diff --git a/templates/env/plex.env.j2 b/templates/env/plex.env.j2 new file mode 100644 index 0000000..53fa34a --- /dev/null +++ b/templates/env/plex.env.j2 @@ -0,0 +1,2 @@ +PLEX_CLAIM={{ plex_claim }} +PLEX_TOKEN={{ plex_token }} diff --git a/templates/env/searxng.env.j2 b/templates/env/searxng.env.j2 new file mode 100644 index 0000000..4916399 --- /dev/null +++ b/templates/env/searxng.env.j2 @@ -0,0 +1 @@ +SEARXNG_SECRET={{ searxng_secret }} diff --git a/templates/env/tinyauth.env.j2 b/templates/env/tinyauth.env.j2 new file mode 100644 index 0000000..e89dd72 --- /dev/null +++ b/templates/env/tinyauth.env.j2 @@ -0,0 +1,4 @@ +SECRET={{ tinyauth_secret }} +GITHUB_CLIENT_ID={{ tinyauth_github_client_id }} +GITHUB_CLIENT_SECRET={{ tinyauth_github_client_secret }} +OAUTH_WHITELIST={{ tinyauth_oauth_whitelist }} diff --git a/templates/env/traefik.env.j2 b/templates/env/traefik.env.j2 new file mode 100644 index 0000000..185d130 --- /dev/null +++ b/templates/env/traefik.env.j2 @@ -0,0 +1 @@ +CF_DNS_API_TOKEN={{ cf_dns_api_token }} diff --git a/templates/env/vaultwarden.env.j2 b/templates/env/vaultwarden.env.j2 new file mode 100644 index 
0000000..7990a74 --- /dev/null +++ b/templates/env/vaultwarden.env.j2 @@ -0,0 +1,8 @@ +VAULTWARDEN_ADMIN_TOKEN='{{ vaultwarden_admin_token }}' +SMTP_USERNAME={{ smtp_username }} +SMTP_PASSWORD={{ smtp_password }} +SMTP_FROM={{ smtp_from }} +SMTP_HOST={{ smtp_host }} +SMTP_PORT={{ smtp_port }} +SMTP_SECURITY=starttls +SIGNUPS_ALLOWED=false diff --git a/templates/env/vikunja.env.j2 b/templates/env/vikunja.env.j2 new file mode 100644 index 0000000..30a52d8 --- /dev/null +++ b/templates/env/vikunja.env.j2 @@ -0,0 +1 @@ +VIKUNJA_SERVICE_JWTSECRET={{ vikunja_jwt_secret }} diff --git a/templates/env/watchtower.env.j2 b/templates/env/watchtower.env.j2 new file mode 100644 index 0000000..c77dbab --- /dev/null +++ b/templates/env/watchtower.env.j2 @@ -0,0 +1,2 @@ +WATCHTOWER_GOTIFY_URL={{ watchtower_gotify_url }} +WATCHTOWER_GOTIFY_TOKEN={{ watchtower_gotify_token }} diff --git a/templates/env/webdav.env.j2 b/templates/env/webdav.env.j2 new file mode 100644 index 0000000..0734deb --- /dev/null +++ b/templates/env/webdav.env.j2 @@ -0,0 +1,2 @@ +BASIC_AUTH_USER={{ webdav_user }} +BASIC_AUTH_PASS_HASH={{ webdav_pass_hash }} diff --git a/templates/env/yamtrack.env.j2 b/templates/env/yamtrack.env.j2 new file mode 100644 index 0000000..e630297 --- /dev/null +++ b/templates/env/yamtrack.env.j2 @@ -0,0 +1,3 @@ +SECRET={{ yamtrack_secret }} +IGDB_ID={{ yamtrack_igdb_id }} +IGDB_SECRET={{ yamtrack_igdb_secret }} diff --git a/vars/secrets.yml.example b/vars/secrets.yml.example new file mode 100644 index 0000000..906a530 --- /dev/null +++ b/vars/secrets.yml.example @@ -0,0 +1,72 @@ +# SECRETS EXAMPLE - Copy to secrets.yml and encrypt with Ansible Vault +# To encrypt: ansible-vault encrypt vars/secrets.yml +# To edit: ansible-vault edit vars/secrets.yml +# To run playbook: ansible-playbook -i inventory/hosts.yml playbook.yml --ask-vault-pass + +# Cloudflare (Traefik DNS challenge) +cf_dns_api_token: "your-cloudflare-api-token" + +# SMTP +smtp_host: "smtp.example.com" +smtp_port: 587 
+smtp_username: "your-smtp-username" +smtp_password: "your-smtp-password" +smtp_from: "noreply@example.com" + +# TinyAuth (OAuth proxy) +tinyauth_secret: "generate-random-32-char-string" +tinyauth_github_client_id: "your-github-oauth-client-id" +tinyauth_github_client_secret: "your-github-oauth-client-secret" +tinyauth_oauth_whitelist: "your@email.com" + +# Vaultwarden +vaultwarden_admin_token: "your-argon2-hashed-admin-token" + +# CrowdSec +crowdsec_bouncer_api_key: "your-crowdsec-bouncer-api-key" + +# Photoprism +photoprism_admin_user: "admin" +photoprism_admin_password: "your-secure-password" +photoprism_database_password: "your-db-password" + +# Vikunja +vikunja_jwt_secret: "generate-128-char-hex-string" + +# Mobilizon +mobilizon_postgres_password: "your-db-password" +mobilizon_secret_key_base: "generate-64-char-random-string" +mobilizon_secret_key: "generate-64-char-random-string" + +# Etesync +etesync_super_user: "admin" +etesync_super_pass: "your-secure-password" + +# Plex +plex_claim: "claim-xxxxxxxxxxxxxxxxxxxxxx" +plex_token: "your-plex-token" + +# Yamtrack +yamtrack_secret: "your-random-secret" +yamtrack_igdb_id: "your-igdb-client-id" +yamtrack_igdb_secret: "your-igdb-client-secret" + +# Joal +joal_secret_token: "your-secret-token" + +# Feedropolis +feedropolis_postgres_password: "your-db-password" + +# WebDAV +webdav_user: "your-username" +webdav_pass_hash: "your-bcrypt-hash" + +# SearXNG +searxng_secret: "generate-random-string" + +# Glance +glance_secret_token: "your-token-for-glance-api" + +# Watchtower (notifications Gotify) +watchtower_gotify_url: "https://gotify.example.com" +watchtower_gotify_token: "your-gotify-app-token"