2.1 #2
8 changed files with 908 additions and 15 deletions
105  .forgejo/workflows/test.yml  Normal file
@@ -0,0 +1,105 @@
name: Tests and Checks

on:
  push:
    branches: [ main, "2.1" ]
  pull_request:
    branches: [ main ]

jobs:
  tests:
    name: BATS unit tests
    runs-on: ubuntu-latest

    steps:
      - name: Check out the code
        uses: actions/checkout@v3

      - name: Install BATS
        run: |
          sudo apt-get update
          sudo apt-get install -y bats

      - name: Show BATS version
        run: bats --version

      - name: Run unit tests
        run: |
          cd tests
          bats *.bats

      - name: Upload test results
        if: always()
        uses: actions/upload-artifact@v3
        with:
          name: test-results
          path: tests/*.log
          retention-days: 7

  shellcheck:
    name: ShellCheck verification
    runs-on: ubuntu-latest

    steps:
      - name: Check out the code
        uses: actions/checkout@v3

      - name: Install ShellCheck
        run: |
          sudo apt-get update
          sudo apt-get install -y shellcheck

      - name: Show ShellCheck version
        run: shellcheck --version

      - name: Check the main script
        run: |
          shellcheck -x zfs-nfs-replica.sh || true

      - name: Check the test scripts
        run: |
          shellcheck -x tests/*.bash || true

  syntax:
    name: Bash syntax check
    runs-on: ubuntu-latest

    steps:
      - name: Check out the code
        uses: actions/checkout@v3

      - name: Check the main script's syntax
        run: |
          bash -n zfs-nfs-replica.sh

      - name: Check the test helpers' syntax
        run: |
          bash -n tests/test_helper.bash

  summary:
    name: Test summary
    runs-on: ubuntu-latest
    needs: [tests, shellcheck, syntax]
    if: always()

    steps:
      - name: Print summary
        run: |
          echo "=========================================="
          echo "TEST SUMMARY"
          echo "=========================================="
          echo ""
          echo "Unit tests: ${{ needs.tests.result }}"
          echo "ShellCheck: ${{ needs.shellcheck.result }}"
          echo "Bash syntax: ${{ needs.syntax.result }}"
          echo ""

          if [[ "${{ needs.tests.result }}" == "success" ]] && \
             [[ "${{ needs.shellcheck.result }}" == "success" ]] && \
             [[ "${{ needs.syntax.result }}" == "success" ]]; then
            echo "✓ All tests passed"
            exit 0
          else
            echo "✗ Some tests failed"
            exit 1
          fi
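The three check jobs can be reproduced locally before pushing. A minimal sketch, assuming bats and shellcheck are installed on the workstation (the same packages the workflow installs via apt):

    bash -n zfs-nfs-replica.sh        # syntax check only, nothing is executed
    shellcheck -x zfs-nfs-replica.sh  # static analysis; the workflow tolerates findings via '|| true'
    (cd tests && bats *.bats)         # run the BATS suites, as the 'tests' job does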
9  tests/fixtures/zfs_list_snapshots.txt  vendored  Normal file
@@ -0,0 +1,9 @@
NAME                                                  USED  AVAIL  REFER  MOUNTPOINT
zpool1                                               5.12T  2.55T   192K  /zpool1
zpool1/data-nfs-share                                4.89T  2.55T  4.89T  /zpool1/data-nfs-share
zpool1/data-nfs-share@autosnap_2024-12-29_14:00:00    128M      -  4.89T  -
zpool1/data-nfs-share@autosnap_2024-12-29_14:15:00    256M      -  4.89T  -
zpool1/data-nfs-share@autosnap_2024-12-29_14:30:00     64M      -  4.89T  -
zpool1/pbs-backups                                    230G  2.55T   230G  /zpool1/pbs-backups
zpool1/pbs-backups@autosnap_2024-12-29_14:00:00        10M      -   230G  -
zpool1/pbs-backups@autosnap_2024-12-29_14:15:00        15M      -   230G  -
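This fixture mirrors `zfs list` output (datasets and snapshots together) and feeds the `zfs` mock in tests/test_helper.bash. Picking the most recent snapshot of a dataset out of it is a plain text scrape; a hypothetical one-liner in that spirit, not code taken from the script:

    awk '$1 ~ /^zpool1\/data-nfs-share@/ {last=$1} END {print last}' \
        tests/fixtures/zfs_list_snapshots.txt
    # -> zpool1/data-nfs-share@autosnap_2024-12-29_14:30:00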
15  tests/fixtures/zpool_status_degraded.txt  vendored  Normal file
@@ -0,0 +1,15 @@
  pool: zpool1
 state: DEGRADED
status: One or more devices has been removed by the administrator.
        Sufficient replicas exist for the pool to continue functioning in a
        degraded state.
action: Online the device using 'zpool online' or replace the device with
        'zpool replace'.
  scan: scrub repaired 0B in 0 days 02:15:32 with 0 errors on Sun Dec 15 02:39:32 2024
config:

        NAME                                      STATE     READ WRITE CKSUM
        zpool1                                    DEGRADED     0     0     0
          /dev/disk/by-id/wwn-0x5000cca2dfe2e414  UNAVAIL      0     0     0

errors: No known data errors
10  tests/fixtures/zpool_status_healthy.txt  vendored  Normal file
@@ -0,0 +1,10 @@
  pool: zpool1
 state: ONLINE
  scan: scrub repaired 0B in 0 days 02:15:32 with 0 errors on Sun Dec 15 02:39:32 2024
config:

        NAME                                      STATE   READ WRITE CKSUM
        zpool1                                    ONLINE     0     0     0
          /dev/disk/by-id/wwn-0x5000cca2dfe2e414  ONLINE     0     0     0

errors: No known data errors
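These two fixtures are what the `zpool status -P` mock in tests/test_helper.bash returns, selected by TEST_POOL_STATE. Extracting the state and the by-id disk paths from this format is simple text processing; a hypothetical sketch, not code taken from the script:

    awk -F': *' '/^ *state:/ {print $2}' tests/fixtures/zpool_status_degraded.txt   # DEGRADED
    grep -oE 'wwn-0x[0-9a-f]+' tests/fixtures/zpool_status_healthy.txt | sort -u    # wwn-0x5000cca2dfe2e414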
300  tests/test_health_checks.bats  Normal file
@@ -0,0 +1,300 @@
#!/usr/bin/env bats
#
# Unit tests for the health check functions
# Covers the disk and ZFS pool health checks
#

load test_helper

# Load only the script's functions (not the main code)
setup() {
    # Set up the environment
    setup_script_env

    # Source the script in test mode (BATS_TEST_MODE prevents main from running)
    export BATS_TEST_MODE=true
    source "${BATS_TEST_DIRNAME}/../zfs-nfs-replica.sh"
}

teardown() {
    cleanup_script_env
}

# ============================================================================
# Tests: get_pool_disk_uuids()
# ============================================================================

@test "get_pool_disk_uuids: returns UUIDs for a healthy pool" {
    run get_pool_disk_uuids "zpool1"

    [ "$status" -eq 0 ]
    [[ "$output" =~ "wwn-0x5000cca2dfe2e414" ]]
}

@test "get_pool_disk_uuids: returns nothing for a nonexistent pool" {
    # Mock zpool to return an error
    zpool() {
        if [[ "$1" == "status" ]]; then
            echo "cannot open 'fakerpool': no such pool" >&2
            return 1
        fi
    }
    export -f zpool

    run get_pool_disk_uuids "fakerpool"

    # The function must handle the error gracefully
    [ "$status" -ne 0 ] || [ -z "$output" ]
}

# ============================================================================
# Tests: init_disk_tracking()
# ============================================================================

@test "init_disk_tracking: creates the state file with UUIDs" {
    run init_disk_tracking "zpool1"

    [ "$status" -eq 0 ]
    [ -f "${STATE_DIR}/disk-uuids-zpool1.txt" ]

    # Check the contents
    grep -q "initialized=true" "${STATE_DIR}/disk-uuids-zpool1.txt"
    grep -q "pool=zpool1" "${STATE_DIR}/disk-uuids-zpool1.txt"
    grep -q "wwn-0x" "${STATE_DIR}/disk-uuids-zpool1.txt"
}

@test "init_disk_tracking: does not reinitialize when already initialized" {
    # Create a file that is already initialized
    create_disk_uuid_file "zpool1"

    # Record the contents to verify they do not change
    original_content=$(cat "${STATE_DIR}/disk-uuids-zpool1.txt")

    run init_disk_tracking "zpool1"

    [ "$status" -eq 0 ]

    # The file must not have changed
    new_content=$(cat "${STATE_DIR}/disk-uuids-zpool1.txt")
    [ "$original_content" == "$new_content" ]
}

# ============================================================================
# Tests: verify_disk_presence()
# ============================================================================

@test "verify_disk_presence: succeeds when all disks are present" {
    create_disk_uuid_file "zpool1" "wwn-0x5000cca2dfe2e414"
    export TEST_DISK_PRESENT=true

    run verify_disk_presence "zpool1"

    [ "$status" -eq 0 ]
}

@test "verify_disk_presence: fails when a disk is missing" {
    # Create a state file with a bogus UUID
    create_disk_uuid_file "zpool1" "wwn-0xFAKE_MISSING_DISK"
    export TEST_DISK_PRESENT=false

    run verify_disk_presence "zpool1"

    [ "$status" -eq 1 ]
    [[ "$output" =~ "manquant" ]] || [[ "$output" =~ "MISSING" ]]
}

@test "verify_disk_presence: returns an error when the state file is absent" {
    # No disk-uuids file
    rm -f "${STATE_DIR}/disk-uuids-zpool1.txt"

    run verify_disk_presence "zpool1"

    [ "$status" -eq 1 ]
}

# ============================================================================
# Tests: check_pool_health_status()
# ============================================================================

@test "check_pool_health_status: succeeds for an ONLINE pool with free space" {
    export TEST_POOL_STATE="ONLINE"
    export TEST_POOL_CAPACITY=67

    run check_pool_health_status "zpool1"

    [ "$status" -eq 0 ]
}

@test "check_pool_health_status: fails for a DEGRADED pool" {
    export TEST_POOL_STATE="DEGRADED"
    export TEST_POOL_CAPACITY=67

    run check_pool_health_status "zpool1"

    [ "$status" -eq 1 ]
}

@test "check_pool_health_status: fails when disk usage is critical (>95%)" {
    export TEST_POOL_STATE="ONLINE"
    export TEST_POOL_CAPACITY=96

    run check_pool_health_status "zpool1"

    [ "$status" -eq 1 ]
    [[ "$output" =~ "espace libre" ]] || [[ "$output" =~ "capacity" ]]
}

@test "check_pool_health_status: succeeds at exactly 95% (the limit)" {
    export TEST_POOL_STATE="ONLINE"
    export TEST_POOL_CAPACITY=95

    run check_pool_health_status "zpool1"

    # 95% used = 5% free, right at the limit, must pass
    [ "$status" -eq 0 ]
}

# ============================================================================
# Tests: triple_health_check()
# ============================================================================

@test "triple_health_check: succeeds when 3/3 attempts pass" {
    create_disk_uuid_file "zpool1"
    export TEST_POOL_STATE="ONLINE"
    export TEST_POOL_CAPACITY=67
    export TEST_DISK_PRESENT=true
    export CHECK_DELAY=0  # No delay in tests

    run triple_health_check "zpool1"

    [ "$status" -eq 0 ]
}

@test "triple_health_check: fails when all 3 attempts fail" {
    create_disk_uuid_file "zpool1" "wwn-0xFAKE_MISSING"
    export TEST_DISK_PRESENT=false
    export CHECK_DELAY=0

    run triple_health_check "zpool1"

    [ "$status" -eq 1 ]
}

@test "triple_health_check: really makes 3 attempts (no early return)" {
    create_disk_uuid_file "zpool1"
    export TEST_POOL_STATE="DEGRADED"
    export TEST_DISK_PRESENT=true
    export CHECK_DELAY=0

    run triple_health_check "zpool1"

    [ "$status" -eq 1 ]

    # Check that there really are 3 error lines (3 attempts);
    # '|| true' keeps the assignment from failing when grep finds no match
    attempt_count=$(echo "$output" | grep -c "Vérification santé #" || true)
    [ "$attempt_count" -eq 3 ]
}

# ============================================================================
# Tests: check_recent_critical_error()
# ============================================================================

@test "check_recent_critical_error: returns 0 for a recent error (<1h)" {
    # Error 30 minutes ago (1800 seconds)
    local current_epoch=1735481400
    local error_epoch=$((current_epoch - 1800))

    export TEST_CURRENT_EPOCH=$current_epoch
    create_critical_error_file "zpool1" "$error_epoch"

    run check_recent_critical_error "zpool1"

    [ "$status" -eq 0 ]
}

@test "check_recent_critical_error: returns 1 for an old error (>1h)" {
    # Error 2 hours ago (7200 seconds)
    local current_epoch=1735481400
    local error_epoch=$((current_epoch - 7200))

    export TEST_CURRENT_EPOCH=$current_epoch
    create_critical_error_file "zpool1" "$error_epoch"

    run check_recent_critical_error "zpool1"

    [ "$status" -eq 1 ]
}

@test "check_recent_critical_error: returns 1 when there is no error file" {
    rm -f "${STATE_DIR}/critical-errors-zpool1.txt"

    run check_recent_critical_error "zpool1"

    [ "$status" -eq 1 ]
}

# ============================================================================
# Tests: record_critical_error()
# ============================================================================

@test "record_critical_error: creates a file with all the details" {
    run record_critical_error "zpool1" "Test failure reason" "lxc_migrated"

    [ "$status" -eq 0 ]
    [ -f "${STATE_DIR}/critical-errors-zpool1.txt" ]

    grep -q "reason=Test failure reason" "${STATE_DIR}/critical-errors-zpool1.txt"
    grep -q "action=lxc_migrated" "${STATE_DIR}/critical-errors-zpool1.txt"
    grep -q "epoch=" "${STATE_DIR}/critical-errors-zpool1.txt"
}

@test "record_critical_error: overwrites the previous file" {
    # Create a first error
    create_critical_error_file "zpool1" "1735400000"

    # Record a new error
    run record_critical_error "zpool1" "New error" "lxc_stopped"

    [ "$status" -eq 0 ]

    # Check that it is the new error
    grep -q "reason=New error" "${STATE_DIR}/critical-errors-zpool1.txt"
    grep -q "action=lxc_stopped" "${STATE_DIR}/critical-errors-zpool1.txt"
}

# ============================================================================
# Tests: handle_health_failure()
# ============================================================================

@test "handle_health_failure: migrates the LXC on a first error" {
    # No recent error
    rm -f "${STATE_DIR}/critical-errors-zpool1.txt"

    export REMOTE_NODE_NAME="acemagician"

    run handle_health_failure "zpool1" "Disk failure"

    [ "$status" -eq 0 ]
    [[ "$output" =~ "MIGRATION" ]] || [[ "$output" =~ "migrate" ]]

    # Check that the error was recorded
    [ -f "${STATE_DIR}/critical-errors-zpool1.txt" ]
    grep -q "action=lxc_migrated" "${STATE_DIR}/critical-errors-zpool1.txt"
}

@test "handle_health_failure: stops the LXC on a recent error (<1h)" {
    # Recent error (30 min)
    local current_epoch=1735481400
    local error_epoch=$((current_epoch - 1800))

    export TEST_CURRENT_EPOCH=$current_epoch
    create_critical_error_file "zpool1" "$error_epoch"

    run handle_health_failure "zpool1" "Another disk failure"

    [ "$status" -eq 0 ]
    [[ "$output" =~ "ARRÊT" ]] || [[ "$output" =~ "stop" ]] || [[ "$output" =~ "ping-pong" ]]

    # Check that the error was updated
    grep -q "action=lxc_stopped" "${STATE_DIR}/critical-errors-zpool1.txt"
}
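Throughout this suite, BATS's `run` executes the command in a subshell and captures its exit code in `$status` and its combined stdout/stderr in `$output`, so a failing function never aborts the test itself. A minimal standalone illustration of the pattern (a hypothetical test, not part of the suite):

    @test "example: run captures status and output" {
        run bash -c 'echo hello; exit 3'
        [ "$status" -eq 3 ]
        [ "$output" = "hello" ]
    }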
261  tests/test_helper.bash  Normal file
@@ -0,0 +1,261 @@
#!/bin/bash
#
# Helpers and mocks for the BATS tests
# This file provides fakes for every system command used by
# zfs-nfs-replica.sh, so the tests can run without real ZFS
#

# Global variables for the tests
export TEST_FIXTURES_DIR="${BATS_TEST_DIRNAME}/fixtures"
export TEST_POOL_STATE="${TEST_POOL_STATE:-ONLINE}"
export TEST_POOL_CAPACITY="${TEST_POOL_CAPACITY:-67}"
export TEST_LXC_STATUS="${TEST_LXC_STATUS:-running}"
export TEST_DISK_PRESENT="${TEST_DISK_PRESENT:-true}"

# Mock: zpool - fake the ZFS pool commands
zpool() {
    case "$1" in
        status)
            if [[ "$2" == "-P" ]]; then
                # Detailed format with physical paths
                if [[ "$TEST_POOL_STATE" == "ONLINE" ]]; then
                    cat "${TEST_FIXTURES_DIR}/zpool_status_healthy.txt"
                else
                    cat "${TEST_FIXTURES_DIR}/zpool_status_degraded.txt"
                fi
            else
                # Simple format
                echo "zpool1 ${TEST_POOL_STATE} - - -"
            fi
            ;;
        list)
            if [[ "$3" == "-o" ]]; then
                case "$4" in
                    health)
                        echo "${TEST_POOL_STATE}"
                        ;;
                    capacity)
                        echo "${TEST_POOL_CAPACITY}%"
                        ;;
                    *)
                        echo "zpool1"
                        ;;
                esac
            else
                echo "zpool1 7.67T 5.12T 2.55T ${TEST_POOL_CAPACITY}% ${TEST_POOL_STATE} -"
            fi
            ;;
        import)
            echo "pool zpool1 imported"
            return 0
            ;;
        *)
            echo "Mock zpool: unsupported command: $*" >&2
            return 1
            ;;
    esac
}

# Mock: zfs - fake the ZFS dataset commands
zfs() {
    case "$1" in
        list)
            # Same fixture whether or not snapshots are requested (-t snapshot)
            cat "${TEST_FIXTURES_DIR}/zfs_list_snapshots.txt"
            ;;
        get)
            echo "5120000000000"  # 5.12 TB in bytes
            ;;
        *)
            echo "Mock zfs: unsupported command: $*" >&2
            return 0
            ;;
    esac
}

# Mock: pct - fake the Proxmox LXC commands
pct() {
    case "$1" in
        status)
            echo "status: ${TEST_LXC_STATUS}"
            ;;
        exec)
            # Simulate a successful execution inside the container
            return 0
            ;;
        stop)
            echo "Stopping CT ${2}"
            TEST_LXC_STATUS="stopped"
            return 0
            ;;
        start)
            echo "Starting CT ${2}"
            TEST_LXC_STATUS="running"
            return 0
            ;;
        *)
            echo "Mock pct: unsupported command: $*" >&2
            return 1
            ;;
    esac
}

# Mock: ha-manager - fake the Proxmox HA manager
ha-manager() {
    case "$1" in
        migrate)
            echo "Migrating ${2} to ${3}"
            return 0
            ;;
        status)
            echo "ct:103 started elitedesk"
            return 0
            ;;
        *)
            echo "Mock ha-manager: unsupported command: $*" >&2
            return 1
            ;;
    esac
}

# Mock: ssh - fake SSH connections
ssh() {
    # Extract the remote command: skip options (-i, -o, ...), user@host and
    # a bare "root". Note: this naively takes the first remaining word, so
    # an option value passed as a separate word could be mistaken for the command
    local cmd=""
    for arg in "$@"; do
        if [[ ! "$arg" =~ ^- ]] && [[ "$arg" != *"@"* ]] && [[ "$arg" != "root" ]]; then
            cmd="$arg"
            break
        fi
    done

    if [[ "$cmd" == "echo OK" ]] || [[ "$cmd" == *"echo"* ]]; then
        echo "OK"
        return 0
    fi

    # Run the "remote" command locally
    eval "$cmd"
}

# Mock: syncoid - fake the Syncoid replication
syncoid() {
    echo "Sending incremental zpool1@autosnap_2024-12-29_14:30:00"
    echo "2.15GB 0:00:45 [48.9MB/s]"
    return 0
}

# Mock: logger - fake syslog
logger() {
    # Silent during tests, unless DEBUG is on
    if [[ "${BATS_TEST_DEBUG:-}" == "true" ]]; then
        echo "[SYSLOG] $*" >&2
    fi
}

# Mock: hostname - return a test hostname
hostname() {
    echo "${TEST_HOSTNAME:-elitedesk}"
}

# Mock: readlink - fake symlink resolution
readlink() {
    if [[ "$1" == "-f" ]]; then
        # Return a fake /dev/sdX path
        echo "/dev/sda1"
    else
        echo "/dev/disk/by-id/wwn-0x5000cca2dfe2e414"
    fi
}

# Mock: ls for /dev/disk/by-id/
ls() {
    if [[ "$*" =~ /dev/disk/by-id ]]; then
        if [[ "$TEST_DISK_PRESENT" == "true" ]]; then
            echo "lrwxrwxrwx 1 root root 9 Dec 29 14:00 wwn-0x5000cca2dfe2e414 -> ../../sda1"
        fi
        return 0
    fi

    # Fall through to the real ls for everything else
    command ls "$@"
}

# Mock: date - control time in the tests
date() {
    if [[ "$1" == "+%s" ]]; then
        echo "${TEST_CURRENT_EPOCH:-1735481400}"
    elif [[ "$1" == "+%Y-%m-%d_%H:%M:%S" ]]; then
        echo "2024-12-29_14:30:00"
    else
        command date "$@"
    fi
}

# Helper: set up the environment variables for the script
setup_script_env() {
    export ZPOOLS=("zpool1")
    export CTID=103
    export CONTAINER_NAME="nfs-server"
    export STATE_DIR="${BATS_TMPDIR}/zfs-nfs-replica"
    export LOG_DIR="${BATS_TMPDIR}/logs"
    export HEALTH_CHECK_MIN_FREE_SPACE=5
    export HEALTH_CHECK_ERROR_COOLDOWN=3600
    export NOTIFICATION_ENABLED=false
    export AUTO_UPDATE_ENABLED=false
    export CHECK_DELAY=0  # No delay in the tests

    # Cluster nodes
    declare -gA CLUSTER_NODES=(
        ["acemagician"]="192.168.100.10"
        ["elitedesk"]="192.168.100.20"
    )

    # Create the required directories
    mkdir -p "$STATE_DIR"
    mkdir -p "$LOG_DIR"
}

# Helper: clean up the test environment
cleanup_script_env() {
    rm -rf "${BATS_TMPDIR}/zfs-nfs-replica"
    rm -rf "${BATS_TMPDIR}/logs"
}

# Helper: create a disk-uuids state file
create_disk_uuid_file() {
    local pool="$1"
    local uuid="${2:-wwn-0x5000cca2dfe2e414}"

    cat > "${STATE_DIR}/disk-uuids-${pool}.txt" <<EOF
initialized=true
timestamp=2024-12-29_14:00:00
hostname=elitedesk
pool=${pool}
# Physical disk UUIDs
${uuid}
EOF
}

# Helper: create a critical-error file
create_critical_error_file() {
    local pool="$1"
    local epoch="${2:-${TEST_CURRENT_EPOCH:-1735481400}}"

    cat > "${STATE_DIR}/critical-errors-${pool}.txt" <<EOF
timestamp=2024-12-29_14:00:00
epoch=${epoch}
reason=Test error
action=lxc_migrated
target_node=acemagician
EOF
}

# Export all the mocks
export -f zpool zfs pct ha-manager ssh syncoid logger hostname readlink ls date
export -f setup_script_env cleanup_script_env
export -f create_disk_uuid_file create_critical_error_file
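The whole mocking strategy rests on two bash behaviours: a shell function shadows an external binary of the same name, and `export -f` carries function definitions into child shells such as the one BATS's `run` spawns. A minimal sketch of the technique outside BATS:

    #!/bin/bash
    uptime() { echo "fake uptime"; }   # shadows /usr/bin/uptime in this shell
    export -f uptime
    bash -c 'uptime'                   # child shell inherits the mock: prints "fake uptime"
    command uptime                     # 'command' bypasses functions and runs the real binary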
170  tests/test_node_config.bats  Normal file
@@ -0,0 +1,170 @@
#!/usr/bin/env bats
#
# Unit tests for the node configuration
# Covers the remote-node discovery logic
#

load test_helper

setup() {
    setup_script_env
}

teardown() {
    cleanup_script_env
}

# ============================================================================
# Tests: node configuration via CLUSTER_NODES
# ============================================================================

@test "CLUSTER_NODES: contains acemagician and elitedesk" {
    # Check that the associative array is defined
    [ -n "${CLUSTER_NODES[acemagician]}" ]
    [ -n "${CLUSTER_NODES[elitedesk]}" ]
}

@test "CLUSTER_NODES: correct IP for each node" {
    [ "${CLUSTER_NODES[acemagician]}" = "192.168.100.10" ]
    [ "${CLUSTER_NODES[elitedesk]}" = "192.168.100.20" ]
}

# ============================================================================
# Tests: remote node detection
# ============================================================================

@test "Remote node: elitedesk detects acemagician" {
    export TEST_HOSTNAME="elitedesk"
    LOCAL_NODE=$(hostname)

    # Find the remote node
    REMOTE_NODE_NAME=""
    REMOTE_NODE_IP=""
    for node in "${!CLUSTER_NODES[@]}"; do
        if [[ "$node" != "$LOCAL_NODE" ]]; then
            REMOTE_NODE_NAME="$node"
            REMOTE_NODE_IP="${CLUSTER_NODES[$node]}"
            break
        fi
    done

    [ "$REMOTE_NODE_NAME" = "acemagician" ]
    [ "$REMOTE_NODE_IP" = "192.168.100.10" ]
}

@test "Remote node: acemagician detects elitedesk" {
    export TEST_HOSTNAME="acemagician"
    LOCAL_NODE=$(hostname)

    # Find the remote node
    REMOTE_NODE_NAME=""
    REMOTE_NODE_IP=""
    for node in "${!CLUSTER_NODES[@]}"; do
        if [[ "$node" != "$LOCAL_NODE" ]]; then
            REMOTE_NODE_NAME="$node"
            REMOTE_NODE_IP="${CLUSTER_NODES[$node]}"
            break
        fi
    done

    [ "$REMOTE_NODE_NAME" = "elitedesk" ]
    [ "$REMOTE_NODE_IP" = "192.168.100.20" ]
}

@test "Remote node: error when the local node is unknown" {
    export TEST_HOSTNAME="unknown-node"
    LOCAL_NODE=$(hostname)

    # Check that the local node is not in the configuration
    if [[ ! -v "CLUSTER_NODES[$LOCAL_NODE]" ]]; then
        # Expected behaviour: an error
        run echo "Node not found"
        [ "$status" -eq 0 ]
    else
        # Should never get here
        false
    fi
}

@test "Remote node: error when the cluster has a single node" {
    # Build a single-node cluster
    declare -A TEST_CLUSTER=(
        ["lonely-node"]="192.168.100.99"
    )

    export TEST_HOSTNAME="lonely-node"
    LOCAL_NODE=$(hostname)

    # Look for a remote node
    REMOTE_NODE_NAME=""
    REMOTE_NODE_IP=""
    for node in "${!TEST_CLUSTER[@]}"; do
        if [[ "$node" != "$LOCAL_NODE" ]]; then
            REMOTE_NODE_NAME="$node"
            REMOTE_NODE_IP="${TEST_CLUSTER[$node]}"
            break
        fi
    done

    # No remote node found
    [ -z "$REMOTE_NODE_NAME" ]
    [ -z "$REMOTE_NODE_IP" ]
}

# ============================================================================
# Tests: extension to 3+ nodes
# ============================================================================

@test "3-node cluster: detects the first available remote node" {
    # Build a cluster with 3 nodes
    declare -A EXTENDED_CLUSTER=(
        ["node1"]="192.168.100.10"
        ["node2"]="192.168.100.20"
        ["node3"]="192.168.100.30"
    )

    export TEST_HOSTNAME="node1"
    LOCAL_NODE=$(hostname)

    # Find the first remote node
    REMOTE_NODE_NAME=""
    REMOTE_NODE_IP=""
    for node in "${!EXTENDED_CLUSTER[@]}"; do
        if [[ "$node" != "$LOCAL_NODE" ]]; then
            REMOTE_NODE_NAME="$node"
            REMOTE_NODE_IP="${EXTENDED_CLUSTER[$node]}"
            break
        fi
    done

    # A remote node must be found (node2 or node3)
    [ -n "$REMOTE_NODE_NAME" ]
    [ -n "$REMOTE_NODE_IP" ]
    [[ "$REMOTE_NODE_NAME" != "node1" ]]
}

# ============================================================================
# Tests: configuration variable validation
# ============================================================================

@test "Config variables: ZPOOLS is a non-empty array" {
    [ "${#ZPOOLS[@]}" -gt 0 ]
}

@test "Config variables: CTID is defined" {
    [ -n "$CTID" ]
    [ "$CTID" -eq "$CTID" ] 2>/dev/null  # Check that it is a number
}

@test "Config variables: CONTAINER_NAME is defined" {
    [ -n "$CONTAINER_NAME" ]
}

@test "Config variables: HEALTH_CHECK_MIN_FREE_SPACE is valid" {
    [ "$HEALTH_CHECK_MIN_FREE_SPACE" -ge 0 ]
    [ "$HEALTH_CHECK_MIN_FREE_SPACE" -le 100 ]
}

@test "Config variables: HEALTH_CHECK_ERROR_COOLDOWN is valid" {
    [ "$HEALTH_CHECK_ERROR_COOLDOWN" -gt 0 ]
}
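One subtlety behind the "(node2 or node3)" comment above: bash leaves the iteration order of associative-array keys unspecified, so "the first remote node" is simply whichever non-local key the hash table yields first. The test therefore only asserts that some remote node was found, not which one:

    declare -A m=([a]=1 [b]=2 [c]=3)
    echo "${!m[@]}"   # key order is unspecified; do not rely on it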
zfs-nfs-replica.sh
@@ -1,7 +1,7 @@
 #!/bin/bash
 #
 # Automatic ZFS replication script for HA NFS (multi-pool)
-# Deploy on acemagician and elitedesk
+# Deploy on every production node of the Proxmox cluster
 #
 # Version 2.1 of this script:
 # - Supports replicating several ZFS pools simultaneously
@@ -30,6 +30,7 @@ SCRIPT_URL="${REPO_URL}/raw/branch/main/zfs-nfs-replica.sh"
 SCRIPT_PATH="${BASH_SOURCE[0]}"
 AUTO_UPDATE_ENABLED=true  # Set to false to disable auto-update
 
+# LXC container configuration
 CTID=103
 CONTAINER_NAME="nfs-server"
 
@@ -37,6 +38,14 @@ CONTAINER_NAME="nfs-server"
 # Add or remove pools as needed
 ZPOOLS=("zpool1" "zpool2")
 
+# Cluster node configuration
+# Format: NODE_NAME:IP_ADDRESS
+# List every production node of the cluster
+declare -A CLUSTER_NODES=(
+    ["acemagician"]="192.168.100.10"
+    ["elitedesk"]="192.168.100.20"
+)
+
 CHECK_DELAY=2  # Delay between checks (seconds)
 LOG_FACILITY="local0"
 SSH_KEY="/root/.ssh/id_ed25519_zfs_replication"
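With the table-driven node list, adding a production node becomes a one-line configuration change, a shape the 3-node test in tests/test_node_config.bats already exercises. An illustrative (hypothetical) extension:

    declare -A CLUSTER_NODES=(
        ["acemagician"]="192.168.100.10"
        ["elitedesk"]="192.168.100.20"
        ["node3"]="192.168.100.30"   # hypothetical third node
    )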
@@ -1219,6 +1228,11 @@ replicate_pool() {
 # MAIN SCRIPT
 ################################################################################
 
+# Do not run the main code when in BATS test mode
+if [[ "${BATS_TEST_MODE:-false}" == "true" ]]; then
+    return 0 2>/dev/null || exit 0
+fi
+
 # Initialize the logging system
 init_logging
 
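The `return 0 2>/dev/null || exit 0` guard works in both invocation modes: when the file is sourced (as the tests' setup() does), `return` ends the sourcing with the functions defined and no process exit; when the file is executed directly, `return` fails outside a function, its error message is discarded, and `exit 0` takes over. In short:

    # sourced:  'return 0' succeeds -> sourcing stops here, the caller keeps running
    # executed: 'return 0' fails (not in a function) -> stderr dropped -> 'exit 0' runs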
@@ -1253,20 +1267,29 @@ elif [[ "${NOTIFICATION_ENABLED}" == "true" ]] && [[ -z "${APPRISE_URLS}" ]]; then
 fi
 
-# Determine the remote node and its IP
-case "$LOCAL_NODE" in
-    "acemagician")
-        REMOTE_NODE_NAME="elitedesk"
-        REMOTE_NODE_IP="192.168.100.20"
-        ;;
-    "elitedesk")
-        REMOTE_NODE_NAME="acemagician"
-        REMOTE_NODE_IP="192.168.100.10"
-        ;;
-    *)
-        log "error" "Nœud inconnu: ${LOCAL_NODE}. Ce script doit s'exécuter sur acemagician ou elitedesk."
-        exit 1
-        ;;
-esac
+# Check that the local node is in the configuration
+if [[ ! -v "CLUSTER_NODES[$LOCAL_NODE]" ]]; then
+    valid_nodes="${!CLUSTER_NODES[@]}"
+    log "error" "Nœud inconnu: ${LOCAL_NODE}. Nœuds valides: ${valid_nodes}"
+    exit 1
+fi
+
+# Find the remote node (the first node that differs from the local one)
+REMOTE_NODE_NAME=""
+REMOTE_NODE_IP=""
+for node in "${!CLUSTER_NODES[@]}"; do
+    if [[ "$node" != "$LOCAL_NODE" ]]; then
+        REMOTE_NODE_NAME="$node"
+        REMOTE_NODE_IP="${CLUSTER_NODES[$node]}"
+        break
+    fi
+done
+
+# Check that a remote node was found
+if [[ -z "$REMOTE_NODE_NAME" ]]; then
+    log "error" "Aucun nœud distant trouvé dans CLUSTER_NODES. Vérifier la configuration."
+    exit 1
+fi
 
 log "info" "Nœud distant configuré: ${REMOTE_NODE_NAME} (${REMOTE_NODE_IP})"
 log "info" "Pools configurés: ${ZPOOLS[*]}"
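A portability note on the new lookup: `[[ -v "CLUSTER_NODES[$LOCAL_NODE]" ]]` tests key existence and needs bash 4.3 or newer (true of any current Proxmox/Debian host). An equivalent spelling for older shells, shown only as an illustration:

    if [[ -z "${CLUSTER_NODES[$LOCAL_NODE]+set}" ]]; then
        log "error" "Nœud inconnu: ${LOCAL_NODE}"
        exit 1
    fi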