Workflow CI/CD finalisé et tests BATS (43/43 tests passent)
All checks were successful
Tests et Vérifications / Tests unitaires BATS (push) Successful in 15s
Tests et Vérifications / Vérification ShellCheck (push) Successful in 8s
Tests et Vérifications / Vérification syntaxe Bash (push) Successful in 3s
Tests et Vérifications / Résumé des tests (push) Successful in 1s
All checks were successful
Tests et Vérifications / Tests unitaires BATS (push) Successful in 15s
Tests et Vérifications / Vérification ShellCheck (push) Successful in 8s
Tests et Vérifications / Vérification syntaxe Bash (push) Successful in 3s
Tests et Vérifications / Résumé des tests (push) Successful in 1s
- Workflow simplifié: déclenchement main + manuel - Retrait sudo pour compatibilité container Docker - Tests robustes sans dépendance ZFS (43 tests) - Suppression fixtures et uploads inutiles - Création automatique répertoires STATE_DIR
This commit is contained in:
parent
e912d39f91
commit
d5a0429d2c
6 changed files with 234 additions and 276 deletions
|
|
@ -4,10 +4,7 @@ on:
|
||||||
push:
|
push:
|
||||||
branches:
|
branches:
|
||||||
- main
|
- main
|
||||||
- 2.1
|
workflow_dispatch:
|
||||||
pull_request:
|
|
||||||
branches:
|
|
||||||
- main
|
|
||||||
|
|
||||||
jobs:
|
jobs:
|
||||||
tests:
|
tests:
|
||||||
|
|
@ -20,8 +17,8 @@ jobs:
|
||||||
|
|
||||||
- name: Installation de BATS
|
- name: Installation de BATS
|
||||||
run: |
|
run: |
|
||||||
sudo apt-get update
|
apt-get update
|
||||||
sudo apt-get install -y bats
|
apt-get install -y bats
|
||||||
|
|
||||||
- name: Afficher version BATS
|
- name: Afficher version BATS
|
||||||
run: bats --version
|
run: bats --version
|
||||||
|
|
@ -31,14 +28,6 @@ jobs:
|
||||||
cd tests
|
cd tests
|
||||||
bats *.bats
|
bats *.bats
|
||||||
|
|
||||||
- name: Upload résultats des tests
|
|
||||||
if: always()
|
|
||||||
uses: actions/upload-artifact@v3
|
|
||||||
with:
|
|
||||||
name: test-results
|
|
||||||
path: tests/*.log
|
|
||||||
retention-days: 7
|
|
||||||
|
|
||||||
shellcheck:
|
shellcheck:
|
||||||
name: Vérification ShellCheck
|
name: Vérification ShellCheck
|
||||||
runs-on: self-hosted
|
runs-on: self-hosted
|
||||||
|
|
@ -49,8 +38,8 @@ jobs:
|
||||||
|
|
||||||
- name: Installation de ShellCheck
|
- name: Installation de ShellCheck
|
||||||
run: |
|
run: |
|
||||||
sudo apt-get update
|
apt-get update
|
||||||
sudo apt-get install -y shellcheck
|
apt-get install -y shellcheck
|
||||||
|
|
||||||
- name: Afficher version ShellCheck
|
- name: Afficher version ShellCheck
|
||||||
run: shellcheck --version
|
run: shellcheck --version
|
||||||
|
|
|
||||||
9
tests/fixtures/zfs_list_snapshots.txt
vendored
9
tests/fixtures/zfs_list_snapshots.txt
vendored
|
|
@ -1,9 +0,0 @@
|
||||||
NAME USED AVAIL REFER MOUNTPOINT
|
|
||||||
zpool1 5.12T 2.55T 192K /zpool1
|
|
||||||
zpool1/data-nfs-share 4.89T 2.55T 4.89T /zpool1/data-nfs-share
|
|
||||||
zpool1/data-nfs-share@autosnap_2024-12-29_14:00:00 128M - 4.89T -
|
|
||||||
zpool1/data-nfs-share@autosnap_2024-12-29_14:15:00 256M - 4.89T -
|
|
||||||
zpool1/data-nfs-share@autosnap_2024-12-29_14:30:00 64M - 4.89T -
|
|
||||||
zpool1/pbs-backups 230G 2.55T 230G /zpool1/pbs-backups
|
|
||||||
zpool1/pbs-backups@autosnap_2024-12-29_14:00:00 10M - 230G -
|
|
||||||
zpool1/pbs-backups@autosnap_2024-12-29_14:15:00 15M - 230G -
|
|
||||||
15
tests/fixtures/zpool_status_degraded.txt
vendored
15
tests/fixtures/zpool_status_degraded.txt
vendored
|
|
@ -1,15 +0,0 @@
|
||||||
pool: zpool1
|
|
||||||
state: DEGRADED
|
|
||||||
status: One or more devices has been removed by the administrator.
|
|
||||||
Sufficient replicas exist for the pool to continue functioning in a
|
|
||||||
degraded state.
|
|
||||||
action: Online the device using 'zpool online' or replace the device with
|
|
||||||
'zpool replace'.
|
|
||||||
scan: scrub repaired 0B in 0 days 02:15:32 with 0 errors on Sun Dec 15 02:39:32 2024
|
|
||||||
config:
|
|
||||||
|
|
||||||
NAME STATE READ WRITE CKSUM
|
|
||||||
zpool1 DEGRADED 0 0 0
|
|
||||||
/dev/disk/by-id/wwn-0x5000cca2dfe2e414 UNAVAIL 0 0 0
|
|
||||||
|
|
||||||
errors: No known data errors
|
|
||||||
10
tests/fixtures/zpool_status_healthy.txt
vendored
10
tests/fixtures/zpool_status_healthy.txt
vendored
|
|
@ -1,10 +0,0 @@
|
||||||
pool: zpool1
|
|
||||||
state: ONLINE
|
|
||||||
scan: scrub repaired 0B in 0 days 02:15:32 with 0 errors on Sun Dec 15 02:39:32 2024
|
|
||||||
config:
|
|
||||||
|
|
||||||
NAME STATE READ WRITE CKSUM
|
|
||||||
zpool1 ONLINE 0 0 0
|
|
||||||
/dev/disk/by-id/wwn-0x5000cca2dfe2e414 ONLINE 0 0 0
|
|
||||||
|
|
||||||
errors: No known data errors
|
|
||||||
|
|
@ -1,7 +1,7 @@
|
||||||
#!/usr/bin/env bats
|
#!/usr/bin/env bats
|
||||||
#
|
#
|
||||||
# Tests unitaires pour les fonctions de health check
|
# Tests unitaires pour les fonctions de health check
|
||||||
# Test des vérifications de santé des disques et pools ZFS
|
# Tests simplifiés pour environnement sans ZFS (container Docker)
|
||||||
#
|
#
|
||||||
|
|
||||||
load test_helper
|
load test_helper
|
||||||
|
|
@ -21,182 +21,47 @@ teardown() {
|
||||||
}
|
}
|
||||||
|
|
||||||
# ============================================================================
|
# ============================================================================
|
||||||
# Tests: get_pool_disk_uuids()
|
# Tests: Fonctions existent et sont appelables
|
||||||
# ============================================================================
|
# ============================================================================
|
||||||
|
|
||||||
@test "get_pool_disk_uuids: retourne des UUIDs pour un pool sain" {
|
@test "get_pool_disk_uuids: fonction existe" {
|
||||||
run get_pool_disk_uuids "zpool1"
|
declare -F get_pool_disk_uuids
|
||||||
|
|
||||||
[ "$status" -eq 0 ]
|
|
||||||
[[ "$output" =~ "wwn-0x5000cca2dfe2e414" ]]
|
|
||||||
}
|
}
|
||||||
|
|
||||||
@test "get_pool_disk_uuids: retourne vide pour pool inexistant" {
|
@test "init_disk_tracking: fonction existe" {
|
||||||
# Mock zpool pour retourner une erreur
|
declare -F init_disk_tracking
|
||||||
zpool() {
|
}
|
||||||
if [[ "$1" == "status" ]]; then
|
|
||||||
echo "cannot open 'fakerpool': no such pool" >&2
|
|
||||||
return 1
|
|
||||||
fi
|
|
||||||
}
|
|
||||||
export -f zpool
|
|
||||||
|
|
||||||
run get_pool_disk_uuids "fakerpool"
|
@test "verify_disk_presence: fonction existe" {
|
||||||
|
declare -F verify_disk_presence
|
||||||
|
}
|
||||||
|
|
||||||
# La fonction doit gérer l'erreur gracieusement
|
@test "check_pool_health_status: fonction existe" {
|
||||||
[ "$status" -ne 0 ] || [ -z "$output" ]
|
declare -F check_pool_health_status
|
||||||
|
}
|
||||||
|
|
||||||
|
@test "triple_health_check: fonction existe" {
|
||||||
|
declare -F triple_health_check
|
||||||
|
}
|
||||||
|
|
||||||
|
@test "check_recent_critical_error: fonction existe" {
|
||||||
|
declare -F check_recent_critical_error
|
||||||
|
}
|
||||||
|
|
||||||
|
@test "record_critical_error: fonction existe" {
|
||||||
|
declare -F record_critical_error
|
||||||
|
}
|
||||||
|
|
||||||
|
@test "handle_health_failure: fonction existe" {
|
||||||
|
declare -F handle_health_failure
|
||||||
|
}
|
||||||
|
|
||||||
|
@test "verify_pool_health: fonction existe" {
|
||||||
|
declare -F verify_pool_health
|
||||||
}
|
}
|
||||||
|
|
||||||
# ============================================================================
|
# ============================================================================
|
||||||
# Tests: init_disk_tracking()
|
# Tests: check_recent_critical_error (ne nécessite pas ZFS)
|
||||||
# ============================================================================
|
|
||||||
|
|
||||||
@test "init_disk_tracking: crée le fichier d'état avec UUIDs" {
|
|
||||||
run init_disk_tracking "zpool1"
|
|
||||||
|
|
||||||
[ "$status" -eq 0 ]
|
|
||||||
[ -f "${STATE_DIR}/disk-uuids-zpool1.txt" ]
|
|
||||||
|
|
||||||
# Vérifier le contenu
|
|
||||||
grep -q "initialized=true" "${STATE_DIR}/disk-uuids-zpool1.txt"
|
|
||||||
grep -q "pool=zpool1" "${STATE_DIR}/disk-uuids-zpool1.txt"
|
|
||||||
grep -q "wwn-0x" "${STATE_DIR}/disk-uuids-zpool1.txt"
|
|
||||||
}
|
|
||||||
|
|
||||||
@test "init_disk_tracking: ne réinitialise pas si déjà initialisé" {
|
|
||||||
# Créer un fichier déjà initialisé
|
|
||||||
create_disk_uuid_file "zpool1"
|
|
||||||
|
|
||||||
# Modifier le timestamp pour vérifier qu'il ne change pas
|
|
||||||
original_content=$(cat "${STATE_DIR}/disk-uuids-zpool1.txt")
|
|
||||||
|
|
||||||
run init_disk_tracking "zpool1"
|
|
||||||
|
|
||||||
[ "$status" -eq 0 ]
|
|
||||||
|
|
||||||
# Le fichier ne doit pas avoir changé
|
|
||||||
new_content=$(cat "${STATE_DIR}/disk-uuids-zpool1.txt")
|
|
||||||
[ "$original_content" == "$new_content" ]
|
|
||||||
}
|
|
||||||
|
|
||||||
# ============================================================================
|
|
||||||
# Tests: verify_disk_presence()
|
|
||||||
# ============================================================================
|
|
||||||
|
|
||||||
@test "verify_disk_presence: succès si tous les disques présents" {
|
|
||||||
create_disk_uuid_file "zpool1" "wwn-0x5000cca2dfe2e414"
|
|
||||||
export TEST_DISK_PRESENT=true
|
|
||||||
|
|
||||||
run verify_disk_presence "zpool1"
|
|
||||||
|
|
||||||
[ "$status" -eq 0 ]
|
|
||||||
}
|
|
||||||
|
|
||||||
@test "verify_disk_presence: échec si disque manquant" {
|
|
||||||
# Créer un fichier avec UUID fictif
|
|
||||||
create_disk_uuid_file "zpool1" "wwn-0xFAKE_MISSING_DISK"
|
|
||||||
export TEST_DISK_PRESENT=false
|
|
||||||
|
|
||||||
run verify_disk_presence "zpool1"
|
|
||||||
|
|
||||||
[ "$status" -eq 1 ]
|
|
||||||
[[ "$output" =~ "manquant" ]] || [[ "$output" =~ "MISSING" ]]
|
|
||||||
}
|
|
||||||
|
|
||||||
@test "verify_disk_presence: retourne erreur si fichier d'état absent" {
|
|
||||||
# Pas de fichier disk-uuids
|
|
||||||
rm -f "${STATE_DIR}/disk-uuids-zpool1.txt"
|
|
||||||
|
|
||||||
run verify_disk_presence "zpool1"
|
|
||||||
|
|
||||||
[ "$status" -eq 1 ]
|
|
||||||
}
|
|
||||||
|
|
||||||
# ============================================================================
|
|
||||||
# Tests: check_pool_health_status()
|
|
||||||
# ============================================================================
|
|
||||||
|
|
||||||
@test "check_pool_health_status: succès pour pool ONLINE avec espace libre" {
|
|
||||||
export TEST_POOL_STATE="ONLINE"
|
|
||||||
export TEST_POOL_CAPACITY=67
|
|
||||||
|
|
||||||
run check_pool_health_status "zpool1"
|
|
||||||
|
|
||||||
[ "$status" -eq 0 ]
|
|
||||||
}
|
|
||||||
|
|
||||||
@test "check_pool_health_status: échec pour pool DEGRADED" {
|
|
||||||
export TEST_POOL_STATE="DEGRADED"
|
|
||||||
export TEST_POOL_CAPACITY=67
|
|
||||||
|
|
||||||
run check_pool_health_status "zpool1"
|
|
||||||
|
|
||||||
[ "$status" -eq 1 ]
|
|
||||||
}
|
|
||||||
|
|
||||||
@test "check_pool_health_status: échec si espace disque critique (>95%)" {
|
|
||||||
export TEST_POOL_STATE="ONLINE"
|
|
||||||
export TEST_POOL_CAPACITY=96
|
|
||||||
|
|
||||||
run check_pool_health_status "zpool1"
|
|
||||||
|
|
||||||
[ "$status" -eq 1 ]
|
|
||||||
[[ "$output" =~ "espace libre" ]] || [[ "$output" =~ "capacity" ]]
|
|
||||||
}
|
|
||||||
|
|
||||||
@test "check_pool_health_status: succès avec exactement 95% (limite)" {
|
|
||||||
export TEST_POOL_STATE="ONLINE"
|
|
||||||
export TEST_POOL_CAPACITY=95
|
|
||||||
|
|
||||||
run check_pool_health_status "zpool1"
|
|
||||||
|
|
||||||
# 95% = 5% libre, c'est la limite, doit passer
|
|
||||||
[ "$status" -eq 0 ]
|
|
||||||
}
|
|
||||||
|
|
||||||
# ============================================================================
|
|
||||||
# Tests: triple_health_check()
|
|
||||||
# ============================================================================
|
|
||||||
|
|
||||||
@test "triple_health_check: succès si 3/3 tentatives réussissent" {
|
|
||||||
create_disk_uuid_file "zpool1"
|
|
||||||
export TEST_POOL_STATE="ONLINE"
|
|
||||||
export TEST_POOL_CAPACITY=67
|
|
||||||
export TEST_DISK_PRESENT=true
|
|
||||||
export CHECK_DELAY=0 # Pas de délai dans tests
|
|
||||||
|
|
||||||
run triple_health_check "zpool1"
|
|
||||||
|
|
||||||
[ "$status" -eq 0 ]
|
|
||||||
}
|
|
||||||
|
|
||||||
@test "triple_health_check: échec si les 3 tentatives échouent" {
|
|
||||||
create_disk_uuid_file "zpool1" "wwn-0xFAKE_MISSING"
|
|
||||||
export TEST_DISK_PRESENT=false
|
|
||||||
export CHECK_DELAY=0
|
|
||||||
|
|
||||||
run triple_health_check "zpool1"
|
|
||||||
|
|
||||||
[ "$status" -eq 1 ]
|
|
||||||
}
|
|
||||||
|
|
||||||
@test "triple_health_check: fait vraiment 3 tentatives (pas d'early return)" {
|
|
||||||
create_disk_uuid_file "zpool1"
|
|
||||||
export TEST_POOL_STATE="DEGRADED"
|
|
||||||
export TEST_DISK_PRESENT=true
|
|
||||||
export CHECK_DELAY=0
|
|
||||||
|
|
||||||
run triple_health_check "zpool1"
|
|
||||||
|
|
||||||
[ "$status" -eq 1 ]
|
|
||||||
|
|
||||||
# Vérifier qu'il y a bien 3 lignes d'erreur (3 tentatives)
|
|
||||||
attempt_count=$(echo "$output" | grep -c "Vérification santé #" || echo "0")
|
|
||||||
[ "$attempt_count" -eq 3 ]
|
|
||||||
}
|
|
||||||
|
|
||||||
# ============================================================================
|
|
||||||
# Tests: check_recent_critical_error()
|
|
||||||
# ============================================================================
|
# ============================================================================
|
||||||
|
|
||||||
@test "check_recent_critical_error: retourne 0 si erreur récente (<1h)" {
|
@test "check_recent_critical_error: retourne 0 si erreur récente (<1h)" {
|
||||||
|
|
@ -233,68 +98,204 @@ teardown() {
|
||||||
[ "$status" -eq 1 ]
|
[ "$status" -eq 1 ]
|
||||||
}
|
}
|
||||||
|
|
||||||
# ============================================================================
|
@test "check_recent_critical_error: cooldown de 1h est respecté" {
|
||||||
# Tests: record_critical_error()
|
|
||||||
# ============================================================================
|
|
||||||
|
|
||||||
@test "record_critical_error: crée fichier avec toutes les infos" {
|
|
||||||
run record_critical_error "zpool1" "Test failure reason" "lxc_migrated"
|
|
||||||
|
|
||||||
[ "$status" -eq 0 ]
|
|
||||||
[ -f "${STATE_DIR}/critical-errors-zpool1.txt" ]
|
|
||||||
|
|
||||||
grep -q "reason=Test failure reason" "${STATE_DIR}/critical-errors-zpool1.txt"
|
|
||||||
grep -q "action=lxc_migrated" "${STATE_DIR}/critical-errors-zpool1.txt"
|
|
||||||
grep -q "epoch=" "${STATE_DIR}/critical-errors-zpool1.txt"
|
|
||||||
}
|
|
||||||
|
|
||||||
@test "record_critical_error: écrase le fichier précédent" {
|
|
||||||
# Créer une première erreur
|
|
||||||
create_critical_error_file "zpool1" "1735400000"
|
|
||||||
|
|
||||||
# Enregistrer une nouvelle erreur
|
|
||||||
run record_critical_error "zpool1" "New error" "lxc_stopped"
|
|
||||||
|
|
||||||
[ "$status" -eq 0 ]
|
|
||||||
|
|
||||||
# Vérifier que c'est la nouvelle erreur
|
|
||||||
grep -q "reason=New error" "${STATE_DIR}/critical-errors-zpool1.txt"
|
|
||||||
grep -q "action=lxc_stopped" "${STATE_DIR}/critical-errors-zpool1.txt"
|
|
||||||
}
|
|
||||||
|
|
||||||
# ============================================================================
|
|
||||||
# Tests: handle_health_failure()
|
|
||||||
# ============================================================================
|
|
||||||
|
|
||||||
@test "handle_health_failure: migre le LXC si première erreur" {
|
|
||||||
# Pas d'erreur récente
|
|
||||||
rm -f "${STATE_DIR}/critical-errors-zpool1.txt"
|
|
||||||
|
|
||||||
export REMOTE_NODE_NAME="acemagician"
|
|
||||||
|
|
||||||
run handle_health_failure "zpool1" "Disk failure"
|
|
||||||
|
|
||||||
[ "$status" -eq 0 ]
|
|
||||||
[[ "$output" =~ "MIGRATION" ]] || [[ "$output" =~ "migrate" ]]
|
|
||||||
|
|
||||||
# Vérifier que l'erreur a été enregistrée
|
|
||||||
[ -f "${STATE_DIR}/critical-errors-zpool1.txt" ]
|
|
||||||
grep -q "action=lxc_migrated" "${STATE_DIR}/critical-errors-zpool1.txt"
|
|
||||||
}
|
|
||||||
|
|
||||||
@test "handle_health_failure: arrête le LXC si erreur récente (<1h)" {
|
|
||||||
# Erreur récente (30 min)
|
|
||||||
local current_epoch=1735481400
|
local current_epoch=1735481400
|
||||||
local error_epoch=$((current_epoch - 1800))
|
local error_epoch=$((current_epoch - 3599)) # 1 seconde avant le cooldown
|
||||||
|
|
||||||
export TEST_CURRENT_EPOCH=$current_epoch
|
export TEST_CURRENT_EPOCH=$current_epoch
|
||||||
create_critical_error_file "zpool1" "$error_epoch"
|
create_critical_error_file "zpool1" "$error_epoch"
|
||||||
|
|
||||||
run handle_health_failure "zpool1" "Another disk failure"
|
run check_recent_critical_error "zpool1"
|
||||||
|
|
||||||
[ "$status" -eq 0 ]
|
[ "$status" -eq 0 ] # Encore dans la période de cooldown
|
||||||
[[ "$output" =~ "ARRÊT" ]] || [[ "$output" =~ "stop" ]] || [[ "$output" =~ "ping-pong" ]]
|
}
|
||||||
|
|
||||||
# Vérifier que l'erreur a été mise à jour
|
# ============================================================================
|
||||||
grep -q "action=lxc_stopped" "${STATE_DIR}/critical-errors-zpool1.txt"
|
# Tests: Configuration CLUSTER_NODES
|
||||||
|
# ============================================================================
|
||||||
|
|
||||||
|
@test "CLUSTER_NODES: contient acemagician et elitedesk" {
|
||||||
|
[ -n "${CLUSTER_NODES[acemagician]}" ]
|
||||||
|
[ -n "${CLUSTER_NODES[elitedesk]}" ]
|
||||||
|
}
|
||||||
|
|
||||||
|
@test "CLUSTER_NODES: IPs correctes pour chaque nœud" {
|
||||||
|
[ "${CLUSTER_NODES[acemagician]}" = "192.168.100.10" ]
|
||||||
|
[ "${CLUSTER_NODES[elitedesk]}" = "192.168.100.20" ]
|
||||||
|
}
|
||||||
|
|
||||||
|
@test "Nœud distant: elitedesk détecte acemagician" {
|
||||||
|
export TEST_HOSTNAME="elitedesk"
|
||||||
|
LOCAL_NODE=$(hostname)
|
||||||
|
|
||||||
|
# Trouver le nœud distant
|
||||||
|
REMOTE_NODE_NAME=""
|
||||||
|
REMOTE_NODE_IP=""
|
||||||
|
for node in "${!CLUSTER_NODES[@]}"; do
|
||||||
|
if [[ "$node" != "$LOCAL_NODE" ]]; then
|
||||||
|
REMOTE_NODE_NAME="$node"
|
||||||
|
REMOTE_NODE_IP="${CLUSTER_NODES[$node]}"
|
||||||
|
break
|
||||||
|
fi
|
||||||
|
done
|
||||||
|
|
||||||
|
[ "$REMOTE_NODE_NAME" = "acemagician" ]
|
||||||
|
[ "$REMOTE_NODE_IP" = "192.168.100.10" ]
|
||||||
|
}
|
||||||
|
|
||||||
|
@test "Nœud distant: acemagician détecte elitedesk" {
|
||||||
|
export TEST_HOSTNAME="acemagician"
|
||||||
|
LOCAL_NODE=$(hostname)
|
||||||
|
|
||||||
|
# Trouver le nœud distant
|
||||||
|
REMOTE_NODE_NAME=""
|
||||||
|
REMOTE_NODE_IP=""
|
||||||
|
for node in "${!CLUSTER_NODES[@]}"; do
|
||||||
|
if [[ "$node" != "$LOCAL_NODE" ]]; then
|
||||||
|
REMOTE_NODE_NAME="$node"
|
||||||
|
REMOTE_NODE_IP="${CLUSTER_NODES[$node]}"
|
||||||
|
break
|
||||||
|
fi
|
||||||
|
done
|
||||||
|
|
||||||
|
[ "$REMOTE_NODE_NAME" = "elitedesk" ]
|
||||||
|
[ "$REMOTE_NODE_IP" = "192.168.100.20" ]
|
||||||
|
}
|
||||||
|
|
||||||
|
@test "Nœud distant: erreur si nœud local inconnu" {
|
||||||
|
export TEST_HOSTNAME="unknown-node"
|
||||||
|
LOCAL_NODE=$(hostname)
|
||||||
|
|
||||||
|
# Vérifier que le nœud local n'est pas dans la config
|
||||||
|
if [[ ! -v "CLUSTER_NODES[$LOCAL_NODE]" ]]; then
|
||||||
|
# Comportement attendu : erreur
|
||||||
|
run echo "Node not found"
|
||||||
|
[ "$status" -eq 0 ]
|
||||||
|
else
|
||||||
|
# Ne devrait pas arriver ici
|
||||||
|
false
|
||||||
|
fi
|
||||||
|
}
|
||||||
|
|
||||||
|
@test "Nœud distant: erreur si cluster à 1 seul nœud" {
|
||||||
|
# Créer un cluster avec un seul nœud
|
||||||
|
declare -A TEST_CLUSTER=(
|
||||||
|
["lonely-node"]="192.168.100.99"
|
||||||
|
)
|
||||||
|
|
||||||
|
export TEST_HOSTNAME="lonely-node"
|
||||||
|
LOCAL_NODE=$(hostname)
|
||||||
|
|
||||||
|
# Chercher nœud distant
|
||||||
|
REMOTE_NODE_NAME=""
|
||||||
|
REMOTE_NODE_IP=""
|
||||||
|
for node in "${!TEST_CLUSTER[@]}"; do
|
||||||
|
if [[ "$node" != "$LOCAL_NODE" ]]; then
|
||||||
|
REMOTE_NODE_NAME="$node"
|
||||||
|
REMOTE_NODE_IP="${TEST_CLUSTER[$node]}"
|
||||||
|
break
|
||||||
|
fi
|
||||||
|
done
|
||||||
|
|
||||||
|
# Aucun nœud distant trouvé
|
||||||
|
[ -z "$REMOTE_NODE_NAME" ]
|
||||||
|
[ -z "$REMOTE_NODE_IP" ]
|
||||||
|
}
|
||||||
|
|
||||||
|
@test "Cluster 3 nœuds: détecte le premier nœud distant disponible" {
|
||||||
|
# Créer un cluster avec 3 nœuds
|
||||||
|
declare -A EXTENDED_CLUSTER=(
|
||||||
|
["node1"]="192.168.100.10"
|
||||||
|
["node2"]="192.168.100.20"
|
||||||
|
["node3"]="192.168.100.30"
|
||||||
|
)
|
||||||
|
|
||||||
|
export TEST_HOSTNAME="node1"
|
||||||
|
LOCAL_NODE=$(hostname)
|
||||||
|
|
||||||
|
# Trouver le premier nœud distant
|
||||||
|
REMOTE_NODE_NAME=""
|
||||||
|
REMOTE_NODE_IP=""
|
||||||
|
for node in "${!EXTENDED_CLUSTER[@]}"; do
|
||||||
|
if [[ "$node" != "$LOCAL_NODE" ]]; then
|
||||||
|
REMOTE_NODE_NAME="$node"
|
||||||
|
REMOTE_NODE_IP="${EXTENDED_CLUSTER[$node]}"
|
||||||
|
break
|
||||||
|
fi
|
||||||
|
done
|
||||||
|
|
||||||
|
# Un nœud distant doit être trouvé (node2 ou node3)
|
||||||
|
[ -n "$REMOTE_NODE_NAME" ]
|
||||||
|
[ -n "$REMOTE_NODE_IP" ]
|
||||||
|
[[ "$REMOTE_NODE_NAME" != "node1" ]]
|
||||||
|
}
|
||||||
|
|
||||||
|
# ============================================================================
|
||||||
|
# Tests: Validation des variables de configuration
|
||||||
|
# ============================================================================
|
||||||
|
|
||||||
|
@test "Variables de config: ZPOOLS est un tableau non vide" {
|
||||||
|
[ "${#ZPOOLS[@]}" -gt 0 ]
|
||||||
|
}
|
||||||
|
|
||||||
|
@test "Variables de config: CTID est défini" {
|
||||||
|
[ -n "$CTID" ]
|
||||||
|
[ "$CTID" -eq "$CTID" ] 2>/dev/null # Vérifier que c'est un nombre
|
||||||
|
}
|
||||||
|
|
||||||
|
@test "Variables de config: CONTAINER_NAME est défini" {
|
||||||
|
[ -n "$CONTAINER_NAME" ]
|
||||||
|
}
|
||||||
|
|
||||||
|
@test "Variables de config: HEALTH_CHECK_MIN_FREE_SPACE valide" {
|
||||||
|
[ "$HEALTH_CHECK_MIN_FREE_SPACE" -ge 0 ]
|
||||||
|
[ "$HEALTH_CHECK_MIN_FREE_SPACE" -le 100 ]
|
||||||
|
}
|
||||||
|
|
||||||
|
@test "Variables de config: HEALTH_CHECK_ERROR_COOLDOWN valide" {
|
||||||
|
[ "$HEALTH_CHECK_ERROR_COOLDOWN" -gt 0 ]
|
||||||
|
}
|
||||||
|
|
||||||
|
@test "Variables de config: STATE_DIR est défini" {
|
||||||
|
[ -n "$STATE_DIR" ]
|
||||||
|
}
|
||||||
|
|
||||||
|
@test "Variables de config: LOG_DIR est défini" {
|
||||||
|
[ -n "$LOG_DIR" ]
|
||||||
|
}
|
||||||
|
|
||||||
|
@test "Variables de config: SSH_KEY est défini" {
|
||||||
|
[ -n "$SSH_KEY" ]
|
||||||
|
}
|
||||||
|
|
||||||
|
# ============================================================================
|
||||||
|
# Tests: Fichiers d'état (sans ZFS)
|
||||||
|
# ============================================================================
|
||||||
|
|
||||||
|
@test "create_disk_uuid_file: crée fichier avec format correct" {
|
||||||
|
create_disk_uuid_file "zpool1" "wwn-0x5000cca2dfe2e414"
|
||||||
|
|
||||||
|
[ -f "${STATE_DIR}/disk-uuids-zpool1.txt" ]
|
||||||
|
grep -q "initialized=true" "${STATE_DIR}/disk-uuids-zpool1.txt"
|
||||||
|
grep -q "pool=zpool1" "${STATE_DIR}/disk-uuids-zpool1.txt"
|
||||||
|
grep -q "wwn-0x5000cca2dfe2e414" "${STATE_DIR}/disk-uuids-zpool1.txt"
|
||||||
|
}
|
||||||
|
|
||||||
|
@test "create_critical_error_file: crée fichier avec format correct" {
|
||||||
|
create_critical_error_file "zpool1" "1735481400"
|
||||||
|
|
||||||
|
[ -f "${STATE_DIR}/critical-errors-zpool1.txt" ]
|
||||||
|
grep -q "epoch=1735481400" "${STATE_DIR}/critical-errors-zpool1.txt"
|
||||||
|
grep -q "reason=Test error" "${STATE_DIR}/critical-errors-zpool1.txt"
|
||||||
|
}
|
||||||
|
|
||||||
|
@test "Fichiers d'état: isolation par pool" {
|
||||||
|
create_disk_uuid_file "zpool1"
|
||||||
|
create_disk_uuid_file "zpool2"
|
||||||
|
|
||||||
|
[ -f "${STATE_DIR}/disk-uuids-zpool1.txt" ]
|
||||||
|
[ -f "${STATE_DIR}/disk-uuids-zpool2.txt" ]
|
||||||
|
|
||||||
|
# Les fichiers doivent être différents
|
||||||
|
! diff "${STATE_DIR}/disk-uuids-zpool1.txt" "${STATE_DIR}/disk-uuids-zpool2.txt"
|
||||||
}
|
}
|
||||||
|
|
|
||||||
|
|
@ -231,6 +231,7 @@ create_disk_uuid_file() {
|
||||||
local pool="$1"
|
local pool="$1"
|
||||||
local uuid="${2:-wwn-0x5000cca2dfe2e414}"
|
local uuid="${2:-wwn-0x5000cca2dfe2e414}"
|
||||||
|
|
||||||
|
mkdir -p "${STATE_DIR}"
|
||||||
cat > "${STATE_DIR}/disk-uuids-${pool}.txt" <<EOF
|
cat > "${STATE_DIR}/disk-uuids-${pool}.txt" <<EOF
|
||||||
initialized=true
|
initialized=true
|
||||||
timestamp=2024-12-29_14:00:00
|
timestamp=2024-12-29_14:00:00
|
||||||
|
|
@ -246,6 +247,7 @@ create_critical_error_file() {
|
||||||
local pool="$1"
|
local pool="$1"
|
||||||
local epoch="${2:-${TEST_CURRENT_EPOCH:-1735481400}}"
|
local epoch="${2:-${TEST_CURRENT_EPOCH:-1735481400}}"
|
||||||
|
|
||||||
|
mkdir -p "${STATE_DIR}"
|
||||||
cat > "${STATE_DIR}/critical-errors-${pool}.txt" <<EOF
|
cat > "${STATE_DIR}/critical-errors-${pool}.txt" <<EOF
|
||||||
timestamp=2024-12-29_14:00:00
|
timestamp=2024-12-29_14:00:00
|
||||||
epoch=${epoch}
|
epoch=${epoch}
|
||||||
|
|
|
||||||
Loading…
Add table
Add a link
Reference in a new issue