18 changed files with 215 additions and 897 deletions
@ -1,119 +0,0 @@ |
|||||
"""Test PostgreSQL backup creation and VirtioFS storage. |
|
||||
|
|
||||
These tests verify that: |
|
||||
- The backup oneshot service can be triggered manually and runs to completion. |
|
||||
- The expected backup artefacts land in the VirtioFS share (accessible from |
|
||||
the test runner's host filesystem without SSH). |
|
||||
- The backup retention policy removes stale backups. |
|
||||
|
|
||||
Note: tests within a module share a single VM (module-scoped fixture), so |
|
||||
the order of test execution matters here: the backup files checked in later |
|
||||
tests are created by the earlier trigger test. |
|
||||
""" |
|
||||
|
|
||||
import time |
|
||||
from pathlib import Path |
|
||||
|
|
||||
|
|
||||
# --------------------------------------------------------------------------- |
|
||||
# Trigger and completion |
|
||||
# --------------------------------------------------------------------------- |
|
||||
|
|
||||
def test_create_database_and_table(postgresql_vm, test_ssh_key): |
|
||||
"""Create a test database and table with some data to ensure the backup has |
|
||||
something to capture.""" |
|
||||
postgresql_vm.ssh_run( |
|
||||
"podman exec postgresql-server psql -U postgres -c \"CREATE DATABASE test;\"", |
|
||||
test_ssh_key, |
|
||||
) |
|
||||
postgresql_vm.ssh_run( |
|
||||
"podman exec postgresql-server psql -U postgres -d test -c \"CREATE TABLE witness (id SERIAL PRIMARY KEY, version VARCHAR); INSERT INTO witness (version) SELECT version();\"", |
|
||||
test_ssh_key, |
|
||||
) |
|
||||
|
|
||||
def test_trigger_backup(postgresql_vm, test_ssh_key): |
|
||||
"""Starting postgresql-backup.service must succeed (no immediate error).""" |
|
||||
postgresql_vm.ssh_run( |
|
||||
"systemctl start postgresql-backup.service", |
|
||||
test_ssh_key, |
|
||||
) |
|
||||
|
|
||||
|
|
||||
def test_backup_completes_successfully(postgresql_vm, test_ssh_key): |
|
||||
"""postgresql-backup.service must finish in ``inactive`` state (not ``failed``).""" |
|
||||
state = postgresql_vm.wait_for_unit_done( |
|
||||
"postgresql-backup.service", test_ssh_key, timeout=120 |
|
||||
) |
|
||||
assert state == "inactive", ( |
|
||||
f"Backup service ended in unexpected state {state!r}. " |
|
||||
"Run: systemctl status postgresql-backup.service --no-pager" |
|
||||
) |
|
||||
|
|
||||
|
|
||||
# --------------------------------------------------------------------------- |
|
||||
# VirtioFS artefacts (verified from the host — no SSH required) |
|
||||
# --------------------------------------------------------------------------- |
|
||||
|
|
||||
|
|
||||
def test_backup_directory_exists_in_virtiofs(virtiofs_dir: Path): |
|
||||
"""The postgresql/backup sub-directory must exist in the VirtioFS share.""" |
|
||||
backup_root = virtiofs_dir / "postgresql" / "backup" |
|
||||
assert backup_root.is_dir(), f"Backup directory not found on host: {backup_root}" |
|
||||
|
|
||||
|
|
||||
def test_at_least_one_backup_present(virtiofs_dir: Path): |
|
||||
"""At least one timestamped backup sub-directory must exist.""" |
|
||||
backup_root = virtiofs_dir / "postgresql" / "backup" |
|
||||
backups = sorted(backup_root.iterdir()) |
|
||||
assert backups, f"No backup sub-directories found under {backup_root}" |
|
||||
|
|
||||
|
|
||||
def test_backup_manifest_present(virtiofs_dir: Path): |
|
||||
"""The latest backup must contain a ``backup_manifest`` file (pg_basebackup).""" |
|
||||
backup_root = virtiofs_dir / "postgresql" / "backup" |
|
||||
latest = sorted(backup_root.iterdir())[-1] |
|
||||
assert (latest / "backup_manifest").exists(), ( |
|
||||
f"backup_manifest missing in {latest}" |
|
||||
) |
|
||||
|
|
||||
|
|
||||
def test_backup_base_tar_present(virtiofs_dir: Path): |
|
||||
"""The latest backup must contain a ``base.tar`` cluster archive.""" |
|
||||
backup_root = virtiofs_dir / "postgresql" / "backup" |
|
||||
latest = sorted(backup_root.iterdir())[-1] |
|
||||
assert (latest / "base.tar").exists(), f"base.tar missing in {latest}" |
|
||||
|
|
||||
|
|
||||
def test_database_dump_present(virtiofs_dir: Path): |
|
||||
"""At least one ``dump-test.sql.gz`` file must exist alongside the cluster backup.""" |
|
||||
backup_root = virtiofs_dir / "postgresql" / "backup" |
|
||||
latest = sorted(backup_root.iterdir())[-1] |
|
||||
dumps = list(latest.glob("dump-test.sql.gz")) |
|
||||
assert dumps, f"No dump-test.sql.gz files found in {latest}" |
|
||||
|
|
||||
# --------------------------------------------------------------------------- |
|
||||
# Retention policy |
|
||||
# --------------------------------------------------------------------------- |
|
||||
|
|
||||
|
|
||||
def test_backup_retention_enforced(postgresql_vm, test_ssh_key, virtiofs_dir: Path):
    """Triggering many extra backups must not grow the backup directory past
    the configured retention limit (POSTGRES_BACKUP_RETENTION=7)."""
    retention_limit = 7

    # Ten extra runs give the rotation logic plenty of work to do.
    for _ in range(10):
        postgresql_vm.ssh_run(
            "systemctl start postgresql-backup.service", test_ssh_key
        )
        final_state = postgresql_vm.wait_for_unit_done(
            "postgresql-backup.service", test_ssh_key, timeout=120
        )
        assert final_state == "inactive"
        time.sleep(1)  # ensure distinct timestamp directories

    backup_root = virtiofs_dir / "postgresql" / "backup"
    backup_count = len(list(backup_root.iterdir()))
    assert backup_count <= retention_limit, (
        f"Retention policy failed: {backup_count} backups present, expected ≤ {retention_limit}"
    )
|
||||
@ -1,151 +0,0 @@ |
|||||
"""Test that a fresh PostgreSQL installation is healthy. |
|
||||
|
|
||||
These tests run against a brand-new VM booted from the cookbook's default |
|
||||
ignition (PG_MAJOR=14, example credentials). They verify: |
|
||||
- All expected systemd units are in the correct state. |
|
||||
- The PostgreSQL server is listening and accepts queries. |
|
||||
- VirtioFS is mounted and the expected directories exist. |
|
||||
""" |
|
||||
|
|
||||
from pathlib import Path |
|
||||
|
|
||||
from helpers import PG_MAJOR_DEFAULT, run_sql |
|
||||
|
|
||||
|
|
||||
# --------------------------------------------------------------------------- |
|
||||
# Systemd unit state |
|
||||
# --------------------------------------------------------------------------- |
|
||||
|
|
||||
|
|
||||
def test_postgresql_target_active(pg_host): |
|
||||
"""postgresql.target must be active once the full startup chain completes.""" |
|
||||
assert pg_host.service("postgresql.target").is_running |
|
||||
|
|
||||
|
|
||||
def test_postgresql_server_running(pg_host): |
|
||||
"""The long-running PostgreSQL server container must be active.""" |
|
||||
assert pg_host.service("postgresql-server.service").is_running |
|
||||
|
|
||||
|
|
||||
def test_set_major_oneshot_completed(pg_host): |
|
||||
"""postgresql-set-major.service (oneshot) must have finished — not still running.""" |
|
||||
result = pg_host.run("systemctl is-active postgresql-set-major.service") |
|
||||
assert result.stdout.strip() == "inactive" |
|
||||
|
|
||||
|
|
||||
def test_init_oneshot_completed(pg_host): |
|
||||
"""postgresql-init.service (oneshot) must have finished after initialization.""" |
|
||||
result = pg_host.run("systemctl is-active postgresql-init.service") |
|
||||
assert result.stdout.strip() == "inactive" |
|
||||
|
|
||||
|
|
||||
def test_upgrade_oneshot_completed(pg_host): |
|
||||
"""postgresql-upgrade.service (oneshot) must have finished — no upgrade needed |
|
||||
on a fresh install.""" |
|
||||
result = pg_host.run("systemctl is-active postgresql-upgrade.service") |
|
||||
assert result.stdout.strip() == "inactive" |
|
||||
|
|
||||
|
|
||||
def test_backup_timer_scheduled(pg_host): |
|
||||
"""The daily backup timer must be active (scheduled).""" |
|
||||
assert pg_host.service("postgresql-backup.timer").is_running |
|
||||
|
|
||||
|
|
||||
# --------------------------------------------------------------------------- |
|
||||
# Network / socket |
|
||||
# --------------------------------------------------------------------------- |
|
||||
|
|
||||
|
|
||||
def test_postgresql_port_listening(pg_host): |
|
||||
"""PostgreSQL must be listening on 127.0.0.1:5432 (POSTGRES_ARGS=-h 127.0.0.1).""" |
|
||||
assert pg_host.socket("tcp://127.0.0.1:5432").is_listening |
|
||||
|
|
||||
|
|
||||
# --------------------------------------------------------------------------- |
|
||||
# Filesystem layout |
|
||||
# --------------------------------------------------------------------------- |
|
||||
|
|
||||
|
|
||||
def test_virtiofs_mounted(pg_host): |
|
||||
"""The VirtioFS share must be mounted at /var/lib/virtiofs/data.""" |
|
||||
mount = pg_host.mount_point("/var/lib/virtiofs/data") |
|
||||
assert mount.exists |
|
||||
assert mount.filesystem == "virtiofs" |
|
||||
|
|
||||
|
|
||||
def test_virtiofs_postgresql_dir(pg_host): |
|
||||
"""/var/lib/virtiofs/data/postgresql must be created by tmpfiles.d.""" |
|
||||
assert pg_host.file("/var/lib/virtiofs/data/postgresql").is_directory |
|
||||
|
|
||||
|
|
||||
def test_virtiofs_backup_dir(pg_host): |
|
||||
"""/var/lib/virtiofs/data/postgresql/backup must be created by tmpfiles.d.""" |
|
||||
assert pg_host.file("/var/lib/virtiofs/data/postgresql/backup").is_directory |
|
||||
|
|
||||
|
|
||||
def test_data_dir_exists(pg_host): |
|
||||
"""/var/lib/quadlets/postgresql must exist with the correct ownership.""" |
|
||||
f = pg_host.file("/var/lib/quadlets/postgresql") |
|
||||
assert f.is_directory |
|
||||
assert f.user == "postgresql" |
|
||||
assert f.user.uid == 10004 |
|
||||
assert f.group == "itix-svc" |
|
||||
assert f.group.uid == 10000 |
|
||||
|
|
||||
def test_latest_symlink_exists(pg_host): |
|
||||
"""The 'latest' symlink must point to the active major-version directory.""" |
|
||||
link = pg_host.file("/var/lib/quadlets/postgresql/latest") |
|
||||
assert link.exists |
|
||||
assert link.is_symlink |
|
||||
|
|
||||
|
|
||||
def test_version_dir_exists(pg_host):
    """A directory named after PG_MAJOR_DEFAULT must exist in the data dir."""
    version_dir = pg_host.file(f"/var/lib/quadlets/postgresql/{PG_MAJOR_DEFAULT}")
    assert version_dir.is_directory
|
||||
|
|
||||
|
|
||||
def test_initialized_flag_exists(pg_host): |
|
||||
"""The .initialized sentinel file must be written after a successful init.""" |
|
||||
assert pg_host.file("/var/lib/quadlets/postgresql/.initialized").exists |
|
||||
|
|
||||
|
|
||||
def test_config_env_present(pg_host): |
|
||||
"""/etc/quadlets/postgresql/config.env must be present and not world-readable.""" |
|
||||
f = pg_host.file("/etc/quadlets/postgresql/config.env") |
|
||||
assert f.exists |
|
||||
# mode 0600 — world and group bits must be 0 |
|
||||
assert f.mode & 0o077 == 0 |
|
||||
|
|
||||
|
|
||||
# --------------------------------------------------------------------------- |
|
||||
# Database connectivity |
|
||||
# --------------------------------------------------------------------------- |
|
||||
|
|
||||
|
|
||||
def test_postgresql_accepts_connections(postgresql_vm, test_ssh_key):
    """A trivial SQL query must succeed against the running server."""
    result = run_sql(postgresql_vm, test_ssh_key, "SELECT 1 AS probe")
    assert "1" in result
|
||||
|
|
||||
|
|
||||
def test_postgresql_version_matches_config(postgresql_vm, test_ssh_key):
    """The reported server_version must match the configured PG_MAJOR_DEFAULT."""
    reported = run_sql(postgresql_vm, test_ssh_key, "SHOW server_version")
    assert PG_MAJOR_DEFAULT in reported
|
||||
|
|
||||
|
|
||||
def test_can_create_database(postgresql_vm, test_ssh_key):
    """Creating a new database must work and be visible in pg_database."""
    run_sql(postgresql_vm, test_ssh_key, "CREATE DATABASE install_test_db")
    listed = run_sql(
        postgresql_vm,
        test_ssh_key,
        "SELECT datname FROM pg_database WHERE datname = 'install_test_db'",
    )
    assert "install_test_db" in listed
|
||||
@ -1,154 +0,0 @@ |
|||||
"""Test PostgreSQL automatic crash recovery. |
|
||||
|
|
||||
Scenarios covered: |
|
||||
1. Container crash (SIGKILL via ``podman kill``) → systemd restarts the |
|
||||
service automatically (Restart=always, RestartSec=10). |
|
||||
2. Hard VM reboot → all services start cleanly and data is intact. |
|
||||
|
|
||||
All tests share the module-scoped ``postgresql_vm`` fixture. Because some |
|
||||
tests are destructive (they kill the container), they are intentionally |
|
||||
sequenced: create data → crash → verify recovery → create more data → |
|
||||
reboot → verify recovery. |
|
||||
""" |
|
||||
|
|
||||
import time |
|
||||
|
|
||||
from helpers import run_sql |
|
||||
|
|
||||
# Data written before the crash that must survive each recovery scenario.
# Table/value created by test_create_data_before_crash and re-checked after
# the automatic restart and again after the reboot.
CRASH_WITNESS_TABLE = "crash_witness"
CRASH_WITNESS_VALUE = "before_crash"

# Equivalent sentinels for the hard-reboot scenario (scenario 2).
REBOOT_WITNESS_TABLE = "reboot_witness"
REBOOT_WITNESS_VALUE = "before_reboot"
|
||||
|
|
||||
|
|
||||
# --------------------------------------------------------------------------- |
|
||||
# Scenario 1: container crash |
|
||||
# --------------------------------------------------------------------------- |
|
||||
|
|
||||
|
|
||||
def test_server_running_before_crash(pg_host): |
|
||||
"""Precondition: postgresql-server.service must be active before we crash it.""" |
|
||||
assert pg_host.service("postgresql-server.service").is_running |
|
||||
|
|
||||
|
|
||||
def test_create_data_before_crash(postgresql_vm, test_ssh_key):
    """Write a witness row that must survive the container crash."""
    seed_sql = (
        f"CREATE TABLE IF NOT EXISTS {CRASH_WITNESS_TABLE} "
        f"(id SERIAL PRIMARY KEY, message TEXT NOT NULL); "
        f"INSERT INTO {CRASH_WITNESS_TABLE} (message) "
        f"VALUES ('{CRASH_WITNESS_VALUE}');"
    )
    run_sql(postgresql_vm, test_ssh_key, seed_sql)
|
||||
|
|
||||
|
|
||||
def test_kill_postgresql_container(postgresql_vm, test_ssh_key): |
|
||||
"""Simulate a process crash by sending SIGKILL to the container. |
|
||||
|
|
||||
``podman kill`` delivers SIGKILL to the container's PID 1. Systemd will |
|
||||
detect the exit and restart the service after RestartSec=10 seconds. |
|
||||
""" |
|
||||
postgresql_vm.ssh_run( |
|
||||
"podman kill --signal SIGKILL postgresql-server", |
|
||||
test_ssh_key, |
|
||||
) |
|
||||
|
|
||||
|
|
||||
def test_service_restarts_automatically(postgresql_vm, test_ssh_key):
    """After the crash the server unit must become active again on its own.

    systemd waits RestartSec=10 s before restarting, and the container
    start-up plus health check add more delay — hence the generous window.
    """
    # Give systemd a moment to register the exit before we start polling.
    time.sleep(5)
    postgresql_vm.wait_for_service(
        "postgresql-server.service", test_ssh_key, timeout=120
    )
|
||||
|
|
||||
|
|
||||
def test_data_intact_after_crash_recovery(postgresql_vm, test_ssh_key):
    """The pre-crash witness row must still be queryable after recovery."""
    query = (
        f"SELECT message FROM {CRASH_WITNESS_TABLE} "
        f"WHERE message = '{CRASH_WITNESS_VALUE}'"
    )
    result = run_sql(postgresql_vm, test_ssh_key, query)
    assert CRASH_WITNESS_VALUE in result, (
        f"Crash witness row not found after recovery. Query returned: {result!r}"
    )
|
||||
|
|
||||
|
|
||||
def test_target_still_active_after_crash(pg_host): |
|
||||
"""postgresql.target must remain active after the container recovery.""" |
|
||||
assert pg_host.service("postgresql.target").is_running |
|
||||
|
|
||||
|
|
||||
# --------------------------------------------------------------------------- |
|
||||
# Scenario 2: hard reboot |
|
||||
# --------------------------------------------------------------------------- |
|
||||
|
|
||||
|
|
||||
def test_create_data_before_reboot(postgresql_vm, test_ssh_key):
    """Write a witness row that must survive a full VM reboot."""
    seed_sql = (
        f"CREATE TABLE IF NOT EXISTS {REBOOT_WITNESS_TABLE} "
        f"(id SERIAL PRIMARY KEY, message TEXT NOT NULL); "
        f"INSERT INTO {REBOOT_WITNESS_TABLE} (message) "
        f"VALUES ('{REBOOT_WITNESS_VALUE}');"
    )
    run_sql(postgresql_vm, test_ssh_key, seed_sql)
|
||||
|
|
||||
|
|
||||
def test_reboot_vm(postgresql_vm, test_ssh_key):
    """Ask the guest OS for a graceful reboot; SSH drops for a while."""
    postgresql_vm.ssh_run("systemctl reboot", test_ssh_key, check=False)
    # Give the VM time to actually go down before anyone polls SSH again.
    time.sleep(15)
|
||||
|
|
||||
|
|
||||
def test_ssh_available_after_reboot(postgresql_vm, test_ssh_key): |
|
||||
"""SSH must become available again within 5 minutes of the reboot.""" |
|
||||
# Reset the cached IP so wait_ssh re-probes it. |
|
||||
postgresql_vm._ip = None |
|
||||
postgresql_vm.wait_ssh(ssh_key=test_ssh_key, timeout=300) |
|
||||
|
|
||||
|
|
||||
def test_postgresql_target_active_after_reboot(postgresql_vm, test_ssh_key): |
|
||||
"""postgresql.target must come up automatically on reboot (enabled in ignition).""" |
|
||||
postgresql_vm.wait_for_service( |
|
||||
"postgresql.target", ssh_key=test_ssh_key, timeout=300 |
|
||||
) |
|
||||
|
|
||||
|
|
||||
def test_data_intact_after_reboot(postgresql_vm, test_ssh_key):
    """The pre-reboot witness row must still be present after boot."""
    query = (
        f"SELECT message FROM {REBOOT_WITNESS_TABLE} "
        f"WHERE message = '{REBOOT_WITNESS_VALUE}'"
    )
    result = run_sql(postgresql_vm, test_ssh_key, query)
    assert REBOOT_WITNESS_VALUE in result, (
        f"Reboot witness row not found. Query returned: {result!r}"
    )
|
||||
|
|
||||
|
|
||||
def test_crash_witness_also_intact_after_reboot(postgresql_vm, test_ssh_key):
    """Data written before the crash must also survive the later reboot."""
    query = (
        f"SELECT message FROM {CRASH_WITNESS_TABLE} "
        f"WHERE message = '{CRASH_WITNESS_VALUE}'"
    )
    result = run_sql(postgresql_vm, test_ssh_key, query)
    assert CRASH_WITNESS_VALUE in result
|
||||
@ -1,59 +0,0 @@ |
|||||
"""Test that a fresh PostgreSQL installation is secure. |
|
||||
|
|
||||
These tests run against a brand-new VM booted from the cookbook's default |
|
||||
ignition (PG_MAJOR=14, example credentials). They verify: |
|
||||
- The PostgreSQL port is NOT exposed to the network. |
|
||||
- The PostgreSQL backup directory has the correct ownership and permissions. |
|
||||
""" |
|
||||
|
|
||||
from pathlib import Path |
|
||||
import socket |
|
||||
|
|
||||
# --------------------------------------------------------------------------- |
|
||||
# Network / socket |
|
||||
# --------------------------------------------------------------------------- |
|
||||
|
|
||||
def test_postgresql_port_listening(pg_host): |
|
||||
"""PostgreSQL must be listening on 127.0.0.1:5432 (POSTGRES_ARGS=-h 127.0.0.1).""" |
|
||||
assert pg_host.socket("tcp://127.0.0.1:5432").is_listening |
|
||||
|
|
||||
|
|
||||
def _tcp_connect_ok(ip, port, timeout=3):
    """Return True when a TCP connection to (ip, port) succeeds within timeout.

    The socket is always closed (try/finally) — the original inline code
    leaked the socket whenever an assertion fired before ``s.close()``.
    """
    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    try:
        s.settimeout(timeout)
        return s.connect_ex((ip, port)) == 0
    finally:
        s.close()


def test_postgresql_port_not_exposed(postgresql_vm):
    """PostgreSQL must NOT be exposed to the network.

    Uses two controls so a network outage cannot masquerade as success:
    port 22 proves the host is reachable, port 23 proves closed ports are
    detected, then 5432 is the actual check.
    """
    ip = postgresql_vm.ip

    # Positive control: port 22 (SSH) must be reachable
    assert _tcp_connect_ok(ip, 22), (
        f"Port 22 is NOT reachable from the host on {ip}!"
    )

    # Negative control: port 23 must NOT be reachable
    assert not _tcp_connect_ok(ip, 23), (
        f"Port 23 is reachable from the host on {ip}!"
    )

    # The real test: port 5432 must NOT be reachable
    assert not _tcp_connect_ok(ip, 5432), (
        f"Port 5432 is reachable from the host on {ip}!"
    )
|
||||
|
|
||||
# --------------------------------------------------------------------------- |
|
||||
# VirtioFS permissions (verified from the host — no SSH required) |
|
||||
# --------------------------------------------------------------------------- |
|
||||
|
|
||||
def test_backup_directory_exists_in_virtiofs(virtiofs_dir: Path): |
|
||||
"""The postgresql/backup sub-directory must exist in the VirtioFS share.""" |
|
||||
backup_root = virtiofs_dir / "postgresql" / "backup" |
|
||||
assert backup_root.exists(), f"Backup directory not found on host: {backup_root}" |
|
||||
# mode 0700 — world and group bits must be 0 |
|
||||
assert backup_root.stat().st_mode & 0o077 == 0 |
|
||||
assert backup_root.stat().st_uid == 10004, f"Backup directory must be owned by postgres (uid 10004), but got {backup_root.stat().st_uid}" |
|
||||
assert backup_root.stat().st_gid == 10000, f"Backup directory must be owned by postgres (gid 10000), but got {backup_root.stat().st_gid}" |
|
||||
@ -1,163 +0,0 @@ |
|||||
"""Test the PostgreSQL major version upgrade path: PG 14 → PG 17. |
|
||||
|
|
||||
The upgrade mechanism works as follows: |
|
||||
1. postgresql-set-major.service updates the ``latest`` symlink to point at |
|
||||
the new PG_MAJOR directory (e.g. /var/lib/quadlets/postgresql/17/). |
|
||||
2. postgresql-upgrade.service detects that |
|
||||
``latest/docker/PG_VERSION`` does not exist (the 17/ directory is |
|
||||
empty) and triggers pgautoupgrade. |
|
||||
3. pg_upgrade migrates data from the old directory to the new one. |
|
||||
4. postgresql-server.service starts against the upgraded data. |
|
||||
|
|
||||
All tests in this module share a single ``upgrade_vm`` fixture that starts |
|
||||
with PG_MAJOR_UPGRADE_FROM (14). Tests are intentionally ordered to form a |
|
||||
sequential scenario: create data → trigger upgrade → verify outcome. |
|
||||
""" |
|
||||
|
|
||||
from pathlib import Path |
|
||||
|
|
||||
from helpers import PG_MAJOR_UPGRADE_FROM, PG_MAJOR_UPGRADE_TO, run_sql |
|
||||
|
|
||||
# Sentinel table and row used to verify data survives the upgrade.
# Created by test_create_witness_data and re-checked by
# test_witness_data_preserved once the new major version is running.
WITNESS_TABLE = "upgrade_witness"
WITNESS_VALUE = "before_upgrade"
|
||||
|
|
||||
|
|
||||
# --------------------------------------------------------------------------- |
|
||||
# Pre-upgrade baseline |
|
||||
# --------------------------------------------------------------------------- |
|
||||
|
|
||||
|
|
||||
def test_initial_version_is_upgrade_from(upgrade_vm, test_ssh_key):
    """Baseline: the VM must still be running PG_MAJOR_UPGRADE_FROM."""
    reported = run_sql(upgrade_vm, test_ssh_key, "SHOW server_version")
    assert PG_MAJOR_UPGRADE_FROM in reported, (
        f"Expected PG {PG_MAJOR_UPGRADE_FROM}, got: {reported!r}"
    )
|
||||
|
|
||||
|
|
||||
def test_create_witness_data(upgrade_vm, test_ssh_key):
    """Write a witness row that must survive the major-version upgrade."""
    seed_sql = (
        f"CREATE TABLE IF NOT EXISTS {WITNESS_TABLE} "
        f"(id SERIAL PRIMARY KEY, message TEXT NOT NULL); "
        f"INSERT INTO {WITNESS_TABLE} (message) VALUES ('{WITNESS_VALUE}');"
    )
    run_sql(upgrade_vm, test_ssh_key, seed_sql)
    # Read the row straight back so a silent insert failure surfaces here.
    readback = run_sql(
        upgrade_vm,
        test_ssh_key,
        f"SELECT message FROM {WITNESS_TABLE} WHERE message = '{WITNESS_VALUE}'",
    )
    assert WITNESS_VALUE in readback
|
||||
|
|
||||
|
|
||||
# --------------------------------------------------------------------------- |
|
||||
# Trigger the upgrade |
|
||||
# --------------------------------------------------------------------------- |
|
||||
|
|
||||
|
|
||||
def test_bump_pg_major_in_config(upgrade_vm, test_ssh_key):
    """Flip PG_MAJOR in config.env from the old to the new major version."""
    sed_cmd = (
        f"sed -i 's/^PG_MAJOR={PG_MAJOR_UPGRADE_FROM}$/PG_MAJOR={PG_MAJOR_UPGRADE_TO}/' "
        "/etc/quadlets/postgresql/config.env"
    )
    upgrade_vm.ssh_run(sed_cmd, test_ssh_key)
    # Read the file back to make sure sed actually changed the line.
    check = upgrade_vm.ssh_run(
        "grep ^PG_MAJOR= /etc/quadlets/postgresql/config.env",
        test_ssh_key,
    )
    assert f"PG_MAJOR={PG_MAJOR_UPGRADE_TO}" in check.stdout
|
||||
|
|
||||
|
|
||||
def test_restart_postgresql_target(upgrade_vm, test_ssh_key): |
|
||||
"""Restart postgresql.target to kick off the upgrade chain.""" |
|
||||
upgrade_vm.ssh_run("systemctl restart postgresql.target", test_ssh_key) |
|
||||
|
|
||||
|
|
||||
def test_upgrade_service_completes(upgrade_vm, test_ssh_key): |
|
||||
"""postgresql-upgrade.service must finish in ``inactive`` state (not ``failed``). |
|
||||
|
|
||||
pgautoupgrade can take several minutes for large databases; allow up to |
|
||||
10 minutes. |
|
||||
""" |
|
||||
state = upgrade_vm.wait_for_unit_done( |
|
||||
"postgresql-upgrade.service", test_ssh_key, timeout=600 |
|
||||
) |
|
||||
assert state == "inactive", ( |
|
||||
f"Upgrade service ended in state {state!r}. " |
|
||||
"Inspect with: systemctl status postgresql-upgrade.service --no-pager " |
|
||||
"and: journalctl -u postgresql-upgrade.service" |
|
||||
) |
|
||||
|
|
||||
|
|
||||
def test_server_active_after_upgrade(upgrade_vm, test_ssh_key): |
|
||||
"""postgresql-server.service must be active after the upgrade.""" |
|
||||
upgrade_vm.wait_for_service( |
|
||||
"postgresql-server.service", test_ssh_key, timeout=120 |
|
||||
) |
|
||||
|
|
||||
|
|
||||
# --------------------------------------------------------------------------- |
|
||||
# Post-upgrade verification |
|
||||
# --------------------------------------------------------------------------- |
|
||||
|
|
||||
|
|
||||
def test_new_version_is_running(upgrade_vm, test_ssh_key):
    """The server must now report PG_MAJOR_UPGRADE_TO as its version."""
    reported = run_sql(upgrade_vm, test_ssh_key, "SHOW server_version")
    assert PG_MAJOR_UPGRADE_TO in reported, (
        f"Expected PG {PG_MAJOR_UPGRADE_TO} after upgrade, got: {reported!r}"
    )
|
||||
|
|
||||
|
|
||||
def test_witness_data_preserved(upgrade_vm, test_ssh_key):
    """The pre-upgrade witness row must still be present and correct."""
    readback = run_sql(
        upgrade_vm,
        test_ssh_key,
        f"SELECT message FROM {WITNESS_TABLE} WHERE message = '{WITNESS_VALUE}'",
    )
    assert WITNESS_VALUE in readback, (
        f"Witness row '{WITNESS_VALUE}' not found after upgrade. "
        f"Query returned: {readback!r}"
    )
|
||||
|
|
||||
|
|
||||
def test_old_data_dir_removed(upgrade_vm, test_ssh_key):
    """pgautoupgrade must delete the source data dir after a clean upgrade."""
    probe = upgrade_vm.ssh_run(
        f"test -d /var/lib/quadlets/postgresql/{PG_MAJOR_UPGRADE_FROM}/docker",
        test_ssh_key,
        check=False,
    )
    assert probe.returncode != 0, (
        f"Old data directory for PG {PG_MAJOR_UPGRADE_FROM} still exists — "
        "upgrade may not have cleaned up properly"
    )
|
||||
|
|
||||
|
|
||||
def test_latest_symlink_points_to_new_version(upgrade_vm, test_ssh_key):
    """``latest`` must now resolve to the PG_MAJOR_UPGRADE_TO directory."""
    link_target = upgrade_vm.ssh_run(
        "readlink /var/lib/quadlets/postgresql/latest",
        test_ssh_key,
    )
    assert PG_MAJOR_UPGRADE_TO in link_target.stdout, (
        f"latest symlink does not point at PG {PG_MAJOR_UPGRADE_TO}: "
        f"{link_target.stdout.strip()!r}"
    )
|
||||
|
|
||||
|
|
||||
def test_new_data_dir_has_pg_version_file(upgrade_vm, test_ssh_key):
    """PG_VERSION in the new data dir proves the cluster was initialised."""
    version_file = upgrade_vm.ssh_run(
        f"cat /var/lib/quadlets/postgresql/{PG_MAJOR_UPGRADE_TO}/docker/PG_VERSION",
        test_ssh_key,
    )
    assert PG_MAJOR_UPGRADE_TO in version_file.stdout
|
||||
Loading…
Reference in new issue