Browse Source

postgresql test: almost complete

main
Nicolas Massé 1 month ago
parent
commit
f96cabe3c0
  1. 2
      README.md
  2. 106
      conftest.py
  3. 0
      cookbooks/base/config/examples/fastfetch.jsonc
  4. 3
      cookbooks/base/install-fastfetch.service
  5. 16
      cookbooks/base/profile.d/fastfetch.sh
  6. 63
      cookbooks/postgresql/tests/conftest.py
  7. 10
      cookbooks/postgresql/tests/helpers.py
  8. 119
      cookbooks/postgresql/tests/test_01_install_upgrade_backup.py
  9. 97
      cookbooks/postgresql/tests/test_02_restore.py
  10. 119
      cookbooks/postgresql/tests/test_backup.py
  11. 151
      cookbooks/postgresql/tests/test_install.py
  12. 154
      cookbooks/postgresql/tests/test_recovery.py
  13. 59
      cookbooks/postgresql/tests/test_security.py
  14. 163
      cookbooks/postgresql/tests/test_upgrade.py
  15. 1
      pyproject.toml
  16. 27
      scripts/common.mk
  17. 1
      scripts/default-butane-spec.sh
  18. 21
      tests/fcos_vm.py

2
README.md

@ -35,7 +35,7 @@ This repository gathers all the recipes (hence the name "Cookbook") to deploy Op
- `Makefile`: Cookbook's Makefile. Includes `../common.mk`. (**REQUIRED**)
- `overlay.bu`: Fedora CoreOS Butane Specifications to include in the generated Ignition files. (_OPTIONAL_)
- `fcos.bu`: The Fedora CoreOS Butane Specifications to build the test FCOS Virtual Machine. (_OPTIONAL_)
- `fcos.bu`: The Fedora CoreOS Butane Specifications to build the dev & test FCOS Virtual Machine. (_OPTIONAL_)
- `config/*`: Cookbook's configuration files (read-only). Goes into `/etc/quadlets/$(PROJECT_NAME)`.
- `config/examples/*`: Cookbook configuration files (sample configuration, to be overwritten for each deployment). Goes into `/etc/quadlets/$(PROJECT_NAME)`.
- `config/examples/*.env`: Systemd environment files, potentially containing secrets (to be overwritten for each deployment). Goes into `/etc/quadlets/$(PROJECT_NAME)`.

106
conftest.py

@ -1,12 +1,28 @@
"""Pytest fixtures for the Podman Quadlets cookbooks.
Prerequisites:
- Must run as root (KVM/libvirt access).
- The Fedora CoreOS base QCOW2 image must be present at /var/lib/libvirt/images/library/fedora-coreos.qcow2.
Run ``coreos-installer download -p qemu -f qcow2.xz -d -C /var/lib/libvirt/images/library/`` to fetch it.
- fcos-test.ign for the cookbook is built on demand by ``make butane`` if it is missing.
"""
import subprocess
from pathlib import Path
import shutil
import os
import sys
import pytest
import testinfra
import textwrap
# Persistent directory used when --keep-vm is active.
_KEEP_VM_CACHE_DIR = Path.home() / ".cache" / "podman-quadlet-cookbook-tests"
from fcos_vm import FCOSVirtualMachine, ensure_fcos_ign # noqa: E402
# Persistent directory used when --keep-vm is active.
_KEEP_VM_CACHE_DIR = Path.home() / ".cache" / "pytest"
# You can pass --keep-vm on the command line to keep the test VM alive after the test run and reuse it on the next run.
# Speeds up iteration: the VM is created once and never destroyed. The SSH key is stored persistently in ~/.cache/pytest.
def pytest_addoption(parser: pytest.Parser) -> None:
parser.addoption(
"--keep-vm",
@ -20,7 +36,6 @@ def pytest_addoption(parser: pytest.Parser) -> None:
),
)
@pytest.fixture(scope="session")
def keep_vm(request: pytest.FixtureRequest) -> bool:
"""True when --keep-vm was passed on the command line."""
@ -63,3 +78,86 @@ def test_ssh_key(
def test_ssh_pubkey(test_ssh_key: Path) -> str:
    """Public key string corresponding to test_ssh_key."""
    # The public half lives next to the private key, with a .pub suffix.
    pub_path = test_ssh_key.with_suffix(".pub")
    return pub_path.read_text().strip()
# The VirtioFS share is where important, persistent data is stored.
# It outlives individual test modules (package scope).
@pytest.fixture(scope="package")
def virtiofs_dirs(request, keep_vm: bool) -> list[tuple[Path, str]]:
    """VirtioFS host directories for the default test VM.

    With --keep-vm the directory is persistent (``-dev`` suffix) so the VM
    can be reused across test runs.  Otherwise a unique per-process path is
    used and removed on teardown.
    """
    cookbook = Path(request.path).parent.parent.name
    suffix = "dev" if keep_vm else str(os.getpid())
    share = Path("/srv") / f"fcos-test-{cookbook}-{suffix}"
    share.mkdir(parents=True, exist_ok=True)
    yield [(share, "data")]  # <-- tests run here with the share exposed to the VM
    if not keep_vm and share.exists():
        shutil.rmtree(share)
# The VM itself is recreated for each test module to ensure a clean state,
# so the testinfra connection is module-scoped as well.
@pytest.fixture(scope="module")
def fcos_host(fcos_vm: FCOSVirtualMachine, test_ssh_key: Path):
    """testinfra SSH host connected to the default FCOS VM."""
    # Host-key checking is disabled: the VM is freshly provisioned, so its
    # key is unknown and must not pollute the runner's known_hosts.
    ssh_options = " ".join([
        f"-i {test_ssh_key}",
        "-o StrictHostKeyChecking=no",
        "-o UserKnownHostsFile=/dev/null",
    ])
    return testinfra.get_host(f"ssh://root@{fcos_vm.ip}", ssh_extra_args=ssh_options)
# Default VM configuration (memory in MB, vCPUs, root disk size in GB, /var disk size in GB).
@pytest.fixture(scope="package")
def fcos_vm_config() -> tuple[int, int, int, int]:
    """Default VM sizing as (memory MB, vCPUs, root disk GB, /var disk GB)."""
    memory_mb, vcpus, root_gb, var_gb = 4096, 2, 50, 100
    return (memory_mb, vcpus, root_gb, var_gb)
# Test VMs are kept for the duration of a test module, backed by a persistent
# VirtioFS directory.
@pytest.fixture(scope="module")
def fcos_vm(
    request,  # pytest request object: gives access to the requesting module and its path.
    keep_vm: bool,  # --keep-vm flag: keep and reuse the VM across runs for debugging.
    fcos_vm_config: tuple[int, int, int, int],  # (memory MB, vCPUs, root disk GB, /var disk GB).
    test_ssh_key: Path,  # SSH private key used to connect to the VM.
    test_ssh_pubkey: str,  # SSH public key content injected into the VM.
    virtiofs_dirs: list[tuple[Path, str]],  # (host dir, VM target) pairs exposed via VirtioFS.
    tmp_path_factory: pytest.TempPathFactory,  # Factory for temporary directories.
) -> FCOSVirtualMachine:
    """Running CoreOS VM with Quadlets installed.

    With --keep-vm the VM is reused across runs: it is created only if it
    does not already exist and is never destroyed on teardown.

    Test modules can inject extra files into the image by declaring a
    module-level ``PYTEST_FCOS_EXTRA_FILES`` mapping.
    """
    # e.g. "test_01_install_upgrade_backup" -> "01-install-upgrade-backup".
    module_name = request.module.__name__.split(".")[-1].replace("test_", "").replace("_", "-")
    cookbook_dir = Path(request.path).parent.parent
    vm = FCOSVirtualMachine(
        cookbook_name=cookbook_dir.name,
        instance_name=module_name,
        keep=keep_vm,
        virtiofs_dirs=virtiofs_dirs,
        vm_config=fcos_vm_config,  # PEP 8: no spaces around '=' in keyword arguments.
    )
    # Note: the original computed an unused ``pg_major`` from the module's
    # PG_MAJOR_DEFAULT; that dead local has been removed.
    if not (keep_vm and vm.exists()):
        fcos_ign = ensure_fcos_ign(cookbook_dir)
        vm.ignition.ignition_files.append(fcos_ign)
        vm.ignition.extra_files.update(getattr(request.module, "PYTEST_FCOS_EXTRA_FILES", {}))
        vm.ignition.ssh_key = test_ssh_pubkey
        vm.create()
    # Always wait for SSH so a reused (--keep-vm) VM also has its IP probed
    # and is known to be reachable before tests start.
    vm.wait_ssh(ssh_key=test_ssh_key, timeout=300)
    yield vm  # <-- tests run here with access to the VM instance
    if not keep_vm:
        vm.destroy()

0
cookbooks/base/config/fastfetch.jsonc → cookbooks/base/config/examples/fastfetch.jsonc

3
cookbooks/base/install-fastfetch.service

@ -3,12 +3,13 @@ Description=Install fastfetch
Wants=network-online.target
After=network-online.target
ConditionPathExists=!/usr/local/bin/fastfetch
ConditionPathExists=/etc/quadlets/base/fastfetch.env
[Service]
Type=oneshot
RemainAfterExit=yes
ExecStart=/etc/quadlets/base/install-fastfetch.sh
EnvironmentFile=-/etc/quadlets/base/fastfetch.env
EnvironmentFile=/etc/quadlets/base/fastfetch.env
[Install]
WantedBy=multi-user.target

16
cookbooks/base/profile.d/fastfetch.sh

@ -1,9 +1,11 @@
#!/bin/sh
declare -a FASTFETCH_OPTIONS=( -c /etc/quadlets/base/fastfetch.jsonc )
if [ "$USER" == "root" ]; then
FASTFETCH_OPTIONS+=( --custom-key-color dim_red --color-keys red --title-color-user red )
else
FASTFETCH_OPTIONS+=( --custom-key-color dim_blue --color-keys blue --title-color-user green )
# Show fastfetch at login, but only once install-fastfetch.service has
# actually dropped the binary in /usr/local/bin.
if [ -x /usr/local/bin/fastfetch ]; then
    # NOTE(review): 'declare -a' and '+=( )' are bash features; this snippet is
    # sourced from profile.d, presumably by bash — confirm the login shell.
    declare -a FASTFETCH_OPTIONS=( -c /etc/quadlets/base/fastfetch.jsonc )
    if [ "$USER" == "root" ]; then
        # Red theme signals that the session is root.
        FASTFETCH_OPTIONS+=( --custom-key-color dim_red --color-keys red --title-color-user red )
    else
        FASTFETCH_OPTIONS+=( --custom-key-color dim_blue --color-keys blue --title-color-user green )
    fi
    fastfetch "${FASTFETCH_OPTIONS[@]}"
    # Avoid leaking the helper variable into the login shell environment.
    unset FASTFETCH_OPTIONS
fi
fastfetch "${FASTFETCH_OPTIONS[@]}"
unset FASTFETCH_OPTIONS

63
cookbooks/postgresql/tests/conftest.py

@ -1,58 +1,13 @@
"""Pytest fixtures for the PostgreSQL cookbook end-to-end tests.
Prerequisites:
- Must run as root (KVM/libvirt access).
- The Fedora CoreOS base QCOW2 image must be present at /var/lib/libvirt/images/library/fedora-coreos.qcow2.
Run ``coreos-installer download -p qemu -f qcow2.xz -d -C /var/lib/libvirt/images/library/`` to fetch it.
- fcos.ign for the postgresql cookbook is built on demand by ``make -C postgresql butane`` if it is missing.
"""
import shutil
import sys
import pytest
import testinfra
from pathlib import Path
THIS_COOKBOOK_DIR = Path(__file__).parent.parent
COOKBOOKS_DIR = THIS_COOKBOOK_DIR.parent
TOP_LEVEL_DIR = COOKBOOKS_DIR.parent
THIS_COOKBOOK_NAME = THIS_COOKBOOK_DIR.name
# Add directories to the path so we can import local helpers and shared vm.py.
sys.path.insert(0, str(Path(__file__).parent))
sys.path.insert(0, str(TOP_LEVEL_DIR / "tests"))
from fcos_vm import FCOSVirtualMachine, ensure_fcos_ign # noqa: E402
# The virtiofs is where important and persistent data are stored.
# We keep it for the entire test session.
@pytest.fixture(scope="session")
def virtiofs_dirs(keep_vm: bool) -> list[tuple[Path, str]]:
"""VirtioFS host directories for the default test VM.
With --keep-vm the directories are persistent so the VM can be reused across
test runs. Without it unique per-process paths are used and cleaned up
on teardown.
"""
if keep_vm:
d = Path("/srv") / f"fcos-test-{THIS_COOKBOOK_NAME}-dev"
else:
d = Path("/srv") / f"fcos-test-{THIS_COOKBOOK_NAME}-{os.getpid()}"
d.mkdir(parents=True, exist_ok=True)
yield [(d, "data",)] # <-- tests run here with access to the virtiofs directories
# Because PostgreSQL init & upgrades can take a long time, we give the VM
# more resources than the repo-wide default.
@pytest.fixture(scope="package")
def fcos_vm_config() -> tuple[int, int, int, int]:
    """VM sizing override: (memory MB, vCPUs, root disk GB, /var disk GB)."""
    memory_mb, vcpus, root_gb, var_gb = 8192, 4, 50, 100
    return (memory_mb, vcpus, root_gb, var_gb)
if not keep_vm and d.exists():
shutil.rmtree(d)
# PostgreSQL major versions to test during upgrade from PG_MAJOR_DEFAULT.
@pytest.fixture(scope="package", params=[15, 16, 17, 18])
def pg_upgrade_major(request) -> int:
    """Target PostgreSQL major version for the parametrized upgrade tests."""
    target_major = request.param
    return int(target_major)
@pytest.fixture(scope="module")
def fcos_host(fcos_vm: FCOSVirtualMachine, test_ssh_key: Path):
"""testinfra SSH host connected to the default FCOS VM."""
return testinfra.get_host(
f"ssh://root@{fcos_vm.ip}",
ssh_extra_args=(
f"-i {test_ssh_key}"
" -o StrictHostKeyChecking=no"
" -o UserKnownHostsFile=/dev/null"
),
)

10
cookbooks/postgresql/tests/helpers.py

@ -1,15 +1,7 @@
import sys
import pytest
import testinfra
from pathlib import Path
THIS_COOKBOOK_DIR = Path(__file__).parent.parent
COOKBOOKS_DIR = THIS_COOKBOOK_DIR.parent
TOP_LEVEL_DIR = COOKBOOKS_DIR.parent
THIS_COOKBOOK_NAME = THIS_COOKBOOK_DIR.name
sys.path.insert(0, str(TOP_LEVEL_DIR / "tests"))
import test_quadlet # noqa: E402
import test_quadlet # noqa: F401
"""
Verify that the postgresql Quadlet is correctly installed and configured on a fresh VM boot.

119
cookbooks/postgresql/tests/test_01_install_upgrade_backup.py

@ -1,97 +1,48 @@
import sys
import pytest
import testinfra
import os
import shutil
import subprocess
import textwrap
# Add the current cookbook's tests directory to the path so we can import helpers.py.
from pathlib import Path
THIS_COOKBOOK_DIR = Path(__file__).parent.parent
COOKBOOKS_DIR = THIS_COOKBOOK_DIR.parent
TOP_LEVEL_DIR = COOKBOOKS_DIR.parent
THIS_COOKBOOK_NAME = THIS_COOKBOOK_DIR.name
# Add directories to the path so we can import Python modules from the top level "tests" directory and current directory.
import sys
sys.path.insert(0, str(Path(__file__).parent))
sys.path.insert(0, str(TOP_LEVEL_DIR / "tests"))
import helpers # noqa: E402
from fcos_vm import FCOSVirtualMachine, ensure_fcos_ign # noqa: E402
# PostgreSQL major versions to test during upgrade from PG_MAJOR_DEFAULT.
@pytest.fixture(scope="session", params=[15, 16, 17, 18])
def pg_upgrade_major(request) -> int:
return int(request.param)
# Major version of PostgreSQL to install by default on a fresh VM boot.
PG_MAJOR_DEFAULT = 14
# PostgreSQL VM are kept for the duration of a test module, backed with a persistent Virtiofs directory.
@pytest.fixture(scope="module")
def fcos_vm(
request,
keep_vm: bool,
test_ssh_key: Path,
test_ssh_pubkey: str,
virtiofs_dirs: list[tuple[Path, str]],
tmp_path_factory: pytest.TempPathFactory,
) -> FCOSVirtualMachine:
"""Running CoreOS VM with Quadlets installed.
With --keep-vm the VM is reused across runs: it is created only if it
does not already exist and is never destroyed on teardown.
"""
module_name = request.module.__name__.split(".")[-1].replace("test_", "").replace("_", "-")
vm = FCOSVirtualMachine(
cookbook_name=THIS_COOKBOOK_NAME,
instance_name=module_name,
keep=keep_vm,
virtiofs_dirs=virtiofs_dirs,
)
if not (keep_vm and vm.exists()):
fcos_ign = ensure_fcos_ign(THIS_COOKBOOK_DIR)
vm.ignition.ignition_files.append(fcos_ign)
vm.ignition.extra_files.update({
"/etc/quadlets/postgresql/config.env": (
textwrap.dedent(f"""
# This file is generated by conftest.py for testing purposes.
POSTGRES_USER=postgres
POSTGRES_PASSWORD=postgres
POSTGRES_DB=postgres
POSTGRES_HOST_AUTH_METHOD=scram-sha-256
POSTGRES_INITDB_ARGS=--auth-host=scram-sha-256
POSTGRES_ARGS=-h 127.0.0.1
PGPORT=5432
PG_MAJOR={PG_MAJOR_DEFAULT}
POSTGRES_BACKUP_RETENTION=7
"""),
0,
0,
0o600,
),
"/etc/quadlets/postgresql/init.d/test.sql": (
textwrap.dedent("""
-- This file is generated by conftest.py for testing purposes.
CREATE USER test WITH PASSWORD 'test';
CREATE DATABASE testdb OWNER test;
GRANT ALL PRIVILEGES ON DATABASE testdb TO test;
ALTER ROLE test SET client_encoding TO 'utf8';
"""),
10004,
10000,
0o600,
),
})
vm.ignition.ssh_key = test_ssh_pubkey
vm.create()
vm.wait_ssh(ssh_key=test_ssh_key, timeout=300)
yield vm # <-- tests run here with access to the VM instance
if not keep_vm:
vm.destroy()
# Extra files to inject into the FCOS image for the tests in this file.
# The config.env is used to configure the PostgreSQL Quadlet, and the init.d/test.sql file is an init hook that creates a test database and user on the first boot.
PYTEST_FCOS_EXTRA_FILES = {
"/etc/quadlets/postgresql/config.env": (
textwrap.dedent(f"""
# This file is generated by conftest.py for testing purposes.
POSTGRES_USER=postgres
POSTGRES_PASSWORD=postgres
POSTGRES_DB=postgres
POSTGRES_HOST_AUTH_METHOD=scram-sha-256
POSTGRES_INITDB_ARGS=--auth-host=scram-sha-256
POSTGRES_ARGS=-h 127.0.0.1
PGPORT=5432
PG_MAJOR={PG_MAJOR_DEFAULT}
POSTGRES_BACKUP_RETENTION=7
"""),
0,
0,
0o600,
),
"/etc/quadlets/postgresql/init.d/test.sql": (
textwrap.dedent("""
-- This file is generated by conftest.py for testing purposes.
CREATE USER test WITH PASSWORD 'test';
CREATE DATABASE testdb OWNER test;
GRANT ALL PRIVILEGES ON DATABASE testdb TO test;
ALTER ROLE test SET client_encoding TO 'utf8';
"""),
10004,
10000,
0o600,
),
}
"""
Verify that the postgresql Quadlet is correctly installed and configured on a fresh VM boot.

97
cookbooks/postgresql/tests/test_02_restore.py

@ -1,83 +1,40 @@
import sys
import pytest
import testinfra
import os
import shutil
import subprocess
import textwrap
# Add the current cookbook's tests directory to the path so we can import helpers.py.
from pathlib import Path
THIS_COOKBOOK_DIR = Path(__file__).parent.parent
COOKBOOKS_DIR = THIS_COOKBOOK_DIR.parent
TOP_LEVEL_DIR = COOKBOOKS_DIR.parent
THIS_COOKBOOK_NAME = THIS_COOKBOOK_DIR.name
# Add directories to the path so we can import Python modules from the top level "tests" directory and current directory.
import sys
sys.path.insert(0, str(Path(__file__).parent))
sys.path.insert(0, str(TOP_LEVEL_DIR / "tests"))
import helpers # noqa: E402
from fcos_vm import FCOSVirtualMachine, ensure_fcos_ign # noqa: E402
# Major version of PostgreSQL to install by default on a fresh VM boot.
PG_MAJOR_DEFAULT = 18
# PostgreSQL VM are kept for the duration of a test module, backed with a persistent Virtiofs directory.
@pytest.fixture(scope="module")
def fcos_vm(
request,
keep_vm: bool,
test_ssh_key: Path,
test_ssh_pubkey: str,
virtiofs_dirs: list[tuple[Path, str]],
tmp_path_factory: pytest.TempPathFactory,
) -> FCOSVirtualMachine:
"""Running CoreOS VM with Quadlets installed.
With --keep-vm the VM is reused across runs: it is created only if it
does not already exist and is never destroyed on teardown.
"""
module_name = request.module.__name__.split(".")[-1].replace("test_", "").replace("_", "-")
vm = FCOSVirtualMachine(
cookbook_name=THIS_COOKBOOK_NAME,
instance_name=module_name,
keep=keep_vm,
virtiofs_dirs=virtiofs_dirs,
)
if not (keep_vm and vm.exists()):
fcos_ign = ensure_fcos_ign(THIS_COOKBOOK_DIR)
vm.ignition.ignition_files.append(fcos_ign)
vm.ignition.extra_files.update({
"/etc/quadlets/postgresql/config.env": (
textwrap.dedent(f"""
# This file is generated by conftest.py for testing purposes.
POSTGRES_USER=postgres
POSTGRES_PASSWORD=postgres
POSTGRES_DB=postgres
POSTGRES_HOST_AUTH_METHOD=scram-sha-256
POSTGRES_INITDB_ARGS=--auth-host=scram-sha-256
POSTGRES_ARGS=-h 127.0.0.1
PGPORT=5432
PG_MAJOR={PG_MAJOR_DEFAULT}
POSTGRES_BACKUP_RETENTION=7
"""),
0,
0,
0o600,
),
})
vm.ignition.ssh_key = test_ssh_pubkey
vm.create()
vm.wait_ssh(ssh_key=test_ssh_key, timeout=300)
yield vm # <-- tests run here with access to the VM instance
if not keep_vm:
vm.destroy()
# Extra files to inject into the FCOS image for the tests in this file.
# The config.env is used to configure the PostgreSQL Quadlet.
PYTEST_FCOS_EXTRA_FILES = {
"/etc/quadlets/postgresql/config.env": (
textwrap.dedent(f"""
# This file is generated by conftest.py for testing purposes.
POSTGRES_USER=postgres
POSTGRES_PASSWORD=postgres
POSTGRES_DB=postgres
POSTGRES_HOST_AUTH_METHOD=scram-sha-256
POSTGRES_INITDB_ARGS=--auth-host=scram-sha-256
POSTGRES_ARGS=-h 127.0.0.1
PGPORT=5432
PG_MAJOR={PG_MAJOR_DEFAULT}
POSTGRES_BACKUP_RETENTION=7
"""),
0,
0,
0o600,
),
}
"""
Verify that the postgresql Quadlet correctly restores a database from a backup.
Verify that the postgresql Quadlet correctly restores a database from a backup
on a fresh VM with the backup data present in the virtiofs.
"""
class TestPostgresqlQuadletRestore(helpers.TestPostgresqlQuadlet):
expected_pg_major = PG_MAJOR_DEFAULT
@ -95,5 +52,5 @@ class TestPostgresqlQuadletRestore(helpers.TestPostgresqlQuadlet):
assert result.exit_status == 0, f"SQL query failed with exit code {result.exit_status}: {result.stderr}"
# Check that the upgrade_path table contains the initial postgresql version (14)
output = self._run_sql(fcos_host, "SELECT version FROM upgrade_path ORDER BY version ASC LIMIT 1", database="upgrade_path_db")
assert output.startswith("14."), f"Unexpected output from SQL query: {output}"
output = self._run_sql(fcos_host, "SELECT LEFT(version, 14) FROM upgrade_path ORDER BY version ASC LIMIT 1", database="upgrade_path_db")
assert output.startswith("PostgreSQL 14."), f"Unexpected output from SQL query: {output}"

119
cookbooks/postgresql/tests/test_backup.py

@ -1,119 +0,0 @@
"""Test PostgreSQL backup creation and VirtioFS storage.
These tests verify that:
- The backup oneshot service can be triggered manually and runs to completion.
- The expected backup artefacts land in the VirtioFS share (accessible from
the test runner's host filesystem without SSH).
- The backup retention policy removes stale backups.
Note: tests within a module share a single VM (module-scoped fixture), so
the order of test execution matters here: the backup files checked in later
tests are created by the earlier trigger test.
"""
import time
from pathlib import Path
# ---------------------------------------------------------------------------
# Trigger and completion
# ---------------------------------------------------------------------------
def test_create_database_and_table(postgresql_vm, test_ssh_key):
    """Create a test database and table with some data to ensure the backup has
    something to capture."""
    postgresql_vm.ssh_run(
        "podman exec postgresql-server psql -U postgres -c \"CREATE DATABASE test;\"",
        test_ssh_key,
    )
    # The witness row records the server version so later checks can verify it.
    postgresql_vm.ssh_run(
        "podman exec postgresql-server psql -U postgres -d test -c \"CREATE TABLE witness (id SERIAL PRIMARY KEY, version VARCHAR); INSERT INTO witness (version) SELECT version();\"",
        test_ssh_key,
    )
def test_trigger_backup(postgresql_vm, test_ssh_key):
    """Starting postgresql-backup.service must succeed (no immediate error)."""
    # Oneshot unit: completion is verified by the next test, this only checks
    # that the start itself is accepted.
    postgresql_vm.ssh_run(
        "systemctl start postgresql-backup.service",
        test_ssh_key,
    )
def test_backup_completes_successfully(postgresql_vm, test_ssh_key):
    """postgresql-backup.service must finish in ``inactive`` state (not ``failed``)."""
    # "failed" would mean the backup script exited non-zero.
    state = postgresql_vm.wait_for_unit_done(
        "postgresql-backup.service", test_ssh_key, timeout=120
    )
    assert state == "inactive", (
        f"Backup service ended in unexpected state {state!r}. "
        "Run: systemctl status postgresql-backup.service --no-pager"
    )
# ---------------------------------------------------------------------------
# VirtioFS artefacts (verified from the host — no SSH required)
# ---------------------------------------------------------------------------
def test_backup_directory_exists_in_virtiofs(virtiofs_dir: Path):
    """The postgresql/backup sub-directory must exist in the VirtioFS share."""
    # Checked straight on the host filesystem — no SSH round-trip needed.
    backup_root = virtiofs_dir.joinpath("postgresql", "backup")
    assert backup_root.is_dir(), (
        f"Backup directory not found on host: {backup_root}"
    )
def test_at_least_one_backup_present(virtiofs_dir: Path):
    """At least one timestamped backup sub-directory must exist."""
    backup_root = virtiofs_dir.joinpath("postgresql", "backup")
    entries = sorted(backup_root.iterdir())
    assert entries, f"No backup sub-directories found under {backup_root}"
def test_backup_manifest_present(virtiofs_dir: Path):
    """The latest backup must contain a ``backup_manifest`` file (pg_basebackup)."""
    backup_root = virtiofs_dir.joinpath("postgresql", "backup")
    # Directory names are timestamps, so lexicographic order is chronological.
    newest = sorted(backup_root.iterdir())[-1]
    manifest = newest / "backup_manifest"
    assert manifest.exists(), f"backup_manifest missing in {newest}"
def test_backup_base_tar_present(virtiofs_dir: Path):
    """The latest backup must contain a ``base.tar`` cluster archive."""
    backup_root = virtiofs_dir.joinpath("postgresql", "backup")
    newest = sorted(backup_root.iterdir())[-1]
    assert (newest / "base.tar").exists(), f"base.tar missing in {newest}"
def test_database_dump_present(virtiofs_dir: Path):
    """A ``dump-test.sql.gz`` logical dump of the test database must exist
    alongside the cluster backup."""
    backup_root = virtiofs_dir / "postgresql" / "backup"
    latest = sorted(backup_root.iterdir())[-1]
    # The glob is an exact name; glob is used so a missing file yields an
    # empty list rather than an exception.
    dumps = list(latest.glob("dump-test.sql.gz"))
    assert dumps, f"No dump-test.sql.gz files found in {latest}"
# ---------------------------------------------------------------------------
# Retention policy
# ---------------------------------------------------------------------------
def test_backup_retention_enforced(postgresql_vm, test_ssh_key, virtiofs_dir: Path):
    """After triggering several extra backups the count must stay within the
    configured retention limit (POSTGRES_BACKUP_RETENTION=7)."""
    retention = 7  # must match POSTGRES_BACKUP_RETENTION in config.env
    # Trigger ten additional backups so the rotation code has something to do.
    for _ in range(10):
        postgresql_vm.ssh_run(
            "systemctl start postgresql-backup.service", test_ssh_key
        )
        state = postgresql_vm.wait_for_unit_done(
            "postgresql-backup.service", test_ssh_key, timeout=120
        )
        assert state == "inactive"
        time.sleep(1)  # ensure distinct timestamp directories
    # Count the backup directories from the host side via the VirtioFS share.
    backup_root = virtiofs_dir / "postgresql" / "backup"
    count = len(list(backup_root.iterdir()))
    assert count <= retention, (
        f"Retention policy failed: {count} backups present, expected ≤ {retention}"
    )

151
cookbooks/postgresql/tests/test_install.py

@ -1,151 +0,0 @@
"""Test that a fresh PostgreSQL installation is healthy.
These tests run against a brand-new VM booted from the cookbook's default
ignition (PG_MAJOR=14, example credentials). They verify:
- All expected systemd units are in the correct state.
- The PostgreSQL server is listening and accepts queries.
- VirtioFS is mounted and the expected directories exist.
"""
from pathlib import Path
from helpers import PG_MAJOR_DEFAULT, run_sql
# ---------------------------------------------------------------------------
# Systemd unit state
# ---------------------------------------------------------------------------
def test_postgresql_target_active(pg_host):
    """postgresql.target must be active once the full startup chain completes."""
    # pg_host is a testinfra host; .service() reports systemd unit state.
    assert pg_host.service("postgresql.target").is_running
def test_postgresql_server_running(pg_host):
    """The long-running PostgreSQL server container must be active."""
    # Quadlet-managed container unit.
    assert pg_host.service("postgresql-server.service").is_running
def test_set_major_oneshot_completed(pg_host):
    """postgresql-set-major.service (oneshot) must have finished — not still running."""
    # A completed oneshot reports "inactive"; "failed" or "activating" would fail.
    result = pg_host.run("systemctl is-active postgresql-set-major.service")
    assert result.stdout.strip() == "inactive"
def test_init_oneshot_completed(pg_host):
    """postgresql-init.service (oneshot) must have finished after initialization."""
    # "inactive" means the oneshot ran and exited successfully.
    result = pg_host.run("systemctl is-active postgresql-init.service")
    assert result.stdout.strip() == "inactive"
def test_upgrade_oneshot_completed(pg_host):
    """postgresql-upgrade.service (oneshot) must have finished — no upgrade needed
    on a fresh install."""
    # "inactive" (not "failed") shows the unit ran and exited cleanly.
    result = pg_host.run("systemctl is-active postgresql-upgrade.service")
    assert result.stdout.strip() == "inactive"
def test_backup_timer_scheduled(pg_host):
    """The daily backup timer must be active (scheduled)."""
    # An active timer unit means the next backup run is queued in systemd.
    assert pg_host.service("postgresql-backup.timer").is_running
# ---------------------------------------------------------------------------
# Network / socket
# ---------------------------------------------------------------------------
def test_postgresql_port_listening(pg_host):
    """PostgreSQL must be listening on 127.0.0.1:5432 (POSTGRES_ARGS=-h 127.0.0.1)."""
    # Loopback only: the config binds the server to 127.0.0.1.
    assert pg_host.socket("tcp://127.0.0.1:5432").is_listening
# ---------------------------------------------------------------------------
# Filesystem layout
# ---------------------------------------------------------------------------
def test_virtiofs_mounted(pg_host):
    """The VirtioFS share must be mounted at /var/lib/virtiofs/data."""
    mount = pg_host.mount_point("/var/lib/virtiofs/data")
    assert mount.exists
    # Filesystem type confirms it is the VirtioFS share, not a stray directory.
    assert mount.filesystem == "virtiofs"
def test_virtiofs_postgresql_dir(pg_host):
    """/var/lib/virtiofs/data/postgresql must be created by tmpfiles.d."""
    # Created on the shared VirtioFS mount at boot.
    assert pg_host.file("/var/lib/virtiofs/data/postgresql").is_directory
def test_virtiofs_backup_dir(pg_host):
    """/var/lib/virtiofs/data/postgresql/backup must be created by tmpfiles.d."""
    # Destination for the backup service's artefacts.
    assert pg_host.file("/var/lib/virtiofs/data/postgresql/backup").is_directory
def test_data_dir_exists(pg_host):
    """/var/lib/quadlets/postgresql must exist with the correct ownership.

    Expected owner is the postgresql service account (uid 10004) and the
    itix-svc group (gid 10000), matching the ids used elsewhere in the tests.
    """
    f = pg_host.file("/var/lib/quadlets/postgresql")
    assert f.is_directory
    assert f.user == "postgresql"
    # Bug fix: testinfra's File.user / File.group are plain strings, so the
    # original ``f.user.uid`` and ``f.group.uid`` raised AttributeError
    # (and "uid" of a group was wrong anyway).  Numeric ownership is exposed
    # as File.uid / File.gid.
    assert f.uid == 10004
    assert f.group == "itix-svc"
    assert f.gid == 10000
def test_latest_symlink_exists(pg_host):
    """The 'latest' symlink must point to the active major-version directory."""
    # The symlink selects which per-major-version data directory is active.
    link = pg_host.file("/var/lib/quadlets/postgresql/latest")
    assert link.exists
    assert link.is_symlink
def test_version_dir_exists(pg_host):
    """A directory named after PG_MAJOR_DEFAULT must exist under the data dir."""
    # One data directory per PostgreSQL major version, e.g. .../postgresql/14.
    assert pg_host.file(
        f"/var/lib/quadlets/postgresql/{PG_MAJOR_DEFAULT}"
    ).is_directory
def test_initialized_flag_exists(pg_host):
    """The .initialized sentinel file must be written after a successful init."""
    # Presumably written by postgresql-init.service — TODO confirm the writer.
    assert pg_host.file("/var/lib/quadlets/postgresql/.initialized").exists
def test_config_env_present(pg_host):
    """/etc/quadlets/postgresql/config.env must be present and not world-readable."""
    f = pg_host.file("/etc/quadlets/postgresql/config.env")
    assert f.exists
    # mode 0600 — world and group bits must be 0 (the file holds credentials
    # such as POSTGRES_PASSWORD).
    assert f.mode & 0o077 == 0
# ---------------------------------------------------------------------------
# Database connectivity
# ---------------------------------------------------------------------------
def test_postgresql_accepts_connections(postgresql_vm, test_ssh_key):
    """PostgreSQL must respond to a trivial SQL query."""
    # run_sql executes the query inside the VM over SSH (see helpers).
    output = run_sql(postgresql_vm, test_ssh_key, "SELECT 1 AS probe")
    assert "1" in output
def test_postgresql_version_matches_config(postgresql_vm, test_ssh_key):
    """The running PostgreSQL server must report the version from PG_MAJOR_DEFAULT."""
    output = run_sql(postgresql_vm, test_ssh_key, "SHOW server_version")
    # Bug fix: PG_MAJOR_DEFAULT is an int (e.g. 14), and ``14 in output`` on
    # a str raises TypeError.  Compare its string form instead; str() is a
    # no-op if helpers ever switches to a string constant.
    assert str(PG_MAJOR_DEFAULT) in output
def test_can_create_database(postgresql_vm, test_ssh_key):
    """Should be possible to create a new database."""
    run_sql(
        postgresql_vm,
        test_ssh_key,
        "CREATE DATABASE install_test_db",
    )
    # Confirm the new database is visible in the catalog afterwards.
    output = run_sql(
        postgresql_vm,
        test_ssh_key,
        "SELECT datname FROM pg_database WHERE datname = 'install_test_db'",
    )
    assert "install_test_db" in output

154
cookbooks/postgresql/tests/test_recovery.py

@ -1,154 +0,0 @@
"""Test PostgreSQL automatic crash recovery.
Scenarios covered:
1. Container crash (SIGKILL via ``podman kill``) → systemd restarts the
   service automatically (Restart=always, RestartSec=10).
2. Hard VM reboot → all services start cleanly and data is intact.
All tests share the module-scoped ``postgresql_vm`` fixture. Because some
tests are destructive (they kill the container), they are intentionally
sequenced: create data → crash → verify recovery → create more data →
reboot → verify recovery.
"""
import time
from helpers import run_sql
# Data written before the crash that must survive each recovery scenario.
CRASH_WITNESS_TABLE = "crash_witness"
CRASH_WITNESS_VALUE = "before_crash"
REBOOT_WITNESS_TABLE = "reboot_witness"
REBOOT_WITNESS_VALUE = "before_reboot"
# ---------------------------------------------------------------------------
# Scenario 1: container crash
# ---------------------------------------------------------------------------
def test_server_running_before_crash(pg_host):
    """Precondition: postgresql-server.service must be active before we crash it."""
    # Guard test: the crash scenario below is meaningless on a dead service.
    assert pg_host.service("postgresql-server.service").is_running
def test_create_data_before_crash(postgresql_vm, test_ssh_key):
    """Insert a row that must survive the container crash."""
    # IF NOT EXISTS keeps the test idempotent on a reused (--keep-vm) VM.
    run_sql(
        postgresql_vm,
        test_ssh_key,
        (
            f"CREATE TABLE IF NOT EXISTS {CRASH_WITNESS_TABLE} "
            f"(id SERIAL PRIMARY KEY, message TEXT NOT NULL); "
            f"INSERT INTO {CRASH_WITNESS_TABLE} (message) "
            f"VALUES ('{CRASH_WITNESS_VALUE}');"
        ),
    )
def test_kill_postgresql_container(postgresql_vm, test_ssh_key):
    """Simulate a process crash by sending SIGKILL to the container.

    ``podman kill`` delivers SIGKILL to the container's PID 1. Systemd will
    detect the exit and restart the service after RestartSec=10 seconds.
    """
    # SIGKILL cannot be trapped, so this approximates a hard crash.
    postgresql_vm.ssh_run(
        "podman kill --signal SIGKILL postgresql-server",
        test_ssh_key,
    )
def test_service_restarts_automatically(postgresql_vm, test_ssh_key):
    """postgresql-server.service must be active again after the crash.

    Allow up to 120 seconds: systemd waits RestartSec=10 s before restarting,
    then the container start-up and health check take additional time.
    """
    # Brief pause to let systemd register the exit before we start polling.
    time.sleep(5)
    postgresql_vm.wait_for_service(
        "postgresql-server.service", test_ssh_key, timeout=120
    )
def test_data_intact_after_crash_recovery(postgresql_vm, test_ssh_key):
    """Rows written before the crash must be present after automatic recovery."""
    # Crash recovery should bring back every committed row.
    output = run_sql(
        postgresql_vm,
        test_ssh_key,
        f"SELECT message FROM {CRASH_WITNESS_TABLE} "
        f"WHERE message = '{CRASH_WITNESS_VALUE}'",
    )
    assert CRASH_WITNESS_VALUE in output, (
        f"Crash witness row not found after recovery. Query returned: {output!r}"
    )
def test_target_still_active_after_crash(pg_host):
    """The top-level postgresql.target must remain active after container recovery."""
    target_unit = pg_host.service("postgresql.target")
    assert target_unit.is_running
# ---------------------------------------------------------------------------
# Scenario 2: hard reboot
# ---------------------------------------------------------------------------
def test_create_data_before_reboot(postgresql_vm, test_ssh_key):
    """Insert a witness row that must survive a full VM reboot."""
    statement = (
        f"CREATE TABLE IF NOT EXISTS {REBOOT_WITNESS_TABLE} "
        f"(id SERIAL PRIMARY KEY, message TEXT NOT NULL); "
        f"INSERT INTO {REBOOT_WITNESS_TABLE} (message) "
        f"VALUES ('{REBOOT_WITNESS_VALUE}');"
    )
    run_sql(postgresql_vm, test_ssh_key, statement)
def test_reboot_vm(postgresql_vm, test_ssh_key):
    """Trigger a graceful OS reboot; the SSH connection will drop temporarily."""
    # check=False: the SSH session may be severed before the command returns.
    postgresql_vm.ssh_run("systemctl reboot", test_ssh_key, check=False)
    # Give the VM time to actually go down before anyone polls SSH again.
    time.sleep(15)
def test_ssh_available_after_reboot(postgresql_vm, test_ssh_key):
    """SSH must become available again within 5 minutes of the reboot."""
    # Reset the cached IP so wait_ssh re-probes it.
    # NOTE(review): reaches into a private attribute of the VM wrapper —
    # consider exposing a public reset/refresh method on FCOSVirtualMachine.
    postgresql_vm._ip = None
    postgresql_vm.wait_ssh(ssh_key=test_ssh_key, timeout=300)
def test_postgresql_target_active_after_reboot(postgresql_vm, test_ssh_key):
    """postgresql.target is enabled in ignition, so it must come up on boot."""
    target_unit = "postgresql.target"
    postgresql_vm.wait_for_service(target_unit, ssh_key=test_ssh_key, timeout=300)
def test_data_intact_after_reboot(postgresql_vm, test_ssh_key):
    """Rows written before the reboot must still be present after boot."""
    query = (
        f"SELECT message FROM {REBOOT_WITNESS_TABLE} "
        f"WHERE message = '{REBOOT_WITNESS_VALUE}'"
    )
    rows = run_sql(postgresql_vm, test_ssh_key, query)
    assert REBOOT_WITNESS_VALUE in rows, (
        f"Reboot witness row not found. Query returned: {rows!r}"
    )
def test_crash_witness_also_intact_after_reboot(postgresql_vm, test_ssh_key):
    """Data written before the crash must also survive the subsequent reboot."""
    output = run_sql(
        postgresql_vm,
        test_ssh_key,
        f"SELECT message FROM {CRASH_WITNESS_TABLE} "
        f"WHERE message = '{CRASH_WITNESS_VALUE}'",
    )
    # Diagnostic message added for consistency with the sibling witness tests.
    assert CRASH_WITNESS_VALUE in output, (
        f"Crash witness row not found after reboot. Query returned: {output!r}"
    )

59
cookbooks/postgresql/tests/test_security.py

@ -1,59 +0,0 @@
"""Test that a fresh PostgreSQL installation is secure.
These tests run against a brand-new VM booted from the cookbook's default
ignition (PG_MAJOR=14, example credentials). They verify:
- The PostgreSQL port is NOT exposed to the network.
- The PostgreSQL backup directory has the correct ownership and permissions.
"""
from pathlib import Path
import socket
# ---------------------------------------------------------------------------
# Network / socket
# ---------------------------------------------------------------------------
def test_postgresql_port_listening(pg_host):
    """PostgreSQL must be listening on 127.0.0.1:5432 (POSTGRES_ARGS=-h 127.0.0.1)."""
    listener = pg_host.socket("tcp://127.0.0.1:5432")
    assert listener.is_listening
def _tcp_port_open(host, port, timeout=3.0):
    """Return True if a TCP connection to ``host:port`` succeeds within ``timeout``.

    Uses the socket as a context manager so it is always closed, even when the
    caller's assertion fires (the original code leaked the socket on failure).
    """
    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
        s.settimeout(timeout)
        return s.connect_ex((host, port)) == 0


def test_postgresql_port_not_exposed(postgresql_vm):
    """PostgreSQL must NOT be exposed to the network.

    Port 22 serves as a positive control (must be reachable) and port 23 as a
    negative control (must not be), then 5432 itself is probed.
    """
    ip = postgresql_vm.ip
    # Positive control: port 22 (SSH) must be reachable.
    assert _tcp_port_open(ip, 22), (
        f"Port 22 is NOT reachable from the host on {ip}!"
    )
    # Negative control: port 23 must NOT be reachable.
    assert not _tcp_port_open(ip, 23), (
        f"Port 23 is reachable from the host on {ip}!"
    )
    # The real test: port 5432 must NOT be reachable.
    assert not _tcp_port_open(ip, 5432), (
        f"Port 5432 is reachable from the host on {ip}!"
    )
# ---------------------------------------------------------------------------
# VirtioFS permissions (verified from the host — no SSH required)
# ---------------------------------------------------------------------------
def test_backup_directory_exists_in_virtiofs(virtiofs_dir: Path):
    """The postgresql/backup sub-directory must exist in the VirtioFS share
    with postgres ownership (uid 10004 / gid 10000) and mode 0700."""
    backup_root = virtiofs_dir / "postgresql" / "backup"
    assert backup_root.exists(), f"Backup directory not found on host: {backup_root}"
    # stat once and reuse the result: avoids three syscalls and a race between checks.
    st = backup_root.stat()
    # mode 0700 — group and world permission bits must all be 0
    assert st.st_mode & 0o077 == 0, (
        f"Backup directory must be mode 0700, but permission bits are {st.st_mode & 0o777:o}"
    )
    assert st.st_uid == 10004, f"Backup directory must be owned by postgres (uid 10004), but got {st.st_uid}"
    assert st.st_gid == 10000, f"Backup directory must be owned by postgres (gid 10000), but got {st.st_gid}"

163
cookbooks/postgresql/tests/test_upgrade.py

@ -1,163 +0,0 @@
"""Test the PostgreSQL major version upgrade path: PG 14 → PG 17.
The upgrade mechanism works as follows:
1. postgresql-set-major.service updates the ``latest`` symlink to point at
the new PG_MAJOR directory (e.g. /var/lib/quadlets/postgresql/17/).
2. postgresql-upgrade.service detects that
``latest/docker/PG_VERSION`` does not exist (the 17/ directory is
empty) and triggers pgautoupgrade.
3. pg_upgrade migrates data from the old directory to the new one.
4. postgresql-server.service starts against the upgraded data.
All tests in this module share a single ``upgrade_vm`` fixture that starts
with PG_MAJOR_UPGRADE_FROM (14). Tests are intentionally ordered to form a
sequential scenario: create data → trigger the upgrade → verify the outcome.
"""
from pathlib import Path
from helpers import PG_MAJOR_UPGRADE_FROM, PG_MAJOR_UPGRADE_TO, run_sql
# Sentinel table and row used to verify data survives the upgrade.
WITNESS_TABLE = "upgrade_witness"   # table created before the version bump
WITNESS_VALUE = "before_upgrade"    # row expected to survive pg_upgrade
# ---------------------------------------------------------------------------
# Pre-upgrade baseline
# ---------------------------------------------------------------------------
def test_initial_version_is_upgrade_from(upgrade_vm, test_ssh_key):
    """Precondition: the VM must be running PG_MAJOR_UPGRADE_FROM."""
    version = run_sql(upgrade_vm, test_ssh_key, "SHOW server_version")
    message = f"Expected PG {PG_MAJOR_UPGRADE_FROM}, got: {version!r}"
    assert PG_MAJOR_UPGRADE_FROM in version, message
def test_create_witness_data(upgrade_vm, test_ssh_key):
    """Insert a row that must survive the major version upgrade, then read it back."""
    ddl_and_insert = (
        f"CREATE TABLE IF NOT EXISTS {WITNESS_TABLE} "
        f"(id SERIAL PRIMARY KEY, message TEXT NOT NULL); "
        f"INSERT INTO {WITNESS_TABLE} (message) VALUES ('{WITNESS_VALUE}');"
    )
    run_sql(upgrade_vm, test_ssh_key, ddl_and_insert)
    readback = run_sql(
        upgrade_vm,
        test_ssh_key,
        f"SELECT message FROM {WITNESS_TABLE} WHERE message = '{WITNESS_VALUE}'",
    )
    assert WITNESS_VALUE in readback
# ---------------------------------------------------------------------------
# Trigger the upgrade
# ---------------------------------------------------------------------------
def test_bump_pg_major_in_config(upgrade_vm, test_ssh_key):
    """Change PG_MAJOR in config.env from UPGRADE_FROM to UPGRADE_TO."""
    sed_command = (
        f"sed -i 's/^PG_MAJOR={PG_MAJOR_UPGRADE_FROM}$/PG_MAJOR={PG_MAJOR_UPGRADE_TO}/' "
        "/etc/quadlets/postgresql/config.env"
    )
    upgrade_vm.ssh_run(sed_command, test_ssh_key)
    # Verify the substitution worked.
    check = upgrade_vm.ssh_run(
        "grep ^PG_MAJOR= /etc/quadlets/postgresql/config.env",
        test_ssh_key,
    )
    assert f"PG_MAJOR={PG_MAJOR_UPGRADE_TO}" in check.stdout
def test_restart_postgresql_target(upgrade_vm, test_ssh_key):
    """Restart postgresql.target to kick off the upgrade chain."""
    restart_command = "systemctl restart postgresql.target"
    upgrade_vm.ssh_run(restart_command, test_ssh_key)
def test_upgrade_service_completes(upgrade_vm, test_ssh_key):
    """postgresql-upgrade.service must end ``inactive`` (succeeded), not ``failed``.

    pgautoupgrade can take several minutes for large databases; allow up to
    10 minutes.
    """
    final_state = upgrade_vm.wait_for_unit_done(
        "postgresql-upgrade.service", test_ssh_key, timeout=600
    )
    assert final_state == "inactive", (
        f"Upgrade service ended in state {final_state!r}. "
        "Inspect with: systemctl status postgresql-upgrade.service --no-pager "
        "and: journalctl -u postgresql-upgrade.service"
    )
def test_server_active_after_upgrade(upgrade_vm, test_ssh_key):
    """postgresql-server.service must be active after the upgrade."""
    server_unit = "postgresql-server.service"
    upgrade_vm.wait_for_service(server_unit, test_ssh_key, timeout=120)
# ---------------------------------------------------------------------------
# Post-upgrade verification
# ---------------------------------------------------------------------------
def test_new_version_is_running(upgrade_vm, test_ssh_key):
    """PostgreSQL must now report PG_MAJOR_UPGRADE_TO as the server version."""
    version = run_sql(upgrade_vm, test_ssh_key, "SHOW server_version")
    assert PG_MAJOR_UPGRADE_TO in version, (
        f"Expected PG {PG_MAJOR_UPGRADE_TO} after upgrade, got: {version!r}"
    )
def test_witness_data_preserved(upgrade_vm, test_ssh_key):
    """The row inserted before the upgrade must still be present and correct."""
    query = f"SELECT message FROM {WITNESS_TABLE} WHERE message = '{WITNESS_VALUE}'"
    rows = run_sql(upgrade_vm, test_ssh_key, query)
    assert WITNESS_VALUE in rows, (
        f"Witness row '{WITNESS_VALUE}' not found after upgrade. "
        f"Query returned: {rows!r}"
    )
def test_old_data_dir_removed(upgrade_vm, test_ssh_key):
    """pgautoupgrade must remove the source data directory after a clean upgrade."""
    old_dir_check = upgrade_vm.ssh_run(
        f"test -d /var/lib/quadlets/postgresql/{PG_MAJOR_UPGRADE_FROM}/docker",
        test_ssh_key,
        check=False,
    )
    # `test -d` exits non-zero when the directory is gone — that is the success case.
    assert old_dir_check.returncode != 0, (
        f"Old data directory for PG {PG_MAJOR_UPGRADE_FROM} still exists — "
        "upgrade may not have cleaned up properly"
    )
def test_latest_symlink_points_to_new_version(upgrade_vm, test_ssh_key):
    """The ``latest`` symlink must now point at the PG_MAJOR_UPGRADE_TO directory."""
    link_target = upgrade_vm.ssh_run(
        "readlink /var/lib/quadlets/postgresql/latest",
        test_ssh_key,
    )
    assert PG_MAJOR_UPGRADE_TO in link_target.stdout, (
        f"latest symlink does not point at PG {PG_MAJOR_UPGRADE_TO}: "
        f"{link_target.stdout.strip()!r}"
    )
def test_new_data_dir_has_pg_version_file(upgrade_vm, test_ssh_key):
    """PG_VERSION file must exist in the new data directory (server is healthy)."""
    result = upgrade_vm.ssh_run(
        f"cat /var/lib/quadlets/postgresql/{PG_MAJOR_UPGRADE_TO}/docker/PG_VERSION",
        test_ssh_key,
    )
    # Diagnostic message added for consistency with the other upgrade assertions.
    assert PG_MAJOR_UPGRADE_TO in result.stdout, (
        f"PG_VERSION does not report PG {PG_MAJOR_UPGRADE_TO}: "
        f"{result.stdout.strip()!r}"
    )

1
pyproject.toml

@ -18,6 +18,7 @@ dependencies = [
log_cli = true
log_cli_level = "INFO"
addopts = "-v"
pythonpath = ["tests"]
[tool.setuptools]
# This repo is not a Python package — suppress automatic package discovery.

27
scripts/common.mk

@ -118,7 +118,10 @@ I_KNOW_WHAT_I_AM_DOING ?=
# List of all ignition files corresponding to the dependencies
# Here, we inject the "base" project as a dependency. It can therefore be assumed to always be embeddable in project's butane specs.
DEPENDENCIES_IGNITION_FILES := $(shell for dep in $$(if [ "$(PROJECT_NAME)" != "base" ]; then echo base; fi) $(DEPENDENCIES); do echo $(COOKBOOKS_DIR)/$$dep/$$dep.ign $(COOKBOOKS_DIR)/$$dep/$$dep-examples.ign; done)
DEPENDENCIES_IGNITION_FILES := $(shell for dep in $$(if [ "$(PROJECT_NAME)" != "base" ]; then echo base; fi) $(DEPENDENCIES); do echo $(COOKBOOKS_DIR)/$$dep/$$dep.ign; done)
# Variation of the previous variable with the built-in examples.
DEPENDENCIES_IGNITION_EXAMPLES_FILES := $(shell for dep in $$(if [ "$(PROJECT_NAME)" != "base" ]; then echo base; fi) $(DEPENDENCIES); do echo $(COOKBOOKS_DIR)/$$dep/$$dep.ign $(COOKBOOKS_DIR)/$$dep/$$dep-examples.ign; done)
# User and group IDs to own the project files and directories.
PROJECT_UID ?= 0
@ -366,24 +369,30 @@ $(PROJECT_NAME).ign $(PROJECT_NAME)-examples.ign: %.ign: %.bu
butane --strict -o $@ $<
# Build the Butane specifications + Ignition files suitable for Fedora CoreOS, including those of the dependencies of this project.
butane: fcos.ign
butane: fcos-dev.ign fcos-test.ign
# Generate the local Butane spec + Ignition file (the one containing local customizations).
$(TOP_LEVEL_DIR)/local.ign: $(TOP_LEVEL_DIR)/local.bu
butane --strict -o $@ $<
.INTERMEDIATE: fcos.bu
fcos.bu: DEPS := $(if $(filter-out base,$(PROJECT_NAME)),base $(DEPENDENCIES),$(DEPENDENCIES))
fcos.bu: %.bu: Makefile $(SCRIPTS_DIR)/default-butane-spec.sh
.INTERMEDIATE: fcos-dev.bu fcos-test.bu
# Generate the Butane specs for development and testing by merging the current project's spec with those of the dependencies.
# The development spec also includes the examples of the dependencies.
# Whereas the testing spec only includes the main specs of the dependencies.
fcos-dev.bu fcos-test.bu: DEPS := $(if $(filter-out base,$(PROJECT_NAME)),base $(DEPENDENCIES),$(DEPENDENCIES))
fcos-dev.bu: DEPS := $(DEPS) $(addsuffix -examples,$(DEPS))
fcos-dev.bu fcos-test.bu: %.bu: Makefile $(SCRIPTS_DIR)/default-butane-spec.sh
$(SCRIPTS_DIR)/default-butane-spec.sh $(PROJECT_NAME) $(DEPS) > $@
# Generate the final Fedora CoreOS ignition file by merging the Butane spec with the local and project-specific ignition files, as well as those of the dependencies.
fcos.ign: fcos.bu $(TOP_LEVEL_DIR)/local.ign $(PROJECT_NAME).ign $(PROJECT_NAME)-examples.ign $(DEPENDENCIES_IGNITION_FILES)
# Generate the final Fedora CoreOS ignition files (dev & test) by merging the Butane spec with the local and project-specific ignition files, as well as those of the dependencies.
fcos-dev.ign: $(TOP_LEVEL_DIR)/local.ign $(PROJECT_NAME).ign $(PROJECT_NAME)-examples.ign $(DEPENDENCIES_IGNITION_EXAMPLES_FILES)
fcos-test.ign: $(TOP_LEVEL_DIR)/local.ign $(PROJECT_NAME).ign $(DEPENDENCIES_IGNITION_FILES)
fcos-dev.ign fcos-test.ign: fcos-%.ign: fcos-%.bu
@run() { echo $$*; "$$@"; }; \
set -Eeuo pipefail; \
tmp=$$(mktemp -d /tmp/butane-XXXXXX); \
run cp $(filter %.ign,$^) $$tmp; \
run butane --strict -d $$tmp -o $@ fcos.bu; \
run butane --strict -d $$tmp -o $@ $<; \
run rm -rf $$tmp
# Fetch the latest version of the Fedora CoreOS QCOW2 image.
@ -399,7 +408,7 @@ fcos.ign: fcos.bu $(TOP_LEVEL_DIR)/local.ign $(PROJECT_NAME).ign $(PROJECT_NAME)
run mv "$$qcow2" $@
# Copy the ignition file.
/var/lib/libvirt/images/fcos-$(PROJECT_NAME)/fcos.ign: fcos.ign
/var/lib/libvirt/images/fcos-$(PROJECT_NAME)/fcos.ign: fcos-dev.ign
install -D -o root -g root -m 0644 $< $@
# Copy the Fedora CoreOS base image to create a new QCOW2 image for the VM.

1
scripts/default-butane-spec.sh

@ -11,6 +11,5 @@ ignition:
EOF
for dep in "$@"; do
echo " - local: ${dep}.ign"
echo " - local: ${dep}-examples.ign"
done
echo " - local: local.ign"

21
tests/fcos_vm.py

@ -15,7 +15,6 @@ Typical usage:
vm.destroy()
"""
import base64
import re
import shutil
import subprocess
@ -32,8 +31,8 @@ FCOS_BASE_IMAGE = LIBVIRT_IMAGES_DIR / "library" / "fedora-coreos.qcow2"
BUTANE_VERSION = "1.4.0"
def ensure_fcos_ign(cookbook_dir: Path) -> Path:
"""Return the path to fcos.ign, building it via ``make butane`` if absent."""
fcos_ign = cookbook_dir / "fcos.ign"
"""Return the path to fcos-test.ign, building it via ``make butane`` if absent."""
fcos_ign = cookbook_dir / "fcos-test.ign"
if not fcos_ign.exists():
subprocess.run(
["make", "-C", str(cookbook_dir), "butane"],
@ -51,15 +50,15 @@ class FCOSIgnition:
teardown).
"""
def __init__(self, ignition_files: list[Path], ssh_key: str | None = None, extra_files: dict[str, tuple[str | int, str | int, int, str]] | None = None) -> None:
def __init__(self, ignition_files: list[Path] | None = None, ssh_key: str | None = None, extra_files: dict[str, tuple[str | int, str | int, int, str]] | None = None) -> None:
"""
Args:
ignition_files: List of paths to the compiled Ignition (.ign) files.
ssh_key: Optional SSH key to inject into the Ignition.
extra_files: Optional dictionary of extra files to inject into the Ignition.
"""
self.ignition_files = [Path(f) for f in ignition_files]
self.extra_files = extra_files or {}
self.ignition_files = ignition_files or list()
self.extra_files = extra_files or dict()
self.ssh_key = ssh_key
def _build_extra_files_butane(self) -> str | None:
@ -191,19 +190,22 @@ class FCOSVirtualMachine:
teardown).
"""
def __init__(self, cookbook_name: str, instance_name: str, keep: bool = False, ignition: FCOSIgnition = FCOSIgnition([]), virtiofs_dirs: list[tuple[Path, str]] = [], vm_config: tuple[int, int, int, int] = (4096, 2, 50, 100)) -> None:
def __init__(self, cookbook_name: str, instance_name: str, keep: bool = False, ignition: FCOSIgnition | None = None, virtiofs_dirs: list[tuple[Path, str]] = [], vm_config: tuple[int, int, int, int] = (4096, 2, 50, 100)) -> None:
"""
Args:
cookbook_name: Short identifier appended to "fcos-test-" to form the
libvirt domain name. Keep it unique across parallel tests.
instance_name: Short identifier appended to the domain name to allow multiple VM for the same cookbook.
keep: If True, the VM and its associated resources will not be automatically destroyed on teardown. Useful for debugging.
ignition: FCOSIgnition instance to build the Ignition (.ign) file.
virtiofs_dirs: List of host directories and virtiofs target directories that will be exposed inside the VM.
vm_config: Tuple containing VM configuration (memory in MB, vCPUs, root disk size in GB, /var disk size in GB).
"""
if keep:
self.vm_name = f"fcos-test-{cookbook_name}-{instance_name}-dev"
else:
self.vm_name = f"fcos-test-{cookbook_name}-{instance_name}-{os.getpid()}"
self.ignition = ignition
self.ignition = ignition or FCOSIgnition()
self.virtiofs_dirs = virtiofs_dirs
self.vm_config = vm_config
self._images_dir = LIBVIRT_IMAGES_DIR / self.vm_name
@ -287,9 +289,6 @@ class FCOSVirtualMachine:
)
if self._images_dir.exists():
shutil.rmtree(self._images_dir)
for host_dir, _ in self.virtiofs_dirs:
if Path(host_dir).exists():
shutil.rmtree(host_dir)
# ------------------------------------------------------------------
# Readiness polling

Loading…
Cancel
Save