Compare commits

...

7 Commits

75 files changed (change count per file in parentheses):

  1. CLAUDE.md (4)
  2. README.md (2)
  3. conftest.py (146)
  4. cookbooks/Makefile (3)
  5. cookbooks/base/config/examples/fastfetch.jsonc (0)
  6. cookbooks/base/install-fastfetch.service (3)
  7. cookbooks/base/profile.d/fastfetch.sh (12)
  8. cookbooks/gitea/gitea.container (2)
  9. cookbooks/gitea/gitea.image (6)
  10. cookbooks/lego/lego-renew.container (2)
  11. cookbooks/lego/lego-run.container (2)
  12. cookbooks/lego/lego.image (6)
  13. cookbooks/miniflux/miniflux.container (2)
  14. cookbooks/miniflux/miniflux.image (6)
  15. cookbooks/nextcloud/nextcloud-app.container (2)
  16. cookbooks/nextcloud/nextcloud-collabora.container (2)
  17. cookbooks/nextcloud/nextcloud-collabora.image (6)
  18. cookbooks/nextcloud/nextcloud-cron.container (2)
  19. cookbooks/nextcloud/nextcloud-init.container (2)
  20. cookbooks/nextcloud/nextcloud-nginx.container (2)
  21. cookbooks/nextcloud/nextcloud-nginx.image (13)
  22. cookbooks/nextcloud/nextcloud-redis.container (2)
  23. cookbooks/nextcloud/nextcloud-redis.image (13)
  24. cookbooks/nextcloud/nextcloud-upgrade.container (2)
  25. cookbooks/nextcloud/nextcloud.image (13)
  26. cookbooks/nginx/nginx-git.image (6)
  27. cookbooks/nginx/nginx-init.container (2)
  28. cookbooks/nginx/nginx-server.container (4)
  29. cookbooks/nginx/nginx-update.container (2)
  30. cookbooks/nginx/nginx.image (6)
  31. cookbooks/nginx/tests/test_01_install.py (90)
  32. cookbooks/postgresql/postgresql-init.container (2)
  33. cookbooks/postgresql/postgresql.image (4)
  34. cookbooks/postgresql/tests/__init__.py (0)
  35. cookbooks/postgresql/tests/conftest.py (172)
  36. cookbooks/postgresql/tests/helpers.py (90)
  37. cookbooks/postgresql/tests/test_01_install_upgrade_backup.py (155)
  38. cookbooks/postgresql/tests/test_02_restore.py (56)
  39. cookbooks/postgresql/tests/test_backup.py (119)
  40. cookbooks/postgresql/tests/test_install.py (149)
  41. cookbooks/postgresql/tests/test_recovery.py (154)
  42. cookbooks/postgresql/tests/test_upgrade.py (163)
  43. cookbooks/restic-server/restic-server.container (2)
  44. cookbooks/restic-server/restic-server.image (5)
  45. cookbooks/seedbox/flaresolverr.container (2)
  46. cookbooks/seedbox/flaresolverr.image (6)
  47. cookbooks/seedbox/jellyfin.container (2)
  48. cookbooks/seedbox/jellyfin.image (6)
  49. cookbooks/seedbox/lidarr.container (2)
  50. cookbooks/seedbox/lidarr.image (6)
  51. cookbooks/seedbox/prowlarr.container (2)
  52. cookbooks/seedbox/prowlarr.image (6)
  53. cookbooks/seedbox/qbittorrent.container (2)
  54. cookbooks/seedbox/qbittorrent.image (5)
  55. cookbooks/seedbox/radarr.container (2)
  56. cookbooks/seedbox/radarr.image (6)
  57. cookbooks/seedbox/sonarr.container (2)
  58. cookbooks/seedbox/sonarr.image (6)
  59. cookbooks/traefik/traefik.container (2)
  60. cookbooks/traefik/traefik.image (6)
  61. cookbooks/unifi/unifi-app.container (2)
  62. cookbooks/unifi/unifi-app.image (6)
  63. cookbooks/unifi/unifi-mongo.container (2)
  64. cookbooks/unifi/unifi-mongo.image (13)
  65. cookbooks/vaultwarden/vaultwarden.container (2)
  66. cookbooks/vaultwarden/vaultwarden.image (6)
  67. cookbooks/vmagent/vmagent.container (2)
  68. cookbooks/vmagent/vmagent.image (5)
  69. pyproject.toml (1)
  70. scripts/common.mk (37)
  71. scripts/default-butane-spec.sh (1)
  72. tests/__init__.py (0)
  73. tests/fcos_vm.py (349)
  74. tests/test_quadlet.py (291)
  75. tests/vm.py (384)

4
CLAUDE.md

@ -6,8 +6,8 @@ You may be given Kustomize manifests, Helm charts, Docker Compose files, etc. th
## Rules
- NEVER alter the files at the root of this GIT repository!
- You are ONLY ALLOWED to edit files in the sub-directories.
- NEVER alter the files outside the "cookbooks" directory!
- You are ONLY ALLOWED to edit files in the sub-directories of the "cookbooks" directory.
- Before writing or editing a Podman Quadlet, you MUST read the `podman-systemd.unit(5)` man page!
## Architecture

2
README.md

@ -35,7 +35,7 @@ This repository gathers all the recipes (hence the name "Cookbook") to deploy Op
- `Makefile`: Cookbook's Makefile. Includes `../common.mk`. (**REQUIRED**)
- `overlay.bu`: Fedora CoreOS Butane Specifications to include in the generated Ignition files. (_OPTIONAL_)
- `fcos.bu`: The Fedora CoreOS Butane Specifications to build the test FCOS Virtual Machine. (_OPTIONAL_)
- `fcos.bu`: The Fedora CoreOS Butane Specifications to build the dev & test FCOS Virtual Machine. (_OPTIONAL_)
- `config/*`: Cookbook's configuration files (read-only). Goes into `/etc/quadlets/$(PROJECT_NAME)`.
- `config/examples/*`: Cookbook configuration files (sample configuration, to be overwritten for each deployment). Goes into `/etc/quadlets/$(PROJECT_NAME)`.
- `config/examples/*.env`: Systemd environment files, potentially containing secrets (to be overwritten for each deployment). Goes into `/etc/quadlets/$(PROJECT_NAME)`.

146
conftest.py

@ -1,12 +1,69 @@
"""Pytest fixtures for the Podman Quadlets cookbooks.
Prerequisites:
- Must run as root (KVM/libvirt access).
- The Fedora CoreOS base QCOW2 image must be present at /var/lib/libvirt/images/library/fedora-coreos.qcow2.
Run ``coreos-installer download -p qemu -f qcow2.xz -d -C /var/lib/libvirt/images/library/`` to fetch it.
- fcos-test.ign for the cookbook is built on demand by ``make butane`` if it is missing.
"""
import subprocess
from pathlib import Path
import shutil
import os
import sys
import pytest
import testinfra
import textwrap
from fcos_vm import FCOSVirtualMachine, ensure_fcos_ign # noqa: E402
# Persistent directory used when --keep-vm is active.
_KEEP_VM_CACHE_DIR = Path.home() / ".cache" / "pytest"
# You can pass --keep-vm on the command line to keep the test VM alive after the test run and reuse it on the next run.
# Speeds up iteration: the VM is created once and never destroyed. The SSH key is stored persistently in ~/.cache/pytest.
def pytest_addoption(parser: pytest.Parser) -> None:
parser.addoption(
"--keep-vm",
action="store_true",
default=False,
help=(
"Keep the test VM alive after the test run and reuse it on the next run. "
"Speeds up iteration: the VM is created once and never destroyed. "
"The SSH key is stored persistently in "
f"{_KEEP_VM_CACHE_DIR}."
),
)
@pytest.fixture(scope="session")
def keep_vm(request: pytest.FixtureRequest) -> bool:
"""True when --keep-vm was passed on the command line."""
return request.config.getoption("--keep-vm")
@pytest.fixture(scope="session")
def test_ssh_key(tmp_path_factory: pytest.TempPathFactory) -> Path:
"""Generate a temporary SSH key pair (no passphrase) for VM access."""
def test_ssh_key(
keep_vm: bool,
tmp_path_factory: pytest.TempPathFactory,
) -> Path:
"""SSH key pair for VM access.
When --keep-vm is set the key is stored persistently so that subsequent
runs can re-use the same VM without re-injecting a new key.
"""
if keep_vm:
key_dir = _KEEP_VM_CACHE_DIR
key_dir.mkdir(parents=True, exist_ok=True)
key_path = key_dir / "id_ed25519"
if not key_path.exists():
subprocess.run(
["ssh-keygen", "-t", "ed25519", "-N", "", "-f", str(key_path)],
check=True,
capture_output=True,
)
return key_path
key_dir = tmp_path_factory.mktemp("ssh-key")
key_path = key_dir / "id_ed25519"
subprocess.run(
@ -21,3 +78,86 @@ def test_ssh_key(tmp_path_factory: pytest.TempPathFactory) -> Path:
def test_ssh_pubkey(test_ssh_key: Path) -> str:
"""Public key string corresponding to test_ssh_key."""
return test_ssh_key.with_suffix(".pub").read_text().strip()
# The virtiofs is where important and persistent data are stored.
# We keep it for the entire test session.
@pytest.fixture(scope="package")
def virtiofs_dirs(request, keep_vm: bool) -> list[tuple[Path, str]]:
"""VirtioFS host directories for the default test VM.
With --keep-vm the directories are persistent so the VM can be reused across
test runs. Without it unique per-process paths are used and cleaned up
on teardown.
"""
cookbook_dir = Path(request.path).parent.parent
if keep_vm:
d = Path("/srv") / f"fcos-test-{cookbook_dir.name}-dev"
else:
d = Path("/srv") / f"fcos-test-{cookbook_dir.name}-{os.getpid()}"
d.mkdir(parents=True, exist_ok=True)
yield [(d, "data",)] # <-- tests run here with access to the virtiofs directories
if not keep_vm and d.exists():
shutil.rmtree(d)
# However, the VM itself is recreated for each test module to ensure a clean state.
@pytest.fixture(scope="module")
def fcos_host(fcos_vm: FCOSVirtualMachine, test_ssh_key: Path):
"""testinfra SSH host connected to the default FCOS VM."""
return testinfra.get_host(
f"ssh://root@{fcos_vm.ip}",
ssh_extra_args=(
f"-i {test_ssh_key}"
" -o StrictHostKeyChecking=no"
" -o UserKnownHostsFile=/dev/null"
),
)
# Default VM configuration (memory in MB, vCPUs, root disk size in GB, /var disk size in GB).
@pytest.fixture(scope="package")
def fcos_vm_config() -> tuple[int, int, int, int]:
"""Default VM configuration (memory in MB, vCPUs, root disk size in GB, /var disk size in GB)."""
return (4096, 2, 50, 100) # (memory in MB, vCPUs, disk size for / and /var in GB)
# The VM is kept for the duration of a test module, backed by a persistent VirtioFS directory.
@pytest.fixture(scope="module")
def fcos_vm(
request, # Fixture that provides information about the requesting test function, class or module.
keep_vm: bool, # Fixture passed from command line option --keep-vm to determine whether to keep the VM after tests for debugging purposes.
fcos_vm_config: tuple[int, int, int, int], # Fixture that provides the VM configuration (memory in MB, vCPUs, root disk size in GB, /var disk size in GB).
test_ssh_key: Path, # Fixture that provides the path to the SSH private key to connect to the VM.
test_ssh_pubkey: str, # Fixture that provides the content of the SSH public key to inject into the VM for SSH access.
virtiofs_dirs: list[tuple[Path, str]], # Fixture that provides a list of tuples containing host directories and their corresponding target directories in the VM to be exposed via VirtioFS.
tmp_path_factory: pytest.TempPathFactory, # Fixture that provides a factory for creating temporary directories.
) -> FCOSVirtualMachine:
"""Running CoreOS VM with Quadlets installed.
With --keep-vm the VM is reused across runs: it is created only if it
does not already exist and is never destroyed on teardown.
"""
module_name = request.module.__name__.split(".")[-1].replace("test_", "").replace("_", "-")
cookbook_dir = Path(request.path).parent.parent
pg_major = getattr(request.module, "PG_MAJOR_DEFAULT", 0)
vm = FCOSVirtualMachine(
cookbook_name=cookbook_dir.name,
instance_name=module_name,
keep=keep_vm,
virtiofs_dirs=virtiofs_dirs,
vm_config=fcos_vm_config,
)
if not (keep_vm and vm.exists()):
fcos_ign = ensure_fcos_ign(cookbook_dir)
vm.ignition.ignition_files.append(fcos_ign)
vm.ignition.extra_files.update(getattr(request.module, "PYTEST_FCOS_EXTRA_FILES", {}))
vm.ignition.ssh_key = test_ssh_pubkey
vm.create()
vm.wait_ssh(ssh_key=test_ssh_key, timeout=300)
yield vm # <-- tests run here with access to the VM instance
if not keep_vm:
vm.destroy()
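
A minimal usage sketch of the new workflow (illustrative invocation, assuming pytest is launched from the repository root; per the docstring the suite must run as root and the Fedora CoreOS base image must already be downloaded):

    # First run: provisions the VM, generates a persistent SSH key under
    # ~/.cache/pytest, and leaves the VM running once the tests finish.
    sudo pytest --keep-vm cookbooks/nginx/tests/
    # Subsequent runs reuse the same VM and key instead of creating new ones.
    sudo pytest --keep-vm cookbooks/nginx/tests/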

3
cookbooks/Makefile

@ -14,6 +14,8 @@ help:
@echo " fcos-vm - Launch a Fedora CoreOS VM with the generated Butane spec"
@echo " clean-vm - Clean up the Fedora CoreOS VM and its resources"
@echo " uninstall - Uninstall the generated resources"
@echo " pytest - Run integration tests on a clean Fedora CoreOS VM"
dryrun: $(SUBDIRS)
butane: $(SUBDIRS)
@ -23,6 +25,7 @@ clean: $(SUBDIRS)
fcos-vm: $(SUBDIRS)
clean-vm: $(SUBDIRS)
uninstall: $(SUBDIRS)
pytest: $(SUBDIRS)
$(SUBDIRS):
$(MAKE) -C $@ $(MAKECMDGOALS)
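
Assuming the same recursion as the existing goals, the new target would presumably be driven like this (the single-cookbook form relies on each cookbook's Makefile picking the goal up from the updated scripts/common.mk):

    # Run the integration tests of every cookbook
    make -C cookbooks pytest
    # Run only one cookbook's tests
    make -C cookbooks/postgresql pytest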

0
cookbooks/base/config/fastfetch.jsonc → cookbooks/base/config/examples/fastfetch.jsonc

3
cookbooks/base/install-fastfetch.service

@ -3,12 +3,13 @@ Description=Install fastfetch
Wants=network-online.target
After=network-online.target
ConditionPathExists=!/usr/local/bin/fastfetch
ConditionPathExists=/etc/quadlets/base/fastfetch.env
[Service]
Type=oneshot
RemainAfterExit=yes
ExecStart=/etc/quadlets/base/install-fastfetch.sh
EnvironmentFile=-/etc/quadlets/base/fastfetch.env
EnvironmentFile=/etc/quadlets/base/fastfetch.env
[Install]
WantedBy=multi-user.target

12
cookbooks/base/profile.d/fastfetch.sh

@ -1,9 +1,11 @@
#!/bin/sh
declare -a FASTFETCH_OPTIONS=( -c /etc/quadlets/base/fastfetch.jsonc )
if [ "$USER" == "root" ]; then
if [ -x /usr/local/bin/fastfetch ]; then
declare -a FASTFETCH_OPTIONS=( -c /etc/quadlets/base/fastfetch.jsonc )
if [ "$USER" == "root" ]; then
FASTFETCH_OPTIONS+=( --custom-key-color dim_red --color-keys red --title-color-user red )
else
else
FASTFETCH_OPTIONS+=( --custom-key-color dim_blue --color-keys blue --title-color-user green )
fi
fastfetch "${FASTFETCH_OPTIONS[@]}"
unset FASTFETCH_OPTIONS
fi
fastfetch "${FASTFETCH_OPTIONS[@]}"
unset FASTFETCH_OPTIONS

2
cookbooks/gitea/gitea.container

@ -12,7 +12,7 @@ PartOf=gitea.target
[Container]
ContainerName=gitea
Image=docker.gitea.com/gitea:latest
Image=gitea.image
AutoUpdate=registry
# Network configuration

6
cookbooks/gitea/gitea.image

@ -0,0 +1,6 @@
[Unit]
Description=podman pull docker.gitea.com/gitea
Documentation=https://docs.gitea.com/
[Image]
Image=docker.gitea.com/gitea:latest
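
This container/image pair is the pattern applied throughout the rest of the changeset: the raw registry reference in each .container unit is replaced by a reference to a dedicated .image unit, presumably so that Quadlet generates the image pull as its own systemd unit that the container depends on (see podman-systemd.unit(5)). A quick way to inspect the result on a deployed host, assuming the generated unit follows Quadlet's usual <name>-image.service naming:

    # Pull service generated from gitea.image
    systemctl cat gitea-image.service
    # gitea.service now pulls in the image unit as a dependency
    systemctl list-dependencies gitea.service | grep -i image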

2
cookbooks/lego/lego-renew.container

@ -15,7 +15,7 @@ User=10023
Group=10000
# Image
Image=docker.io/goacme/lego:latest
Image=lego.image
AutoUpdate=registry
# Network configuration

2
cookbooks/lego/lego-run.container

@ -16,7 +16,7 @@ User=10023
Group=10000
# Image
Image=docker.io/goacme/lego:latest
Image=lego.image
AutoUpdate=registry
# Network configuration

6
cookbooks/lego/lego.image

@ -0,0 +1,6 @@
[Unit]
Description=podman pull docker.io/goacme/lego
Documentation=https://go-acme.github.io/lego/
[Image]
Image=docker.io/goacme/lego:latest

2
cookbooks/miniflux/miniflux.container

@ -11,7 +11,7 @@ PartOf=miniflux.target
[Container]
ContainerName=miniflux
Image=ghcr.io/miniflux/miniflux:latest
Image=miniflux.image
AutoUpdate=registry
# Network configuration

6
cookbooks/miniflux/miniflux.image

@ -0,0 +1,6 @@
[Unit]
Description=podman pull ghcr.io/miniflux/miniflux
Documentation=https://github.com/miniflux/v2
[Image]
Image=ghcr.io/miniflux/miniflux:latest

2
cookbooks/nextcloud/nextcloud-app.container

@ -14,7 +14,7 @@ PartOf=nextcloud.target
[Container]
ContainerName=nextcloud-app
Image=docker.io/library/nextcloud:${NEXTCLOUD_MAJOR}-fpm-alpine
Image=nextcloud.image
# No need for root privileges
User=www-data

2
cookbooks/nextcloud/nextcloud-collabora.container

@ -11,7 +11,7 @@ PartOf=nextcloud.target
[Container]
ContainerName=nextcloud-collabora
Image=docker.io/collabora/code:latest
Image=nextcloud-collabora.image
# No need for root privileges
User=1001

6
cookbooks/nextcloud/nextcloud-collabora.image

@ -0,0 +1,6 @@
[Unit]
Description=podman pull docker.io/collabora/code
Documentation=https://hub.docker.com/r/collabora/code/
[Image]
Image=docker.io/collabora/code:latest

2
cookbooks/nextcloud/nextcloud-cron.container

@ -11,7 +11,7 @@ ConditionPathExists=/var/lib/virtiofs/data/nextcloud/config/config.php
[Container]
ContainerName=nextcloud-cron-job
Image=docker.io/library/nextcloud:${NEXTCLOUD_MAJOR}-fpm-alpine
Image=nextcloud.image
# No need for root privileges
User=www-data

2
cookbooks/nextcloud/nextcloud-init.container

@ -15,7 +15,7 @@ PartOf=nextcloud.target
[Container]
ContainerName=nextcloud-init-job
Image=docker.io/library/nextcloud:${NEXTCLOUD_MAJOR}-fpm-alpine
Image=nextcloud.image
# No need for root privileges
User=www-data

2
cookbooks/nextcloud/nextcloud-nginx.container

@ -12,7 +12,7 @@ PartOf=nextcloud.target
[Container]
ContainerName=nextcloud-nginx
Image=docker.io/nginxinc/nginx-unprivileged:${NGINX_MAJOR}-alpine
Image=nextcloud-nginx.image
# Network configuration
Network=host

13
cookbooks/nextcloud/nextcloud-nginx.image

@ -0,0 +1,13 @@
[Unit]
Description=podman pull docker.io/nginxinc/nginx-unprivileged
Documentation=https://hub.docker.com/r/nginxinc/nginx-unprivileged/
# Only start if Nextcloud has been configured
ConditionPathExists=/etc/quadlets/nextcloud/config.env
[Image]
Image=docker.io/nginxinc/nginx-unprivileged:${NGINX_MAJOR}-alpine
[Service]
# These environment variables are sourced to be used by systemd in the Exec* commands
EnvironmentFile=/etc/quadlets/nextcloud/config.env

2
cookbooks/nextcloud/nextcloud-redis.container

@ -11,7 +11,7 @@ PartOf=nextcloud.target
[Container]
ContainerName=nextcloud-redis
Image=docker.io/library/redis:${REDIS_MAJOR}-alpine
Image=nextcloud-redis.image
# Network configuration
Network=host

13
cookbooks/nextcloud/nextcloud-redis.image

@ -0,0 +1,13 @@
[Unit]
Description=podman pull docker.io/library/redis
Documentation=https://hub.docker.com/_/redis/
# Only start if Nextcloud has been configured
ConditionPathExists=/etc/quadlets/nextcloud/config.env
[Image]
Image=docker.io/library/redis:${REDIS_MAJOR}-alpine
[Service]
# These environment variables are sourced to be used by systemd in the Exec* commands
EnvironmentFile=/etc/quadlets/nextcloud/config.env

2
cookbooks/nextcloud/nextcloud-upgrade.container

@ -15,7 +15,7 @@ PartOf=nextcloud.target
[Container]
ContainerName=nextcloud-upgrade-to-${NEXTCLOUD_MAJOR}-job
Image=docker.io/library/nextcloud:${NEXTCLOUD_MAJOR}-fpm-alpine
Image=nextcloud.image
# No need for root privileges
User=www-data

13
cookbooks/nextcloud/nextcloud.image

@ -0,0 +1,13 @@
[Unit]
Description=podman pull docker.io/library/nextcloud
Documentation=https://hub.docker.com/_/nextcloud/
# Only start if Nextcloud has been configured
ConditionPathExists=/etc/quadlets/nextcloud/config.env
[Image]
Image=docker.io/library/nextcloud:${NEXTCLOUD_MAJOR}-fpm-alpine
[Service]
# These environment variables are sourced to be used by systemd in the Exec* commands
EnvironmentFile=/etc/quadlets/nextcloud/config.env

6
cookbooks/nginx/nginx-git.image

@ -0,0 +1,6 @@
[Unit]
Description=podman pull docker.io/alpine/git
Documentation=https://hub.docker.com/r/alpine/git
[Image]
Image=docker.io/alpine/git:latest

2
cookbooks/nginx/nginx-init.container

@ -14,7 +14,7 @@ PartOf=nginx.target
[Container]
ContainerName=nginx-init-job
Image=docker.io/alpine/git:latest
Image=nginx-git.image
# Network configuration
Network=host

4
cookbooks/nginx/nginx-server.container

@ -14,7 +14,7 @@ PartOf=nginx.target
[Container]
ContainerName=nginx-server
Image=docker.io/library/nginx:mainline-alpine
Image=nginx.image
AutoUpdate=registry
# Network configuration
@ -24,7 +24,7 @@ Network=host
EnvironmentFile=/etc/quadlets/nginx/config.env
# Volume mounts
Volume=/var/lib/quadlets/nginx/nginx/website:/usr/share/nginx/html:z
Volume=/var/lib/quadlets/nginx/cookbooks/nginx/website:/usr/share/nginx/html:z
# Health check
HealthCmd=curl -sSf -o /dev/null http://localhost/

2
cookbooks/nginx/nginx-update.container

@ -14,7 +14,7 @@ PartOf=nginx.target
[Container]
ContainerName=nginx-update-job
Image=docker.io/alpine/git:latest
Image=nginx-git.image
# Network configuration
Network=host

6
cookbooks/nginx/nginx.image

@ -0,0 +1,6 @@
[Unit]
Description=podman pull docker.io/library/nginx
Documentation=https://hub.docker.com/_/nginx
[Image]
Image=docker.io/library/nginx:mainline-alpine

90
cookbooks/nginx/tests/test_01_install.py

@ -0,0 +1,90 @@
import textwrap
import test_quadlet # noqa: F401
# Extra files to inject into the FCOS image for the tests in this file.
# The config.env is used to configure the nginx Quadlet.
PYTEST_FCOS_EXTRA_FILES = {
"/etc/quadlets/nginx/config.env": (
textwrap.dedent("""
# This file is generated for testing purposes.
GIT_REPO=https://github.com/nmasse-itix/podman-quadlet-cookbook.git
GIT_BRANCH=main
NGINX_PORT=80
NGINX_HOST=localhost
"""),
0,
0,
0o600,
),
}
"""
Verify that the nginx Quadlet is correctly installed and configured on a fresh VM boot.
"""
class TestNginxQuadlet(test_quadlet.TestQuadlet):
expected_services = [
{ "name": "nginx.target", "state": "active", "exists": True },
{ "name": "nginx-server.service", "state": "active", "exists": True },
{ "name": "nginx-init.service", "state": "inactive", "exists": True },
{ "name": "nginx-update.service", "state": "inactive", "exists": True },
{ "name": "nginx-update.timer", "state": "active", "exists": True },
]
expected_sockets = [
{ "uri": "tcp://127.0.0.1:80", "state": "listening" },
]
expected_ports = [
{ "number": 80, "protocol": "tcp", "state": "open" },
{ "number": 22, "protocol": "tcp", "state": "open" },
]
expected_files = [
{ "path": "/var/lib/quadlets/nginx", "type": "directory", "owner": "root", "group": "root", "mode": 0o755 },
{ "path": "/etc/quadlets/nginx/config.env", "type": "file", "owner": "root", "group": "root", "mode": 0o600 },
{ "path": "/var/lib/quadlets/nginx/.git", "type": "directory" },
]
expected_podman_images = [
{ "name": "docker.io/library/nginx", "tag": "mainline-alpine", "state": "present" },
]
expected_podman_containers = [
{ "name": "nginx-server", "state": "present" },
]
expected_main_service = "nginx.target"
expected_main_service_timeout = 300
def test_nginx_serves_content(self, fcos_host):
"""Nginx must serve an HTTP 200 response on port 80."""
result = fcos_host.run("curl -sSf -o /dev/null -w '%{http_code}' http://localhost/")
assert result.rc == 0, f"curl failed with exit code {result.rc}: {result.stderr}"
assert result.stdout.strip() == "200", f"Expected HTTP 200, got: {result.stdout.strip()}"
def test_nginx_serves_expected_html(self, fcos_host):
"""Nginx must serve the expected HTML content cloned from the git repository."""
result = fcos_host.run("curl -sSf http://localhost/")
assert result.rc == 0, f"curl failed with exit code {result.rc}: {result.stderr}"
assert "Hello World" in result.stdout, f"Expected 'Hello World' in the response, but got: {result.stdout}"
def test_nginx_update_cycle(self, fcos_host):
"""Restarting nginx.target must trigger nginx-update (git pull) and nginx must keep serving content."""
result = fcos_host.run("systemctl restart nginx.target")
assert result.rc == 0, f"Failed to restart nginx.target: {result.stderr}"
# Wait for nginx.target to become active again after the update
self.wait_for_service(fcos_host, "nginx.target", timeout=120)
# nginx-update.service must have run (git pull) and completed (oneshot → inactive)
# nginx-init.service must NOT have run again (.git already exists, condition not met)
self.check_expected_services(fcos_host, [
{ "name": "nginx-update.service", "state": "inactive", "exists": True },
{ "name": "nginx-init.service", "state": "inactive", "exists": True },
{ "name": "nginx-server.service", "state": "active", "exists": True },
])
# nginx must still serve the expected content after the update cycle
result = fcos_host.run("curl -sSf http://localhost/")
assert result.rc == 0, f"curl failed after update cycle: {result.stderr}"
assert "Hello World" in result.stdout, f"Expected 'Hello World' after update cycle, but got: {result.stdout}"

2
cookbooks/postgresql/postgresql-init.container

@ -53,7 +53,7 @@ Volume=/etc/quadlets/postgresql/init.d:/docker-entrypoint-initdb.d:z,ro
[Service]
Restart=no
TimeoutStartSec=30
TimeoutStartSec=60
# These environment variables are sourced to be used by systemd in the Exec* commands
EnvironmentFile=/etc/quadlets/postgresql/config.env

4
cookbooks/postgresql/postgresql.image

@ -1,6 +1,6 @@
[Unit]
Description=podman pull docker.io/pgautoupgrade/pgautoupgrade
Documentation=https://hub.docker.com/r/pgautoupgrade/pgautoupgrade
Description=podman pull docker.io/library/postgres
Documentation=https://hub.docker.com/_/postgres/
# Only start if PostgreSQL has been configured
ConditionPathExists=/etc/quadlets/postgresql/config.env

0
cookbooks/postgresql/tests/__init__.py

172
cookbooks/postgresql/tests/conftest.py

@ -1,167 +1,13 @@
"""Pytest fixtures for the PostgreSQL cookbook end-to-end tests.
Prerequisites:
- Must run as root (KVM/libvirt access).
- The Fedora CoreOS base QCOW2 image must be present at
/var/lib/libvirt/images/library/fedora-coreos.qcow2.
Run ``coreos-installer download -p qemu -f qcow2.xz -d
-C /var/lib/libvirt/images/library/`` to fetch it.
- fcos.ign for the postgresql cookbook is built on demand by
``make -C postgresql butane`` if it is missing. This requires
local.bu (SSH keys, user setup) to be present at the repository root.
"""
import os
import shutil
import subprocess
import sys
from pathlib import Path
import pytest
import testinfra
REPO_ROOT = Path(__file__).parent.parent.parent
POSTGRESQL_DIR = REPO_ROOT / "postgresql"
# Add directories to the path so we can import local helpers and shared vm.py.
sys.path.insert(0, str(Path(__file__).parent))
sys.path.insert(0, str(REPO_ROOT / "tests"))
from vm import FCOSVirtualMachine, build_test_ignition, ensure_fcos_ign # noqa: E402
from helpers import (
PG_DB,
PG_MAJOR_DEFAULT,
PG_MAJOR_UPGRADE_FROM,
PG_MAJOR_UPGRADE_TO,
PG_PASSWORD,
PG_USER,
run_sql,
)
# ---------------------------------------------------------------------------
# Helpers
# ---------------------------------------------------------------------------
def _default_config_env(pg_major: str) -> dict[str, str]:
"""Return the full default config.env content as a dict for the given PG major."""
return {
"PG_MAJOR": pg_major,
"POSTGRES_USER": PG_USER,
"POSTGRES_PASSWORD": PG_PASSWORD,
"POSTGRES_DB": PG_DB,
"POSTGRES_HOST_AUTH_METHOD": "scram-sha-256",
"POSTGRES_INITDB_ARGS": "--auth-host=scram-sha-256",
"POSTGRES_ARGS": "-h 127.0.0.1",
"PGPORT": "5432",
"POSTGRES_BACKUP_RETENTION": "7",
}
# ---------------------------------------------------------------------------
# Shared fixtures (module-scoped → one VM per test module)
# ---------------------------------------------------------------------------
@pytest.fixture(scope="module")
def virtiofs_dir() -> Path:
"""Unique VirtioFS host directory for the default test VM."""
d = Path("/srv") / f"fcos-test-postgresql-{os.getpid()}"
d.mkdir(parents=True, exist_ok=True)
yield d
if d.exists():
shutil.rmtree(d)
@pytest.fixture(scope="module")
def postgresql_vm(
test_ssh_key: Path,
test_ssh_pubkey: str,
virtiofs_dir: Path,
tmp_path_factory: pytest.TempPathFactory,
) -> FCOSVirtualMachine:
"""Running CoreOS VM with PostgreSQL installed at the default PG version.
The VM is created once per test module and destroyed in teardown.
All tests in the same module share this VM instance.
"""
fcos_ign = ensure_fcos_ign(POSTGRESQL_DIR)
test_ign = tmp_path_factory.mktemp("ign") / "fcos-test.ign"
build_test_ignition(
base_ignition=fcos_ign,
ssh_pubkey=test_ssh_pubkey,
output=test_ign,
)
vm = FCOSVirtualMachine(
name=f"postgresql-{os.getpid()}",
ignition_file=test_ign,
virtiofs_dir=virtiofs_dir,
)
vm.create()
vm.wait_ssh(ssh_key=test_ssh_key, timeout=300)
vm.wait_for_service("postgresql.target", ssh_key=test_ssh_key, timeout=300)
yield vm
vm.destroy()
@pytest.fixture(scope="module")
def pg_host(postgresql_vm: FCOSVirtualMachine, test_ssh_key: Path):
"""testinfra SSH host connected to the default PostgreSQL VM."""
return testinfra.get_host(
f"ssh://root@{postgresql_vm.ip}",
ssh_extra_args=(
f"-i {test_ssh_key}"
" -o StrictHostKeyChecking=no"
" -o UserKnownHostsFile=/dev/null"
),
)
@pytest.fixture(scope="module")
def upgrade_virtiofs_dir() -> Path:
"""Unique VirtioFS host directory for the upgrade test VM."""
d = Path("/srv") / f"fcos-test-pg-upgrade-{os.getpid()}"
d.mkdir(parents=True, exist_ok=True)
yield d
if d.exists():
shutil.rmtree(d)
@pytest.fixture(scope="module")
def upgrade_vm(
test_ssh_key: Path,
test_ssh_pubkey: str,
upgrade_virtiofs_dir: Path,
tmp_path_factory: pytest.TempPathFactory,
) -> FCOSVirtualMachine:
"""Running CoreOS VM with PostgreSQL installed at PG_MAJOR_UPGRADE_FROM.
Used exclusively by test_upgrade.py to verify the major version upgrade path.
The config.env is overridden via the ignition overlay so the VM boots
directly with PG_MAJOR_UPGRADE_FROM, regardless of the cookbook's default.
"""
fcos_ign = ensure_fcos_ign(POSTGRESQL_DIR)
test_ign = tmp_path_factory.mktemp("ign-upgrade") / "fcos-upgrade.ign"
build_test_ignition(
base_ignition=fcos_ign,
ssh_pubkey=test_ssh_pubkey,
output=test_ign,
config_env_overrides=_default_config_env(PG_MAJOR_UPGRADE_FROM),
)
vm = FCOSVirtualMachine(
name=f"pg-upgrade-{os.getpid()}",
ignition_file=test_ign,
virtiofs_dir=upgrade_virtiofs_dir,
)
vm.create()
vm.wait_ssh(ssh_key=test_ssh_key, timeout=300)
vm.wait_for_service("postgresql.target", ssh_key=test_ssh_key, timeout=300)
# Because PostgreSQL init & upgrades can take a long time, we give the VM more resources.
@pytest.fixture(scope="package")
def fcos_vm_config() -> tuple[int, int, int, int]:
"""Default VM configuration (memory in MB, vCPUs, root disk size in GB, /var disk size in GB)."""
return (8192, 4, 50, 100) # (memory in MB, vCPUs, disk size for / and /var in GB)
yield vm
# PostgreSQL major versions to test during upgrade from PG_MAJOR_DEFAULT.
@pytest.fixture(scope="package", params=[15, 16, 17, 18])
def pg_upgrade_major(request) -> int:
return int(request.param)
vm.destroy()

90
cookbooks/postgresql/tests/helpers.py

@ -1,27 +1,54 @@
"""Shared constants and helper functions for PostgreSQL integration tests.
import sys
import pytest
import testinfra
import test_quadlet # noqa: F401
These are extracted from conftest.py so that test modules can import them
without conflicting with pytest's conftest discovery mechanism.
"""
Verify that the postgresql Quadlet is correctly installed and configured on a fresh VM boot.
"""
class TestPostgresqlQuadlet(test_quadlet.TestQuadlet):
expected_services = [
{ "name": "postgresql-server.service", "state": "active", "exists": True },
{ "name": "postgresql-init.service", "state": "inactive", "exists": True },
{ "name": "postgresql-upgrade.service", "state": "inactive", "exists": True },
{ "name": "postgresql-backup.service", "state": "inactive", "exists": True },
{ "name": "postgresql-set-major.service", "state": "inactive", "exists": True },
{ "name": "postgresql-backup.timer", "state": "active", "exists": True },
{ "name": "postgresql.target", "state": "active", "exists": True },
]
from pathlib import Path
expected_sockets = [
{ "uri": "tcp://127.0.0.1:5432", "state": "listening" },
{ "uri": "tcp://0.0.0.0:5432", "state": "closed" },
{ "uri": "tcp://:::5432", "state": "closed" },
]
# Default version shipped in the example config.env.
PG_MAJOR_DEFAULT = "14"
expected_ports = [
{ "number": 5432, "protocol": "tcp", "state": "closed" },
{ "number": 22, "protocol": "tcp", "state": "open" },
]
# Version to start from in the major-upgrade scenario.
PG_MAJOR_UPGRADE_FROM = "14"
expected_files = [
{ "path": "/var/lib/quadlets/postgresql", "type": "directory", "owner": "postgresql", "group": "itix-svc", "mode": 0o755 },
{ "path": "/etc/quadlets/postgresql/config.env", "type": "file", "owner": "root", "group": "root", "mode": 0o600 },
{ "path": "/var/lib/virtiofs/data/postgresql", "type": "directory", "owner": "postgresql", "group": "itix-svc", "mode": 0o700 },
{ "path": "/var/lib/virtiofs/data/postgresql/backup", "type": "directory", "owner": "postgresql", "group": "itix-svc", "mode": 0o700 },
{ "path": "/var/lib/quadlets/postgresql/.initialized", "type": "file", "owner": "root", "group": "root", "mode": 0o644 },
]
# Version to upgrade to in the major-upgrade scenario.
PG_MAJOR_UPGRADE_TO = "17"
expected_podman_images = [
]
# Default credentials from config/examples/config.env.
PG_USER = "postgres"
PG_PASSWORD = "postgres"
PG_DB = "postgres"
expected_podman_containers = [
{ "name": "postgresql-server", "state": "present", "pid1": { "owner": "10004", "group": "10000", "commandline": "postgres -h 127.0.0.1" } },
]
expected_main_service = "postgresql.target"
expected_main_service_timeout = 300
expected_pg_major = 0 # TODO: set this variable in subclasses
def run_sql(vm, ssh_key: Path, sql: str) -> str:
def _run_sql(self, fcos_host, query: str, check: bool = True, database: str = "postgres") -> str:
"""Execute *sql* via ``podman exec`` on the running postgresql-server container.
Uses the Unix socket at /var/run/postgresql inside the container (mapped
@ -32,8 +59,35 @@ def run_sql(vm, ssh_key: Path, sql: str) -> str:
Returns:
Stripped stdout of the psql command.
"""
result = vm.ssh_run(
f"podman exec postgresql-server psql -U {PG_USER} -t -c \"{sql}\"",
ssh_key,
result = fcos_host.run(
f"podman exec postgresql-server psql -U postgres -d {database} --csv -t -c %s", query
)
if check:
assert result.exit_status == 0, f"SQL query \"{query}\" failed with exit code {result.exit_status}: {result.stderr}"
return result.stdout.strip()
def test_postgresql_major_version_items(self, fcos_host):
"""The major version from the config must be reflected in the filesystem and in the running Podman image."""
self.check_expected_files(fcos_host, [
{ "path": f"/var/lib/quadlets/postgresql/{self.expected_pg_major}", "type": "directory", "owner": "postgresql", "group": "itix-svc", "mode": 0o755 },
])
self.check_expected_podman_images(fcos_host, [
{ "name": "docker.io/library/postgres", "tag": f"{self.expected_pg_major}-alpine", "state": "present" },
])
def test_latest_symlink_exists(self, fcos_host):
"""The 'latest' symlink must point to the active major-version directory."""
link = fcos_host.file("/var/lib/quadlets/postgresql/latest")
assert link.exists
assert link.is_symlink
assert link.linked_to == f"/var/lib/quadlets/postgresql/{self.expected_pg_major}"
def test_postgresql_accepts_connections(self, fcos_host):
"""PostgreSQL must respond to a trivial SQL query."""
output = self._run_sql(fcos_host, "SELECT 1 AS probe")
assert output == "1", f"Unexpected output from SQL query: {output}"
def test_postgresql_version_matches_config(self, fcos_host):
"""The running PostgreSQL server must report the version from PG_MAJOR_DEFAULT."""
output = self._run_sql(fcos_host, "SHOW server_version")
assert output.startswith(f"{self.expected_pg_major}."), f"Expected PostgreSQL server version to start with {self.expected_pg_major}, but got {output}"

155
cookbooks/postgresql/tests/test_01_install_upgrade_backup.py

@ -0,0 +1,155 @@
import pytest
import textwrap
# Add the current cookbook's tests directory to the path so we can import helpers.py.
from pathlib import Path
import sys
sys.path.insert(0, str(Path(__file__).parent))
import helpers # noqa: E402
# Major version of PostgreSQL to install by default on a fresh VM boot.
PG_MAJOR_DEFAULT = 14
# Extra files to inject into the FCOS image for the tests in this file.
# The config.env is used to configure the PostgreSQL Quadlet, and the init.d/test.sql file is an init hook that creates a test database and user on the first boot.
PYTEST_FCOS_EXTRA_FILES = {
"/etc/quadlets/postgresql/config.env": (
textwrap.dedent(f"""
# This file is generated by conftest.py for testing purposes.
POSTGRES_USER=postgres
POSTGRES_PASSWORD=postgres
POSTGRES_DB=postgres
POSTGRES_HOST_AUTH_METHOD=scram-sha-256
POSTGRES_INITDB_ARGS=--auth-host=scram-sha-256
POSTGRES_ARGS=-h 127.0.0.1
PGPORT=5432
PG_MAJOR={PG_MAJOR_DEFAULT}
POSTGRES_BACKUP_RETENTION=7
"""),
0,
0,
0o600,
),
"/etc/quadlets/postgresql/init.d/test.sql": (
textwrap.dedent("""
-- This file is generated by conftest.py for testing purposes.
CREATE USER test WITH PASSWORD 'test';
CREATE DATABASE testdb OWNER test;
GRANT ALL PRIVILEGES ON DATABASE testdb TO test;
ALTER ROLE test SET client_encoding TO 'utf8';
"""),
10004,
10000,
0o600,
),
}
"""
Verify that the postgresql Quadlet is correctly installed and configured on a fresh VM boot.
"""
class TestPostgresqlQuadletInstallUpgradeBackup(helpers.TestPostgresqlQuadlet):
expected_pg_major = PG_MAJOR_DEFAULT
def test_can_create_database(self, fcos_host):
"""Should be possible to create a new database."""
self._run_sql(fcos_host, "CREATE DATABASE upgrade_path_db")
output = self._run_sql(fcos_host, "SELECT datname FROM pg_database WHERE datname = 'upgrade_path_db'")
assert output == "upgrade_path_db", f"Unexpected output from SQL query: {output}"
output = self._run_sql(fcos_host, "CREATE TABLE upgrade_path (version VARCHAR);", database="upgrade_path_db")
output = self._run_sql(fcos_host, "INSERT INTO upgrade_path (version) SELECT version();", database="upgrade_path_db")
def test_init_hook_has_created_database(self, fcos_host):
"""The injected init hook has created the test database and user."""
output = self._run_sql(fcos_host, "SELECT datname FROM pg_database WHERE datname = 'testdb'")
assert output == "testdb", f"Unexpected output from SQL query: {output}"
output = self._run_sql(fcos_host, "SELECT 1 FROM pg_roles WHERE rolname = 'test'")
assert output == "1", f"Unexpected output from SQL query: {output}"
def test_created_database_and_user_is_working(self, fcos_host):
"""Should be able to connect to the test database with the test user."""
result = fcos_host.run(
"podman exec postgresql-server psql -U test -d testdb --csv -t -c %s", "SELECT 1 AS probe"
)
assert result.exit_status == 0, f"SQL query failed with exit code {result.exit_status}: {result.stderr}"
output = result.stdout.strip()
assert output == "1", f"Unexpected output from SQL query: {output}"
def test_upgrade_postgresql(self, fcos_host, pg_upgrade_major):
"""Should be able to upgrade PostgreSQL by changing PG_MAJOR and rebooting."""
# Stop the server to release the data directory
result = fcos_host.run("systemctl stop postgresql.target")
assert result.exit_status == 0, f"Failed to stop postgresql.target with exit code {result.exit_status}: {result.stderr}"
self.check_expected_services(fcos_host, expected_services=[
{ "name": "postgresql-server.service", "state": "inactive", "exists": True },
])
# Change PG_MAJOR in the config.env
fcos_host.run(f"sed -i 's/^PG_MAJOR=.*/PG_MAJOR={pg_upgrade_major}/' /etc/quadlets/postgresql/config.env")
# Start the server again after bumping PG_MAJOR in the configuration
result = fcos_host.run("systemctl start postgresql.target")
assert result.exit_status == 0, f"Failed to start postgresql.target with exit code {result.exit_status}: {result.stderr}"
self.check_expected_services(fcos_host, expected_services=[
{ "name": "postgresql-server.service", "state": "active", "exists": True },
{ "name": "postgresql-init.service", "state": "inactive", "exists": True },
{ "name": "postgresql-upgrade.service", "state": "inactive", "exists": True },
])
# The server_version must reflect the new major version after the upgrade
output = self._run_sql(fcos_host, "SHOW server_version")
assert output.startswith(f"{pg_upgrade_major}."), f"Expected PostgreSQL server version to start with {pg_upgrade_major}, but got {output}"
def test_data_is_still_there_after_upgrade(self, fcos_host, pg_upgrade_major):
"""Data created before the upgrade must still be there after the upgrade."""
# Check that the old data is still there after the upgrade
output = self._run_sql(fcos_host, "SELECT datname FROM pg_database WHERE datname = 'upgrade_path_db'")
assert output == "upgrade_path_db", f"Unexpected output from SQL query: {output}"
output = self._run_sql(fcos_host, "SELECT datname FROM pg_database WHERE datname = 'testdb'")
assert output == "testdb", f"Unexpected output from SQL query: {output}"
result = fcos_host.run(
"podman exec postgresql-server psql -U test -d testdb --csv -t -c %s", "SELECT 1 AS probe"
)
assert result.exit_status == 0, f"SQL query failed with exit code {result.exit_status}: {result.stderr}"
def test_insert_version(self, fcos_host, pg_upgrade_major):
"""Should be able to insert data into the database after the upgrade."""
output = self._run_sql(fcos_host, "INSERT INTO upgrade_path (version) SELECT version();", database="upgrade_path_db")
def test_upgraded_postgresql_version_is_correct(self, fcos_host, pg_upgrade_major):
"""The running PostgreSQL server must report the updated version."""
# The server_version must reflect the new major version after the upgrade
output = self._run_sql(fcos_host, "SHOW server_version")
assert output.startswith(f"{pg_upgrade_major}."), f"Expected PostgreSQL server version to start with {pg_upgrade_major}, but got {output}"
# The new PostgreSQL major version's image must be pulled and present in Podman after the upgrade
self.check_expected_podman_images(fcos_host, expected_podman_images=[
{ "name": "docker.io/library/postgres", "tag": f"{pg_upgrade_major}-alpine", "state": "present" },
])
def test_latest_symlink_has_expected_target(self, fcos_host, pg_upgrade_major):
"""The 'latest' symlink must point to the active major-version directory."""
link = fcos_host.file("/var/lib/quadlets/postgresql/latest")
assert link.exists
assert link.is_symlink
assert link.linked_to == f"/var/lib/quadlets/postgresql/{pg_upgrade_major}"
def test_create_backup(self, fcos_host):
"""Should be able to create a backup using the backup service."""
result = fcos_host.run("systemctl start postgresql-backup.service")
assert result.exit_status == 0, f"Failed to start postgresql-backup.service with exit code {result.exit_status}: {result.stderr}"
# Check that a backup file has been created in the backup directory
backup_dir = fcos_host.file("/var/lib/virtiofs/data/postgresql/backup")
assert backup_dir.exists
assert backup_dir.is_directory
backup_list = backup_dir.listdir()
assert len(backup_list) > 0, "No backup files found in the backup directory after running the backup service!"
latest_backup = max(backup_list)
latest_backup_content = fcos_host.file(f"/var/lib/virtiofs/data/postgresql/backup/{latest_backup}").listdir()
assert len(latest_backup_content) > 0, "No files found in the latest backup directory after running the backup service!"
assert "backup_manifest" in latest_backup_content, f"Expected 'backup_manifest' file in the backup, but got: {latest_backup_content}"
assert "base.tar" in latest_backup_content, f"Expected 'base.tar' file in the backup, but got: {latest_backup_content}"
assert "pg_wal.tar" in latest_backup_content, f"Expected 'pg_wal.tar' file in the backup, but got: {latest_backup_content}"
assert "dump-upgrade_path_db.sql.gz" in latest_backup_content, f"Expected 'dump-upgrade_path_db.sql.gz' file in the backup, but got: {latest_backup_content}"
assert "dump-testdb.sql.gz" in latest_backup_content, f"Expected 'dump-testdb.sql.gz' file in the backup, but got: {latest_backup_content}"

56
cookbooks/postgresql/tests/test_02_restore.py

@ -0,0 +1,56 @@
import pytest
import textwrap
# Add the current cookbook's tests directory to the path so we can import helpers.py.
from pathlib import Path
import sys
sys.path.insert(0, str(Path(__file__).parent))
import helpers # noqa: E402
# Major version of PostgreSQL to install by default on a fresh VM boot.
PG_MAJOR_DEFAULT = 18
# Extra files to inject into the FCOS image for the tests in this file.
# The config.env is used to configure the PostgreSQL Quadlet.
PYTEST_FCOS_EXTRA_FILES = {
"/etc/quadlets/postgresql/config.env": (
textwrap.dedent(f"""
# This file is generated by conftest.py for testing purposes.
POSTGRES_USER=postgres
POSTGRES_PASSWORD=postgres
POSTGRES_DB=postgres
POSTGRES_HOST_AUTH_METHOD=scram-sha-256
POSTGRES_INITDB_ARGS=--auth-host=scram-sha-256
POSTGRES_ARGS=-h 127.0.0.1
PGPORT=5432
PG_MAJOR={PG_MAJOR_DEFAULT}
POSTGRES_BACKUP_RETENTION=7
"""),
0,
0,
0o600,
),
}
"""
Verify that the postgresql Quadlet correctly restores a database from a backup
on a fresh VM with the backup data present in the virtiofs.
"""
class TestPostgresqlQuadletRestore(helpers.TestPostgresqlQuadlet):
expected_pg_major = PG_MAJOR_DEFAULT
def test_data_is_still_there_after_restore(self, fcos_host):
"""Data created before the restore must still be there after the restore."""
# Check that the old data is still there after the restore
output = self._run_sql(fcos_host, "SELECT datname FROM pg_database WHERE datname = 'upgrade_path_db'")
assert output == "upgrade_path_db", f"Unexpected output from SQL query: {output}"
output = self._run_sql(fcos_host, "SELECT datname FROM pg_database WHERE datname = 'testdb'")
assert output == "testdb", f"Unexpected output from SQL query: {output}"
result = fcos_host.run(
"podman exec postgresql-server psql -U test -d testdb --csv -t -c %s", "SELECT 1 AS probe"
)
assert result.exit_status == 0, f"SQL query failed with exit code {result.exit_status}: {result.stderr}"
# Check that the upgrade_path table contains the initial postgresql version (14)
output = self._run_sql(fcos_host, "SELECT LEFT(version, 14) FROM upgrade_path ORDER BY version ASC LIMIT 1", database="upgrade_path_db")
assert output.startswith("PostgreSQL 14."), f"Unexpected output from SQL query: {output}"

119
cookbooks/postgresql/tests/test_backup.py

@ -1,119 +0,0 @@
"""Test PostgreSQL backup creation and VirtioFS storage.
These tests verify that:
- The backup oneshot service can be triggered manually and runs to completion.
- The expected backup artefacts land in the VirtioFS share (accessible from
the test runner's host filesystem without SSH).
- The backup retention policy removes stale backups.
Note: tests within a module share a single VM (module-scoped fixture), so
the order of test execution matters here: the backup files checked in later
tests are created by the earlier trigger test.
"""
import time
from pathlib import Path
# ---------------------------------------------------------------------------
# Trigger and completion
# ---------------------------------------------------------------------------
def test_create_database_and_table(postgresql_vm, test_ssh_key):
"""Create a test database and table with some data to ensure the backup has
something to capture."""
postgresql_vm.ssh_run(
"podman exec postgresql-server psql -U postgres -c \"CREATE DATABASE test;\"",
test_ssh_key,
)
postgresql_vm.ssh_run(
"podman exec postgresql-server psql -U postgres -d test -c \"CREATE TABLE witness (id SERIAL PRIMARY KEY, version VARCHAR); INSERT INTO witness (version) SELECT version();\"",
test_ssh_key,
)
def test_trigger_backup(postgresql_vm, test_ssh_key):
"""Starting postgresql-backup.service must succeed (no immediate error)."""
postgresql_vm.ssh_run(
"systemctl start postgresql-backup.service",
test_ssh_key,
)
def test_backup_completes_successfully(postgresql_vm, test_ssh_key):
"""postgresql-backup.service must finish in ``inactive`` state (not ``failed``)."""
state = postgresql_vm.wait_for_unit_done(
"postgresql-backup.service", test_ssh_key, timeout=120
)
assert state == "inactive", (
f"Backup service ended in unexpected state {state!r}. "
"Run: systemctl status postgresql-backup.service --no-pager"
)
# ---------------------------------------------------------------------------
# VirtioFS artefacts (verified from the host — no SSH required)
# ---------------------------------------------------------------------------
def test_backup_directory_exists_in_virtiofs(virtiofs_dir: Path):
"""The postgresql/backup sub-directory must exist in the VirtioFS share."""
backup_root = virtiofs_dir / "postgresql" / "backup"
assert backup_root.is_dir(), f"Backup directory not found on host: {backup_root}"
def test_at_least_one_backup_present(virtiofs_dir: Path):
"""At least one timestamped backup sub-directory must exist."""
backup_root = virtiofs_dir / "postgresql" / "backup"
backups = sorted(backup_root.iterdir())
assert backups, f"No backup sub-directories found under {backup_root}"
def test_backup_manifest_present(virtiofs_dir: Path):
"""The latest backup must contain a ``backup_manifest`` file (pg_basebackup)."""
backup_root = virtiofs_dir / "postgresql" / "backup"
latest = sorted(backup_root.iterdir())[-1]
assert (latest / "backup_manifest").exists(), (
f"backup_manifest missing in {latest}"
)
def test_backup_base_tar_present(virtiofs_dir: Path):
"""The latest backup must contain a ``base.tar`` cluster archive."""
backup_root = virtiofs_dir / "postgresql" / "backup"
latest = sorted(backup_root.iterdir())[-1]
assert (latest / "base.tar").exists(), f"base.tar missing in {latest}"
def test_database_dump_present(virtiofs_dir: Path):
"""At least one ``dump-test.sql.gz`` file must exist alongside the cluster backup."""
backup_root = virtiofs_dir / "postgresql" / "backup"
latest = sorted(backup_root.iterdir())[-1]
dumps = list(latest.glob("dump-test.sql.gz"))
assert dumps, f"No dump-test.sql.gz files found in {latest}"
# ---------------------------------------------------------------------------
# Retention policy
# ---------------------------------------------------------------------------
def test_backup_retention_enforced(postgresql_vm, test_ssh_key, virtiofs_dir: Path):
"""After triggering several extra backups the count must stay within the
configured retention limit (POSTGRES_BACKUP_RETENTION=7)."""
retention = 7
# Trigger ten additional backups so the rotation code has something to do.
for _ in range(10):
postgresql_vm.ssh_run(
"systemctl start postgresql-backup.service", test_ssh_key
)
state = postgresql_vm.wait_for_unit_done(
"postgresql-backup.service", test_ssh_key, timeout=120
)
assert state == "inactive"
time.sleep(1) # ensure distinct timestamp directories
backup_root = virtiofs_dir / "postgresql" / "backup"
count = len(list(backup_root.iterdir()))
assert count <= retention, (
f"Retention policy failed: {count} backups present, expected ≤ {retention}"
)

149
cookbooks/postgresql/tests/test_install.py

@ -1,149 +0,0 @@
"""Test that a fresh PostgreSQL installation is healthy.
These tests run against a brand-new VM booted from the cookbook's default
ignition (PG_MAJOR=14, example credentials). They verify:
- All expected systemd units are in the correct state.
- The PostgreSQL server is listening and accepts queries.
- VirtioFS is mounted and the expected directories exist.
"""
from pathlib import Path
from helpers import PG_MAJOR_DEFAULT, run_sql
# ---------------------------------------------------------------------------
# Systemd unit state
# ---------------------------------------------------------------------------
def test_postgresql_target_active(pg_host):
"""postgresql.target must be active once the full startup chain completes."""
assert pg_host.service("postgresql.target").is_running
def test_postgresql_server_running(pg_host):
"""The long-running PostgreSQL server container must be active."""
assert pg_host.service("postgresql-server.service").is_running
def test_set_major_oneshot_completed(pg_host):
"""postgresql-set-major.service (oneshot) must have finished — not still running."""
result = pg_host.run("systemctl is-active postgresql-set-major.service")
assert result.stdout.strip() == "inactive"
def test_init_oneshot_completed(pg_host):
"""postgresql-init.service (oneshot) must have finished after initialization."""
result = pg_host.run("systemctl is-active postgresql-init.service")
assert result.stdout.strip() == "inactive"
def test_upgrade_oneshot_completed(pg_host):
"""postgresql-upgrade.service (oneshot) must have finished — no upgrade needed
on a fresh install."""
result = pg_host.run("systemctl is-active postgresql-upgrade.service")
assert result.stdout.strip() == "inactive"
def test_backup_timer_scheduled(pg_host):
"""The daily backup timer must be active (scheduled)."""
assert pg_host.service("postgresql-backup.timer").is_running
# ---------------------------------------------------------------------------
# Network / socket
# ---------------------------------------------------------------------------
def test_postgresql_port_listening(pg_host):
"""PostgreSQL must be listening on 127.0.0.1:5432 (POSTGRES_ARGS=-h 127.0.0.1)."""
assert pg_host.socket("tcp://127.0.0.1:5432").is_listening
# ---------------------------------------------------------------------------
# Filesystem layout
# ---------------------------------------------------------------------------
def test_virtiofs_mounted(pg_host):
"""The VirtioFS share must be mounted at /var/lib/virtiofs/data."""
mount = pg_host.mount_point("/var/lib/virtiofs/data")
assert mount.exists
assert mount.filesystem == "virtiofs"
def test_virtiofs_postgresql_dir(pg_host):
"""/var/lib/virtiofs/data/postgresql must be created by tmpfiles.d."""
assert pg_host.file("/var/lib/virtiofs/data/postgresql").is_directory
def test_virtiofs_backup_dir(pg_host):
"""/var/lib/virtiofs/data/postgresql/backup must be created by tmpfiles.d."""
assert pg_host.file("/var/lib/virtiofs/data/postgresql/backup").is_directory
def test_data_dir_exists(pg_host):
"""/var/lib/quadlets/postgresql must exist with the correct ownership."""
f = pg_host.file("/var/lib/quadlets/postgresql")
assert f.is_directory
assert f.user == "postgresql"
def test_latest_symlink_exists(pg_host):
"""The 'latest' symlink must point to the active major-version directory."""
link = pg_host.file("/var/lib/quadlets/postgresql/latest")
assert link.exists
assert link.is_symlink
def test_version_dir_exists(pg_host):
"""A directory named after PG_MAJOR_DEFAULT must exist under the data dir."""
assert pg_host.file(
f"/var/lib/quadlets/postgresql/{PG_MAJOR_DEFAULT}"
).is_directory
def test_initialized_flag_exists(pg_host):
"""The .initialized sentinel file must be written after a successful init."""
assert pg_host.file("/var/lib/quadlets/postgresql/.initialized").exists
def test_config_env_present(pg_host):
"""/etc/quadlets/postgresql/config.env must be present and not world-readable."""
f = pg_host.file("/etc/quadlets/postgresql/config.env")
assert f.exists
# mode 0600 — world and group bits must be 0
assert f.mode & 0o077 == 0
# ---------------------------------------------------------------------------
# Database connectivity
# ---------------------------------------------------------------------------
def test_postgresql_accepts_connections(postgresql_vm, test_ssh_key):
"""PostgreSQL must respond to a trivial SQL query."""
output = run_sql(postgresql_vm, test_ssh_key, "SELECT 1 AS probe")
assert "1" in output
def test_postgresql_version_matches_config(postgresql_vm, test_ssh_key):
"""The running PostgreSQL server must report the version from PG_MAJOR_DEFAULT."""
output = run_sql(postgresql_vm, test_ssh_key, "SHOW server_version")
assert PG_MAJOR_DEFAULT in output
def test_can_create_database(postgresql_vm, test_ssh_key):
"""Should be possible to create a new database."""
run_sql(
postgresql_vm,
test_ssh_key,
"CREATE DATABASE install_test_db",
)
output = run_sql(
postgresql_vm,
test_ssh_key,
"SELECT datname FROM pg_database WHERE datname = 'install_test_db'",
)
assert "install_test_db" in output

154
cookbooks/postgresql/tests/test_recovery.py

@ -1,154 +0,0 @@
"""Test PostgreSQL automatic crash recovery.
Scenarios covered:
1. Container crash (SIGKILL via ``podman kill``) → systemd restarts the
service automatically (Restart=always, RestartSec=10).
2. Hard VM reboot → all services start cleanly and data is intact.
All tests share the module-scoped ``postgresql_vm`` fixture. Because some
tests are destructive (they kill the container), they are intentionally
sequenced: create data → crash → verify recovery → create more data →
reboot → verify recovery.
"""
import time
from helpers import run_sql
# Data written before the crash that must survive each recovery scenario.
CRASH_WITNESS_TABLE = "crash_witness"
CRASH_WITNESS_VALUE = "before_crash"
REBOOT_WITNESS_TABLE = "reboot_witness"
REBOOT_WITNESS_VALUE = "before_reboot"
# ---------------------------------------------------------------------------
# Scenario 1: container crash
# ---------------------------------------------------------------------------
def test_server_running_before_crash(pg_host):
"""Precondition: postgresql-server.service must be active before we crash it."""
assert pg_host.service("postgresql-server.service").is_running
def test_create_data_before_crash(postgresql_vm, test_ssh_key):
"""Insert a row that must survive the container crash."""
run_sql(
postgresql_vm,
test_ssh_key,
(
f"CREATE TABLE IF NOT EXISTS {CRASH_WITNESS_TABLE} "
f"(id SERIAL PRIMARY KEY, message TEXT NOT NULL); "
f"INSERT INTO {CRASH_WITNESS_TABLE} (message) "
f"VALUES ('{CRASH_WITNESS_VALUE}');"
),
)
def test_kill_postgresql_container(postgresql_vm, test_ssh_key):
"""Simulate a process crash by sending SIGKILL to the container.
``podman kill`` delivers SIGKILL to the container's PID 1. Systemd will
detect the exit and restart the service after RestartSec=10 seconds.
"""
postgresql_vm.ssh_run(
"podman kill --signal SIGKILL postgresql-server",
test_ssh_key,
)
def test_service_restarts_automatically(postgresql_vm, test_ssh_key):
"""postgresql-server.service must be active again after the crash.
Allow up to 60 seconds: systemd waits RestartSec=10 s before restarting,
then the container start-up and health check take additional time.
"""
# Brief pause to let systemd register the exit before we start polling.
time.sleep(5)
postgresql_vm.wait_for_service(
"postgresql-server.service", test_ssh_key, timeout=120
)
def test_data_intact_after_crash_recovery(postgresql_vm, test_ssh_key):
"""Rows written before the crash must be present after automatic recovery."""
output = run_sql(
postgresql_vm,
test_ssh_key,
f"SELECT message FROM {CRASH_WITNESS_TABLE} "
f"WHERE message = '{CRASH_WITNESS_VALUE}'",
)
assert CRASH_WITNESS_VALUE in output, (
f"Crash witness row not found after recovery. Query returned: {output!r}"
)
def test_target_still_active_after_crash(pg_host):
"""postgresql.target must remain active after the container recovery."""
assert pg_host.service("postgresql.target").is_running
# ---------------------------------------------------------------------------
# Scenario 2: hard reboot
# ---------------------------------------------------------------------------
def test_create_data_before_reboot(postgresql_vm, test_ssh_key):
"""Insert a row that must survive a full VM reboot."""
run_sql(
postgresql_vm,
test_ssh_key,
(
f"CREATE TABLE IF NOT EXISTS {REBOOT_WITNESS_TABLE} "
f"(id SERIAL PRIMARY KEY, message TEXT NOT NULL); "
f"INSERT INTO {REBOOT_WITNESS_TABLE} (message) "
f"VALUES ('{REBOOT_WITNESS_VALUE}');"
),
)
def test_reboot_vm(postgresql_vm, test_ssh_key):
"""Trigger a graceful OS reboot. SSH will temporarily drop."""
postgresql_vm.ssh_run("systemctl reboot", test_ssh_key, check=False)
# Wait for the VM to go down before polling for SSH again.
time.sleep(15)
def test_ssh_available_after_reboot(postgresql_vm, test_ssh_key):
"""SSH must become available again within 5 minutes of the reboot."""
# Reset the cached IP so wait_ssh re-probes it.
postgresql_vm._ip = None
postgresql_vm.wait_ssh(ssh_key=test_ssh_key, timeout=300)
def test_postgresql_target_active_after_reboot(postgresql_vm, test_ssh_key):
"""postgresql.target must come up automatically on reboot (enabled in ignition)."""
postgresql_vm.wait_for_service(
"postgresql.target", ssh_key=test_ssh_key, timeout=300
)
def test_data_intact_after_reboot(postgresql_vm, test_ssh_key):
"""Rows written before the reboot must still be present after boot."""
output = run_sql(
postgresql_vm,
test_ssh_key,
f"SELECT message FROM {REBOOT_WITNESS_TABLE} "
f"WHERE message = '{REBOOT_WITNESS_VALUE}'",
)
assert REBOOT_WITNESS_VALUE in output, (
f"Reboot witness row not found. Query returned: {output!r}"
)
def test_crash_witness_also_intact_after_reboot(postgresql_vm, test_ssh_key):
"""Data written before the crash must also survive the subsequent reboot."""
output = run_sql(
postgresql_vm,
test_ssh_key,
f"SELECT message FROM {CRASH_WITNESS_TABLE} "
f"WHERE message = '{CRASH_WITNESS_VALUE}'",
)
assert CRASH_WITNESS_VALUE in output

163
cookbooks/postgresql/tests/test_upgrade.py

@ -1,163 +0,0 @@
"""Test the PostgreSQL major version upgrade path: PG 14 → PG 17.
The upgrade mechanism works as follows:
1. postgresql-set-major.service updates the ``latest`` symlink to point at
the new PG_MAJOR directory (e.g. /var/lib/quadlets/postgresql/17/).
2. postgresql-upgrade.service detects that
``latest/docker/PG_VERSION`` does not exist (the 17/ directory is
empty) and triggers pgautoupgrade.
3. pg_upgrade migrates data from the old directory to the new one.
4. postgresql-server.service starts against the upgraded data.
All tests in this module share a single ``upgrade_vm`` fixture that starts
with PG_MAJOR_UPGRADE_FROM (14). Tests are intentionally ordered to form a
sequential scenario: create data → trigger upgrade → verify outcome.
"""
from pathlib import Path
from helpers import PG_MAJOR_UPGRADE_FROM, PG_MAJOR_UPGRADE_TO, run_sql
# Sentinel table and row used to verify data survives the upgrade.
WITNESS_TABLE = "upgrade_witness"
WITNESS_VALUE = "before_upgrade"
# ---------------------------------------------------------------------------
# Pre-upgrade baseline
# ---------------------------------------------------------------------------
def test_initial_version_is_upgrade_from(upgrade_vm, test_ssh_key):
"""Precondition: the VM must be running PG_MAJOR_UPGRADE_FROM."""
output = run_sql(upgrade_vm, test_ssh_key, "SHOW server_version")
assert PG_MAJOR_UPGRADE_FROM in output, (
f"Expected PG {PG_MAJOR_UPGRADE_FROM}, got: {output!r}"
)
def test_create_witness_data(upgrade_vm, test_ssh_key):
"""Insert a row that must survive the major version upgrade."""
run_sql(
upgrade_vm,
test_ssh_key,
(
f"CREATE TABLE IF NOT EXISTS {WITNESS_TABLE} "
f"(id SERIAL PRIMARY KEY, message TEXT NOT NULL); "
f"INSERT INTO {WITNESS_TABLE} (message) VALUES ('{WITNESS_VALUE}');"
),
)
output = run_sql(
upgrade_vm,
test_ssh_key,
f"SELECT message FROM {WITNESS_TABLE} WHERE message = '{WITNESS_VALUE}'",
)
assert WITNESS_VALUE in output
# ---------------------------------------------------------------------------
# Trigger the upgrade
# ---------------------------------------------------------------------------
def test_bump_pg_major_in_config(upgrade_vm, test_ssh_key):
"""Change PG_MAJOR in config.env from UPGRADE_FROM to UPGRADE_TO."""
upgrade_vm.ssh_run(
f"sed -i 's/^PG_MAJOR={PG_MAJOR_UPGRADE_FROM}$/PG_MAJOR={PG_MAJOR_UPGRADE_TO}/' "
"/etc/quadlets/postgresql/config.env",
test_ssh_key,
)
# Verify the substitution worked.
result = upgrade_vm.ssh_run(
"grep ^PG_MAJOR= /etc/quadlets/postgresql/config.env",
test_ssh_key,
)
assert f"PG_MAJOR={PG_MAJOR_UPGRADE_TO}" in result.stdout
def test_restart_postgresql_target(upgrade_vm, test_ssh_key):
"""Restart postgresql.target to kick off the upgrade chain."""
upgrade_vm.ssh_run("systemctl restart postgresql.target", test_ssh_key)
def test_upgrade_service_completes(upgrade_vm, test_ssh_key):
"""postgresql-upgrade.service must finish in ``inactive`` state (not ``failed``).
pgautoupgrade can take several minutes for large databases; allow up to
10 minutes.
"""
state = upgrade_vm.wait_for_unit_done(
"postgresql-upgrade.service", test_ssh_key, timeout=600
)
assert state == "inactive", (
f"Upgrade service ended in state {state!r}. "
"Inspect with: systemctl status postgresql-upgrade.service --no-pager "
"and: journalctl -u postgresql-upgrade.service"
)
def test_server_active_after_upgrade(upgrade_vm, test_ssh_key):
"""postgresql-server.service must be active after the upgrade."""
upgrade_vm.wait_for_service(
"postgresql-server.service", test_ssh_key, timeout=120
)
# ---------------------------------------------------------------------------
# Post-upgrade verification
# ---------------------------------------------------------------------------
def test_new_version_is_running(upgrade_vm, test_ssh_key):
"""PostgreSQL must now report PG_MAJOR_UPGRADE_TO as the server version."""
output = run_sql(upgrade_vm, test_ssh_key, "SHOW server_version")
assert PG_MAJOR_UPGRADE_TO in output, (
f"Expected PG {PG_MAJOR_UPGRADE_TO} after upgrade, got: {output!r}"
)
def test_witness_data_preserved(upgrade_vm, test_ssh_key):
"""The row inserted before the upgrade must still be present and correct."""
output = run_sql(
upgrade_vm,
test_ssh_key,
f"SELECT message FROM {WITNESS_TABLE} WHERE message = '{WITNESS_VALUE}'",
)
assert WITNESS_VALUE in output, (
f"Witness row '{WITNESS_VALUE}' not found after upgrade. "
f"Query returned: {output!r}"
)
def test_old_data_dir_removed(upgrade_vm, test_ssh_key):
"""pgautoupgrade must remove the source data directory after a clean upgrade."""
result = upgrade_vm.ssh_run(
f"test -d /var/lib/quadlets/postgresql/{PG_MAJOR_UPGRADE_FROM}/docker",
test_ssh_key,
check=False,
)
assert result.returncode != 0, (
f"Old data directory for PG {PG_MAJOR_UPGRADE_FROM} still exists — "
"upgrade may not have cleaned up properly"
)
def test_latest_symlink_points_to_new_version(upgrade_vm, test_ssh_key):
"""The ``latest`` symlink must now point at the PG_MAJOR_UPGRADE_TO directory."""
result = upgrade_vm.ssh_run(
"readlink /var/lib/quadlets/postgresql/latest",
test_ssh_key,
)
assert PG_MAJOR_UPGRADE_TO in result.stdout, (
f"latest symlink does not point at PG {PG_MAJOR_UPGRADE_TO}: "
f"{result.stdout.strip()!r}"
)
def test_new_data_dir_has_pg_version_file(upgrade_vm, test_ssh_key):
"""PG_VERSION file must exist in the new data directory (server is healthy)."""
result = upgrade_vm.ssh_run(
f"cat /var/lib/quadlets/postgresql/{PG_MAJOR_UPGRADE_TO}/docker/PG_VERSION",
test_ssh_key,
)
assert PG_MAJOR_UPGRADE_TO in result.stdout
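The module docstring above explains that postgresql-upgrade.service fires when latest/docker/PG_VERSION is missing. Purely as an illustration of that condition (the real unit implements it with systemd and shell mechanisms, not Python), a minimal sketch using the path layout described above:

from pathlib import Path

def upgrade_needed(base: Path = Path("/var/lib/quadlets/postgresql")) -> bool:
    """Hypothetical sketch: an upgrade is pending when the directory behind the
    'latest' symlink has no docker/PG_VERSION file yet (a fresh, empty major dir)."""
    return not (base / "latest" / "docker" / "PG_VERSION").exists()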

2
cookbooks/restic-server/restic-server.container

@ -8,7 +8,7 @@ RequiresMountsFor=/var/lib/virtiofs/data /var
ContainerName=restic
# Image
Image=docker.io/restic/rest-server:latest
Image=restic-server.image
AutoUpdate=registry
# Security

5
cookbooks/restic-server/restic-server.image

@ -0,0 +1,5 @@
[Unit]
Description=podman pull docker.io/restic/rest-server
[Image]
Image=docker.io/restic/rest-server:latest

2
cookbooks/seedbox/flaresolverr.container

@ -8,7 +8,7 @@ After=network-online.target
ContainerName=flaresolverr
# Image
Image=ghcr.io/flaresolverr/flaresolverr:latest
Image=flaresolverr.image
AutoUpdate=registry
# Security

6
cookbooks/seedbox/flaresolverr.image

@ -0,0 +1,6 @@
[Unit]
Description=podman pull ghcr.io/flaresolverr/flaresolverr
Documentation=https://github.com/FlareSolverr/FlareSolverr/
[Image]
Image=ghcr.io/flaresolverr/flaresolverr:latest

2
cookbooks/seedbox/jellyfin.container

@ -10,7 +10,7 @@ ConditionPathIsMountPoint=/var/lib/virtiofs/data
ContainerName=jellyfin
# Image
Image=lscr.io/linuxserver/jellyfin:latest
Image=jellyfin.image
AutoUpdate=registry
# Security

6
cookbooks/seedbox/jellyfin.image

@ -0,0 +1,6 @@
[Unit]
Description=podman pull lscr.io/linuxserver/jellyfin
Documentation=https://docs.linuxserver.io/images/docker-jellyfin/
[Image]
Image=lscr.io/linuxserver/jellyfin:latest

2
cookbooks/seedbox/lidarr.container

@ -10,7 +10,7 @@ ConditionPathIsMountPoint=/var/lib/virtiofs/data
ContainerName=lidarr
# Image
Image=lscr.io/linuxserver/lidarr:latest
Image=lidarr.image
AutoUpdate=registry
# Security

6
cookbooks/seedbox/lidarr.image

@ -0,0 +1,6 @@
[Unit]
Description=podman pull lscr.io/linuxserver/lidarr
Documentation=https://docs.linuxserver.io/images/docker-lidarr/
[Image]
Image=lscr.io/linuxserver/lidarr:latest

2
cookbooks/seedbox/prowlarr.container

@ -10,7 +10,7 @@ ConditionPathIsMountPoint=/var/lib/virtiofs/data
ContainerName=prowlarr
# Image
Image=lscr.io/linuxserver/prowlarr:latest
Image=prowlarr.image
AutoUpdate=registry
# Security

6
cookbooks/seedbox/prowlarr.image

@ -0,0 +1,6 @@
[Unit]
Description=podman pull lscr.io/linuxserver/prowlarr
Documentation=https://docs.linuxserver.io/images/docker-prowlarr/
[Image]
Image=lscr.io/linuxserver/prowlarr:latest

2
cookbooks/seedbox/qbittorrent.container

@ -9,7 +9,7 @@ ConditionPathIsMountPoint=/var/lib/virtiofs/data
ContainerName=qbittorrent
# Image
Image=lscr.io/linuxserver/qbittorrent:latest
Image=qbittorrent.image
AutoUpdate=registry
# Security

5
cookbooks/seedbox/qbittorrent.image

@ -0,0 +1,5 @@
[Unit]
Description=podman pull lscr.io/linuxserver/qbittorrent
[Image]
Image=lscr.io/linuxserver/qbittorrent:latest

2
cookbooks/seedbox/radarr.container

@ -10,7 +10,7 @@ ConditionPathIsMountPoint=/var/lib/virtiofs/data
ContainerName=radarr
# Image
Image=lscr.io/linuxserver/radarr:latest
Image=radarr.image
AutoUpdate=registry
# Security

6
cookbooks/seedbox/radarr.image

@ -0,0 +1,6 @@
[Unit]
Description=podman pull lscr.io/linuxserver/radarr
Documentation=https://docs.linuxserver.io/images/docker-radarr/
[Image]
Image=lscr.io/linuxserver/radarr:latest

2
cookbooks/seedbox/sonarr.container

@ -10,7 +10,7 @@ ConditionPathIsMountPoint=/var/lib/virtiofs/data
ContainerName=sonarr
# Image
Image=lscr.io/linuxserver/sonarr:latest
Image=sonarr.image
AutoUpdate=registry
# Security

6
cookbooks/seedbox/sonarr.image

@ -0,0 +1,6 @@
[Unit]
Description=podman pull lscr.io/linuxserver/sonarr
Documentation=https://docs.linuxserver.io/images/docker-sonarr/
[Image]
Image=lscr.io/linuxserver/sonarr:latest

2
cookbooks/traefik/traefik.container

@ -11,7 +11,7 @@ PartOf=traefik.target
ContainerName=traefik
# Image
Image=docker.io/library/traefik:v3.4
Image=traefik.image
AutoUpdate=registry
# No need for root privileges

6
cookbooks/traefik/traefik.image

@ -0,0 +1,6 @@
[Unit]
Description=podman pull docker.io/library/traefik
Documentation=https://github.com/traefik/traefik-library-image
[Image]
Image=docker.io/library/traefik:v3.4

2
cookbooks/unifi/unifi-app.container

@ -12,7 +12,7 @@ PartOf=unifi.target
[Container]
ContainerName=unifi-app
Image=lscr.io/linuxserver/unifi-network-application:latest
Image=unifi-app.image
AutoUpdate=registry
# Network configuration

6
cookbooks/unifi/unifi-app.image

@ -0,0 +1,6 @@
[Unit]
Description=podman pull lscr.io/linuxserver/unifi-network-application
Documentation=https://docs.linuxserver.io/images/docker-unifi-network-application/
[Image]
Image=lscr.io/linuxserver/unifi-network-application:latest

2
cookbooks/unifi/unifi-mongo.container

@ -11,7 +11,7 @@ PartOf=unifi.target
[Container]
ContainerName=unifi-mongo
Image=docker.io/library/mongo:${MONGO_MAJOR}
Image=unifi-mongo.image
AutoUpdate=registry
# Network configuration

13
cookbooks/unifi/unifi-mongo.image

@ -0,0 +1,13 @@
[Unit]
Description=podman pull docker.io/library/mongo
Documentation=https://hub.docker.com/_/mongo/
# Only start if Unifi has been configured
ConditionPathExists=/etc/quadlets/unifi/config.env
[Image]
Image=docker.io/library/mongo:${MONGO_MAJOR}
[Service]
# These environment variables are sourced to be used by systemd in the Exec* commands
EnvironmentFile=/etc/quadlets/unifi/config.env

2
cookbooks/vaultwarden/vaultwarden.container

@ -12,7 +12,7 @@ PartOf=vaultwarden.target
[Container]
ContainerName=vaultwarden
Image=quay.io/vaultwarden/server:latest-alpine
Image=vaultwarden.image
AutoUpdate=registry
# No need for root privileges

6
cookbooks/vaultwarden/vaultwarden.image

@ -0,0 +1,6 @@
[Unit]
Description=podman pull quay.io/vaultwarden/server
Documentation=https://github.com/dani-garcia/vaultwarden
[Image]
Image=quay.io/vaultwarden/server:latest-alpine

2
cookbooks/vmagent/vmagent.container

@ -10,7 +10,7 @@ ConditionPathExists=/etc/quadlets/vmagent/vmagent.local.env
ContainerName=vmagent
# Image
Image=quay.io/victoriametrics/vmagent:latest
Image=vmagent.image
AutoUpdate=registry
# Security

5
cookbooks/vmagent/vmagent.image

@ -0,0 +1,5 @@
[Unit]
Description=podman pull quay.io/victoriametrics/vmagent
[Image]
Image=quay.io/victoriametrics/vmagent:latest

1
pyproject.toml

@ -18,6 +18,7 @@ dependencies = [
log_cli = true
log_cli_level = "INFO"
addopts = "-v"
pythonpath = ["tests"]
[tool.setuptools]
# This repo is not a Python package — suppress automatic package discovery.

37
scripts/common.mk

@ -25,6 +25,7 @@ help:
@echo " clean-vm - Clean up the Fedora CoreOS VM but keep its storage resources"
@echo " remove-vm - Remove all resources related to the Fedora CoreOS VM"
@echo " console - Connect to the Fedora CoreOS VM console"
@echo " pytest - Run integration tests on a clean Fedora CoreOS VM"
@echo
@echo "Useful commands:"
@echo
@ -35,6 +36,7 @@ help:
@echo " 5. make butane # Build Butane specifications suitable for Fedora CoreOS"
@echo " 6. make fcos-vm console # Launch a fresh Fedora CoreOS VM (while retaining its persistent data) and connect to its console"
@echo " 7. make remove-vm # Remove all resources related to the Fedora CoreOS VM"
@echo " 8. make pytest # Run integration tests on a clean Fedora CoreOS VM"
@echo
@echo "All-in-one commands:"
@echo
@ -118,7 +120,10 @@ I_KNOW_WHAT_I_AM_DOING ?=
# List of all ignition files corresponding to the dependencies
# Here, we inject the "base" project as a dependency. It can therefore be assumed to always be embeddable in project's butane specs.
DEPENDENCIES_IGNITION_FILES := $(shell for dep in $$(if [ "$(PROJECT_NAME)" != "base" ]; then echo base; fi) $(DEPENDENCIES); do echo $(COOKBOOKS_DIR)/$$dep/$$dep.ign $(COOKBOOKS_DIR)/$$dep/$$dep-examples.ign; done)
DEPENDENCIES_IGNITION_FILES := $(shell for dep in $$(if [ "$(PROJECT_NAME)" != "base" ]; then echo base; fi) $(DEPENDENCIES); do echo $(COOKBOOKS_DIR)/$$dep/$$dep.ign; done)
# Variation of the previous variable with the built-in examples.
DEPENDENCIES_IGNITION_EXAMPLES_FILES := $(shell for dep in $$(if [ "$(PROJECT_NAME)" != "base" ]; then echo base; fi) $(DEPENDENCIES); do echo $(COOKBOOKS_DIR)/$$dep/$$dep.ign $(COOKBOOKS_DIR)/$$dep/$$dep-examples.ign; done)
# User and group IDs to own the project files and directories.
PROJECT_UID ?= 0
@ -135,7 +140,7 @@ pre-requisites::
exit 1; \
fi
@set -Eeuo pipefail; \
for tool in install systemctl systemd-analyze systemd-tmpfiles sysctl virt-install virsh qemu-img journalctl coreos-installer resize butane yq podlet pip3; do \
for tool in install systemctl systemd-analyze systemd-tmpfiles sysctl virt-install virsh qemu-img journalctl coreos-installer resize butane yq podlet pip3 ncat; do \
if ! which $$tool &>/dev/null ; then \
echo "$$tool is not installed. Please install it first." >&2; \
exit 1; \
@ -324,6 +329,10 @@ tail-logs: pre-requisites
done; \
run journalctl "$${journalctl_args[@]}"
pytest: pre-requisites
$(MAKE) butane
pytest tests/
# Build the Butane specifications, suitable for Fedora CoreOS, including those of the dependencies of this project.
$(PROJECT_NAME).bu $(PROJECT_NAME)-examples.bu &:
@if [ -z "$(TARGET_CHROOT)" ]; then \
@ -366,24 +375,30 @@ $(PROJECT_NAME).ign $(PROJECT_NAME)-examples.ign: %.ign: %.bu
butane --strict -o $@ $<
# Build the Butane specifications + Ignition files suitable for Fedora CoreOS, including those of the dependencies of this project.
butane: fcos.ign
butane: fcos-dev.ign fcos-test.ign
# Generate the local Butane spec + Ignition file (the one containing local customizations).
$(TOP_LEVEL_DIR)/local.ign: $(TOP_LEVEL_DIR)/local.bu
butane --strict -o $@ $<
.INTERMEDIATE: fcos.bu
fcos.bu: DEPS := $(if $(filter-out base,$(PROJECT_NAME)),base $(DEPENDENCIES),$(DEPENDENCIES))
fcos.bu: %.bu: Makefile $(SCRIPTS_DIR)/default-butane-spec.sh
.INTERMEDIATE: fcos-dev.bu fcos-test.bu
# Generate the Butane specs for development and testing by merging the current project's spec with those of the dependencies.
# The development spec also includes the examples of the dependencies,
# whereas the testing spec only includes their main specs.
fcos-dev.bu fcos-test.bu: DEPS := $(if $(filter-out base,$(PROJECT_NAME)),base $(DEPENDENCIES),$(DEPENDENCIES))
fcos-dev.bu: DEPS := $(DEPS) $(addsuffix -examples,$(DEPS))
fcos-dev.bu fcos-test.bu: %.bu: Makefile $(SCRIPTS_DIR)/default-butane-spec.sh
$(SCRIPTS_DIR)/default-butane-spec.sh $(PROJECT_NAME) $(DEPS) > $@
# Generate the final Fedora CoreOS ignition file by merging the Butane spec with the local and project-specific ignition files, as well as those of the dependencies.
fcos.ign: fcos.bu $(TOP_LEVEL_DIR)/local.ign $(PROJECT_NAME).ign $(PROJECT_NAME)-examples.ign $(DEPENDENCIES_IGNITION_FILES)
# Generate the final Fedora CoreOS ignition files (dev & test) by merging the Butane spec with the local and project-specific ignition files, as well as those of the dependencies.
fcos-dev.ign: $(TOP_LEVEL_DIR)/local.ign $(PROJECT_NAME).ign $(PROJECT_NAME)-examples.ign $(DEPENDENCIES_IGNITION_EXAMPLES_FILES)
fcos-test.ign: $(TOP_LEVEL_DIR)/local.ign $(PROJECT_NAME).ign $(DEPENDENCIES_IGNITION_FILES)
fcos-dev.ign fcos-test.ign: fcos-%.ign: fcos-%.bu
@run() { echo $$*; "$$@"; }; \
set -Eeuo pipefail; \
tmp=$$(mktemp -d /tmp/butane-XXXXXX); \
run cp $(filter %.ign,$^) $$tmp; \
run butane --strict -d $$tmp -o $@ fcos.bu; \
run butane --strict -d $$tmp -o $@ $<; \
run rm -rf $$tmp
# Fetch the latest version of the Fedora CoreOS QCOW2 image.
@ -399,7 +414,7 @@ fcos.ign: fcos.bu $(TOP_LEVEL_DIR)/local.ign $(PROJECT_NAME).ign $(PROJECT_NAME)
run mv "$$qcow2" $@
# Copy the ignition file.
/var/lib/libvirt/images/fcos-$(PROJECT_NAME)/fcos.ign: fcos.ign
/var/lib/libvirt/images/fcos-$(PROJECT_NAME)/fcos.ign: fcos-dev.ign
install -D -o root -g root -m 0644 $< $@
# Copy the Fedora CoreOS base image to create a new QCOW2 image for the VM.
@ -493,4 +508,4 @@ clean: clean-pre pre-requisites
.PHONY: tail-logs butane help fcos-vm clean-vm console units units-pre remove-vm
.PHONY: clean-pre clean-post install-pre install-post uninstall-pre uninstall-post
.PHONY: install-files install-files-pre install-files-post install-actions
.PHONY: install-actions-pre install-actions-post
.PHONY: install-actions-pre install-actions-post pytest

1
scripts/default-butane-spec.sh

@ -11,6 +11,5 @@ ignition:
EOF
for dep in "$@"; do
echo " - local: ${dep}.ign"
echo " - local: ${dep}-examples.ign"
done
echo " - local: local.ign"

0
tests/__init__.py

349
tests/fcos_vm.py

@ -0,0 +1,349 @@
"""
Fedora CoreOS VM lifecycle helpers for end-to-end testing.
Requires running as root (virt-install, virsh, qemu-img need root privileges).
Typical usage:
vm = FCOSVirtualMachine(
cookbook_name="postgresql",
instance_name="abc123",
ignition=FCOSIgnition(ssh_key=public_key),
)
vm.create()
vm.wait_ssh(ssh_key=key_path)
# ... run tests ...
vm.destroy()
"""
import re
import shutil
import subprocess
import tempfile
import textwrap
import time
from pathlib import Path
import os
LIBVIRT_IMAGES_DIR = Path("/var/lib/libvirt/images")
FCOS_BASE_IMAGE = LIBVIRT_IMAGES_DIR / "library" / "fedora-coreos.qcow2"
# Butane spec version — must match the project convention.
BUTANE_VERSION = "1.4.0"
def ensure_fcos_ign(cookbook_dir: Path) -> Path:
"""Return the path to fcos-test.ign, building it via ``make butane`` if absent."""
fcos_ign = cookbook_dir / "fcos-test.ign"
if not fcos_ign.exists():
subprocess.run(
["make", "-C", str(cookbook_dir), "butane"],
check=True,
)
return fcos_ign
class FCOSIgnition:
"""
Builds a Fedora CoreOS Ignition file, by merging multiple ignition files
and optionally injecting extra files.
All public methods are synchronous and raise on failure. The caller is
responsible for calling ``destroy()`` (typically from a pytest fixture
teardown).
"""
def __init__(self, ignition_files: list[Path] | None = None, ssh_key: str | None = None, extra_files: dict[str, tuple[str, str | int, str | int, int]] | None = None) -> None:
"""
Args:
ignition_files: List of paths to the compiled Ignition (.ign) files.
ssh_key: Optional SSH key to inject into the Ignition.
extra_files: Optional dictionary of extra files to inject into the Ignition.
"""
self.ignition_files = ignition_files or list()
self.extra_files = extra_files or dict()
self.ssh_key = ssh_key
def _build_extra_files_butane(self) -> str | None:
"""Build the butane file content for the extra files specified in self.extra_files."""
if not self.extra_files:
return None
files = []
for path, (content, owner, group, mode) in self.extra_files.items():
file_desc = (
f" - path: {path}\n"
f" mode: {mode}\n"
f" overwrite: true\n"
f" user:\n"
+ (f" id: {owner}\n" if isinstance(owner, int) else f" name: {owner}\n") +
f" group:\n"
+ (f" id: {group}\n" if isinstance(group, int) else f" name: {group}\n") +
f' contents:\n'
f' inline: |\n'
)
# Prefix all lines of content with 10 spaces (2 for indentation + 8 for the literal block)
indented_content = textwrap.indent(content + "\n", " " * 10)
file_desc += indented_content + "\n"
files.append(file_desc)
header = textwrap.dedent(f"""\
variant: fcos
version: {BUTANE_VERSION}
storage:
files:
""")
joined = "\n".join(files)
return f"{header}{joined}\n"
def _build_ssh_key_butane(self) -> str | None:
"""Build the butane file content that injects the public SSH key (self.ssh_key) into root's authorized_keys."""
if not self.ssh_key:
return None
content = textwrap.dedent(f"""\
variant: fcos
version: {BUTANE_VERSION}
passwd:
users:
- name: root
ssh_authorized_keys:
- {self.ssh_key}
""")
return content
def build(self, output: Path) -> Path:
"""Build the final Ignition file by merging the base files and the extra files."""
try:
_tmpdir = tempfile.TemporaryDirectory(delete=False)
d = Path(_tmpdir.name)
extra_files_butane = self._build_extra_files_butane()
ssh_key_butane = self._build_ssh_key_butane()
test_bu = textwrap.dedent(f"""\
variant: fcos
version: {BUTANE_VERSION}
systemd:
units:
# Disable & mask zincati to avoid reboots during testing.
- name: zincati.service
enabled: false
mask: true
ignition:
config:
merge:
""")
for ign in self.ignition_files:
test_bu += f" - local: {ign.name}\n"
shutil.copy(ign, d / ign.name)
if extra_files_butane:
extra_files_bu = d / "test_extra_files.bu"
extra_files_bu.write_text(extra_files_butane)
extra_files_path = d / "test_extra_files.ign"
subprocess.run(
["butane", "--strict", "-o", str(extra_files_path), str(extra_files_bu)],
check=True,
capture_output=True,
)
test_bu += f" - local: {extra_files_path.name}\n"
if ssh_key_butane:
ssh_key_bu = d / "test_ssh_key.bu"
ssh_key_bu.write_text(ssh_key_butane)
ssh_key_path = d / "test_ssh_key.ign"
subprocess.run(
["butane", "--strict", "-o", str(ssh_key_path), str(ssh_key_bu)],
check=True,
capture_output=True,
)
test_bu += f" - local: {ssh_key_path.name}\n"
test_bu_path = d / "test.bu"
test_bu_path.write_text(test_bu)
subprocess.run(
[
"butane",
"--strict",
"-d", str(d),
"-o", str(output),
str(test_bu_path),
],
check=True,
capture_output=True,
)
except subprocess.CalledProcessError as e:
print(f"Error occurred while running butane: {e.stderr.decode()}")
# Keep the temporary directory for debugging
print(f"Temporary directory retained at: {_tmpdir.name}")
raise e
else:
# Clean up the temporary directory if it still exists
if Path(_tmpdir.name).exists():
shutil.rmtree(_tmpdir.name)
return output
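As a usage note for the class above: each extra_files entry maps a target path to a (content, owner, group, mode) tuple, in that order, matching the unpacking in _build_extra_files_butane. A hedged example (the cookbook path and config values are invented for illustration):

from pathlib import Path

ignition = FCOSIgnition(
    ignition_files=[Path("cookbooks/postgresql/fcos-test.ign")],
    ssh_key="ssh-ed25519 AAAA... test@runner",
    extra_files={
        "/etc/quadlets/postgresql/config.env": (
            "PG_MAJOR=17\n",  # content
            "root",           # owner (name or numeric uid)
            "root",           # group (name or numeric gid)
            0o600,            # mode
        ),
    },
)
ignition.build(Path("/tmp/fcos-test-merged.ign"))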
class FCOSVirtualMachine:
"""Manages a Fedora CoreOS KVM virtual machine for end-to-end testing.
All public methods are synchronous and raise on failure. The caller is
responsible for calling ``destroy()`` (typically from a pytest fixture
teardown).
"""
def __init__(self, cookbook_name: str, instance_name: str, keep: bool = False, ignition: FCOSIgnition | None = None, virtiofs_dirs: list[tuple[Path, str]] | None = None, vm_config: tuple[int, int, int, int] = (4096, 2, 50, 100)) -> None:
"""
Args:
cookbook_name: Short identifier appended to "fcos-test-" to form the
libvirt domain name. Keep it unique across parallel tests.
instance_name: Short identifier appended to the domain name to allow multiple VMs for the same cookbook.
keep: If True, the VM and its associated resources will not be automatically destroyed on teardown. Useful for debugging.
ignition: FCOSIgnition instance to build the Ignition (.ign) file.
virtiofs_dirs: List of host directories and virtiofs target directories that will be exposed inside the VM.
vm_config: Tuple containing VM configuration (memory in MB, vCPUs, root disk size in GB, /var disk size in GB).
"""
if keep:
self.vm_name = f"fcos-test-{cookbook_name}-{instance_name}-dev"
else:
self.vm_name = f"fcos-test-{cookbook_name}-{instance_name}-{os.getpid()}"
self.ignition = ignition or FCOSIgnition()
self.virtiofs_dirs = virtiofs_dirs or []
self.vm_config = vm_config
self._images_dir = LIBVIRT_IMAGES_DIR / self.vm_name
self._ip: str | None = None
# ------------------------------------------------------------------
# Lifecycle
# ------------------------------------------------------------------
def exists(self) -> bool:
"""Return True if a libvirt domain with this VM's name already exists."""
result = subprocess.run(
["virsh", "domstate", self.vm_name],
capture_output=True,
)
return result.returncode == 0
def create(self) -> None:
"""Create disk images and start the VM via virt-install."""
self._images_dir.mkdir(parents=True, exist_ok=True)
for host_dir, target_dir in self.virtiofs_dirs:
Path(host_dir).mkdir(parents=True, exist_ok=True)
ign_dest = self._images_dir / "fcos.ign"
self.ignition.build(ign_dest)
ign_dest.chmod(0o644)
(ram, vcpus, root_disk_size, var_disk_size) = self.vm_config
# Root OS disk: copy the base image, then resize it.
root_qcow2 = self._images_dir / "root.qcow2"
shutil.copy(FCOS_BASE_IMAGE, root_qcow2)
subprocess.run(
["qemu-img", "resize", "-f", "qcow2", str(root_qcow2), f"{root_disk_size}G"],
check=True,
)
# Secondary disk for /var (keeps OS and data separate, matches common.mk).
var_qcow2 = self._images_dir / "var.qcow2"
subprocess.run(
["qemu-img", "create", "-f", "qcow2", str(var_qcow2), f"{var_disk_size}G"],
check=True,
)
virtiofs_options = []
for i, (host_dir, target_dir) in enumerate(self.virtiofs_dirs):
virtiofs_options += [
f"--filesystem=type=mount,accessmode=passthrough,"
f"driver.type=virtiofs,driver.queue=1024,"
f"source.dir={host_dir},target.dir={target_dir}"
]
subprocess.run(
[
"virt-install",
f"--name={self.vm_name}",
"--import",
"--noautoconsole",
f"--ram={ram}",
f"--vcpus={vcpus}",
"--os-variant=fedora-coreos-stable",
f"--disk=path={root_qcow2},format=qcow2",
f"--disk=path={var_qcow2},format=qcow2",
f"--qemu-commandline=-fw_cfg name=opt/com.coreos/config,file={ign_dest}",
"--network=network=default,model=virtio",
"--console=pty,target.type=virtio",
"--serial=pty",
"--graphics=none",
"--boot=uefi",
"--memorybacking=access.mode=shared,source.type=memfd",
] + virtiofs_options,
check=True,
)
def destroy(self) -> None:
"""Forcefully stop and delete the VM and all associated disk images."""
subprocess.run(["virsh", "destroy", self.vm_name], capture_output=True)
subprocess.run(
["virsh", "undefine", self.vm_name, "--nvram"],
capture_output=True,
)
if self._images_dir.exists():
shutil.rmtree(self._images_dir)
# ------------------------------------------------------------------
# Readiness polling
# ------------------------------------------------------------------
def get_ip(self) -> str | None:
"""Return the VM's primary IPv4 address reported by virsh, or None."""
result = subprocess.run(
["virsh", "domifaddr", self.vm_name],
capture_output=True,
text=True,
)
if result.returncode != 0:
return None
match = re.search(r"(\d+\.\d+\.\d+\.\d+)", result.stdout)
return match.group(1) if match else None
@property
def ip(self) -> str:
if self._ip is None:
self._ip = self.get_ip()
if self._ip is None:
raise RuntimeError(f"VM {self.vm_name!r} has no IP address yet")
return self._ip
def wait_ssh(self, ssh_key: Path, timeout: int = 300) -> str:
"""Block until SSH is reachable. Returns the IP address.
Polls every 5 seconds until ``timeout`` seconds have elapsed.
"""
deadline = time.monotonic() + timeout
while time.monotonic() < deadline:
ip = self.get_ip()
if ip:
try:
result = subprocess.run(
[
"ssh",
"-i", str(ssh_key),
"-o", "StrictHostKeyChecking=no",
"-o", "UserKnownHostsFile=/dev/null",
"-o", "ConnectTimeout=5",
"-o", "BatchMode=yes",
f"root@{ip}",
"true",
],
capture_output=True,
timeout=10,
)
if result.returncode == 0:
self._ip = ip
return ip
except subprocess.TimeoutExpired:
pass
time.sleep(5)
raise TimeoutError(
f"VM {self.vm_name!r} did not become SSH-ready within {timeout}s"
)
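These classes are meant to be consumed from pytest fixtures. A minimal sketch of how fcos_vm and fcos_host (the testinfra host object used by TestQuadlet below) could be wired together; the fixture names fcos_vm and fcos_host mirror the ones referenced in tests/test_quadlet.py, while the cookbook name, the test_ssh_key/test_ssh_pubkey fixtures, and the scope are assumptions, and pytest-testinfra is assumed to be installed:

from pathlib import Path

import pytest
import testinfra  # assumption: provided by pytest-testinfra

from fcos_vm import FCOSIgnition, FCOSVirtualMachine, ensure_fcos_ign


@pytest.fixture(scope="module")
def fcos_vm(test_ssh_key, test_ssh_pubkey):
    ignition = FCOSIgnition(
        ignition_files=[ensure_fcos_ign(Path("cookbooks/nginx"))],  # example cookbook
        ssh_key=test_ssh_pubkey,
    )
    vm = FCOSVirtualMachine("nginx", "default", ignition=ignition)
    vm.create()
    vm.wait_ssh(ssh_key=test_ssh_key)
    yield vm
    vm.destroy()


@pytest.fixture(scope="module")
def fcos_host(fcos_vm, test_ssh_key):
    # Commands run over SSH as root on the VM; host-key checking may need to be
    # relaxed separately (e.g. via the runner's ssh config).
    return testinfra.get_host(
        f"ssh://root@{fcos_vm.ip}",
        ssh_identity_file=str(test_ssh_key),
    )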

291
tests/test_quadlet.py

@ -0,0 +1,291 @@
import socket
import json
import time
class TestQuadlet:
"""
Run common tests for Quadlet cookbooks.
All public methods are synchronous and raise on failure.
"""
expected_services : list[dict[str, str | bool]] = [
# Example:
# { "name": "postgresql.service", "state": "active", "masked": False, "enabled": True, "exists": True },
]
"""
Expected state of systemd services. Each dict must contain a "name" field with the service name, and may optionally contain:
- "state": one of "active", "inactive", "failed" (optional)
- "masked": boolean (optional)
- "enabled": boolean (optional)
- "exists": boolean (optional)
Optional fields are not checked if missing.
If "exists" is False, no other fields are checked.
"""
expected_sockets : list[dict[str, str]] = [
# Example:
# { "uri": "tcp://127.0.0.1:5432", "state": "listening" },
]
"""
Expected state of sockets. Each dict must contain a "uri" field with the socket URI, and a "state" field with one of "listening" or "closed".
"""
# all fields are mandatory
expected_ports : list[dict[str, str | int]] = [
# Example:
# { "number": 5432, "protocol": "tcp", "state": "closed" },
# { "number": 22, "protocol": "tcp", "state": "open" },
]
"""
Expected state of TCP ports as seen from the machine running pytest. Each dict must contain:
- "number": port number
- "protocol": currently only "tcp" is supported
- "state": one of "open" (accepting connections) or "closed"
"""
expected_files : list[dict[str, str | int]] = [
# Example:
# { "path": "/var/lib/quadlets/postgresql", "type": "directory", "owner": "postgresql", "group": "itix-svc", "mode": 0o755 },
]
"""
Expected files on the VM. Each dict must contain:
- "path": full path to the file
- "type": "directory", "file" or "none" (if the file is expected to not exist)
Optional fields:
- "owner": expected owner username
- "group": expected group name
- "mode": expected file mode as an integer (e.g. 0o755)
If an optional field is missing, it is not checked.
"""
expected_podman_images : list[dict[str, str]] = [
# Example:
# { "name": "docker.io/library/postgres", "tag": "15", "state": "present" },
]
"""
Expected Podman images. Each dict must contain:
- "name": image name (e.g. "docker.io/library/postgres")
- "tag": image tag (e.g. "15")
- "state": one of "present" or "absent"
"""
expected_podman_containers : list[dict[str, str | dict[str, str]]] = [
# Example:
# { "name": "postgresql-server", "state": "present", "pid1": { "owner": "10004", "group": "10000", "commandline": "postgres -h 127.0.0.1" } },
]
"""
Expected Podman containers. Each dict must contain:
- "name": container name
- "state": one of "present" or "absent"
Optional field:
- "pid1": dict with expected properties of the container's main process (PID 1). May contain:
- "owner": expected uid (numeric) of the process as seen from outside the container (i.e. on the host)
- "group": expected gid (numeric) of the process as seen from outside the container (i.e. on the host)
- "commandline": expected command line of the process
"""
expected_main_service : str | None = None
"""
If not None, the name of the main service to wait for before running any tests.
"""
expected_main_service_timeout : int = 120
"""
If expected_main_service is set, the number of seconds to wait for it to become active before giving up and failing the tests.
"""
def test_wait_for_main_service(self, fcos_host):
"""Wait for the expected main service to become active before running any other tests."""
if self.expected_main_service is None:
return
self.wait_for_service(fcos_host, self.expected_main_service, self.expected_main_service_timeout)
def wait_for_service(self, fcos_host, service: str, timeout: int = 120) -> None:
"""Block until *service* reaches the ``active`` state."""
deadline = time.monotonic() + timeout
while time.monotonic() < deadline:
result = fcos_host.run(
f"systemctl is-active {service}", check=False
)
if result.stdout.strip() == "active":
return
time.sleep(5)
status = fcos_host.run(
f"systemctl status {service} --no-pager", check=False
)
raise TimeoutError(
f"Service {service!r} not active after {timeout}s:\n{status.stdout}"
)
def wait_for_unit_done(self, fcos_host, unit: str, timeout: int = 120) -> str:
"""
Block until a oneshot service finishes (``inactive`` or ``failed``).
Returns:
The final state string: ``"inactive"`` on success, ``"failed"``
on failure.
"""
deadline = time.monotonic() + timeout
while time.monotonic() < deadline:
result = fcos_host.run(
f"systemctl is-active {unit}", check=False
)
state = result.stdout.strip()
if state in ("inactive", "failed"):
return state
time.sleep(5)
raise TimeoutError(
f"Unit {unit!r} did not finish within {timeout}s"
)
def test_expected_services(self, fcos_host):
"""The expected systemd services must be present and in the expected state."""
self.check_expected_services(fcos_host, self.expected_services)
def check_expected_services(self, fcos_host, expected_services: list[dict[str, str | bool]]) -> None:
"""The expected systemd services must be present and in the expected state."""
for svc in expected_services:
service = fcos_host.service(svc["name"])
if "exists" in svc:
if svc["exists"]:
assert service.exists, f"Service {svc['name']} does not exist"
else:
assert not service.exists, f"Service {svc['name']} exists but should not"
continue # if the service shouldn't exist, no need to check other properties
if "masked" in svc:
if svc["masked"]:
assert service.is_masked, f"Service {svc['name']} is not masked"
else:
assert not service.is_masked, f"Service {svc['name']} is masked but should not be"
if "enabled" in svc:
if svc["enabled"]:
assert service.is_enabled, f"Service {svc['name']} is not enabled"
else:
assert not service.is_enabled, f"Service {svc['name']} is enabled but should not be"
if "state" in svc:
if svc["state"] == "active":
assert service.is_running, f"Service {svc['name']} is not running"
elif svc["state"] == "inactive":
assert not service.is_running, f"Service {svc['name']} is running but expected to be inactive"
elif svc["state"] == "failed":
result = fcos_host.run(f"systemctl is-failed {svc['name']}")
assert result.rc == 0, f"Service {svc['name']} is not in failed state"
else:
raise ValueError(f"Invalid state for service {svc['name']}: {svc['state']}")
def test_expected_sockets(self, fcos_host):
"""The expected sockets must be present and in the expected state."""
self.check_expected_sockets(fcos_host, self.expected_sockets)
def check_expected_sockets(self, fcos_host, expected_sockets: list[dict[str, str]]) -> None:
"""The expected sockets must be present and in the expected state."""
for sock in expected_sockets:
host_socket = fcos_host.socket(sock["uri"])
if sock["state"] == "listening":
assert host_socket.is_listening, f"Socket {sock['uri']} is not listening"
elif sock["state"] == "closed":
assert not host_socket.is_listening, f"Socket {sock['uri']} is listening but expected to be closed"
else:
raise ValueError(f"Invalid state for socket {sock['uri']}: {sock['state']}")
def test_expected_ports(self, fcos_vm):
"""The expected TCP ports must be in the expected state."""
self.check_expected_ports(fcos_vm, self.expected_ports)
def check_expected_ports(self, fcos_vm, expected_ports: list[dict[str, str | int]]) -> None:
"""The expected TCP ports must be in the expected state."""
for port in expected_ports:
assert port["protocol"] == "tcp", f"Unsupported protocol {port['protocol']} for port {port['number']}"
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.settimeout(3)
connect_result = s.connect_ex((fcos_vm.ip, port["number"]))
if port["state"] == "open":
assert connect_result == 0, f"Port {port['number']} is NOT reachable from the host on {fcos_vm.ip}!"
elif port["state"] == "closed":
assert connect_result != 0, f"Port {port['number']} is reachable from the host on {fcos_vm.ip} but expected to be closed"
else:
raise ValueError(f"Invalid state for port {port['number']}/{port['protocol']}: {port['state']}")
s.close()
def test_expected_files(self, fcos_host):
"""The expected files must be in the expected state."""
self.check_expected_files(fcos_host, self.expected_files)
def check_expected_files(self, fcos_host, expected_files: list[dict[str, str | int]]) -> None:
"""The expected files must be in the expected state."""
for f in expected_files:
file = fcos_host.file(f["path"])
if f["type"] == "directory":
assert file.is_directory, f"Expected {f['path']} to be a directory"
elif f["type"] == "file":
assert file.is_file, f"Expected {f['path']} to be a regular file"
elif f["type"] == "none":
assert not file.exists, f"Expected {f['path']} to not exist"
continue # if the file shouldn't exist, no need to check other properties
else:
raise ValueError(f"Invalid type for expected file {f['path']}: {f['type']}")
if "owner" in f:
assert file.user == f["owner"], f"Expected {f['path']} to be owned by {f['owner']}, but got {file.user}"
if "group" in f:
assert file.group == f["group"], f"Expected {f['path']} to belong to group {f['group']}, but got {file.group}"
if "mode" in f:
assert file.mode == f["mode"], f"Expected {f['path']} to have mode {oct(f['mode'])}, but got {oct(file.mode)}"
def test_expected_podman_images(self, fcos_host):
"""The expected Podman images must be in the expected state."""
self.check_expected_podman_images(fcos_host, self.expected_podman_images)
def check_expected_podman_images(self, fcos_host, expected_podman_images: list[dict[str, str]]) -> None:
"""The expected Podman images must be in the expected state."""
for img in expected_podman_images:
result = fcos_host.run(f"podman image exists {img['name']}:{img['tag']}")
if img["state"] == "present":
assert result.rc == 0, f"Podman image {img['name']}:{img['tag']} does not exist"
elif img["state"] == "absent":
assert result.rc != 0, f"Podman image {img['name']}:{img['tag']} is present but expected to be absent"
else:
raise ValueError(f"Invalid state for Podman image {img['name']}:{img['tag']}: {img['state']}")
def test_expected_podman_containers(self, fcos_host):
"""The expected Podman containers must be in the expected state."""
self.check_expected_podman_containers(fcos_host, self.expected_podman_containers)
def check_expected_podman_containers(self, fcos_host, expected_podman_containers: list[dict[str, str | dict[str, str]]]) -> None:
"""The expected Podman containers must be in the expected state."""
for container in expected_podman_containers:
result = fcos_host.run(f"podman container inspect {container['name']}")
if container["state"] == "present":
assert result.rc == 0, f"Podman container {container['name']} does not exist"
elif container["state"] == "absent":
assert result.rc != 0, f"Podman container {container['name']} is present but expected to be absent"
else:
raise ValueError(f"Invalid state for Podman container {container['name']}: {container['state']}")
if result.rc == 0 and "pid1" in container:
try:
result_json = json.loads(result.stdout)[0]
except json.JSONDecodeError as e:
raise AssertionError(f"Failed to parse JSON output from podman inspect for container {container['name']}: {e}\nOutput was: {result.stdout}")
pid = result_json["State"]["Pid"]
result = fcos_host.run(f"ps axn -o pid,user,group,state,command -q {pid} --no-header")
if result.rc != 0:
raise AssertionError(f"Failed to inspect PID 1 of container {container['name']} with ps: rc = {result.rc}")
pid1_info = result.stdout.strip().split(None, 4)
if len(pid1_info) < 5:
raise AssertionError(f"Unexpected output from ps for PID 1 of container {container['name']}: {result.stdout}")
pid1_pid = pid1_info[0]
pid1_user = pid1_info[1]
pid1_group = pid1_info[2]
pid1_commandline = pid1_info[4]
assert int(pid1_pid) == pid, f"Expected PID {pid} for container {container['name']} main process, but got {pid1_pid}"
if "owner" in container["pid1"]:
assert pid1_user == container["pid1"]["owner"], f"Expected PID 1 of container {container['name']} to be owned by {container['pid1']['owner']}, but got {pid1_user}"
if "group" in container["pid1"]:
assert pid1_group == container["pid1"]["group"], f"Expected PID 1 of container {container['name']} to belong to group {container['pid1']['group']}, but got {pid1_group}"
if "commandline" in container["pid1"]:
assert pid1_commandline == container["pid1"]["commandline"], f"Expected PID 1 of container {container['name']} to have command line {container['pid1']['commandline']}, but got {pid1_commandline}"
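To show how the expected_* attributes fit together, a hedged sketch of a cookbook-level test module subclassing TestQuadlet (importable as test_quadlet thanks to the pythonpath = ["tests"] addition in pyproject.toml); the service, port, and container names are invented for illustration and do not describe any cookbook in this change:

from test_quadlet import TestQuadlet


class TestExampleCookbook(TestQuadlet):
    # Wait for this unit before running any other checks.
    expected_main_service = "example-app.service"
    expected_services = [
        {"name": "example-app.service", "state": "active", "enabled": True},
        {"name": "zincati.service", "masked": True},  # masked by the test ignition
    ]
    expected_ports = [
        {"number": 8080, "protocol": "tcp", "state": "open"},
    ]
    expected_podman_containers = [
        {"name": "example-app", "state": "present"},
    ]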

384
tests/vm.py

@ -1,384 +0,0 @@
"""Fedora CoreOS VM lifecycle helpers for end-to-end testing.
Requires running as root (virt-install, virsh, qemu-img need root privileges).
Typical usage:
vm = FCOSVirtualMachine(
name="postgresql-abc123",
ignition_file=Path("/tmp/fcos-test.ign"),
virtiofs_dir=Path("/srv/fcos-test-postgresql-abc123"),
)
vm.create()
vm.wait_ssh(ssh_key=key_path)
vm.wait_for_service("postgresql.target", ssh_key=key_path)
# ... run tests ...
vm.destroy()
"""
import base64
import re
import shutil
import subprocess
import tempfile
import textwrap
import time
from pathlib import Path
LIBVIRT_IMAGES_DIR = Path("/var/lib/libvirt/images")
FCOS_BASE_IMAGE = LIBVIRT_IMAGES_DIR / "library" / "fedora-coreos.qcow2"
# Butane spec version — must match the project convention.
BUTANE_VERSION = "1.4.0"
def ensure_fcos_ign(cookbook_dir: Path) -> Path:
"""Return the path to fcos.ign, building it via ``make butane`` if absent."""
fcos_ign = cookbook_dir / "fcos.ign"
if not fcos_ign.exists():
subprocess.run(
["make", "-C", str(cookbook_dir), "butane"],
check=True,
)
return fcos_ign
def build_test_ignition(
base_ignition: Path,
ssh_pubkey: str,
output: Path,
config_env_overrides: dict[str, str] | None = None,
extra_files: dict[str, tuple[str, int]] | None = None,
) -> Path:
"""Build a test ignition file by overlaying the cookbook's fcos.ign.
The overlay:
- Merges the base cookbook ignition (fcos.ign).
- Adds the test SSH public key to the root user so the test runner can
SSH in (FCOS allows root login with keys via PermitRootLogin
prohibit-password).
- Optionally patches /etc/quadlets/postgresql/config.env via
``config_env_overrides`` (merged on top of whatever the base ignition
already sets).
- Optionally injects arbitrary extra files via ``extra_files``:
``{"/path/on/vm": ("file content", 0o644)}``.
Args:
base_ignition: Path to the pre-built fcos.ign for the cookbook.
ssh_pubkey: Ed25519 public key string to inject for root.
output: Destination path for the compiled test ignition.
config_env_overrides: Key/value pairs to override in config.env.
The full config.env is re-written with these values merged on
top of the defaults from the base ignition.
extra_files: Additional files to inject into the VM image.
Returns:
``output`` path.
"""
with tempfile.TemporaryDirectory() as _tmpdir:
d = Path(_tmpdir)
# butane resolves "local:" references relative to the directory passed
# via -d; copy the base ignition there.
shutil.copy(base_ignition, d / "base.ign")
# Build the storage.files section of the overlay.
storage_section = _build_storage_section(config_env_overrides, extra_files)
overlay_bu = textwrap.dedent(f"""\
variant: fcos
version: {BUTANE_VERSION}
ignition:
config:
merge:
- local: base.ign
passwd:
users:
- name: root
ssh_authorized_keys:
- {ssh_pubkey}
systemd:
units:
# Disable & mask zincati to avoid reboots during testing.
- name: zincati.service
enabled: false
mask: true
""")
if storage_section:
overlay_bu += storage_section
overlay_bu_path = d / "test-overlay.bu"
overlay_bu_path.write_text(overlay_bu)
subprocess.run(
[
"butane",
"--strict",
"-d", str(d),
"-o", str(output),
str(overlay_bu_path),
],
check=True,
)
return output
def _build_storage_section(
config_env_overrides: dict[str, str] | None,
extra_files: dict[str, tuple[str, int]] | None,
) -> str:
"""Return a Butane ``storage:`` YAML block (or empty string if nothing to inject)."""
files = []
if config_env_overrides:
content = "\n".join(f"{k}={v}" for k, v in config_env_overrides.items()) + "\n"
files.append(
_butane_file("/etc/quadlets/postgresql/config.env", content, 0o600)
)
if extra_files:
for path, (content, mode) in extra_files.items():
files.append(_butane_file(path, content, mode))
if not files:
return ""
joined = "\n".join(files)
return f"storage:\n files:\n{joined}\n"
def _butane_file(path: str, content: str, mode: int) -> str:
"""Return a Butane file entry using a base64 data URI (avoids YAML quoting)."""
b64 = base64.b64encode(content.encode()).decode()
return (
f" - path: {path}\n"
f" mode: {mode}\n"
f" contents:\n"
f' source: "data:text/plain;base64,{b64}"\n'
)
class FCOSVirtualMachine:
"""Manages a Fedora CoreOS KVM virtual machine for end-to-end testing.
All public methods are synchronous and raise on failure. The caller is
responsible for calling ``destroy()`` (typically from a pytest fixture
teardown).
"""
def __init__(self, name: str, ignition_file: Path, virtiofs_dir: Path) -> None:
"""
Args:
name: Short identifier appended to "fcos-test-" to form the
libvirt domain name. Keep it unique across parallel tests.
ignition_file: Path to the compiled Ignition (.ign) file.
virtiofs_dir: Host directory that will be exposed inside the VM
at /var/lib/virtiofs/data via VirtioFS.
"""
self.name = name
self.vm_name = f"fcos-test-{name}"
self.ignition_file = Path(ignition_file)
self.virtiofs_dir = Path(virtiofs_dir)
self._images_dir = LIBVIRT_IMAGES_DIR / self.vm_name
self._ip: str | None = None
# ------------------------------------------------------------------
# Lifecycle
# ------------------------------------------------------------------
def create(self) -> None:
"""Create disk images and start the VM via virt-install."""
self._images_dir.mkdir(parents=True, exist_ok=True)
self.virtiofs_dir.mkdir(parents=True, exist_ok=True)
ign_dest = self._images_dir / "fcos.ign"
shutil.copy(self.ignition_file, ign_dest)
ign_dest.chmod(0o644)
# Root OS disk: copy from the shared base QCOW2 image.
root_qcow2 = self._images_dir / "root.qcow2"
shutil.copy(FCOS_BASE_IMAGE, root_qcow2)
# Secondary disk for /var (keeps OS and data separate, matches common.mk).
var_qcow2 = self._images_dir / "var.qcow2"
subprocess.run(
["qemu-img", "create", "-f", "qcow2", str(var_qcow2), "100G"],
check=True,
)
subprocess.run(
[
"virt-install",
f"--name={self.vm_name}",
"--import",
"--noautoconsole",
"--ram=4096",
"--vcpus=2",
"--os-variant=fedora-coreos-stable",
f"--disk=path={root_qcow2},format=qcow2,size=50",
f"--disk=path={var_qcow2},format=qcow2",
f"--qemu-commandline=-fw_cfg name=opt/com.coreos/config,file={ign_dest}",
"--network=network=default,model=virtio",
"--console=pty,target.type=virtio",
"--serial=pty",
"--graphics=none",
"--boot=uefi",
"--memorybacking=access.mode=shared,source.type=memfd",
(
f"--filesystem=type=mount,accessmode=passthrough,"
f"driver.type=virtiofs,driver.queue=1024,"
f"source.dir={self.virtiofs_dir},target.dir=data"
),
],
check=True,
)
def destroy(self) -> None:
"""Forcefully stop and delete the VM and all associated disk images."""
subprocess.run(["virsh", "destroy", self.vm_name], capture_output=True)
subprocess.run(
["virsh", "undefine", self.vm_name, "--nvram"],
capture_output=True,
)
if self._images_dir.exists():
shutil.rmtree(self._images_dir)
if self.virtiofs_dir.exists():
shutil.rmtree(self.virtiofs_dir)
# ------------------------------------------------------------------
# Readiness polling
# ------------------------------------------------------------------
def get_ip(self) -> str | None:
"""Return the VM's primary IPv4 address reported by virsh, or None."""
result = subprocess.run(
["virsh", "domifaddr", self.vm_name],
capture_output=True,
text=True,
)
if result.returncode != 0:
return None
match = re.search(r"(\d+\.\d+\.\d+\.\d+)", result.stdout)
return match.group(1) if match else None
@property
def ip(self) -> str:
if self._ip is None:
self._ip = self.get_ip()
if self._ip is None:
raise RuntimeError(f"VM {self.vm_name!r} has no IP address yet")
return self._ip
def wait_ssh(self, ssh_key: Path, timeout: int = 300) -> str:
"""Block until SSH is reachable. Returns the IP address.
Polls every 5 seconds until ``timeout`` seconds have elapsed.
"""
deadline = time.monotonic() + timeout
while time.monotonic() < deadline:
ip = self.get_ip()
if ip:
try:
result = subprocess.run(
[
"ssh",
"-i", str(ssh_key),
"-o", "StrictHostKeyChecking=no",
"-o", "UserKnownHostsFile=/dev/null",
"-o", "ConnectTimeout=5",
"-o", "BatchMode=yes",
f"root@{ip}",
"true",
],
capture_output=True,
timeout=10,
)
if result.returncode == 0:
self._ip = ip
return ip
except subprocess.TimeoutExpired:
pass
time.sleep(5)
raise TimeoutError(
f"VM {self.vm_name!r} did not become SSH-ready within {timeout}s"
)
def wait_for_service(
self, service: str, ssh_key: Path, timeout: int = 120
) -> None:
"""Block until *service* reaches the ``active`` state."""
deadline = time.monotonic() + timeout
while time.monotonic() < deadline:
result = self.ssh_run(
f"systemctl is-active {service}", ssh_key, check=False
)
if result.stdout.strip() == "active":
return
time.sleep(5)
status = self.ssh_run(
f"systemctl status {service} --no-pager", ssh_key, check=False
)
raise TimeoutError(
f"Service {service!r} not active after {timeout}s:\n{status.stdout}"
)
def wait_for_unit_done(
self, service: str, ssh_key: Path, timeout: int = 120
) -> str:
"""Block until a oneshot service finishes (``inactive`` or ``failed``).
Returns:
The final state string: ``"inactive"`` on success, ``"failed"``
on failure.
"""
deadline = time.monotonic() + timeout
while time.monotonic() < deadline:
result = self.ssh_run(
f"systemctl is-active {service}", ssh_key, check=False
)
state = result.stdout.strip()
if state in ("inactive", "failed"):
return state
time.sleep(5)
raise TimeoutError(
f"Service {service!r} did not finish within {timeout}s"
)
# ------------------------------------------------------------------
# Remote execution
# ------------------------------------------------------------------
def ssh_run(
self,
command: str,
ssh_key: Path,
check: bool = True,
) -> subprocess.CompletedProcess:
"""Run a shell command in the VM via SSH.
Args:
command: Shell command string passed to the remote bash.
ssh_key: Path to the private key used for authentication.
check: If True (default), raise RuntimeError on non-zero exit.
Returns:
CompletedProcess with stdout/stderr as text.
"""
result = subprocess.run(
[
"ssh",
"-i", str(ssh_key),
"-o", "StrictHostKeyChecking=no",
"-o", "UserKnownHostsFile=/dev/null",
f"root@{self.ip}",
command,
],
capture_output=True,
text=True,
)
if check and result.returncode != 0:
raise RuntimeError(
f"SSH command failed (exit {result.returncode}): {command!r}\n"
f"stdout: {result.stdout}\nstderr: {result.stderr}"
)
return result