Compare commits
7 Commits
3d8c6973a0
...
1f5ed7da83
| Author | SHA1 | Date |
|---|---|---|
|
|
1f5ed7da83 | 1 month ago |
|
|
24dc9b8055 | 1 month ago |
|
|
6caba13ff3 | 1 month ago |
|
|
79ecdd27ee | 1 month ago |
|
|
9923b8f5f9 | 1 month ago |
|
|
f96cabe3c0 | 1 month ago |
|
|
f26d2fc371 | 1 month ago |
75 changed files with 1407 additions and 1217 deletions
@ -0,0 +1,6 @@ |
|||||
|
[Unit] |
||||
|
Description=podman pull docker.gitea.com/gitea |
||||
|
Documentation=https://docs.gitea.com/ |
||||
|
|
||||
|
[Image] |
||||
|
Image=docker.gitea.com/gitea:latest |
||||
@ -0,0 +1,6 @@ |
|||||
|
[Unit] |
||||
|
Description=podman pull docker.io/goacme/lego |
||||
|
Documentation=https://go-acme.github.io/lego/ |
||||
|
|
||||
|
[Image] |
||||
|
Image=docker.io/goacme/lego:latest |
||||
@ -0,0 +1,6 @@ |
|||||
|
[Unit] |
||||
|
Description=podman pull ghcr.io/miniflux/miniflux |
||||
|
Documentation=https://github.com/miniflux/v2 |
||||
|
|
||||
|
[Image] |
||||
|
Image=ghcr.io/miniflux/miniflux:latest |
||||
@ -0,0 +1,6 @@ |
|||||
|
[Unit] |
||||
|
Description=podman pull docker.io/collabora/code |
||||
|
Documentation=https://hub.docker.com/r/collabora/code/ |
||||
|
|
||||
|
[Image] |
||||
|
Image=docker.io/collabora/code:latest |
||||
@ -0,0 +1,13 @@ |
|||||
|
[Unit] |
||||
|
Description=podman pull docker.io/nginxinc/nginx-unprivileged |
||||
|
Documentation=https://hub.docker.com/r/nginxinc/nginx-unprivileged/ |
||||
|
|
||||
|
# Only start if Nextcloud has been configured |
||||
|
ConditionPathExists=/etc/quadlets/nextcloud/config.env |
||||
|
|
||||
|
[Image] |
||||
|
Image=docker.io/nginxinc/nginx-unprivileged:${NGINX_MAJOR}-alpine |
||||
|
|
||||
|
[Service] |
||||
|
# These environment variables are sourced to be used by systemd in the Exec* commands |
||||
|
EnvironmentFile=/etc/quadlets/nextcloud/config.env |
||||
@ -0,0 +1,13 @@ |
|||||
|
[Unit] |
||||
|
Description=podman pull docker.io/library/redis |
||||
|
Documentation=https://hub.docker.com/_/redis/ |
||||
|
|
||||
|
# Only start if Nextcloud has been configured |
||||
|
ConditionPathExists=/etc/quadlets/nextcloud/config.env |
||||
|
|
||||
|
[Image] |
||||
|
Image=docker.io/library/redis:${REDIS_MAJOR}-alpine |
||||
|
|
||||
|
[Service] |
||||
|
# These environment variables are sourced to be used by systemd in the Exec* commands |
||||
|
EnvironmentFile=/etc/quadlets/nextcloud/config.env |
||||
@ -0,0 +1,13 @@ |
|||||
|
[Unit] |
||||
|
Description=podman pull docker.io/library/nextcloud |
||||
|
Documentation=https://hub.docker.com/_/nextcloud/ |
||||
|
|
||||
|
# Only start if Nextcloud has been configured |
||||
|
ConditionPathExists=/etc/quadlets/nextcloud/config.env |
||||
|
|
||||
|
[Image] |
||||
|
Image=docker.io/library/nextcloud:${NEXTCLOUD_MAJOR}-fpm-alpine |
||||
|
|
||||
|
[Service] |
||||
|
# These environment variables are sourced to be used by systemd in the Exec* commands |
||||
|
EnvironmentFile=/etc/quadlets/nextcloud/config.env |
||||
@ -0,0 +1,6 @@ |
|||||
|
[Unit] |
||||
|
Description=podman pull docker.io/alpine/git |
||||
|
Documentation=https://hub.docker.com/r/alpine/git |
||||
|
|
||||
|
[Image] |
||||
|
Image=docker.io/alpine/git:latest |
||||
@ -0,0 +1,6 @@ |
|||||
|
[Unit] |
||||
|
Description=podman pull docker.io/library/nginx |
||||
|
Documentation=https://hub.docker.com/_/nginx |
||||
|
|
||||
|
[Image] |
||||
|
Image=docker.io/library/nginx:mainline-alpine |
||||
@ -0,0 +1,90 @@ |
|||||
|
import textwrap |
||||
|
import test_quadlet # noqa: F401 |
||||
|
|
||||
|
# Extra files to inject into the FCOS image for the tests in this file. |
||||
|
# The config.env is used to configure the nginx Quadlet. |
||||
|
PYTEST_FCOS_EXTRA_FILES = { |
||||
|
"/etc/quadlets/nginx/config.env": ( |
||||
|
textwrap.dedent(""" |
||||
|
# This file is generated for testing purposes. |
||||
|
GIT_REPO=https://github.com/nmasse-itix/podman-quadlet-cookbook.git |
||||
|
GIT_BRANCH=main |
||||
|
NGINX_PORT=80 |
||||
|
NGINX_HOST=localhost |
||||
|
"""), |
||||
|
0, |
||||
|
0, |
||||
|
0o600, |
||||
|
), |
||||
|
} |
||||
|
|
||||
|
""" |
||||
|
Verify that the nginx Quadlet is correctly installed and configured on a fresh VM boot. |
||||
|
""" |
||||
|
class TestNginxQuadlet(test_quadlet.TestQuadlet): |
||||
|
expected_services = [ |
||||
|
{ "name": "nginx.target", "state": "active", "exists": True }, |
||||
|
{ "name": "nginx-server.service", "state": "active", "exists": True }, |
||||
|
{ "name": "nginx-init.service", "state": "inactive", "exists": True }, |
||||
|
{ "name": "nginx-update.service", "state": "inactive", "exists": True }, |
||||
|
{ "name": "nginx-update.timer", "state": "active", "exists": True }, |
||||
|
] |
||||
|
|
||||
|
expected_sockets = [ |
||||
|
{ "uri": "tcp://127.0.0.1:80", "state": "listening" }, |
||||
|
] |
||||
|
|
||||
|
expected_ports = [ |
||||
|
{ "number": 80, "protocol": "tcp", "state": "open" }, |
||||
|
{ "number": 22, "protocol": "tcp", "state": "open" }, |
||||
|
] |
||||
|
|
||||
|
expected_files = [ |
||||
|
{ "path": "/var/lib/quadlets/nginx", "type": "directory", "owner": "root", "group": "root", "mode": 0o755 }, |
||||
|
{ "path": "/etc/quadlets/nginx/config.env", "type": "file", "owner": "root", "group": "root", "mode": 0o600 }, |
||||
|
{ "path": "/var/lib/quadlets/nginx/.git", "type": "directory" }, |
||||
|
] |
||||
|
|
||||
|
expected_podman_images = [ |
||||
|
{ "name": "docker.io/library/nginx", "tag": "mainline-alpine", "state": "present" }, |
||||
|
] |
||||
|
|
||||
|
expected_podman_containers = [ |
||||
|
{ "name": "nginx-server", "state": "present" }, |
||||
|
] |
||||
|
|
||||
|
expected_main_service = "nginx.target" |
||||
|
expected_main_service_timeout = 300 |
||||
|
|
||||
|
def test_nginx_serves_content(self, fcos_host): |
||||
|
"""Nginx must serve an HTTP 200 response on port 80.""" |
||||
|
result = fcos_host.run("curl -sSf -o /dev/null -w '%{http_code}' http://localhost/") |
||||
|
assert result.rc == 0, f"curl failed with exit code {result.rc}: {result.stderr}" |
||||
|
assert result.stdout.strip() == "200", f"Expected HTTP 200, got: {result.stdout.strip()}" |
||||
|
|
||||
|
def test_nginx_serves_expected_html(self, fcos_host): |
||||
|
"""Nginx must serve the expected HTML content cloned from the git repository.""" |
||||
|
result = fcos_host.run("curl -sSf http://localhost/") |
||||
|
assert result.rc == 0, f"curl failed with exit code {result.rc}: {result.stderr}" |
||||
|
assert "Hello World" in result.stdout, f"Expected 'Hello World' in the response, but got: {result.stdout}" |
||||
|
|
||||
|
def test_nginx_update_cycle(self, fcos_host): |
||||
|
"""Restarting nginx.target must trigger nginx-update (git pull) and nginx must keep serving content.""" |
||||
|
result = fcos_host.run("systemctl restart nginx.target") |
||||
|
assert result.rc == 0, f"Failed to restart nginx.target: {result.stderr}" |
||||
|
|
||||
|
# Wait for nginx.target to become active again after the update |
||||
|
self.wait_for_service(fcos_host, "nginx.target", timeout=120) |
||||
|
|
||||
|
# nginx-update.service must have run (git pull) and completed (oneshot → inactive) |
||||
|
# nginx-init.service must NOT have run again (.git already exists, condition not met) |
||||
|
self.check_expected_services(fcos_host, [ |
||||
|
{ "name": "nginx-update.service", "state": "inactive", "exists": True }, |
||||
|
{ "name": "nginx-init.service", "state": "inactive", "exists": True }, |
||||
|
{ "name": "nginx-server.service", "state": "active", "exists": True }, |
||||
|
]) |
||||
|
|
||||
|
# nginx must still serve the expected content after the update cycle |
||||
|
result = fcos_host.run("curl -sSf http://localhost/") |
||||
|
assert result.rc == 0, f"curl failed after update cycle: {result.stderr}" |
||||
|
assert "Hello World" in result.stdout, f"Expected 'Hello World' after update cycle, but got: {result.stdout}" |
||||
@ -0,0 +1,155 @@ |
|||||
|
import pytest |
||||
|
import textwrap |
||||
|
|
||||
|
# Add the current cookbook's tests directory to the path so we can import helpers.py. |
||||
|
from pathlib import Path |
||||
|
import sys |
||||
|
sys.path.insert(0, str(Path(__file__).parent)) |
||||
|
import helpers # noqa: E402 |
||||
|
|
||||
|
# Major version of PostgreSQL to install by default on a fresh VM boot. |
||||
|
PG_MAJOR_DEFAULT = 14 |
||||
|
|
||||
|
# Extra files to inject into the FCOS image for the tests in this file. |
||||
|
# The config.env is used to configure the PostgreSQL Quadlet, and the init.d/test.sql file is an init hook that creates a test database and user on the first boot. |
||||
|
PYTEST_FCOS_EXTRA_FILES = { |
||||
|
"/etc/quadlets/postgresql/config.env": ( |
||||
|
textwrap.dedent(f""" |
||||
|
# This file is generated by conftest.py for testing purposes. |
||||
|
POSTGRES_USER=postgres |
||||
|
POSTGRES_PASSWORD=postgres |
||||
|
POSTGRES_DB=postgres |
||||
|
POSTGRES_HOST_AUTH_METHOD=scram-sha-256 |
||||
|
POSTGRES_INITDB_ARGS=--auth-host=scram-sha-256 |
||||
|
POSTGRES_ARGS=-h 127.0.0.1 |
||||
|
PGPORT=5432 |
||||
|
PG_MAJOR={PG_MAJOR_DEFAULT} |
||||
|
POSTGRES_BACKUP_RETENTION=7 |
||||
|
"""), |
||||
|
0, |
||||
|
0, |
||||
|
0o600, |
||||
|
), |
||||
|
"/etc/quadlets/postgresql/init.d/test.sql": ( |
||||
|
textwrap.dedent(""" |
||||
|
-- This file is generated by conftest.py for testing purposes. |
||||
|
CREATE USER test WITH PASSWORD 'test'; |
||||
|
CREATE DATABASE testdb OWNER test; |
||||
|
GRANT ALL PRIVILEGES ON DATABASE testdb TO test; |
||||
|
ALTER ROLE test SET client_encoding TO 'utf8'; |
||||
|
"""), |
||||
|
10004, |
||||
|
10000, |
||||
|
0o600, |
||||
|
), |
||||
|
} |
||||
|
|
||||
|
""" |
||||
|
Verify that the postgresql Quadlet is correctly installed and configured on a fresh VM boot. |
||||
|
""" |
||||
|
class TestPostgresqlQuadletInstallUpgradeBackup(helpers.TestPostgresqlQuadlet): |
||||
|
|
||||
|
expected_pg_major = PG_MAJOR_DEFAULT |
||||
|
|
||||
|
def test_can_create_database(self, fcos_host): |
||||
|
"""Should be possible to create a new database.""" |
||||
|
self._run_sql(fcos_host, "CREATE DATABASE upgrade_path_db") |
||||
|
output = self._run_sql(fcos_host, "SELECT datname FROM pg_database WHERE datname = 'upgrade_path_db'") |
||||
|
assert output == "upgrade_path_db", f"Unexpected output from SQL query: {output}" |
||||
|
output = self._run_sql(fcos_host, "CREATE TABLE upgrade_path (version VARCHAR);", database="upgrade_path_db") |
||||
|
output = self._run_sql(fcos_host, "INSERT INTO upgrade_path (version) SELECT version();", database="upgrade_path_db") |
||||
|
|
||||
|
def test_init_hook_has_created_database(self, fcos_host): |
||||
|
"""The injected init hook has created the test database and user.""" |
||||
|
output = self._run_sql(fcos_host, "SELECT datname FROM pg_database WHERE datname = 'testdb'") |
||||
|
assert output == "testdb", f"Unexpected output from SQL query: {output}" |
||||
|
output = self._run_sql(fcos_host, "SELECT 1 FROM pg_roles WHERE rolname = 'test'") |
||||
|
assert output == "1", f"Unexpected output from SQL query: {output}" |
||||
|
|
||||
|
def test_created_database_and_user_is_working(self, fcos_host): |
||||
|
"""Should be able to connect to the test database with the test user.""" |
||||
|
result = fcos_host.run( |
||||
|
"podman exec postgresql-server psql -U test -d testdb --csv -t -c %s", "SELECT 1 AS probe" |
||||
|
) |
||||
|
assert result.exit_status == 0, f"SQL query failed with exit code {result.exit_status}: {result.stderr}" |
||||
|
output = result.stdout.strip() |
||||
|
assert output == "1", f"Unexpected output from SQL query: {output}" |
||||
|
|
||||
|
def test_upgrade_postgresql(self, fcos_host, pg_upgrade_major): |
||||
|
"""Should be able to upgrade PostgreSQL by changing PG_MAJOR and rebooting.""" |
||||
|
# Stop the server to release the data directory |
||||
|
result = fcos_host.run("systemctl stop postgresql.target") |
||||
|
assert result.exit_status == 0, f"Failed to stop postgresql.target with exit code {result.exit_status}: {result.stderr}" |
||||
|
self.check_expected_services(fcos_host, expected_services=[ |
||||
|
{ "name": "postgresql-server.service", "state": "inactive", "exists": True }, |
||||
|
]) |
||||
|
|
||||
|
# Change PG_MAJOR in the config.env |
||||
|
fcos_host.run(f"sed -i 's/^PG_MAJOR=.*/PG_MAJOR={pg_upgrade_major}/' /etc/quadlets/postgresql/config.env") |
||||
|
|
||||
|
# Start the server after changing the data directory |
||||
|
result = fcos_host.run("systemctl start postgresql.target") |
||||
|
assert result.exit_status == 0, f"Failed to start postgresql.target with exit code {result.exit_status}: {result.stderr}" |
||||
|
self.check_expected_services(fcos_host, expected_services=[ |
||||
|
{ "name": "postgresql-server.service", "state": "active", "exists": True }, |
||||
|
{ "name": "postgresql-init.service", "state": "inactive", "exists": True }, |
||||
|
{ "name": "postgresql-upgrade.service", "state": "inactive", "exists": True }, |
||||
|
]) |
||||
|
|
||||
|
# The server_version must reflect the new major version after the upgrade |
||||
|
output = self._run_sql(fcos_host, "SHOW server_version") |
||||
|
assert output.startswith(f"{pg_upgrade_major}."), f"Expected PostgreSQL server version to start with {pg_upgrade_major}, but got {output}" |
||||
|
|
||||
|
def test_data_is_still_there_after_upgrade(self, fcos_host, pg_upgrade_major): |
||||
|
"""Data created before the upgrade must still be there after the upgrade.""" |
||||
|
# Check that the old data is still there after the upgrade |
||||
|
output = self._run_sql(fcos_host, "SELECT datname FROM pg_database WHERE datname = 'upgrade_path_db'") |
||||
|
assert output == "upgrade_path_db", f"Unexpected output from SQL query: {output}" |
||||
|
output = self._run_sql(fcos_host, "SELECT datname FROM pg_database WHERE datname = 'testdb'") |
||||
|
assert output == "testdb", f"Unexpected output from SQL query: {output}" |
||||
|
result = fcos_host.run( |
||||
|
"podman exec postgresql-server psql -U test -d testdb --csv -t -c %s", "SELECT 1 AS probe" |
||||
|
) |
||||
|
assert result.exit_status == 0, f"SQL query failed with exit code {result.exit_status}: {result.stderr}" |
||||
|
|
||||
|
def test_insert_version(self, fcos_host, pg_upgrade_major): |
||||
|
"""Should be able to insert data into the database after the upgrade.""" |
||||
|
output = self._run_sql(fcos_host, "INSERT INTO upgrade_path (version) SELECT version();", database="upgrade_path_db") |
||||
|
|
||||
|
def test_upgraded_postgresql_version_is_correct(self, fcos_host, pg_upgrade_major): |
||||
|
"""The running PostgreSQL server must report the updated version.""" |
||||
|
|
||||
|
# The server_version must reflect the new major version after the upgrade |
||||
|
output = self._run_sql(fcos_host, "SHOW server_version") |
||||
|
assert output.startswith(f"{pg_upgrade_major}."), f"Expected PostgreSQL server version to start with {pg_upgrade_major}, but got {output}" |
||||
|
|
||||
|
# The new PostgreSQL major version's image must be pulled and present in Podman after the upgrade |
||||
|
self.check_expected_podman_images(fcos_host, expected_podman_images=[ |
||||
|
{ "name": "docker.io/library/postgres", "tag": f"{pg_upgrade_major}-alpine", "state": "present" }, |
||||
|
]) |
||||
|
|
||||
|
def test_latest_symlink_has_expected_target(self, fcos_host, pg_upgrade_major): |
||||
|
"""The 'latest' symlink must point to the active major-version directory.""" |
||||
|
link = fcos_host.file("/var/lib/quadlets/postgresql/latest") |
||||
|
assert link.exists |
||||
|
assert link.is_symlink |
||||
|
assert link.linked_to == f"/var/lib/quadlets/postgresql/{pg_upgrade_major}" |
||||
|
|
||||
|
def test_create_backup(self, fcos_host): |
||||
|
"""Should be able to create a backup using the backup service.""" |
||||
|
result = fcos_host.run("systemctl start postgresql-backup.service") |
||||
|
assert result.exit_status == 0, f"Failed to start postgresql-backup.service with exit code {result.exit_status}: {result.stderr}" |
||||
|
# Check that a backup file has been created in the backup directory |
||||
|
backup_dir = fcos_host.file("/var/lib/virtiofs/data/postgresql/backup") |
||||
|
assert backup_dir.exists |
||||
|
assert backup_dir.is_directory |
||||
|
backup_list = backup_dir.listdir() |
||||
|
assert len(backup_list) > 0, "No backup files found in the backup directory after running the backup service!" |
||||
|
latest_backup = max(backup_list) |
||||
|
latest_backup_content = fcos_host.file(f"/var/lib/virtiofs/data/postgresql/backup/{latest_backup}").listdir() |
||||
|
assert len(latest_backup_content) > 0, "No files found in the latest backup directory after running the backup service!" |
||||
|
assert "backup_manifest" in latest_backup_content, f"Expected 'backup_manifest' file in the backup, but got: {latest_backup_content}" |
||||
|
assert "base.tar" in latest_backup_content, f"Expected 'base.tar' file in the backup, but got: {latest_backup_content}" |
||||
|
assert "pg_wal.tar" in latest_backup_content, f"Expected 'pg_wal.tar' file in the backup, but got: {latest_backup_content}" |
||||
|
assert "dump-upgrade_path_db.sql.gz" in latest_backup_content, f"Expected 'dump-upgrade_path_db.sql.gz' file in the backup, but got: {latest_backup_content}" |
||||
|
assert "dump-testdb.sql.gz" in latest_backup_content, f"Expected 'dump-testdb.sql.gz' file in the backup, but got: {latest_backup_content}" |
||||
@ -0,0 +1,56 @@ |
|||||
|
import pytest |
||||
|
import textwrap |
||||
|
|
||||
|
# Add the current cookbook's tests directory to the path so we can import helpers.py. |
||||
|
from pathlib import Path |
||||
|
import sys |
||||
|
sys.path.insert(0, str(Path(__file__).parent)) |
||||
|
import helpers # noqa: E402 |
||||
|
|
||||
|
# Major version of PostgreSQL to install by default on a fresh VM boot. |
||||
|
PG_MAJOR_DEFAULT = 18 |
||||
|
|
||||
|
# Extra files to inject into the FCOS image for the tests in this file. |
||||
|
# The config.env is used to configure the PostgreSQL Quadlet. |
||||
|
PYTEST_FCOS_EXTRA_FILES = { |
||||
|
"/etc/quadlets/postgresql/config.env": ( |
||||
|
textwrap.dedent(f""" |
||||
|
# This file is generated by conftest.py for testing purposes. |
||||
|
POSTGRES_USER=postgres |
||||
|
POSTGRES_PASSWORD=postgres |
||||
|
POSTGRES_DB=postgres |
||||
|
POSTGRES_HOST_AUTH_METHOD=scram-sha-256 |
||||
|
POSTGRES_INITDB_ARGS=--auth-host=scram-sha-256 |
||||
|
POSTGRES_ARGS=-h 127.0.0.1 |
||||
|
PGPORT=5432 |
||||
|
PG_MAJOR={PG_MAJOR_DEFAULT} |
||||
|
POSTGRES_BACKUP_RETENTION=7 |
||||
|
"""), |
||||
|
0, |
||||
|
0, |
||||
|
0o600, |
||||
|
), |
||||
|
} |
||||
|
|
||||
|
""" |
||||
|
Verify that the postgresql Quadlet correctly restores a database from a backup |
||||
|
on a fresh VM with the backup data present in the virtiofs. |
||||
|
""" |
||||
|
class TestPostgresqlQuadletRestore(helpers.TestPostgresqlQuadlet): |
||||
|
expected_pg_major = PG_MAJOR_DEFAULT |
||||
|
|
||||
|
def test_data_is_still_there_after_restore(self, fcos_host): |
||||
|
"""Data created before the restore must still be there after the restore.""" |
||||
|
# Check that the old data is still there after the restore |
||||
|
output = self._run_sql(fcos_host, "SELECT datname FROM pg_database WHERE datname = 'upgrade_path_db'") |
||||
|
assert output == "upgrade_path_db", f"Unexpected output from SQL query: {output}" |
||||
|
output = self._run_sql(fcos_host, "SELECT datname FROM pg_database WHERE datname = 'testdb'") |
||||
|
assert output == "testdb", f"Unexpected output from SQL query: {output}" |
||||
|
result = fcos_host.run( |
||||
|
"podman exec postgresql-server psql -U test -d testdb --csv -t -c %s", "SELECT 1 AS probe" |
||||
|
) |
||||
|
assert result.exit_status == 0, f"SQL query failed with exit code {result.exit_status}: {result.stderr}" |
||||
|
|
||||
|
# Check that the upgrade_path table contains the initial postgresql version (14) |
||||
|
output = self._run_sql(fcos_host, "SELECT LEFT(version, 14) FROM upgrade_path ORDER BY version ASC LIMIT 1", database="upgrade_path_db") |
||||
|
assert output.startswith("PostgreSQL 14."), f"Unexpected output from SQL query: {output}" |
||||
@ -1,119 +0,0 @@ |
|||||
"""Test PostgreSQL backup creation and VirtioFS storage. |
|
||||
|
|
||||
These tests verify that: |
|
||||
- The backup oneshot service can be triggered manually and runs to completion. |
|
||||
- The expected backup artefacts land in the VirtioFS share (accessible from |
|
||||
the test runner's host filesystem without SSH). |
|
||||
- The backup retention policy removes stale backups. |
|
||||
|
|
||||
Note: tests within a module share a single VM (module-scoped fixture), so |
|
||||
the order of test execution matters here: the backup files checked in later |
|
||||
tests are created by the earlier trigger test. |
|
||||
""" |
|
||||
|
|
||||
import time |
|
||||
from pathlib import Path |
|
||||
|
|
||||
|
|
||||
# --------------------------------------------------------------------------- |
|
||||
# Trigger and completion |
|
||||
# --------------------------------------------------------------------------- |
|
||||
|
|
||||
def test_create_database_and_table(postgresql_vm, test_ssh_key): |
|
||||
"""Create a test database and table with some data to ensure the backup has |
|
||||
something to capture.""" |
|
||||
postgresql_vm.ssh_run( |
|
||||
"podman exec postgresql-server psql -U postgres -c \"CREATE DATABASE test;\"", |
|
||||
test_ssh_key, |
|
||||
) |
|
||||
postgresql_vm.ssh_run( |
|
||||
"podman exec postgresql-server psql -U postgres -d test -c \"CREATE TABLE witness (id SERIAL PRIMARY KEY, version VARCHAR); INSERT INTO witness (version) SELECT version();\"", |
|
||||
test_ssh_key, |
|
||||
) |
|
||||
|
|
||||
def test_trigger_backup(postgresql_vm, test_ssh_key): |
|
||||
"""Starting postgresql-backup.service must succeed (no immediate error).""" |
|
||||
postgresql_vm.ssh_run( |
|
||||
"systemctl start postgresql-backup.service", |
|
||||
test_ssh_key, |
|
||||
) |
|
||||
|
|
||||
|
|
||||
def test_backup_completes_successfully(postgresql_vm, test_ssh_key): |
|
||||
"""postgresql-backup.service must finish in ``inactive`` state (not ``failed``).""" |
|
||||
state = postgresql_vm.wait_for_unit_done( |
|
||||
"postgresql-backup.service", test_ssh_key, timeout=120 |
|
||||
) |
|
||||
assert state == "inactive", ( |
|
||||
f"Backup service ended in unexpected state {state!r}. " |
|
||||
"Run: systemctl status postgresql-backup.service --no-pager" |
|
||||
) |
|
||||
|
|
||||
|
|
||||
# --------------------------------------------------------------------------- |
|
||||
# VirtioFS artefacts (verified from the host — no SSH required) |
|
||||
# --------------------------------------------------------------------------- |
|
||||
|
|
||||
|
|
||||
def test_backup_directory_exists_in_virtiofs(virtiofs_dir: Path): |
|
||||
"""The postgresql/backup sub-directory must exist in the VirtioFS share.""" |
|
||||
backup_root = virtiofs_dir / "postgresql" / "backup" |
|
||||
assert backup_root.is_dir(), f"Backup directory not found on host: {backup_root}" |
|
||||
|
|
||||
|
|
||||
def test_at_least_one_backup_present(virtiofs_dir: Path): |
|
||||
"""At least one timestamped backup sub-directory must exist.""" |
|
||||
backup_root = virtiofs_dir / "postgresql" / "backup" |
|
||||
backups = sorted(backup_root.iterdir()) |
|
||||
assert backups, f"No backup sub-directories found under {backup_root}" |
|
||||
|
|
||||
|
|
||||
def test_backup_manifest_present(virtiofs_dir: Path): |
|
||||
"""The latest backup must contain a ``backup_manifest`` file (pg_basebackup).""" |
|
||||
backup_root = virtiofs_dir / "postgresql" / "backup" |
|
||||
latest = sorted(backup_root.iterdir())[-1] |
|
||||
assert (latest / "backup_manifest").exists(), ( |
|
||||
f"backup_manifest missing in {latest}" |
|
||||
) |
|
||||
|
|
||||
|
|
||||
def test_backup_base_tar_present(virtiofs_dir: Path): |
|
||||
"""The latest backup must contain a ``base.tar`` cluster archive.""" |
|
||||
backup_root = virtiofs_dir / "postgresql" / "backup" |
|
||||
latest = sorted(backup_root.iterdir())[-1] |
|
||||
assert (latest / "base.tar").exists(), f"base.tar missing in {latest}" |
|
||||
|
|
||||
|
|
||||
def test_database_dump_present(virtiofs_dir: Path): |
|
||||
"""At least one ``dump-test.sql.gz`` file must exist alongside the cluster backup.""" |
|
||||
backup_root = virtiofs_dir / "postgresql" / "backup" |
|
||||
latest = sorted(backup_root.iterdir())[-1] |
|
||||
dumps = list(latest.glob("dump-test.sql.gz")) |
|
||||
assert dumps, f"No dump-test.sql.gz files found in {latest}" |
|
||||
|
|
||||
# --------------------------------------------------------------------------- |
|
||||
# Retention policy |
|
||||
# --------------------------------------------------------------------------- |
|
||||
|
|
||||
|
|
||||
def test_backup_retention_enforced(postgresql_vm, test_ssh_key, virtiofs_dir: Path): |
|
||||
"""After triggering several extra backups the count must stay within the |
|
||||
configured retention limit (POSTGRES_BACKUP_RETENTION=7).""" |
|
||||
retention = 7 |
|
||||
|
|
||||
# Trigger ten additional backups so the rotation code has something to do. |
|
||||
for _ in range(10): |
|
||||
postgresql_vm.ssh_run( |
|
||||
"systemctl start postgresql-backup.service", test_ssh_key |
|
||||
) |
|
||||
state = postgresql_vm.wait_for_unit_done( |
|
||||
"postgresql-backup.service", test_ssh_key, timeout=120 |
|
||||
) |
|
||||
assert state == "inactive" |
|
||||
time.sleep(1) # ensure distinct timestamp directories |
|
||||
|
|
||||
backup_root = virtiofs_dir / "postgresql" / "backup" |
|
||||
count = len(list(backup_root.iterdir())) |
|
||||
assert count <= retention, ( |
|
||||
f"Retention policy failed: {count} backups present, expected ≤ {retention}" |
|
||||
) |
|
||||
@ -1,149 +0,0 @@ |
|||||
"""Test that a fresh PostgreSQL installation is healthy. |
|
||||
|
|
||||
These tests run against a brand-new VM booted from the cookbook's default |
|
||||
ignition (PG_MAJOR=14, example credentials). They verify: |
|
||||
- All expected systemd units are in the correct state. |
|
||||
- The PostgreSQL server is listening and accepts queries. |
|
||||
- VirtioFS is mounted and the expected directories exist. |
|
||||
""" |
|
||||
|
|
||||
from pathlib import Path |
|
||||
|
|
||||
from helpers import PG_MAJOR_DEFAULT, run_sql |
|
||||
|
|
||||
|
|
||||
# --------------------------------------------------------------------------- |
|
||||
# Systemd unit state |
|
||||
# --------------------------------------------------------------------------- |
|
||||
|
|
||||
|
|
||||
def test_postgresql_target_active(pg_host): |
|
||||
"""postgresql.target must be active once the full startup chain completes.""" |
|
||||
assert pg_host.service("postgresql.target").is_running |
|
||||
|
|
||||
|
|
||||
def test_postgresql_server_running(pg_host): |
|
||||
"""The long-running PostgreSQL server container must be active.""" |
|
||||
assert pg_host.service("postgresql-server.service").is_running |
|
||||
|
|
||||
|
|
||||
def test_set_major_oneshot_completed(pg_host): |
|
||||
"""postgresql-set-major.service (oneshot) must have finished — not still running.""" |
|
||||
result = pg_host.run("systemctl is-active postgresql-set-major.service") |
|
||||
assert result.stdout.strip() == "inactive" |
|
||||
|
|
||||
|
|
||||
def test_init_oneshot_completed(pg_host): |
|
||||
"""postgresql-init.service (oneshot) must have finished after initialization.""" |
|
||||
result = pg_host.run("systemctl is-active postgresql-init.service") |
|
||||
assert result.stdout.strip() == "inactive" |
|
||||
|
|
||||
|
|
||||
def test_upgrade_oneshot_completed(pg_host): |
|
||||
"""postgresql-upgrade.service (oneshot) must have finished — no upgrade needed |
|
||||
on a fresh install.""" |
|
||||
result = pg_host.run("systemctl is-active postgresql-upgrade.service") |
|
||||
assert result.stdout.strip() == "inactive" |
|
||||
|
|
||||
|
|
||||
def test_backup_timer_scheduled(pg_host): |
|
||||
"""The daily backup timer must be active (scheduled).""" |
|
||||
assert pg_host.service("postgresql-backup.timer").is_running |
|
||||
|
|
||||
|
|
||||
# --------------------------------------------------------------------------- |
|
||||
# Network / socket |
|
||||
# --------------------------------------------------------------------------- |
|
||||
|
|
||||
|
|
||||
def test_postgresql_port_listening(pg_host): |
|
||||
"""PostgreSQL must be listening on 127.0.0.1:5432 (POSTGRES_ARGS=-h 127.0.0.1).""" |
|
||||
assert pg_host.socket("tcp://127.0.0.1:5432").is_listening |
|
||||
|
|
||||
|
|
||||
# --------------------------------------------------------------------------- |
|
||||
# Filesystem layout |
|
||||
# --------------------------------------------------------------------------- |
|
||||
|
|
||||
|
|
||||
def test_virtiofs_mounted(pg_host): |
|
||||
"""The VirtioFS share must be mounted at /var/lib/virtiofs/data.""" |
|
||||
mount = pg_host.mount_point("/var/lib/virtiofs/data") |
|
||||
assert mount.exists |
|
||||
assert mount.filesystem == "virtiofs" |
|
||||
|
|
||||
|
|
||||
def test_virtiofs_postgresql_dir(pg_host): |
|
||||
"""/var/lib/virtiofs/data/postgresql must be created by tmpfiles.d.""" |
|
||||
assert pg_host.file("/var/lib/virtiofs/data/postgresql").is_directory |
|
||||
|
|
||||
|
|
||||
def test_virtiofs_backup_dir(pg_host): |
|
||||
"""/var/lib/virtiofs/data/postgresql/backup must be created by tmpfiles.d.""" |
|
||||
assert pg_host.file("/var/lib/virtiofs/data/postgresql/backup").is_directory |
|
||||
|
|
||||
|
|
||||
def test_data_dir_exists(pg_host): |
|
||||
"""/var/lib/quadlets/postgresql must exist with the correct ownership.""" |
|
||||
f = pg_host.file("/var/lib/quadlets/postgresql") |
|
||||
assert f.is_directory |
|
||||
assert f.user == "postgresql" |
|
||||
|
|
||||
|
|
||||
def test_latest_symlink_exists(pg_host): |
|
||||
"""The 'latest' symlink must point to the active major-version directory.""" |
|
||||
link = pg_host.file("/var/lib/quadlets/postgresql/latest") |
|
||||
assert link.exists |
|
||||
assert link.is_symlink |
|
||||
|
|
||||
|
|
||||
def test_version_dir_exists(pg_host): |
|
||||
"""A directory named after PG_MAJOR_DEFAULT must exist under the data dir.""" |
|
||||
assert pg_host.file( |
|
||||
f"/var/lib/quadlets/postgresql/{PG_MAJOR_DEFAULT}" |
|
||||
).is_directory |
|
||||
|
|
||||
|
|
||||
def test_initialized_flag_exists(pg_host): |
|
||||
"""The .initialized sentinel file must be written after a successful init.""" |
|
||||
assert pg_host.file("/var/lib/quadlets/postgresql/.initialized").exists |
|
||||
|
|
||||
|
|
||||
def test_config_env_present(pg_host): |
|
||||
"""/etc/quadlets/postgresql/config.env must be present and not world-readable.""" |
|
||||
f = pg_host.file("/etc/quadlets/postgresql/config.env") |
|
||||
assert f.exists |
|
||||
# mode 0600 — world and group bits must be 0 |
|
||||
assert f.mode & 0o077 == 0 |
|
||||
|
|
||||
|
|
||||
# --------------------------------------------------------------------------- |
|
||||
# Database connectivity |
|
||||
# --------------------------------------------------------------------------- |
|
||||
|
|
||||
|
|
||||
def test_postgresql_accepts_connections(postgresql_vm, test_ssh_key): |
|
||||
"""PostgreSQL must respond to a trivial SQL query.""" |
|
||||
output = run_sql(postgresql_vm, test_ssh_key, "SELECT 1 AS probe") |
|
||||
assert "1" in output |
|
||||
|
|
||||
|
|
||||
def test_postgresql_version_matches_config(postgresql_vm, test_ssh_key): |
|
||||
"""The running PostgreSQL server must report the version from PG_MAJOR_DEFAULT.""" |
|
||||
output = run_sql(postgresql_vm, test_ssh_key, "SHOW server_version") |
|
||||
assert PG_MAJOR_DEFAULT in output |
|
||||
|
|
||||
|
|
||||
def test_can_create_database(postgresql_vm, test_ssh_key):
    """CREATE DATABASE must succeed and the new database must be listed."""
    run_sql(postgresql_vm, test_ssh_key, "CREATE DATABASE install_test_db")
    # Confirm via the catalog rather than trusting the CREATE's exit status.
    listed = run_sql(
        postgresql_vm,
        test_ssh_key,
        "SELECT datname FROM pg_database WHERE datname = 'install_test_db'",
    )
    assert "install_test_db" in listed
|
||||
@ -1,154 +0,0 @@ |
|||||
"""Test PostgreSQL automatic crash recovery. |
|
||||
|
|
||||
Scenarios covered: |
|
||||
1. Container crash (SIGKILL via ``podman kill``) → systemd restarts the |
|
||||
service automatically (Restart=always, RestartSec=10). |
|
||||
2. Hard VM reboot → all services start cleanly and data is intact. |
|
||||
|
|
||||
All tests share the module-scoped ``postgresql_vm`` fixture. Because some |
|
||||
tests are destructive (they kill the container), they are intentionally |
|
||||
sequenced: create data → crash → verify recovery → create more data → |
|
||||
reboot → verify recovery. |
|
||||
""" |
|
||||
|
|
||||
import time |
|
||||
|
|
||||
from helpers import run_sql |
|
||||
|
|
||||
# Data written before the crash that must survive each recovery scenario. |
|
||||
CRASH_WITNESS_TABLE = "crash_witness" |
|
||||
CRASH_WITNESS_VALUE = "before_crash" |
|
||||
|
|
||||
REBOOT_WITNESS_TABLE = "reboot_witness" |
|
||||
REBOOT_WITNESS_VALUE = "before_reboot" |
|
||||
|
|
||||
|
|
||||
# --------------------------------------------------------------------------- |
|
||||
# Scenario 1: container crash |
|
||||
# --------------------------------------------------------------------------- |
|
||||
|
|
||||
|
|
||||
def test_server_running_before_crash(pg_host):
    """Precondition: the server service is active before we crash it."""
    service = pg_host.service("postgresql-server.service")
    assert service.is_running
|
||||
|
|
||||
|
|
||||
def test_create_data_before_crash(postgresql_vm, test_ssh_key):
    """Write a witness row that must survive the container crash."""
    statement = (
        f"CREATE TABLE IF NOT EXISTS {CRASH_WITNESS_TABLE} "
        f"(id SERIAL PRIMARY KEY, message TEXT NOT NULL); "
        f"INSERT INTO {CRASH_WITNESS_TABLE} (message) "
        f"VALUES ('{CRASH_WITNESS_VALUE}');"
    )
    run_sql(postgresql_vm, test_ssh_key, statement)
|
||||
|
|
||||
|
|
||||
def test_kill_postgresql_container(postgresql_vm, test_ssh_key):
    """Crash the container by delivering SIGKILL to its PID 1.

    ``podman kill`` sends SIGKILL to the container's PID 1; systemd then
    notices the exit and restarts the service after RestartSec=10 seconds.
    """
    command = "podman kill --signal SIGKILL postgresql-server"
    postgresql_vm.ssh_run(command, test_ssh_key)
|
||||
|
|
||||
|
|
||||
def test_service_restarts_automatically(postgresql_vm, test_ssh_key):
    """The server unit must come back on its own after the crash.

    Allow up to 60 seconds of start-up: systemd waits RestartSec=10 s before
    restarting, then the container start and health check take extra time.
    """
    # Give systemd a moment to register the exit before polling begins.
    time.sleep(5)
    postgresql_vm.wait_for_service(
        "postgresql-server.service", test_ssh_key, timeout=120
    )
|
||||
|
|
||||
|
|
||||
def test_data_intact_after_crash_recovery(postgresql_vm, test_ssh_key):
    """The pre-crash witness row must survive the automatic restart."""
    query = (
        f"SELECT message FROM {CRASH_WITNESS_TABLE} "
        f"WHERE message = '{CRASH_WITNESS_VALUE}'"
    )
    rows = run_sql(postgresql_vm, test_ssh_key, query)
    assert CRASH_WITNESS_VALUE in rows, (
        f"Crash witness row not found after recovery. Query returned: {rows!r}"
    )
|
||||
|
|
||||
|
|
||||
def test_target_still_active_after_crash(pg_host):
    """postgresql.target must stay active through the container recovery."""
    target = pg_host.service("postgresql.target")
    assert target.is_running
|
||||
|
|
||||
|
|
||||
# --------------------------------------------------------------------------- |
|
||||
# Scenario 2: hard reboot |
|
||||
# --------------------------------------------------------------------------- |
|
||||
|
|
||||
|
|
||||
def test_create_data_before_reboot(postgresql_vm, test_ssh_key):
    """Write a witness row that must survive a full VM reboot."""
    statement = (
        f"CREATE TABLE IF NOT EXISTS {REBOOT_WITNESS_TABLE} "
        f"(id SERIAL PRIMARY KEY, message TEXT NOT NULL); "
        f"INSERT INTO {REBOOT_WITNESS_TABLE} (message) "
        f"VALUES ('{REBOOT_WITNESS_VALUE}');"
    )
    run_sql(postgresql_vm, test_ssh_key, statement)
|
||||
|
|
||||
|
|
||||
def test_reboot_vm(postgresql_vm, test_ssh_key):
    """Request a graceful OS reboot; the SSH connection will drop."""
    postgresql_vm.ssh_run("systemctl reboot", test_ssh_key, check=False)
    # Give the VM time to actually go down before anyone polls SSH again.
    time.sleep(15)
|
||||
|
|
||||
|
|
||||
def test_ssh_available_after_reboot(postgresql_vm, test_ssh_key):
    """SSH must come back within 5 minutes of the reboot."""
    # Drop the cached IP so wait_ssh discovers the (possibly new) address.
    postgresql_vm._ip = None
    postgresql_vm.wait_ssh(ssh_key=test_ssh_key, timeout=300)
|
||||
|
|
||||
|
|
||||
def test_postgresql_target_active_after_reboot(postgresql_vm, test_ssh_key):
    """postgresql.target is enabled in ignition and must start on boot."""
    postgresql_vm.wait_for_service(
        "postgresql.target", ssh_key=test_ssh_key, timeout=300
    )
|
||||
|
|
||||
|
|
||||
def test_data_intact_after_reboot(postgresql_vm, test_ssh_key):
    """The pre-reboot witness row must still be present after boot."""
    query = (
        f"SELECT message FROM {REBOOT_WITNESS_TABLE} "
        f"WHERE message = '{REBOOT_WITNESS_VALUE}'"
    )
    rows = run_sql(postgresql_vm, test_ssh_key, query)
    assert REBOOT_WITNESS_VALUE in rows, (
        f"Reboot witness row not found. Query returned: {rows!r}"
    )
|
||||
|
|
||||
|
|
||||
def test_crash_witness_also_intact_after_reboot(postgresql_vm, test_ssh_key):
    """The pre-crash row must survive the subsequent reboot as well."""
    query = (
        f"SELECT message FROM {CRASH_WITNESS_TABLE} "
        f"WHERE message = '{CRASH_WITNESS_VALUE}'"
    )
    rows = run_sql(postgresql_vm, test_ssh_key, query)
    assert CRASH_WITNESS_VALUE in rows
|
||||
@ -1,163 +0,0 @@ |
|||||
"""Test the PostgreSQL major version upgrade path: PG 14 → PG 17. |
|
||||
|
|
||||
The upgrade mechanism works as follows: |
|
||||
1. postgresql-set-major.service updates the ``latest`` symlink to point at |
|
||||
the new PG_MAJOR directory (e.g. /var/lib/quadlets/postgresql/17/). |
|
||||
2. postgresql-upgrade.service detects that |
|
||||
``latest/docker/PG_VERSION`` does not exist (the 17/ directory is |
|
||||
empty) and triggers pgautoupgrade. |
|
||||
3. pg_upgrade migrates data from the old directory to the new one. |
|
||||
4. postgresql-server.service starts against the upgraded data. |
|
||||
|
|
||||
All tests in this module share a single ``upgrade_vm`` fixture that starts |
|
||||
with PG_MAJOR_UPGRADE_FROM (14). Tests are intentionally ordered to form a |
|
||||
sequential scenario: create data → trigger upgrade → verify outcome. |
|
||||
""" |
|
||||
|
|
||||
from pathlib import Path |
|
||||
|
|
||||
from helpers import PG_MAJOR_UPGRADE_FROM, PG_MAJOR_UPGRADE_TO, run_sql |
|
||||
|
|
||||
# Sentinel table and row used to verify data survives the upgrade. |
|
||||
WITNESS_TABLE = "upgrade_witness" |
|
||||
WITNESS_VALUE = "before_upgrade" |
|
||||
|
|
||||
|
|
||||
# --------------------------------------------------------------------------- |
|
||||
# Pre-upgrade baseline |
|
||||
# --------------------------------------------------------------------------- |
|
||||
|
|
||||
|
|
||||
def test_initial_version_is_upgrade_from(upgrade_vm, test_ssh_key):
    """Precondition: the VM starts out on PG_MAJOR_UPGRADE_FROM."""
    version = run_sql(upgrade_vm, test_ssh_key, "SHOW server_version")
    assert PG_MAJOR_UPGRADE_FROM in version, (
        f"Expected PG {PG_MAJOR_UPGRADE_FROM}, got: {version!r}"
    )
|
||||
|
|
||||
|
|
||||
def test_create_witness_data(upgrade_vm, test_ssh_key):
    """Write a witness row that must survive the major version upgrade."""
    statement = (
        f"CREATE TABLE IF NOT EXISTS {WITNESS_TABLE} "
        f"(id SERIAL PRIMARY KEY, message TEXT NOT NULL); "
        f"INSERT INTO {WITNESS_TABLE} (message) VALUES ('{WITNESS_VALUE}');"
    )
    run_sql(upgrade_vm, test_ssh_key, statement)
    # Read the row back so a silent insert failure is caught immediately.
    rows = run_sql(
        upgrade_vm,
        test_ssh_key,
        f"SELECT message FROM {WITNESS_TABLE} WHERE message = '{WITNESS_VALUE}'",
    )
    assert WITNESS_VALUE in rows
|
||||
|
|
||||
|
|
||||
# --------------------------------------------------------------------------- |
|
||||
# Trigger the upgrade |
|
||||
# --------------------------------------------------------------------------- |
|
||||
|
|
||||
|
|
||||
def test_bump_pg_major_in_config(upgrade_vm, test_ssh_key):
    """Rewrite PG_MAJOR in config.env from UPGRADE_FROM to UPGRADE_TO."""
    sed_cmd = (
        f"sed -i 's/^PG_MAJOR={PG_MAJOR_UPGRADE_FROM}$/PG_MAJOR={PG_MAJOR_UPGRADE_TO}/' "
        "/etc/quadlets/postgresql/config.env"
    )
    upgrade_vm.ssh_run(sed_cmd, test_ssh_key)
    # Read the value back to make sure sed actually matched something.
    result = upgrade_vm.ssh_run(
        "grep ^PG_MAJOR= /etc/quadlets/postgresql/config.env",
        test_ssh_key,
    )
    assert f"PG_MAJOR={PG_MAJOR_UPGRADE_TO}" in result.stdout
|
||||
|
|
||||
|
|
||||
def test_restart_postgresql_target(upgrade_vm, test_ssh_key):
    """Restarting postgresql.target kicks off the upgrade chain."""
    upgrade_vm.ssh_run("systemctl restart postgresql.target", test_ssh_key)
|
||||
|
|
||||
|
|
||||
def test_upgrade_service_completes(upgrade_vm, test_ssh_key):
    """postgresql-upgrade.service must end up ``inactive``, not ``failed``.

    pgautoupgrade can take several minutes for large databases; allow up to
    10 minutes.
    """
    final_state = upgrade_vm.wait_for_unit_done(
        "postgresql-upgrade.service", test_ssh_key, timeout=600
    )
    assert final_state == "inactive", (
        f"Upgrade service ended in state {final_state!r}. "
        "Inspect with: systemctl status postgresql-upgrade.service --no-pager "
        "and: journalctl -u postgresql-upgrade.service"
    )
|
||||
|
|
||||
|
|
||||
def test_server_active_after_upgrade(upgrade_vm, test_ssh_key):
    """The server unit must be active once the upgrade has run."""
    upgrade_vm.wait_for_service(
        "postgresql-server.service", test_ssh_key, timeout=120
    )
|
||||
|
|
||||
|
|
||||
# --------------------------------------------------------------------------- |
|
||||
# Post-upgrade verification |
|
||||
# --------------------------------------------------------------------------- |
|
||||
|
|
||||
|
|
||||
def test_new_version_is_running(upgrade_vm, test_ssh_key):
    """The server must now report PG_MAJOR_UPGRADE_TO as its version."""
    version = run_sql(upgrade_vm, test_ssh_key, "SHOW server_version")
    assert PG_MAJOR_UPGRADE_TO in version, (
        f"Expected PG {PG_MAJOR_UPGRADE_TO} after upgrade, got: {version!r}"
    )
|
||||
|
|
||||
|
|
||||
def test_witness_data_preserved(upgrade_vm, test_ssh_key):
    """The pre-upgrade witness row must still exist and match."""
    rows = run_sql(
        upgrade_vm,
        test_ssh_key,
        f"SELECT message FROM {WITNESS_TABLE} WHERE message = '{WITNESS_VALUE}'",
    )
    assert WITNESS_VALUE in rows, (
        f"Witness row '{WITNESS_VALUE}' not found after upgrade. "
        f"Query returned: {rows!r}"
    )
|
||||
|
|
||||
|
|
||||
def test_old_data_dir_removed(upgrade_vm, test_ssh_key):
    """A clean pgautoupgrade run removes the source data directory."""
    probe = upgrade_vm.ssh_run(
        f"test -d /var/lib/quadlets/postgresql/{PG_MAJOR_UPGRADE_FROM}/docker",
        test_ssh_key,
        check=False,
    )
    # `test -d` exits non-zero when the directory is gone — which is what we want.
    assert probe.returncode != 0, (
        f"Old data directory for PG {PG_MAJOR_UPGRADE_FROM} still exists — "
        "upgrade may not have cleaned up properly"
    )
|
||||
|
|
||||
|
|
||||
def test_latest_symlink_points_to_new_version(upgrade_vm, test_ssh_key):
    """``latest`` must resolve to the PG_MAJOR_UPGRADE_TO directory."""
    result = upgrade_vm.ssh_run(
        "readlink /var/lib/quadlets/postgresql/latest",
        test_ssh_key,
    )
    target = result.stdout
    assert PG_MAJOR_UPGRADE_TO in target, (
        f"latest symlink does not point at PG {PG_MAJOR_UPGRADE_TO}: "
        f"{target.strip()!r}"
    )
|
||||
|
|
||||
|
|
||||
def test_new_data_dir_has_pg_version_file(upgrade_vm, test_ssh_key):
    """A healthy server leaves PG_VERSION in the new data directory."""
    result = upgrade_vm.ssh_run(
        f"cat /var/lib/quadlets/postgresql/{PG_MAJOR_UPGRADE_TO}/docker/PG_VERSION",
        test_ssh_key,
    )
    assert PG_MAJOR_UPGRADE_TO in result.stdout
|
||||
@ -0,0 +1,5 @@ |
|||||
|
[Unit] |
||||
|
Description=podman pull docker.io/restic/rest-server |
||||
|
|
||||
|
[Image] |
||||
|
Image=docker.io/restic/rest-server:latest |
||||
@ -0,0 +1,6 @@ |
|||||
|
[Unit] |
||||
|
Description=podman pull ghcr.io/flaresolverr/flaresolverr |
||||
|
Documentation=https://github.com/FlareSolverr/FlareSolverr/ |
||||
|
|
||||
|
[Image] |
||||
|
Image=ghcr.io/flaresolverr/flaresolverr:latest |
||||
@ -0,0 +1,6 @@ |
|||||
|
[Unit] |
||||
|
Description=podman pull lscr.io/linuxserver/jellyfin |
||||
|
Documentation=https://docs.linuxserver.io/images/docker-jellyfin/ |
||||
|
|
||||
|
[Image] |
||||
|
Image=lscr.io/linuxserver/jellyfin:latest |
||||
@ -0,0 +1,6 @@ |
|||||
|
[Unit] |
||||
|
Description=podman pull lscr.io/linuxserver/lidarr |
||||
|
Documentation=https://docs.linuxserver.io/images/docker-lidarr/ |
||||
|
|
||||
|
[Image] |
||||
|
Image=lscr.io/linuxserver/lidarr:latest |
||||
@ -0,0 +1,6 @@ |
|||||
|
[Unit] |
||||
|
Description=podman pull lscr.io/linuxserver/prowlarr |
||||
|
Documentation=https://docs.linuxserver.io/images/docker-prowlarr/ |
||||
|
|
||||
|
[Image] |
||||
|
Image=lscr.io/linuxserver/prowlarr:latest |
||||
@ -0,0 +1,5 @@ |
|||||
|
[Unit] |
||||
|
Description=podman pull lscr.io/linuxserver/qbittorrent |
||||
|
|
||||
|
[Image] |
||||
|
Image=lscr.io/linuxserver/qbittorrent:latest |
||||
@ -0,0 +1,6 @@ |
|||||
|
[Unit] |
||||
|
Description=podman pull lscr.io/linuxserver/radarr |
||||
|
Documentation=https://docs.linuxserver.io/images/docker-radarr/ |
||||
|
|
||||
|
[Image] |
||||
|
Image=lscr.io/linuxserver/radarr:latest |
||||
@ -0,0 +1,6 @@ |
|||||
|
[Unit] |
||||
|
Description=podman pull lscr.io/linuxserver/sonarr |
||||
|
Documentation=https://docs.linuxserver.io/images/docker-sonarr/ |
||||
|
|
||||
|
[Image] |
||||
|
Image=lscr.io/linuxserver/sonarr:latest |
||||
@ -0,0 +1,6 @@ |
|||||
|
[Unit] |
||||
|
Description=podman pull docker.io/library/traefik |
||||
|
Documentation=https://github.com/traefik/traefik-library-image |
||||
|
|
||||
|
[Image] |
||||
|
Image=docker.io/library/traefik:v3.4 |
||||
@ -0,0 +1,6 @@ |
|||||
|
[Unit] |
||||
|
Description=podman pull lscr.io/linuxserver/unifi-network-application |
||||
|
Documentation=https://docs.linuxserver.io/images/docker-unifi-network-application/ |
||||
|
|
||||
|
[Image] |
||||
|
Image=lscr.io/linuxserver/unifi-network-application:latest |
||||
@ -0,0 +1,13 @@ |
|||||
|
[Unit] |
||||
|
Description=podman pull docker.io/library/mongo |
||||
|
Documentation=https://hub.docker.com/_/mongo/ |
||||
|
|
||||
|
# Only start if Unifi has been configured |
||||
|
ConditionPathExists=/etc/quadlets/unifi/config.env |
||||
|
|
||||
|
[Image] |
||||
|
Image=docker.io/library/mongo:${MONGO_MAJOR} |
||||
|
|
||||
|
[Service] |
||||
|
# These environment variables are sourced to be used by systemd in the Exec* commands |
||||
|
EnvironmentFile=/etc/quadlets/unifi/config.env |
||||
@ -0,0 +1,6 @@ |
|||||
|
[Unit] |
||||
|
Description=podman pull quay.io/vaultwarden/server |
||||
|
Documentation=https://github.com/dani-garcia/vaultwarden |
||||
|
|
||||
|
[Image] |
||||
|
Image=quay.io/vaultwarden/server:latest-alpine |
||||
@ -0,0 +1,5 @@ |
|||||
|
[Unit] |
||||
|
Description=podman pull quay.io/victoriametrics/vmagent |
||||
|
|
||||
|
[Image] |
||||
|
Image=quay.io/victoriametrics/vmagent:latest |
||||
@ -0,0 +1,349 @@ |
|||||
|
""" |
||||
|
Fedora CoreOS VM lifecycle helpers for end-to-end testing. |
||||
|
|
||||
|
Requires running as root (virt-install, virsh, qemu-img need root privileges). |
||||
|
|
||||
|
Typical usage: |
||||
|
vm = FCOSVirtualMachine( |
||||
|
name="fcos-vm-abc123", |
||||
|
ignition_file=Path("/tmp/fcos-test.ign"), |
||||
|
virtiofs_dir=Path("/srv/fcos-test-abc123"), |
||||
|
) |
||||
|
vm.create() |
||||
|
vm.wait_ssh(ssh_key=key_path) |
||||
|
# ... run tests ... |
||||
|
vm.destroy() |
||||
|
""" |
||||
|
|
||||
|
import re |
||||
|
import shutil |
||||
|
import subprocess |
||||
|
import tempfile |
||||
|
import textwrap |
||||
|
import time |
||||
|
from pathlib import Path |
||||
|
import os |
||||
|
|
||||
|
LIBVIRT_IMAGES_DIR = Path("/var/lib/libvirt/images") |
||||
|
FCOS_BASE_IMAGE = LIBVIRT_IMAGES_DIR / "library" / "fedora-coreos.qcow2" |
||||
|
|
||||
|
# Butane spec version — must match the project convention. |
||||
|
BUTANE_VERSION = "1.4.0" |
||||
|
|
||||
|
def ensure_fcos_ign(cookbook_dir: Path) -> Path:
    """Return the path to fcos-test.ign, building it via ``make butane`` if absent."""
    fcos_ign = cookbook_dir / "fcos-test.ign"
    if fcos_ign.exists():
        return fcos_ign
    # Delegate the build to the cookbook's own Makefile target.
    subprocess.run(["make", "-C", str(cookbook_dir), "butane"], check=True)
    return fcos_ign
||||
|
|
||||
|
class FCOSIgnition:
    """Builds a Fedora CoreOS Ignition file by merging multiple ignition files
    and optionally injecting extra files.

    All public methods are synchronous and raise on failure. The caller is
    responsible for calling ``destroy()`` (typically from a pytest fixture
    teardown).
    """

    def __init__(
        self,
        ignition_files: list[Path] | None = None,
        ssh_key: str | None = None,
        extra_files: dict[str, tuple[str | int, str | int, int, str]] | None = None,
    ) -> None:
        """
        Args:
            ignition_files: Paths to the compiled Ignition (.ign) files to merge.
            ssh_key: Optional public SSH key injected into root's authorized_keys.
            extra_files: Optional mapping of target path -> (content, owner,
                group, mode) describing extra files to inject into the Ignition.
        """
        self.ignition_files = ignition_files or []
        self.extra_files = extra_files or {}
        self.ssh_key = ssh_key

    def _build_extra_files_butane(self) -> str | None:
        """Return butane YAML describing ``self.extra_files``, or None if empty."""
        if not self.extra_files:
            return None

        entries = []
        for path, (content, owner, group, mode) in self.extra_files.items():
            # Owner/group may be given as numeric ids or as names; butane
            # distinguishes the two with separate keys.
            entry = (
                f"    - path: {path}\n"
                f"      mode: {mode}\n"
                f"      overwrite: true\n"
                f"      user:\n"
                + (f"        id: {owner}\n" if isinstance(owner, int) else f"        name: {owner}\n")
                + f"      group:\n"
                + (f"        id: {group}\n" if isinstance(group, int) else f"        name: {group}\n")
                + f"      contents:\n"
                f"        inline: |\n"
            )
            # Prefix all lines of content with 10 spaces (2 for indentation + 8 for the literal block)
            entry += textwrap.indent(content + "\n", " " * 10) + "\n"
            entries.append(entry)

        header = textwrap.dedent(f"""\
            variant: fcos
            version: {BUTANE_VERSION}
            storage:
              files:
            """)
        return f"{header}{chr(10).join(entries)}\n"

    def _build_ssh_key_butane(self) -> str | None:
        """Return butane YAML adding ``self.ssh_key`` to root's authorized_keys, or None."""
        if not self.ssh_key:
            return None

        return textwrap.dedent(f"""\
            variant: fcos
            version: {BUTANE_VERSION}
            passwd:
              users:
                - name: root
                  ssh_authorized_keys:
                    - {self.ssh_key}
            """)

    def build(self, output: Path) -> Path:
        """Build the final Ignition file by merging the base files and the extra files.

        Returns ``output``. Raises subprocess.CalledProcessError when butane
        fails; in that case the working directory is kept for debugging.
        """
        # NOTE(fix): the original used tempfile.TemporaryDirectory(delete=False),
        # which only exists on Python >= 3.12. mkdtemp() gives us the same
        # "keep on failure, remove on success" behavior portably.
        workdir = Path(tempfile.mkdtemp())
        try:
            extra_files_butane = self._build_extra_files_butane()
            ssh_key_butane = self._build_ssh_key_butane()

            # Top-level butane document: disable zincati and merge all other
            # ignition fragments by local filename.
            test_bu = textwrap.dedent(f"""\
                variant: fcos
                version: {BUTANE_VERSION}
                systemd:
                  units:
                    # Disable & mask zincati to avoid reboots during testing.
                    - name: zincati.service
                      enabled: false
                      mask: true
                ignition:
                  config:
                    merge:
                """)

            for ign in self.ignition_files:
                test_bu += f"      - local: {ign.name}\n"
                shutil.copy(ign, workdir / ign.name)

            if extra_files_butane:
                extra_files_bu = workdir / "test_extra_files.bu"
                extra_files_bu.write_text(extra_files_butane)
                extra_files_path = workdir / "test_extra_files.ign"
                subprocess.run(
                    ["butane", "--strict", "-o", str(extra_files_path), str(extra_files_bu)],
                    check=True,
                    capture_output=True,
                )
                test_bu += f"      - local: {extra_files_path.name}\n"

            if ssh_key_butane:
                ssh_key_bu = workdir / "test_ssh_key.bu"
                ssh_key_bu.write_text(ssh_key_butane)
                ssh_key_path = workdir / "test_ssh_key.ign"
                subprocess.run(
                    ["butane", "--strict", "-o", str(ssh_key_path), str(ssh_key_bu)],
                    check=True,
                    capture_output=True,
                )
                test_bu += f"      - local: {ssh_key_path.name}\n"

            test_bu_path = workdir / "test.bu"
            test_bu_path.write_text(test_bu)

            subprocess.run(
                [
                    "butane",
                    "--strict",
                    "-d", str(workdir),
                    "-o", str(output),
                    str(test_bu_path),
                ],
                check=True,
                capture_output=True,
            )
        except subprocess.CalledProcessError as e:
            print(f"Error occurred while running butane: {e.stderr.decode()}")
            # Keep the temporary directory for debugging
            print(f"Temporary directory retained at: {workdir}")
            raise  # bare raise preserves the original traceback
        else:
            # Clean up the temporary directory if it still exists
            if workdir.exists():
                shutil.rmtree(workdir)

        return output
||||
|
|
||||
|
class FCOSVirtualMachine:
    """Manages a Fedora CoreOS KVM virtual machine for end-to-end testing.

    All public methods are synchronous and raise on failure. The caller is
    responsible for calling ``destroy()`` (typically from a pytest fixture
    teardown).
    """

    def __init__(
        self,
        cookbook_name: str,
        instance_name: str,
        keep: bool = False,
        ignition: FCOSIgnition | None = None,
        virtiofs_dirs: list[tuple[Path, str]] | None = None,
        vm_config: tuple[int, int, int, int] = (4096, 2, 50, 100),
    ) -> None:
        """
        Args:
            cookbook_name: Short identifier appended to "fcos-test-" to form the
                libvirt domain name. Keep it unique across parallel tests.
            instance_name: Short identifier appended to the domain name to allow
                multiple VMs for the same cookbook.
            keep: If True, the VM and its associated resources will not be
                automatically destroyed on teardown. Useful for debugging.
            ignition: FCOSIgnition instance to build the Ignition (.ign) file.
            virtiofs_dirs: Host directories and virtiofs target directories that
                will be exposed inside the VM. Defaults to no shared directories.
            vm_config: VM configuration as (memory in MB, vCPUs, root disk size
                in GB, /var disk size in GB).
        """
        if keep:
            # Stable "-dev" suffix lets a developer re-attach to the same VM.
            self.vm_name = f"fcos-test-{cookbook_name}-{instance_name}-dev"
        else:
            # PID suffix keeps concurrent pytest runs from colliding.
            self.vm_name = f"fcos-test-{cookbook_name}-{instance_name}-{os.getpid()}"
        self.ignition = ignition or FCOSIgnition()
        # FIX: the original signature used a mutable default argument ([]),
        # which is shared across all instances; normalize from None instead.
        self.virtiofs_dirs = virtiofs_dirs if virtiofs_dirs is not None else []
        self.vm_config = vm_config
        self._images_dir = LIBVIRT_IMAGES_DIR / self.vm_name
        self._ip: str | None = None

    # ------------------------------------------------------------------
    # Lifecycle
    # ------------------------------------------------------------------

    def exists(self) -> bool:
        """Return True if a libvirt domain with this VM's name already exists."""
        result = subprocess.run(
            ["virsh", "domstate", self.vm_name],
            capture_output=True,
        )
        return result.returncode == 0

    def create(self) -> None:
        """Create disk images and start the VM via virt-install."""
        self._images_dir.mkdir(parents=True, exist_ok=True)
        # Make sure every shared host directory exists before virt-install runs.
        for host_dir, _target_dir in self.virtiofs_dirs:
            Path(host_dir).mkdir(parents=True, exist_ok=True)

        ign_dest = self._images_dir / "fcos.ign"
        self.ignition.build(ign_dest)
        # qemu must be able to read the ignition file.
        ign_dest.chmod(0o644)

        (ram, vcpus, root_disk_size, var_disk_size) = self.vm_config

        # Root OS disk: copy the base image, then resize it.
        root_qcow2 = self._images_dir / "root.qcow2"
        shutil.copy(FCOS_BASE_IMAGE, root_qcow2)
        subprocess.run(
            ["qemu-img", "resize", "-f", "qcow2", str(root_qcow2), f"{root_disk_size}G"],
            check=True,
        )

        # Secondary disk for /var (keeps OS and data separate, matches common.mk).
        var_qcow2 = self._images_dir / "var.qcow2"
        subprocess.run(
            ["qemu-img", "create", "-f", "qcow2", str(var_qcow2), f"{var_disk_size}G"],
            check=True,
        )

        virtiofs_options = []
        for host_dir, target_dir in self.virtiofs_dirs:
            virtiofs_options += [
                f"--filesystem=type=mount,accessmode=passthrough,"
                f"driver.type=virtiofs,driver.queue=1024,"
                f"source.dir={host_dir},target.dir={target_dir}"
            ]

        subprocess.run(
            [
                "virt-install",
                f"--name={self.vm_name}",
                "--import",
                "--noautoconsole",
                f"--ram={ram}",
                f"--vcpus={vcpus}",
                "--os-variant=fedora-coreos-stable",
                f"--disk=path={root_qcow2},format=qcow2",
                f"--disk=path={var_qcow2},format=qcow2",
                f"--qemu-commandline=-fw_cfg name=opt/com.coreos/config,file={ign_dest}",
                "--network=network=default,model=virtio",
                "--console=pty,target.type=virtio",
                "--serial=pty",
                "--graphics=none",
                "--boot=uefi",
                # virtiofs requires shared memory backing.
                "--memorybacking=access.mode=shared,source.type=memfd",
            ] + virtiofs_options,
            check=True,
        )

    def destroy(self) -> None:
        """Forcefully stop and delete the VM and all associated disk images."""
        # Best effort: ignore failures if the domain is already gone.
        subprocess.run(["virsh", "destroy", self.vm_name], capture_output=True)
        subprocess.run(
            ["virsh", "undefine", self.vm_name, "--nvram"],
            capture_output=True,
        )
        if self._images_dir.exists():
            shutil.rmtree(self._images_dir)

    # ------------------------------------------------------------------
    # Readiness polling
    # ------------------------------------------------------------------

    def get_ip(self) -> str | None:
        """Return the VM's primary IPv4 address reported by virsh, or None."""
        result = subprocess.run(
            ["virsh", "domifaddr", self.vm_name],
            capture_output=True,
            text=True,
        )
        if result.returncode != 0:
            return None
        match = re.search(r"(\d+\.\d+\.\d+\.\d+)", result.stdout)
        return match.group(1) if match else None

    @property
    def ip(self) -> str:
        """Cached IP address; raises RuntimeError if the VM has none yet."""
        if self._ip is None:
            self._ip = self.get_ip()
            if self._ip is None:
                raise RuntimeError(f"VM {self.vm_name!r} has no IP address yet")
        return self._ip

    def wait_ssh(self, ssh_key: Path, timeout: int = 300) -> str:
        """Block until SSH is reachable. Returns the IP address.

        Polls every 5 seconds until ``timeout`` seconds have elapsed.

        Raises:
            TimeoutError: if SSH is not reachable within ``timeout`` seconds.
        """
        deadline = time.monotonic() + timeout
        while time.monotonic() < deadline:
            ip = self.get_ip()
            if ip:
                try:
                    result = subprocess.run(
                        [
                            "ssh",
                            "-i", str(ssh_key),
                            "-o", "StrictHostKeyChecking=no",
                            "-o", "UserKnownHostsFile=/dev/null",
                            "-o", "ConnectTimeout=5",
                            "-o", "BatchMode=yes",
                            f"root@{ip}",
                            "true",
                        ],
                        capture_output=True,
                        timeout=10,
                    )
                    if result.returncode == 0:
                        self._ip = ip
                        return ip
                except subprocess.TimeoutExpired:
                    # ssh hung — treat like "not ready yet" and retry.
                    pass
            time.sleep(5)
        raise TimeoutError(
            f"VM {self.vm_name!r} did not become SSH-ready within {timeout}s"
        )
||||
@ -0,0 +1,291 @@ |
|||||
|
import socket |
||||
|
import json |
||||
|
import time |
||||
|
|
||||
|
class TestQuadlet:
    """
    Run common tests for Quadlet cookbooks.

    Subclasses declare the expected state of a deployed cookbook through the
    ``expected_*`` class attributes; the ``test_*`` methods (collected by
    pytest) then verify that state against the live host/VM fixtures.

    All public methods are synchronous and raise on failure.
    """

    expected_services : list[dict[str, str | bool]] = [
        # Example:
        # { "name": "postgresql.service", "state": "active", "masked": False, "enabled": True, "exists": True },
    ]
    """
    Expected state of systemd services. Each dict must contain a "name" field with the service name, and may optionally contain:
    - "state": one of "active", "inactive", "failed" (optional)
    - "masked": boolean (optional)
    - "enabled": boolean (optional)
    - "exists": boolean (optional)
    Optional fields are not checked if missing.
    If "exists" is False, no other fields are checked.
    """

    expected_sockets : list[dict[str, str]] = [
        # Example:
        # { "uri": "tcp://127.0.0.1:5432", "state": "listening" },
    ]
    """
    Expected state of sockets. Each dict must contain a "uri" field with the socket URI, and a "state" field with one of "listening" or "closed".
    """

    # all fields are mandatory
    expected_ports : list[dict[str, str | int]] = [
        # Example:
        # { "number": 5432, "protocol": "tcp", "state": "closed" },
        # { "number": 22, "protocol": "tcp", "state": "open" },
    ]
    """
    Expected state of TCP ports as seen from the machine running pytest. Each dict must contain:
    - "number": port number
    - "protocol": currently only "tcp" is supported
    - "state": one of "open" (accepting connections) or "closed"
    """

    expected_files : list[dict[str, str | int]] = [
        # Example:
        # { "path": "/var/lib/quadlets/postgresql", "type": "directory", "owner": "postgresql", "group": "itix-svc", "mode": 0o755 },
    ]
    """
    Expected files on the VM. Each dict must contain:
    - "path": full path to the file
    - "type": "directory", "file" or "none" (if the file is expected to not exist)
    Optional fields:
    - "owner": expected owner username
    - "group": expected group name
    - "mode": expected file mode as an integer (e.g. 0o755)
    If an optional field is missing, it is not checked.
    """

    expected_podman_images : list[dict[str, str]] = [
        # Example:
        # { "name": "docker.io/library/postgres", "tag": "15", "state": "present" },
    ]
    """
    Expected Podman images. Each dict must contain:
    - "name": image name (e.g. "docker.io/library/postgres")
    - "tag": image tag (e.g. "15")
    - "state": one of "present" or "absent"
    """

    expected_podman_containers : list[dict[str, str | dict[str, str]]] = [
        # Example:
        # { "name": "postgresql-server", "state": "present", "pid1": { "owner": "10004", "group": "10000", "commandline": "postgres -h 127.0.0.1" } },
    ]
    """
    Expected Podman containers. Each dict must contain:
    - "name": container name
    - "state": one of "present" or "absent"
    Optional field:
    - "pid1": dict with expected properties of the container's main process (PID 1). May contain:
      - "owner": expected uid (numeric) of the process as seen from outside the container (i.e. on the host)
      - "group": expected gid (numeric) of the process as seen from outside the container (i.e. on the host)
      - "commandline": expected command line of the process
    """

    expected_main_service : str | None = None
    """
    If not None, the name of the main service to wait for before running any tests.
    """

    expected_main_service_timeout : int = 120
    """
    If expected_main_service is set, the number of seconds to wait for it to become active before giving up and failing the tests.
    """

    def test_wait_for_main_service(self, fcos_host):
        """Wait for the expected main service to become active before running any other tests."""
        if self.expected_main_service is None:
            return
        self.wait_for_service(fcos_host, self.expected_main_service, self.expected_main_service_timeout)

    def wait_for_service(self, fcos_host, service: str, timeout: int = 120) -> None:
        """Block until *service* reaches the ``active`` state.

        Polls ``systemctl is-active`` every 5 seconds.

        Raises:
            TimeoutError: with the unit's status output, if the service did
                not become active within ``timeout`` seconds.
        """
        deadline = time.monotonic() + timeout
        while time.monotonic() < deadline:
            result = fcos_host.run(
                f"systemctl is-active {service}", check=False
            )
            if result.stdout.strip() == "active":
                return
            time.sleep(5)
        # Timed out: capture the unit's status to make the failure actionable.
        status = fcos_host.run(
            f"systemctl status {service} --no-pager", check=False
        )
        raise TimeoutError(
            f"Service {service!r} not active after {timeout}s:\n{status.stdout}"
        )

    def wait_for_unit_done(self, fcos_host, unit: str, timeout: int = 120) -> str:
        """
        Block until a oneshot service finishes (``inactive`` or ``failed``).

        Returns:
            The final state string: ``"inactive"`` on success, ``"failed"``
            on failure.

        Raises:
            TimeoutError: if the unit reached neither state in time.
        """
        deadline = time.monotonic() + timeout
        while time.monotonic() < deadline:
            result = fcos_host.run(
                f"systemctl is-active {unit}", check=False
            )
            state = result.stdout.strip()
            if state in ("inactive", "failed"):
                return state
            time.sleep(5)
        raise TimeoutError(
            f"Unit {unit!r} did not finish within {timeout}s"
        )

    def test_expected_services(self, fcos_host):
        """The expected systemd services must be present and in the expected state."""
        self.check_expected_services(fcos_host, self.expected_services)

    def check_expected_services(self, fcos_host, expected_services: list[dict[str, str | bool]]) -> None:
        """The expected systemd services must be present and in the expected state."""
        for svc in expected_services:
            service = fcos_host.service(svc["name"])
            if "exists" in svc:
                if svc["exists"]:
                    assert service.exists, f"Service {svc['name']} does not exist"
                else:
                    assert not service.exists, f"Service {svc['name']} exists but should not"
                    continue  # if the service shouldn't exist, no need to check other properties
            if "masked" in svc:
                if svc["masked"]:
                    assert service.is_masked, f"Service {svc['name']} is not masked"
                else:
                    assert not service.is_masked, f"Service {svc['name']} is masked but should not"
            if "enabled" in svc:
                if svc["enabled"]:
                    assert service.is_enabled, f"Service {svc['name']} is not enabled"
                else:
                    assert not service.is_enabled, f"Service {svc['name']} is enabled but should not"
            if "state" in svc:
                if svc["state"] == "active":
                    assert service.is_running, f"Service {svc['name']} is not running"
                elif svc["state"] == "inactive":
                    assert not service.is_running, f"Service {svc['name']} is running but expected to be inactive"
                elif svc["state"] == "failed":
                    result = fcos_host.run(f"systemctl is-failed {svc['name']}")
                    assert result.rc == 0, f"Service {svc['name']} is not in failed state"
                else:
                    raise ValueError(f"Invalid state for service {svc['name']}: {svc['state']}")

    def test_expected_sockets(self, fcos_host):
        """The expected sockets must be present and in the expected state."""
        self.check_expected_sockets(fcos_host, self.expected_sockets)

    def check_expected_sockets(self, fcos_host, expected_sockets: list[dict[str, str]]) -> None:
        """The expected sockets must be present and in the expected state."""
        for sock in expected_sockets:
            # Named to avoid shadowing the imported `socket` module.
            sock_resource = fcos_host.socket(sock["uri"])
            if sock["state"] == "listening":
                assert sock_resource.is_listening, f"Socket {sock['uri']} is not listening"
            elif sock["state"] == "closed":
                assert not sock_resource.is_listening, f"Socket {sock['uri']} is listening but expected to be closed"
            else:
                raise ValueError(f"Invalid state for socket {sock['uri']}: {sock['state']}")

    def test_expected_ports(self, fcos_vm):
        """The expected TCP ports must be in the expected state."""
        self.check_expected_ports(fcos_vm, self.expected_ports)

    def check_expected_ports(self, fcos_vm, expected_ports: list[dict[str, str | int]]) -> None:
        """The expected TCP ports must be in the expected state.

        Connection attempts are made from the machine running pytest, so
        this observes the VM's firewall as an external client would.
        """
        for port in expected_ports:
            assert port["protocol"] == "tcp", f"Unsupported protocol {port['protocol']} for port {port['number']}"
            s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            s.settimeout(3)
            try:
                # connect_ex returns 0 on success instead of raising.
                connect_result = s.connect_ex((fcos_vm.ip, port["number"]))
            finally:
                # Close before asserting so a failed assertion cannot leak the socket.
                s.close()
            if port["state"] == "open":
                assert connect_result == 0, f"Port {port['number']} is NOT reachable from the host on {fcos_vm.ip}!"
            elif port["state"] == "closed":
                assert connect_result != 0, f"Port {port['number']} is reachable from the host on {fcos_vm.ip} but expected to be closed"
            else:
                raise ValueError(f"Invalid state for port {port['number']}/{port['protocol']}: {port['state']}")

    def test_expected_files(self, fcos_host):
        """The expected files must be in the expected state."""
        self.check_expected_files(fcos_host, self.expected_files)

    def check_expected_files(self, fcos_host, expected_files: list[dict[str, str | int]]) -> None:
        """The expected files must be in the expected state."""
        for f in expected_files:
            file = fcos_host.file(f["path"])
            if f["type"] == "directory":
                assert file.is_directory, f"Expected {f['path']} to be a directory"
            elif f["type"] == "file":
                assert file.is_file, f"Expected {f['path']} to be a regular file"
            elif f["type"] == "none":
                assert not file.exists, f"Expected {f['path']} to not exist"
                continue  # if the file shouldn't exist, no need to check other properties
            else:
                raise ValueError(f"Invalid type for expected file {f['path']}: {f['type']}")

            if "owner" in f:
                assert file.user == f["owner"], f"Expected {f['path']} to be owned by {f['owner']}, but got {file.user}"
            if "group" in f:
                assert file.group == f["group"], f"Expected {f['path']} to belong to group {f['group']}, but got {file.group}"
            if "mode" in f:
                assert file.mode == f["mode"], f"Expected {f['path']} to have mode {oct(f['mode'])}, but got {oct(file.mode)}"

    def test_expected_podman_images(self, fcos_host):
        """The expected Podman images must be in the expected state."""
        self.check_expected_podman_images(fcos_host, self.expected_podman_images)

    def check_expected_podman_images(self, fcos_host, expected_podman_images: list[dict[str, str]]) -> None:
        """The expected Podman images must be in the expected state."""
        for img in expected_podman_images:
            # `podman image exists` exits 0 iff the image is in local storage.
            result = fcos_host.run(f"podman image exists {img['name']}:{img['tag']}")

            if img["state"] == "present":
                assert result.rc == 0, f"Podman image {img['name']}:{img['tag']} does not exist"
            elif img["state"] == "absent":
                assert result.rc != 0, f"Podman image {img['name']}:{img['tag']} is present but expected to be absent"
            else:
                raise ValueError(f"Invalid state for Podman image {img['name']}:{img['tag']}: {img['state']}")

    def test_expected_podman_containers(self, fcos_host):
        """The expected Podman containers must be in the expected state."""
        self.check_expected_podman_containers(fcos_host, self.expected_podman_containers)

    def check_expected_podman_containers(self, fcos_host, expected_podman_containers: list[dict[str, str | dict[str, str]]]) -> None:
        """The expected Podman containers must be in the expected state.

        When a "pid1" spec is given and the container is present, the
        container's main process is looked up on the host via ``ps`` and its
        uid/gid/command line are compared against the spec.
        """
        for container in expected_podman_containers:
            result = fcos_host.run(f"podman container inspect {container['name']}")
            if container["state"] == "present":
                assert result.rc == 0, f"Podman container {container['name']} does not exist"
            elif container["state"] == "absent":
                assert result.rc != 0, f"Podman container {container['name']} is present but expected to be absent"
            else:
                raise ValueError(f"Invalid state for Podman container {container['name']}: {container['state']}")

            if result.rc == 0 and "pid1" in container:
                try:
                    result_json = json.loads(result.stdout)[0]
                except json.JSONDecodeError as e:
                    # Report the raw stdout here: result_json is not bound when
                    # parsing fails (referencing it raised a NameError before).
                    raise AssertionError(f"Failed to parse JSON output from podman inspect for container {container['name']}: {e}\nOutput was: {result.stdout}") from e
                pid = result_json["State"]["Pid"]
                # Numeric uid/gid (-n) as seen from the host, plus the command line.
                result = fcos_host.run(f"ps axn -o pid,user,group,state,command -q {pid} --no-header")
                if result.rc != 0:
                    raise AssertionError(f"Failed to inspect PID 1 of container {container['name']} with ps: rc = {result.rc}")
                # maxsplit=4 keeps the full command line (with spaces) in one field.
                pid1_info = result.stdout.strip().split(None, 4)
                if len(pid1_info) < 5:
                    raise AssertionError(f"Unexpected output from ps for PID 1 of container {container['name']}: {result.stdout}")
                pid1_pid = pid1_info[0]
                pid1_user = pid1_info[1]
                pid1_group = pid1_info[2]
                pid1_commandline = pid1_info[4]
                assert int(pid1_pid) == pid, f"Expected PID {pid} for container {container['name']} main process, but got {pid1_pid}"
                if "owner" in container["pid1"]:
                    assert pid1_user == container["pid1"]["owner"], f"Expected PID 1 of container {container['name']} to be owned by {container['pid1']['owner']}, but got {pid1_user}"
                if "group" in container["pid1"]:
                    assert pid1_group == container["pid1"]["group"], f"Expected PID 1 of container {container['name']} to belong to group {container['pid1']['group']}, but got {pid1_group}"
                if "commandline" in container["pid1"]:
                    assert pid1_commandline == container["pid1"]["commandline"], f"Expected PID 1 of container {container['name']} to have command line {container['pid1']['commandline']}, but got {pid1_commandline}"
|
||||
|
|
||||
@ -1,384 +0,0 @@ |
|||||
"""Fedora CoreOS VM lifecycle helpers for end-to-end testing. |
|
||||
|
|
||||
Requires running as root (virt-install, virsh, qemu-img need root privileges). |
|
||||
|
|
||||
Typical usage: |
|
||||
vm = FCOSVirtualMachine( |
|
||||
name="postgresql-abc123", |
|
||||
ignition_file=Path("/tmp/fcos-test.ign"), |
|
||||
virtiofs_dir=Path("/srv/fcos-test-postgresql-abc123"), |
|
||||
) |
|
||||
vm.create() |
|
||||
vm.wait_ssh(ssh_key=key_path) |
|
||||
vm.wait_for_service("postgresql.target", ssh_key=key_path) |
|
||||
# ... run tests ... |
|
||||
vm.destroy() |
|
||||
""" |
|
||||
|
|
||||
import base64 |
|
||||
import re |
|
||||
import shutil |
|
||||
import subprocess |
|
||||
import tempfile |
|
||||
import textwrap |
|
||||
import time |
|
||||
from pathlib import Path |
|
||||
|
|
||||
LIBVIRT_IMAGES_DIR = Path("/var/lib/libvirt/images") |
|
||||
FCOS_BASE_IMAGE = LIBVIRT_IMAGES_DIR / "library" / "fedora-coreos.qcow2" |
|
||||
|
|
||||
# Butane spec version — must match the project convention. |
|
||||
BUTANE_VERSION = "1.4.0" |
|
||||
|
|
||||
def ensure_fcos_ign(cookbook_dir: Path) -> Path:
    """Locate the cookbook's fcos.ign, compiling it with ``make butane`` when missing.

    Args:
        cookbook_dir: Directory containing the cookbook's Makefile.

    Returns:
        Path to ``<cookbook_dir>/fcos.ign``.
    """
    target = cookbook_dir / "fcos.ign"
    if target.exists():
        return target
    # Delegate the build to the cookbook's own Makefile target.
    subprocess.run(["make", "-C", str(cookbook_dir), "butane"], check=True)
    return target
|
||||
|
|
||||
|
|
||||
def build_test_ignition(
    base_ignition: Path,
    ssh_pubkey: str,
    output: Path,
    config_env_overrides: dict[str, str] | None = None,
    extra_files: dict[str, tuple[str, int]] | None = None,
) -> Path:
    """Build a test ignition file by overlaying the cookbook's fcos.ign.

    The overlay:
    - Merges the base cookbook ignition (fcos.ign).
    - Adds the test SSH public key to the root user so the test runner can
      SSH in (FCOS allows root login with keys via PermitRootLogin
      prohibit-password).
    - Optionally patches /etc/quadlets/postgresql/config.env via
      ``config_env_overrides`` (merged on top of whatever the base ignition
      already sets).
    - Optionally injects arbitrary extra files via ``extra_files``:
      ``{"/path/on/vm": ("file content", 0o644)}``.

    Args:
        base_ignition: Path to the pre-built fcos.ign for the cookbook.
        ssh_pubkey: Ed25519 public key string to inject for root.
        output: Destination path for the compiled test ignition.
        config_env_overrides: Key/value pairs to override in config.env.
            The full config.env is re-written with these values merged on
            top of the defaults from the base ignition.
        extra_files: Additional files to inject into the VM image.

    Returns:
        ``output`` path.

    Raises:
        subprocess.CalledProcessError: if the ``butane`` compile fails.
    """
    with tempfile.TemporaryDirectory() as _tmpdir:
        d = Path(_tmpdir)

        # butane resolves "local:" references relative to the directory passed
        # via -d; copy the base ignition there.
        shutil.copy(base_ignition, d / "base.ign")

        # Build the storage.files section of the overlay.
        storage_section = _build_storage_section(config_env_overrides, extra_files)

        # NOTE(review): the heredoc below must dedent to column 0 so that the
        # optional storage_section (emitted at column 0) concatenates into
        # valid YAML — confirm indentation against the butane config spec.
        overlay_bu = textwrap.dedent(f"""\
            variant: fcos
            version: {BUTANE_VERSION}
            ignition:
              config:
                merge:
                  - local: base.ign
            passwd:
              users:
                - name: root
                  ssh_authorized_keys:
                    - {ssh_pubkey}
            systemd:
              units:
                # Disable & mask zincati to avoid reboots during testing.
                - name: zincati.service
                  enabled: false
                  mask: true
            """)

        if storage_section:
            overlay_bu += storage_section

        overlay_bu_path = d / "test-overlay.bu"
        overlay_bu_path.write_text(overlay_bu)

        # Compile the overlay; --strict turns butane warnings into errors.
        subprocess.run(
            [
                "butane",
                "--strict",
                "-d", str(d),
                "-o", str(output),
                str(overlay_bu_path),
            ],
            check=True,
        )

    return output
|
||||
|
|
||||
|
|
||||
def _build_storage_section( |
|
||||
config_env_overrides: dict[str, str] | None, |
|
||||
extra_files: dict[str, tuple[str, int]] | None, |
|
||||
) -> str: |
|
||||
"""Return a Butane ``storage:`` YAML block (or empty string if nothing to inject).""" |
|
||||
files = [] |
|
||||
|
|
||||
if config_env_overrides: |
|
||||
content = "\n".join(f"{k}={v}" for k, v in config_env_overrides.items()) + "\n" |
|
||||
files.append( |
|
||||
_butane_file("/etc/quadlets/postgresql/config.env", content, 0o600) |
|
||||
) |
|
||||
|
|
||||
if extra_files: |
|
||||
for path, (content, mode) in extra_files.items(): |
|
||||
files.append(_butane_file(path, content, mode)) |
|
||||
|
|
||||
if not files: |
|
||||
return "" |
|
||||
|
|
||||
joined = "\n".join(files) |
|
||||
return f"storage:\n files:\n{joined}\n" |
|
||||
|
|
||||
|
|
||||
def _butane_file(path: str, content: str, mode: int) -> str: |
|
||||
"""Return a Butane file entry using a base64 data URI (avoids YAML quoting).""" |
|
||||
b64 = base64.b64encode(content.encode()).decode() |
|
||||
return ( |
|
||||
f" - path: {path}\n" |
|
||||
f" mode: {mode}\n" |
|
||||
f" contents:\n" |
|
||||
f' source: "data:text/plain;base64,{b64}"\n' |
|
||||
) |
|
||||
|
|
||||
|
|
||||
class FCOSVirtualMachine:
    """Manages a Fedora CoreOS KVM virtual machine for end-to-end testing.

    All public methods are synchronous and raise on failure. The caller is
    responsible for calling ``destroy()`` (typically from a pytest fixture
    teardown).
    """

    def __init__(self, name: str, ignition_file: Path, virtiofs_dir: Path) -> None:
        """
        Args:
            name: Short identifier appended to "fcos-test-" to form the
                libvirt domain name. Keep it unique across parallel tests.
            ignition_file: Path to the compiled Ignition (.ign) file.
            virtiofs_dir: Host directory that will be exposed inside the VM
                at /var/lib/virtiofs/data via VirtioFS.
        """
        self.name = name
        self.vm_name = f"fcos-test-{name}"
        self.ignition_file = Path(ignition_file)
        self.virtiofs_dir = Path(virtiofs_dir)
        # Per-VM directory under LIBVIRT_IMAGES_DIR holding the copied
        # ignition file and this VM's disk images.
        self._images_dir = LIBVIRT_IMAGES_DIR / self.vm_name
        # Cached IPv4 address; filled in lazily by `ip` / `wait_ssh`.
        self._ip: str | None = None

    # ------------------------------------------------------------------
    # Lifecycle
    # ------------------------------------------------------------------

    def create(self) -> None:
        """Create disk images and start the VM via virt-install.

        Raises:
            subprocess.CalledProcessError: if qemu-img or virt-install fails.
        """
        self._images_dir.mkdir(parents=True, exist_ok=True)
        self.virtiofs_dir.mkdir(parents=True, exist_ok=True)

        ign_dest = self._images_dir / "fcos.ign"
        shutil.copy(self.ignition_file, ign_dest)
        # World-readable — presumably so the qemu process can read it via
        # fw_cfg; TODO confirm.
        ign_dest.chmod(0o644)

        # Root OS disk: copy from the shared base QCOW2 image.
        root_qcow2 = self._images_dir / "root.qcow2"
        shutil.copy(FCOS_BASE_IMAGE, root_qcow2)

        # Secondary disk for /var (keeps OS and data separate, matches common.mk).
        var_qcow2 = self._images_dir / "var.qcow2"
        subprocess.run(
            ["qemu-img", "create", "-f", "qcow2", str(var_qcow2), "100G"],
            check=True,
        )

        subprocess.run(
            [
                "virt-install",
                f"--name={self.vm_name}",
                "--import",
                "--noautoconsole",
                "--ram=4096",
                "--vcpus=2",
                "--os-variant=fedora-coreos-stable",
                f"--disk=path={root_qcow2},format=qcow2,size=50",
                f"--disk=path={var_qcow2},format=qcow2",
                # Hand the Ignition config to FCOS through the qemu fw_cfg knob.
                f"--qemu-commandline=-fw_cfg name=opt/com.coreos/config,file={ign_dest}",
                "--network=network=default,model=virtio",
                "--console=pty,target.type=virtio",
                "--serial=pty",
                "--graphics=none",
                "--boot=uefi",
                # Shared memory backing is required for the virtiofs filesystem below.
                "--memorybacking=access.mode=shared,source.type=memfd",
                (
                    f"--filesystem=type=mount,accessmode=passthrough,"
                    f"driver.type=virtiofs,driver.queue=1024,"
                    f"source.dir={self.virtiofs_dir},target.dir=data"
                ),
            ],
            check=True,
        )

    def destroy(self) -> None:
        """Forcefully stop and delete the VM and all associated disk images."""
        # Best-effort teardown: no check=True, output captured and ignored,
        # so an already-stopped or never-created VM does not raise.
        subprocess.run(["virsh", "destroy", self.vm_name], capture_output=True)
        subprocess.run(
            ["virsh", "undefine", self.vm_name, "--nvram"],
            capture_output=True,
        )
        if self._images_dir.exists():
            shutil.rmtree(self._images_dir)
        if self.virtiofs_dir.exists():
            shutil.rmtree(self.virtiofs_dir)

    # ------------------------------------------------------------------
    # Readiness polling
    # ------------------------------------------------------------------

    def get_ip(self) -> str | None:
        """Return the VM's primary IPv4 address reported by virsh, or None."""
        result = subprocess.run(
            ["virsh", "domifaddr", self.vm_name],
            capture_output=True,
            text=True,
        )
        if result.returncode != 0:
            return None
        # Grab the first IPv4-looking token in the domifaddr output.
        match = re.search(r"(\d+\.\d+\.\d+\.\d+)", result.stdout)
        return match.group(1) if match else None

    @property
    def ip(self) -> str:
        """The VM's IPv4 address, resolved once and cached.

        Raises:
            RuntimeError: if virsh has not reported an address yet.
        """
        if self._ip is None:
            self._ip = self.get_ip()
        if self._ip is None:
            raise RuntimeError(f"VM {self.vm_name!r} has no IP address yet")
        return self._ip

    def wait_ssh(self, ssh_key: Path, timeout: int = 300) -> str:
        """Block until SSH is reachable. Returns the IP address.

        Polls every 5 seconds until ``timeout`` seconds have elapsed.
        """
        deadline = time.monotonic() + timeout
        while time.monotonic() < deadline:
            ip = self.get_ip()
            if ip:
                try:
                    # Each probe is capped at 10s; BatchMode forbids prompts.
                    result = subprocess.run(
                        [
                            "ssh",
                            "-i", str(ssh_key),
                            "-o", "StrictHostKeyChecking=no",
                            "-o", "UserKnownHostsFile=/dev/null",
                            "-o", "ConnectTimeout=5",
                            "-o", "BatchMode=yes",
                            f"root@{ip}",
                            "true",
                        ],
                        capture_output=True,
                        timeout=10,
                    )
                    if result.returncode == 0:
                        self._ip = ip
                        return ip
                except subprocess.TimeoutExpired:
                    # A hung probe just means "not ready yet"; retry.
                    pass
            time.sleep(5)
        raise TimeoutError(
            f"VM {self.vm_name!r} did not become SSH-ready within {timeout}s"
        )

    def wait_for_service(
        self, service: str, ssh_key: Path, timeout: int = 120
    ) -> None:
        """Block until *service* reaches the ``active`` state."""
        deadline = time.monotonic() + timeout
        while time.monotonic() < deadline:
            result = self.ssh_run(
                f"systemctl is-active {service}", ssh_key, check=False
            )
            if result.stdout.strip() == "active":
                return
            time.sleep(5)
        # Timed out: include the unit's status output in the error.
        status = self.ssh_run(
            f"systemctl status {service} --no-pager", ssh_key, check=False
        )
        raise TimeoutError(
            f"Service {service!r} not active after {timeout}s:\n{status.stdout}"
        )

    def wait_for_unit_done(
        self, service: str, ssh_key: Path, timeout: int = 120
    ) -> str:
        """Block until a oneshot service finishes (``inactive`` or ``failed``).

        Returns:
            The final state string: ``"inactive"`` on success, ``"failed"``
            on failure.
        """
        deadline = time.monotonic() + timeout
        while time.monotonic() < deadline:
            result = self.ssh_run(
                f"systemctl is-active {service}", ssh_key, check=False
            )
            state = result.stdout.strip()
            if state in ("inactive", "failed"):
                return state
            time.sleep(5)
        raise TimeoutError(
            f"Service {service!r} did not finish within {timeout}s"
        )

    # ------------------------------------------------------------------
    # Remote execution
    # ------------------------------------------------------------------

    def ssh_run(
        self,
        command: str,
        ssh_key: Path,
        check: bool = True,
    ) -> subprocess.CompletedProcess:
        """Run a shell command in the VM via SSH.

        Args:
            command: Shell command string passed to the remote bash.
            ssh_key: Path to the private key used for authentication.
            check: If True (default), raise RuntimeError on non-zero exit.

        Returns:
            CompletedProcess with stdout/stderr as text.
        """
        result = subprocess.run(
            [
                "ssh",
                "-i", str(ssh_key),
                "-o", "StrictHostKeyChecking=no",
                "-o", "UserKnownHostsFile=/dev/null",
                f"root@{self.ip}",
                command,
            ],
            capture_output=True,
            text=True,
        )
        if check and result.returncode != 0:
            raise RuntimeError(
                f"SSH command failed (exit {result.returncode}): {command!r}\n"
                f"stdout: {result.stdout}\nstderr: {result.stderr}"
            )
        return result
||||
Loading…
Reference in new issue