Browse Source

first draft of traefik integration tests with ACME!

main
Nicolas Massé 2 weeks ago
parent
commit
cfe2a175e9
  1. 256
      conftest.py
  2. 25
      cookbooks/traefik/README.md
  3. 11
      cookbooks/traefik/config/examples/traefik.yaml
  4. 227
      cookbooks/traefik/tests/test_01_install.py
  5. 2
      cookbooks/traefik/traefik.target
  6. 2
      pyproject.toml
  7. 28
      scripts/cloud-init.dev.yaml
  8. 16
      scripts/common.mk
  9. 64
      tests/dns_server.py
  10. 16
      tests/fcos_vm.py

256
conftest.py

@ -5,27 +5,92 @@ Prerequisites:
- The Fedora CoreOS base QCOW2 image must be present at /var/lib/libvirt/images/library/fedora-coreos.qcow2.
Run ``coreos-installer download -p qemu -f qcow2.xz -d -C /var/lib/libvirt/images/library/`` to fetch it.
- fcos-test.ign for the cookbook is built on demand by ``make butane`` if it is missing.
- The rootful Podman socket must be enabled (systemctl enable --now podman.socket) for the
pebble_acme_server fixture to start a Pebble ACME container via Testcontainers.
"""
import subprocess
from pathlib import Path
import shutil
import json
import os
import re
import shutil
import socket as _socket
import subprocess
import sys
import textwrap
import time
from pathlib import Path
import urllib.request
import ssl
import pytest
import testinfra
import textwrap
from fcos_vm import FCOSVirtualMachine, ensure_fcos_ign # noqa: E402
from dns_server import DNSServer # noqa: E402
# Persistent directory used when --keep-vm is active.
# Persistent directory used when --keep is active.
_KEEP_VM_CACHE_DIR = Path.home() / ".cache" / "pytest"
# You can pass --keep-vm on the command line to keep the test VM alive after the test run and reuse it on the next run.
@pytest.fixture(scope="session")
def libvirt_network() -> str:
    """Name of the libvirt network that backs the test VMs."""
    return "default"
@pytest.fixture(scope="session")
def libvirt_network_if(libvirt_network: str) -> str:
    """Bridge interface backing the libvirt network (e.g. ``virbr0``)."""
    # Query virsh for the network details and extract the bridge name
    # from the "Bridge:  virbrX" line of its output.
    proc = subprocess.run(
        ["virsh", "net-info", libvirt_network],
        capture_output=True,
        text=True,
        check=True,
    )
    bridge = re.search(r"Bridge:\s+(\S+)", proc.stdout)
    if bridge is None:
        raise RuntimeError(f"Could not find interface for libvirt network '{libvirt_network}'")
    return bridge.group(1)
@pytest.fixture(scope="session")
def pebble_server_ip(libvirt_network_if: str) -> str:
    """IP address at which the test VMs can reach the Pebble ACME server."""
    return _get_libvirt_bridge_ip(libvirt_network_if)
@pytest.fixture(scope="session")
def top_level_domain() -> str:
    """DNS domain under which all test hostnames are registered."""
    return "pytest.example.test"
@pytest.fixture(scope="session")
def dns_server_ip(libvirt_network_if: str) -> str:
    """IP address of the DNS server, as reachable from the test VMs."""
    return _get_libvirt_bridge_ip(libvirt_network_if)
def _get_libvirt_bridge_ip(libvirt_network_if: str) -> str:
    """Return the IP of the host running pytest, as seen from the test VMs."""
    # `ip -j` emits JSON; only one device was requested, so the first
    # (and only) entry holds its address list.
    proc = subprocess.run(
        ["ip", "-4", "-j", "addr", "show", "scope", "global", "dev", libvirt_network_if],
        capture_output=True,
        text=True,
        check=True,
    )
    interfaces = json.loads(proc.stdout)
    if interfaces:
        addresses = interfaces[0].get("addr_info")
        if addresses:
            return addresses[0]["local"]
    raise RuntimeError(f"Could not find IP address for libvirt network interface '{libvirt_network_if}'")
def _wait_for_port(host: str, port: int, timeout: int = 30) -> None:
"""Wait for a TCP port to be open on the given host, or raise after timeout."""
timeout = 30
deadline = time.monotonic() + timeout
while time.monotonic() < deadline:
try:
with _socket.create_connection((host, port), timeout=1):
return
except OSError:
time.sleep(0.5)
raise TimeoutError(f"Port {host}:{port} not available after {timeout}s")
# You can pass --keep on the command line to keep the test VM alive after the test run and reuse it on the next run.
# Speeds up iteration: the VM is created once and never destroyed. The SSH key is stored persistently in ~/.cache/pytest.
def pytest_addoption(parser: pytest.Parser) -> None:
parser.addoption(
"--keep-vm",
"--keep",
action="store_true",
default=False,
help=(
@ -37,22 +102,153 @@ def pytest_addoption(parser: pytest.Parser) -> None:
)
@pytest.fixture(scope="session")
def keep_vm(request: pytest.FixtureRequest) -> bool:
"""True when --keep-vm was passed on the command line."""
return request.config.getoption("--keep-vm")
def keep(request: pytest.FixtureRequest) -> bool:
"""True when --keep was passed on the command line."""
return request.config.getoption("--keep")
@pytest.fixture(scope="session")
def pebble_acme_server(tmp_path_factory: pytest.TempPathFactory, pebble_server_ip: str, dns_server_ip: str, keep: bool) -> dict:
    """Session-scoped Pebble ACME test server running in a Podman container.

    Pebble is configured to validate HTTP-01 challenges on standard ports
    (80/443) and binds to all host interfaces via host networking so it is
    reachable from the libvirt test VMs.

    The rootful Podman socket must be enabled before running the tests:
        systemctl enable --now podman.socket

    Yields a dict with:
    - directory_url : ACME directory URL (https://<bridge_ip>:14000/dir)
    - ca_cert       : Pebble root CA certificate (PEM string), used to
                      authenticate Pebble's own TLS endpoint.
    - ca_bundle     : intermediate + root issuing certificates fetched from
                      Pebble's management API, used to validate the
                      certificates Pebble issues during the tests.
    """
    from testcontainers.core.container import DockerContainer
    # Point Testcontainers at the rootful Podman socket and disable Ryuk
    # (Ryuk does not work reliably with Podman).
    os.environ.setdefault("DOCKER_HOST", "unix:///run/podman/podman.sock")
    os.environ.setdefault("TESTCONTAINERS_RYUK_DISABLED", "true")
    # With --keep the working directory is persistent so the generated keys
    # and certificates survive across runs; otherwise use a throw-away
    # pytest temp directory.
    if keep:
        pebble_dir = Path("/srv/pebble")
        pebble_dir.mkdir(parents=True, exist_ok=True)
    else:
        pebble_dir = tmp_path_factory.mktemp("pebble")
    # etc/ holds the Pebble configuration, var/ holds the TLS material;
    # both are bind-mounted into the container below.
    etc_dir = pebble_dir / "etc"
    etc_dir.mkdir(exist_ok=True)
    var_dir = pebble_dir / "var"
    var_dir.mkdir(exist_ok=True)
    ca_cert_path = var_dir / "ca.crt"
    ca_key_path = var_dir / "ca.key"
    server_key_path = var_dir / "server.key"
    server_cert_path = var_dir / "server.crt"
    # Generate a self-signed certificate for Pebble to use.
    # The certificate's CN must match the host IP visible from the VM for TLS to work.
    # The keys and certificates are reused across runs when --keep is set because those artefacts
    # are injected into the VM and must remain consistent for the VM to be reusable.
    if not (ca_cert_path.exists() and ca_key_path.exists()):
        subprocess.run(["openssl", "req", "-x509", "-newkey", "rsa:2048",
                        "-keyout", str(ca_key_path), "-out", str(ca_cert_path), "-days", "3650", "-noenc",
                        "-subj", "/CN=Pebble CA", "-addext", "basicConstraints=critical,CA:TRUE"], check=True, capture_output=True)
    if not (var_dir / "server.csr").exists():
        subprocess.run(["openssl", "req", "-newkey", "rsa:2048",
                        "-keyout", str(server_key_path), "-out", str(var_dir / "server.csr"), "-noenc",
                        "-subj", "/CN=localhost"], check=True, capture_output=True)
    if not server_cert_path.exists():
        # SAN extension: the server certificate must cover both localhost and
        # the bridge IP through which the VMs reach Pebble.
        (pebble_dir / "srv_ext.txt").write_text(f"basicConstraints=CA:FALSE\nsubjectAltName=IP:127.0.0.1,IP:{pebble_server_ip},DNS:localhost\n")
        subprocess.run(["openssl", "x509", "-req",
                        "-in", str(var_dir / "server.csr"), "-CA", str(ca_cert_path), "-CAkey", str(ca_key_path), "-CAcreateserial",
                        "-out", str(server_cert_path), "-days", "365", "-extfile", str(pebble_dir / "srv_ext.txt")], check=True, capture_output=True)
    # Write the Pebble configuration with standard challenge ports (80 / 443).
    config_file = etc_dir / "pebble-config.json"
    config_file.write_text(json.dumps({
        "pebble": {
            "listenAddress": "0.0.0.0:14000",
            "managementListenAddress": "0.0.0.0:15000",
            "certificate": "/test/certs/server.crt",
            "privateKey": "/test/certs/server.key",
            # Use standard ports to validate HTTP-01 & TLS-ALPN-01 challenges.
            "httpPort": 80,
            "httpsPort": 443,
            "externalAccountBindingRequired": False,
            "domainBlocklist": [],
        }
    }))
    # Host networking lets Pebble bind ports 80/443/14000/15000 directly on
    # the host, which the libvirt VMs can reach via the bridge IP.
    container = (
        DockerContainer("ghcr.io/letsencrypt/pebble:latest")
        .with_name("pebble-acme-server")
        .with_env("PEBBLE_VA_NOSLEEP", "1")
        .with_env("PEBBLE_WFE_NONCEREJECT", "0")
        .with_command(f"-config /test/config/pebble-config.json -dnsserver {dns_server_ip}:53")
        .with_volume_mapping(str(etc_dir), "/test/config", "ro,z")
        .with_volume_mapping(str(var_dir), "/test/certs", "ro,z")
        .with_kwargs(
            network_mode="host",
        )
    )
    with container:
        _wait_for_port(pebble_server_ip, 14000)
        # Fetch Pebble's (randomly generated) issuing certificates from the
        # management API. TLS verification is disabled here because that
        # endpoint uses the self-signed server certificate generated above.
        ctx = ssl.create_default_context()
        ctx.check_hostname = False
        ctx.verify_mode = ssl.CERT_NONE
        certs = {}
        for name, path in [("root", "roots/0"), ("intermediate", "intermediates/0")]:
            url = f"https://{pebble_server_ip}:15000/{path}"
            with urllib.request.urlopen(url, context=ctx) as resp:
                certs[name] = resp.read().decode()
        data = {
            # The directory URL is how ACME clients discover the available endpoints and must be provided to the tests.
            "directory_url": f"https://{pebble_server_ip}:14000/dir",
            # The CA certificate is needed by the tests to authenticate Pebble's TLS endpoint.
            "ca_cert": ca_cert_path.read_text(),
            # The CA bundle to trust the generated certificates.
            "ca_bundle": certs["intermediate"] + certs["root"],
        }
        yield data  # <-- tests run here with access to the Pebble ACME server
@pytest.fixture(scope="session")
def dns_server(libvirt_network: str, top_level_domain: str, keep: bool) -> DNSServer:
    """Session-scoped DNS server manager for the libvirt network.

    Test hostnames are registered under ``top_level_domain``. With --keep
    the records are made persistent; otherwise ``cleanup()`` resets the
    network on teardown.
    """
    srv = DNSServer(network=libvirt_network, persistent=keep)
    srv.set_domain(top_level_domain)
    yield srv  # <-- tests run here with access to the DNS server manager
    srv.cleanup()
@pytest.fixture(scope="module")
def fcos_extra_files(request: pytest.FixtureRequest) -> dict:
    """Extra files to inject into the FCOS VM image.

    Defaults to the ``PYTEST_FCOS_EXTRA_FILES`` module-level dict (backward
    compatible with existing test modules). Override this fixture in a test
    module to provide dynamic content whose values depend on other fixtures
    (e.g. files that embed the Pebble ACME server URL or CA certificate).
    """
    # Fall back to an empty dict when the test module defines no extra files.
    return getattr(request.module, "PYTEST_FCOS_EXTRA_FILES", {})
@pytest.fixture(scope="session")
def test_ssh_key(
keep_vm: bool,
keep: bool,
tmp_path_factory: pytest.TempPathFactory,
) -> Path:
"""SSH key pair for VM access.
When --keep-vm is set the key is stored persistently so that subsequent
When --keep is set the key is stored persistently so that subsequent
runs can re-use the same VM without re-injecting a new key.
"""
if keep_vm:
if keep:
key_dir = _KEEP_VM_CACHE_DIR
key_dir.mkdir(parents=True, exist_ok=True)
key_path = key_dir / "id_ed25519"
@ -82,15 +278,15 @@ def test_ssh_pubkey(test_ssh_key: Path) -> str:
# The virtiofs is where important and persistent data are stored.
# We keep it for the entire test session.
@pytest.fixture(scope="package")
def virtiofs_dirs(request, keep_vm: bool) -> list[tuple[Path, str]]:
def virtiofs_dirs(request, keep: bool) -> list[tuple[Path, str]]:
"""VirtioFS host directories for the default test VM.
With --keep-vm the directories are persistent so the VM can be reused across
With --keep the directories are persistent so the VM can be reused across
test runs. Without it unique per-process paths are used and cleaned up
on teardown.
"""
cookbook_dir = Path(request.path).parent.parent
if keep_vm:
if keep:
d = Path("/srv") / f"fcos-test-{cookbook_dir.name}-dev"
else:
d = Path("/srv") / f"fcos-test-{cookbook_dir.name}-{os.getpid()}"
@ -98,7 +294,7 @@ def virtiofs_dirs(request, keep_vm: bool) -> list[tuple[Path, str]]:
yield [(d, "data",)] # <-- tests run here with access to the virtiofs directories
if not keep_vm and d.exists():
if not keep and d.exists():
shutil.rmtree(d)
# However, the VM itself is recreated for each test module to ensure a clean state.
@ -120,20 +316,28 @@ def fcos_vm_config() -> tuple[int, int, int, int]:
"""Default VM configuration (memory in MB, vCPUs, root disk size in GB, /var disk size in GB)."""
return (4096, 2, 50, 100) # (memory in MB, vCPUs, disk size for / and /var in GB)
# PostgreSQL VM are kept for the duration of a test module, backed with a persistent Virtiofs directory.
@pytest.fixture(scope="module")
def dns_names() -> list[str]:
    """DNS names that must resolve to the test VM (e.g. for ACME challenges).

    Empty by default; test modules override this fixture to register names.
    """
    return []
# Test VM are kept for the duration of a test module, backed with a persistent Virtiofs directory.
@pytest.fixture(scope="module")
def fcos_vm(
request, # Fixture that provides information about the requesting test function, class or module.
keep_vm: bool, # Fixture passed from command line option --keep-vm to determine whether to keep the VM after tests for debugging purposes.
keep: bool, # Fixture passed from command line option --keep to determine whether to keep the VM after tests for debugging purposes.
fcos_vm_config: tuple[int, int, int, int], # Fixture that provides the VM configuration (memory in MB, vCPUs, root disk size in GB, /var disk size in GB).
test_ssh_key: Path, # Fixture that provides the path to the SSH private key to connect to the VM.
test_ssh_pubkey: str, # Fixture that provides the content of the SSH public key to inject into the VM for SSH access.
virtiofs_dirs: list[tuple[Path, str]], # Fixture that provides a list of tuples containing host directories and their corresponding target directories in the VM to be exposed via VirtioFS.
tmp_path_factory: pytest.TempPathFactory, # Fixture that provides a factory for creating temporary directories.
fcos_extra_files: dict, # Fixture that provides extra files to inject into the FCOS VM image (overridable per module).
dns_server: DNSServer, # Fixture that provides a DNS server manager to configure DNS entries for the test VMs.
dns_names: list[str], # Fixture that provides a list of DNS names to be resolved by the VM (e.g. for ACME challenges)
) -> FCOSVirtualMachine:
"""Running CoreOS VM with Quadlets installed.
With --keep-vm the VM is reused across runs: it is created only if it
With --keep the VM is reused across runs: it is created only if it
does not already exist and is never destroyed on teardown.
"""
module_name = request.module.__name__.split(".")[-1].replace("test_", "").replace("_", "-")
@ -142,22 +346,24 @@ def fcos_vm(
vm = FCOSVirtualMachine(
cookbook_name=cookbook_dir.name,
instance_name=module_name,
keep=keep_vm,
keep=keep,
virtiofs_dirs=virtiofs_dirs,
vm_config = fcos_vm_config,
)
if not (keep_vm and vm.exists()):
if not (keep and vm.exists()):
fcos_ign = ensure_fcos_ign(cookbook_dir)
vm.ignition.ignition_files.append(fcos_ign)
vm.ignition.extra_files.update(getattr(request.module, "PYTEST_FCOS_EXTRA_FILES", {}))
vm.ignition.extra_files.update(fcos_extra_files)
vm.ignition.ssh_key = test_ssh_pubkey
vm.create()
vm.wait_ip()
dns_server.add_host(vm.ip, [ vm.vm_name ] + dns_names)
vm.wait_ssh(ssh_key=test_ssh_key, timeout=300)
yield vm # <-- tests run here with access to the VM instance
if not keep_vm:
if not keep:
vm.destroy()
dns_server.remove_host(vm.ip)

25
cookbooks/traefik/README.md

@ -12,6 +12,31 @@ This cookbook:
- Stores configuration in `/etc/quadlets/traefik/` and state in `/var/lib/quadlets/traefik/`.
- Supports automatic container image updates via Podman auto-update.
## Configuration
Traefik v3 expects to load its configuration from one (and only one) of the following sources:
- A static configuration file (e.g. `traefik.yaml`) mounted into the `/etc/traefik` directory of the container.
- `TRAEFIK_*` Environment variables.
- Command-line arguments.
If you want to use a static configuration file, you can place it in `/etc/quadlets/traefik/traefik.yaml` and it will be mounted into the container.
Since it is the default location for Traefik's configuration, no additional configuration is needed.
To use the environment variables, you can set them in the `override.conf` file for the container.
That is to say, you can create the file `/etc/containers/systemd/traefik.container.d/override.conf` with the following content:
```ini
Environment=TRAEFIK_FOO=bar TRAEFIK_BAZ=qux ...
```
Regarding command-line arguments, you can create the file `/etc/containers/systemd/traefik.container.d/override.conf` with the following content:
```ini
EntryPoint=/usr/local/bin/traefik
Exec=--foo=bar --baz=qux ...
```
## Usage
In a separate terminal, follow the logs.

11
cookbooks/traefik/config/traefik.yaml → cookbooks/traefik/config/examples/traefik.yaml

@ -1,5 +1,5 @@
api:
dashboard: true
dashboard: false
debug: false
ping:
manualRouting: true
@ -19,15 +19,6 @@ entryPoints:
https:
address: ":443"
certificatesResolvers:
le:
acme:
email: "nicolas.masse@itix.fr"
keyType: "EC384"
httpChallenge:
# used during the challenge
entryPoint: http
storage: "/var/lib/traefik/acme.json"
providers:
file:
directory: /etc/traefik/conf.d/

227
cookbooks/traefik/tests/test_01_install.py

@ -0,0 +1,227 @@
import textwrap
import test_quadlet # noqa: F401
import pytest
import subprocess
import tempfile
from pathlib import Path
@pytest.fixture(scope="module")
def dns_names() -> list[str]:
    """DNS names registered for the VM so the ACME challenges can be validated."""
    return ["secure", "ping"]
# Extra files to inject into the FCOS image for the tests in this file.
@pytest.fixture(scope="module")
def fcos_extra_files(pebble_acme_server, top_level_domain) -> dict:
    """
    Extra files to inject into the FCOS VM image.

    Each value is a tuple of (content, uid, gid, mode). The files configure
    Traefik with a localhost-only ping router, an HTTPS-only router, and the
    Pebble test server as the ACME certificate resolver.
    """
    # NOTE(review): uid 10001 / gid 10000 appear to be the traefik user and
    # service group inside the container -- confirm against the cookbook.
    files = {
        # Exposes the Traefik ping endpoint to localhost.
        "/etc/quadlets/traefik/conf.d/ping.yaml": (
            textwrap.dedent(f"""\
                http:
                  routers:
                    traefik-ping:
                      rule: Host(`ping`) || Host(`ping.{top_level_domain}`)
                      entryPoints:
                        - http
                      service: "ping@internal"
                      middlewares:
                        - localhost-only
                  middlewares:
                    localhost-only:
                      ipAllowList:
                        sourceRange:
                          - "127.0.0.1/32"
                """),
            10001,
            10000,
            0o644,
        ),
        # Exposes the ping endpoint to the outside world over HTTPS only
        "/etc/quadlets/traefik/conf.d/secure.yaml": (
            textwrap.dedent(f"""\
                http:
                  routers:
                    secure:
                      rule: Host(`secure.{top_level_domain}`)
                      entryPoints:
                        - https
                      service: "ping@internal"
                      tls:
                        certResolver: le
                """),
            10001,
            10000,
            0o644,
        ),
        # Pebble's root CA so Traefik can trust the ACME server's TLS endpoint.
        "/etc/quadlets/traefik/pebble.pem": (
            pebble_acme_server['ca_cert'],
            10001,
            10000,
            0o644,
        ),
        # Static Traefik configuration pointing the "le" resolver at Pebble.
        "/etc/quadlets/traefik/traefik.yaml": (
            textwrap.dedent(f"""\
                api:
                  dashboard: false
                  debug: false
                ping:
                  manualRouting: true
                log:
                  level: "INFO"
                accesslog: false
                global:
                  sendanonymoususage: false
                  checknewversion: false
                entryPoints:
                  http:
                    address: ":80"
                  https:
                    address: ":443"
                certificatesResolvers:
                  le:
                    acme:
                      email: "traefik@pytest.example.test"
                      caServer: "{pebble_acme_server['directory_url']}"
                      caCertificates: "/etc/traefik/pebble.pem"
                      keyType: "EC384"
                      httpChallenge:
                        entryPoint: http
                      storage: "/var/lib/traefik/acme.json"
                providers:
                  file:
                    directory: /etc/traefik/conf.d/
                    watch: true
                """),
            10001,
            10000,
            0o644,
        ),
    }
    return files
"""
Verify that the Traefik Quadlet is correctly installed and configured on a fresh VM boot.
"""
class TestTraefikQuadlet(test_quadlet.TestQuadlet):
expected_services = [
{ "name": "traefik.target", "state": "active", "exists": True },
{ "name": "traefik.service", "state": "active", "exists": True },
]
expected_sockets = [
{ "uri": "tcp://127.0.0.1:80", "state": "listening" },
{ "uri": "tcp://127.0.0.1:443", "state": "listening" },
]
expected_ports = [
{ "number": 80, "protocol": "tcp", "state": "open" },
{ "number": 443, "protocol": "tcp", "state": "open" },
{ "number": 22, "protocol": "tcp", "state": "open" },
]
expected_files = [
{ "path": "/var/lib/quadlets/traefik", "type": "directory", "owner": "traefik", "group": "itix-svc", "mode": 0o755 },
{ "path": "/etc/quadlets/traefik", "type": "directory", "owner": "traefik", "group": "itix-svc", "mode": 0o755 },
{ "path": "/etc/quadlets/traefik/traefik.yaml", "type": "file", "owner": "traefik", "group": "itix-svc", "mode": 0o644 },
]
expected_podman_images = [
{ "name": "docker.io/library/traefik", "tag": "v3.4", "state": "present" },
]
expected_podman_containers = [
{ "name": "traefik", "state": "present", "pid1": { "owner": "10001", "group": "10000" } },
]
expected_main_service = "traefik.target"
expected_main_service_timeout = 300
def test_clean_traefik_state(self, fcos_host, keep):
if keep:
# Stop the traefik.target to ensure a clean state for the tests, but only if --keep is set because otherwise the VM is not reused across runs and is already in a clean state.
result = fcos_host.run("systemctl stop traefik.target")
assert result.rc == 0, f"Failed to stop traefik.target: {result.stderr}"
fcos_host.run("rm -rf /var/lib/quadlets/traefik/acme.json")
result = fcos_host.run("systemctl start traefik.target")
assert result.rc == 0, f"Failed to start traefik.target: {result.stderr}"
else:
pytest.skip("Skipping clean Traefik state test because --keep is not set.")
@pytest.mark.flaky(reruns=6, reruns_delay=5)
def test_traefik_ping_localhost(self, fcos_host):
"""Traefik must respond to the ping endpoint with HTTP 200."""
result = fcos_host.run("curl -sSf -o /dev/null -w '%{http_code}' -H 'Host: ping' http://127.0.0.1/")
assert result.rc == 0, f"curl failed with exit code {result.rc}: {result.stderr}"
assert result.stdout.strip() == "200", f"Expected HTTP 200 from ping endpoint, got: {result.stdout.strip()}"
def test_traefik_ping_external(self, fcos_vm, top_level_domain):
"""Traefik must NOT respond to the ping endpoint outside localhost."""
result = subprocess.run(
[
"curl",
"-sSf",
"-o", "/dev/null",
"--resolve", f"ping.{top_level_domain}:80:{fcos_vm.ip}",
"-w", "%{http_code}",
f"http://ping.{top_level_domain}/"
],
check=False,
capture_output=True,
)
assert result.returncode == 22, f"curl failed with exit code {result.returncode}: {result.stderr}"
assert int(result.stdout.strip()) == 403, f"Expected HTTP 403 from ping endpoint, got: {result.stdout.strip()}"
@pytest.mark.flaky(reruns=12, reruns_delay=5)
def test_traefik_tls(self, fcos_vm, pebble_acme_server, top_level_domain):
"""Traefik must respond to the secure endpoint with HTTP 200."""
# On the host running pytest, create a temporary dir in /tmp and write the Pebble CA certificate in the pebble.pem file.
tmpdir = tempfile.TemporaryDirectory(delete=True)
d = Path(tmpdir.name)
pebble_ca_bundle_path = d / "pebble.pem"
pebble_ca_bundle_path.write_text(pebble_acme_server['ca_bundle'])
result = subprocess.run(
[
"curl",
"-sSf",
"-o", "/dev/null",
"--cacert", str(pebble_ca_bundle_path),
"--resolve", f"secure.{top_level_domain}:443:{fcos_vm.ip}",
"-w", "%{http_code}",
f"https://secure.{top_level_domain}/"
],
check=False,
capture_output=True,
)
assert result.returncode == 0, f"curl failed with exit code {result.returncode}: {result.stderr}"
assert int(result.stdout.strip()) == 200, f"Expected HTTP 200 from ping endpoint, got: {result.stdout.strip()}"
def test_traefik_restart(self, fcos_host):
"""Restarting traefik.target must keep Traefik running and the ping endpoint must still respond."""
result = fcos_host.run("systemctl restart traefik.target")
assert result.rc == 0, f"Failed to restart traefik.target: {result.stderr}"
# Wait for traefik.target to become active again after the restart
self.wait_for_service(fcos_host, "traefik.target", timeout=120)
# traefik.service must still be running after the restart
self.check_expected_services(fcos_host, [
{ "name": "traefik.service", "state": "active", "exists": True },
])
# Ping endpoint must still respond after the restart
result = fcos_host.run("curl -sSf -H 'Host: ping' http://127.0.0.1/")
assert result.rc == 0, f"curl failed after restart: {result.stderr}"

2
cookbooks/traefik/traefik.target

@ -1,5 +1,5 @@
[Unit]
Description=PostgreSQL Service Target
Description=Traefik Service Target
Documentation=man:systemd.target(5)
Requires=traefik.service
After=traefik.service

2
pyproject.toml

@ -10,6 +10,8 @@ dependencies = [
"pytest>=8.0",
"pytest-testinfra>=10.1",
"paramiko>=3.4",
"testcontainers>=4.0",
"pytest-rerunfailures>=16.0",
]
[tool.pytest.ini_options]

28
scripts/cloud-init.dev.yaml

@ -30,6 +30,25 @@ packages:
- virt-install
- xterm-resize # Required to fix the terminal when using `virsh console` with UEFI firmware
- yq
- NetworkManager
- dnsmasq # Required to serve DNS records for the Pebble ACME server
runcmd: |
#!/bin/bash
set -Eeuo pipefail
# Enable the Podman socket to allow running Podman containers from the testcontainers python library,
# which is used in the tests of the Podman Quadlet Cookbook.
systemctl enable --now --no-block podman.socket
# Disable systemd-resolved
systemctl stop --no-block systemd-resolved.service
systemctl disable systemd-resolved.service
systemctl mask systemd-resolved.service
# Let NetworkManager handle the DNS name resolution.
rm -f /etc/resolv.conf
systemctl restart NetworkManager.service
write_files:
- path: /etc/ssh/sshd_config.d/00-vscode.conf
@ -37,3 +56,12 @@ write_files:
# This file is used to allow VS Code Remote SSH extension to connect to the VM as root user.
PermitRootLogin prohibit-password
permissions: '0600'
- path: /etc/NetworkManager/conf.d/quadlets.conf
content: |
# This file is used to configure NetworkManager for the Quadlets environment.
# It configures NetworkManager to use dnsmasq as the system's DNS resolver and
# generates resolv.conf accordingly.
[main]
dns=dnsmasq
rc-manager=file
permissions: '0644'

16
scripts/common.mk

@ -103,13 +103,19 @@ TARGET_EXAMPLES_TMPFILESD_FILES = $(patsubst tmpfiles.d/examples/%, $(TARGET_CHR
TARGET_EXAMPLES_SYSCTLD_FILES = $(patsubst sysctl.d/examples/%, $(TARGET_CHROOT)/etc/sysctl.d/%, $(EXAMPLES_SYSCTLD_FILES))
TARGET_EXAMPLES_PROFILED_FILES = $(patsubst profile.d/examples/%, $(TARGET_CHROOT)/etc/profile.d/%, $(EXAMPLES_PROFILED_FILES))
# Example quadlet and systemd drop-ins files
EXAMPLES_QUADLET_DROPINS_FILES := $(shell find examples -mindepth 1 -type f | grep -E '\.(container|volume|network|pod|build|image)\.d/' 2>/dev/null)
EXAMPLES_SYSTEMD_DROPINS_FILES := $(shell find examples -mindepth 1 -type f | grep -E '\.(service|target|timer|mount)\.d/' 2>/dev/null)
TARGET_EXAMPLES_QUADLET_DROPINS_FILES = $(patsubst examples/%, $(TARGET_CHROOT)/etc/containers/systemd/%, $(EXAMPLES_QUADLET_DROPINS_FILES))
TARGET_EXAMPLES_SYSTEMD_DROPINS_FILES = $(patsubst examples/%, $(TARGET_CHROOT)/etc/systemd/system/%, $(EXAMPLES_SYSTEMD_DROPINS_FILES))
# All configuration files to be installed
TARGET_FILES += $(addprefix $(TARGET_CHROOT)/etc/containers/systemd/, $(QUADLETS_FILES)) \
$(addprefix $(TARGET_CHROOT)/etc/systemd/system/, $(SYSTEMD_FILES)) \
$(TARGET_CONFIG_FILES) $(TARGET_TMPFILESD_FILES) $(TARGET_SYSCTLD_FILES) $(TARGET_PROFILED_FILES)
# All example configuration files to be installed
TARGET_EXAMPLE_FILES += $(TARGET_EXAMPLES_CONFIG_FILES) $(TARGET_EXAMPLES_TMPFILESD_FILES) $(TARGET_EXAMPLES_SYSCTLD_FILES) $(TARGET_EXAMPLES_PROFILED_FILES)
TARGET_EXAMPLE_FILES += $(TARGET_EXAMPLES_CONFIG_FILES) $(TARGET_EXAMPLES_TMPFILESD_FILES) $(TARGET_EXAMPLES_SYSCTLD_FILES) $(TARGET_EXAMPLES_PROFILED_FILES) $(TARGET_EXAMPLES_QUADLET_DROPINS_FILES) $(TARGET_EXAMPLES_SYSTEMD_DROPINS_FILES)
# Dependencies on other projects
# List here the names of other projects (directories at the top-level) that this project depends on.
@ -187,6 +193,12 @@ $(filter-out %.env, $(TARGET_CONFIG_FILES) $(TARGET_EXAMPLES_CONFIG_FILES)):
$(filter %.env, $(TARGET_CONFIG_FILES) $(TARGET_EXAMPLES_CONFIG_FILES)):
install -m 0600 -o root -g root -D $< $@
# Copy systemd and quadlet drop-ins files
$(TARGET_EXAMPLES_QUADLET_DROPINS_FILES): $(TARGET_CHROOT)/etc/containers/systemd/%: examples/% $(TARGET_CHROOT)/etc/containers/systemd
$(TARGET_EXAMPLES_SYSTEMD_DROPINS_FILES): $(TARGET_CHROOT)/etc/systemd/system/%: examples/% $(TARGET_CHROOT)/etc/systemd/system
$(TARGET_EXAMPLES_QUADLET_DROPINS_FILES) $(TARGET_EXAMPLES_SYSTEMD_DROPINS_FILES):
install -D -m 0644 -o root -g root $< $@
# Copy tmpfiles.d files
$(TARGET_TMPFILESD_FILES): $(TARGET_CHROOT)/etc/tmpfiles.d/%: tmpfiles.d/% $(TARGET_CHROOT)/etc/tmpfiles.d
$(TARGET_EXAMPLES_TMPFILESD_FILES): $(TARGET_CHROOT)/etc/tmpfiles.d/%: tmpfiles.d/examples/% $(TARGET_CHROOT)/etc/tmpfiles.d
@ -210,7 +222,7 @@ $(TARGET_CHROOT)/var/lib/quadlets/$(PROJECT_NAME):
install -d -m 0755 -o $(PROJECT_UID) -g $(PROJECT_GID) $@
# Copy all configuration files provided by this project.
install-config: $(TARGET_FILES) $(TARGET_CHROOT)/var/lib/quadlets/$(PROJECT_NAME)
install-config: $(TARGET_FILES) $(TARGET_CHROOT)/var/lib/quadlets/$(PROJECT_NAME) $(TARGET_CHROOT)/etc/quadlets/$(PROJECT_NAME)
# Copy all example configuration files provided by this project.
install-examples: $(TARGET_EXAMPLE_FILES)

64
tests/dns_server.py

@ -0,0 +1,64 @@
import subprocess
class DNSServer:
    """
    Manages the libvirt network configuration related to DNS.

    Host records are added and removed with ``virsh net-update``. When
    ``persistent`` is set, changes are also written to the network's
    persistent configuration (``--config``) so they survive network restarts,
    and ``cleanup()`` leaves the network untouched.
    """

    def __init__(self, network: str = "default", persistent: bool = False) -> None:
        """
        Args:
            network: The libvirt network name to configure DNS for.
            persistent: Whether to keep the DNS configuration persistent.
        """
        self.network = network
        self.persistent = persistent
        # Optional DNS domain appended to hostnames registered via add_host().
        self.domain: str | None = None

    def set_domain(self, domain: str) -> None:
        """Set the domain for the DNS server."""
        self.domain = domain

    def _config_flags(self) -> list[str]:
        """Extra virsh flags: persist the change to the network config when requested."""
        return ["--config"] if self.persistent else []

    def add_host(self, ip: str, hostnames: list[str]) -> None:
        """Adds a host record mapping ``ip`` to each hostname (qualified with the domain, if set)."""
        xml = f'<host ip="{ip}">'
        for hostname in hostnames:
            fqdn = f"{hostname}.{self.domain}" if self.domain else hostname
            xml += f'<hostname>{fqdn}</hostname>'
        xml += '</host>'
        # check=True raises CalledProcessError if virsh fails; the previous
        # version also bound the result to an unused local.
        subprocess.run(
            ["virsh", "net-update", self.network, "add-last", "dns-host", xml, "--live"]
            + self._config_flags(),
            capture_output=True,
            timeout=10,
            check=True,
        )

    def remove_host(self, ip: str) -> None:
        """Removes the host record for ``ip`` from the DNS server."""
        xml = f'<host ip="{ip}"/>'
        subprocess.run(
            ["virsh", "net-update", self.network, "delete", "dns-host", xml, "--live"]
            + self._config_flags(),
            capture_output=True,
            timeout=10,
            check=True,
        )

    def cleanup(self) -> None:
        """Resets the libvirt network configuration to its default state by destroying and restarting the network."""
        # Persistent setups keep their records; nothing to undo.
        if not self.persistent:
            for cmd in ["net-destroy", "net-start"]:
                subprocess.run(
                    ["virsh", cmd, self.network],
                    capture_output=True,
                    timeout=10,
                    check=True,
                )

16
tests/fcos_vm.py

@ -50,7 +50,7 @@ class FCOSIgnition:
teardown).
"""
def __init__(self, ignition_files: list[Path] | None = None, ssh_key: str | None = None, extra_files: dict[str, tuple[str | int, str | int, int, str]] | None = None) -> None:
def __init__(self, ignition_files: list[Path] | None = None, ssh_key: str | None = None, extra_files: dict[str, tuple[str, str | int, str | int, int]] | None = None) -> None:
"""
Args:
ignition_files: List of paths to the compiled Ignition (.ign) files.
@ -314,6 +314,20 @@ class FCOSVirtualMachine:
raise RuntimeError(f"VM {self.vm_name!r} has no IP address yet")
return self._ip
def wait_ip(self, timeout: int = 300) -> str:
    """Block until the VM has an IP address. Returns the IP address.

    Polls every 5 seconds until ``timeout`` seconds have elapsed.

    Raises:
        TimeoutError: If no IP address was obtained within ``timeout`` seconds.
    """
    deadline = time.monotonic() + timeout
    while True:
        if time.monotonic() >= deadline:
            raise TimeoutError(f"VM {self.vm_name!r} did not obtain an IP address within {timeout}s")
        address = self.get_ip()
        if address:
            # Cache the address so later accesses via self._ip see it.
            self._ip = address
            return address
        time.sleep(5)
def wait_ssh(self, ssh_key: Path, timeout: int = 300) -> str:
"""Block until SSH is reachable. Returns the IP address.

Loading…
Cancel
Save