Compare commits

...

13 Commits

  1. 1
      .gitignore
  2. 93
      Makefile
  3. 2
      README.md
  4. 45
      packaging/zvirt.spec
  5. 13
      src/bin/zvirt
  6. 202
      src/lib/zvirt/core.sh
  7. 16
      test/e2e/cloud-init/standard-user-data
  8. 19
      test/e2e/cloud-init/with-fs-user-data
  9. 31
      test/e2e/cloud-init/with-zvol-user-data
  10. 836
      test/e2e/zvirt.bats
  11. 338
      test/unit/core.bats
  12. 25
      test/unit/usage.bats

1
.gitignore

@@ -0,0 +1 @@
build/

93
Makefile

@@ -1,17 +1,94 @@
.PHONY: all test unit-test syntax-test integration-test lint clean
PREFIX ?= /usr/local
.PHONY: all test unit-test syntax-test e2e-test lint clean prerequisites install uninstall release tarball install-tarball srpm rpm copr-build copr-whoami git-tag
VERSION := $(shell git describe --tags --abbrev=0)
all: syntax-test unit-test lint
all: syntax-test lint unit-test e2e-test release
syntax-test:
@echo "Running syntax tests..."
@/bin/bash -nv src/zvirt
@/bin/bash -nv src/lib/core.sh
@/bin/bash -nv src/bin/zvirt
@/bin/bash -nv src/lib/zvirt/core.sh
unit-test:
prerequisites:
@echo "Installing prerequisites..."
@/bin/bash -Eeuo pipefail -c 'if ! bats --version &>/dev/null; then dnf install -y bats; fi'
@/bin/bash -Eeuo pipefail -c 'if ! yq --version &>/dev/null; then dnf install -y yq; fi'
@/bin/bash -Eeuo pipefail -c 'if ! shellcheck --version &>/dev/null; then dnf install -y shellcheck; fi'
@/bin/bash -Eeuo pipefail -c 'if ! gh --version &>/dev/null; then dnf install -y gh; fi'
@/bin/bash -Eeuo pipefail -c 'if ! rpmbuild --version &>/dev/null; then dnf install -y rpm-build; fi'
@/bin/bash -Eeuo pipefail -c 'if ! copr-cli --version &>/dev/null; then dnf install -y copr-cli; fi'
@/bin/bash -Eeuo pipefail -c 'if ! git --version &>/dev/null; then dnf install -y git; fi'
unit-test: prerequisites
@echo "Running unit tests..."
@LANG=LC_ALL=C BATS_LIB_PATH=$(PWD)/test/test_helper bats test/unit
@LANG=C LC_ALL=C BATS_LIB_PATH=$(PWD)/test/test_helper bats test/unit
e2e-test: prerequisites
@echo "Running end-to-end tests..."
@LANG=C LC_ALL=C BATS_LIB_PATH=$(PWD)/test/test_helper bats test/e2e
install:
@echo "Installing zvirt..."
@install -d $(PREFIX)/lib/zvirt $(PREFIX)/bin
@install -m 755 src/bin/zvirt $(PREFIX)/bin/zvirt
@install -m 644 src/lib/zvirt/core.sh $(PREFIX)/lib/zvirt/core.sh
uninstall:
@echo "Uninstalling zvirt..."
@rm -f $(PREFIX)/bin/zvirt
@rm -rf $(PREFIX)/lib/zvirt
tarball:
@echo "Creating release tarball..."
@mkdir -p build
@tar --exclude-vcs --exclude='*.swp' -czf build/zvirt-$(VERSION).tar.gz --transform "s|^src|zvirt-$(VERSION)|" src
install-tarball: tarball
@echo "Installing zvirt from release tarball..."
@tar -xvzf build/zvirt-$(VERSION).tar.gz --strip-components=1 -C $(PREFIX)
srpm: prerequisites
@echo "Creating SRPM..."
@sed -i "s/^Version: .*/Version: $(VERSION)/" packaging/zvirt.spec
@git ls-files | sed 's|^|./|' > build/filelist.txt
@mkdir -p build/zvirt-$(VERSION)/SOURCES
@tar --verbatim-files-from --files-from=build/filelist.txt -cvzf build/zvirt-$(VERSION)/SOURCES/zvirt-$(VERSION).tar.gz --transform "s|^./|zvirt-$(VERSION)/|"
@rpmbuild --define "_topdir $$(pwd)/build/zvirt-$(VERSION)" --define "dist %{nil}" -bs packaging/zvirt.spec
rpm: prerequisites srpm
@echo "Creating RPM..."
@rpmbuild --define "_topdir $$(pwd)/build/zvirt-$(VERSION)" -bb packaging/zvirt.spec
# https://copr.fedorainfracloud.org/api/
copr-whoami: prerequisites
@echo "Checking COPR identity..."
@copr-cli whoami
copr-build: copr-whoami srpm
@echo "Building RPM in COPR..."
@copr-cli build --nowait nmasse-itix/zvirt build/zvirt-$(VERSION)/SRPMS/zvirt-$(VERSION)-*.src.rpm
git-tag: prerequisites
@if [ -n "$$(git status --porcelain)" ]; then echo "Git working directory is dirty. Please commit or stash changes before tagging."; exit 1; fi
@echo "Tagging Git repository..."
@read -p "Enter version to tag [current: $(VERSION)]: " NEW_VERSION; \
sed -i "s/^Version: .*/Version: $${NEW_VERSION}/" packaging/zvirt.spec; \
git add packaging/zvirt.spec; \
git commit -m "Bump version to $${NEW_VERSION}" ; \
git tag -a "$${NEW_VERSION}" -m "Release v$${NEW_VERSION} of zvirt." ; \
$(MAKE) -C . VERSION=$${NEW_VERSION} release
release: prerequisites tarball srpm rpm copr-build
@echo "Pushing changes for version $(VERSION) to Git repository..."
@git push origin $$(git rev-parse --abbrev-ref HEAD)
@git push origin "$(VERSION)"
@echo "Creating GitHub release $(VERSION)..."
@gh release create $(VERSION) build/zvirt-$(VERSION).tar.gz build/zvirt-$(VERSION)/SRPMS/zvirt-$(VERSION)-*.rpm --draft --title "v$(VERSION)" --notes "Release v$(VERSION) of zvirt. RPMs are in COPR [nmasse-itix/zvirt](https://copr.fedorainfracloud.org/coprs/nmasse-itix/zvirt/)."
clean:
lint:
@echo "Cleaning up..."
@rm -rf build/zvirt-*
lint: prerequisites
@echo "Linting..."
@shellcheck src/zvirt src/lib/*.sh
@cd src && shellcheck --severity=error bin/zvirt lib/zvirt/*.sh

2
README.md

@@ -5,7 +5,7 @@
Zvirt takes snapshots of Libvirt domains using ZFS.
It supports both crash-consistent and live snapshots.
At the end, all components of a domain (Domain definition, TPM, NVRAM, VirtioFS, ZFS snapshots of the underlying storage volumes) are captured as a set of consistent ZFS snapshots.
At the end, all components of a domain - Domain definition, TPM, NVRAM, VirtioFS, disks (either files on a ZFS dataset or raw zvols) - are captured as a set of consistent ZFS snapshots.
## Features

45
packaging/zvirt.spec

@@ -0,0 +1,45 @@
Name: zvirt
Version: 0.0.5
%if %{defined dist}
Release: 1%{?dist}
%else
Release: 1
%endif
Summary: Libvirt ZFS snapshots utility
License: MIT
URL: https://github.com/nmasse-itix/zvirt
Source0: %{name}-%{version}.tar.gz
BuildArch: noarch
Requires: bash
Requires: libvirt
Requires: zfs
BuildRequires: make
%description
Zvirt takes snapshots of Libvirt domains using ZFS.
It supports both crash-consistent and live snapshots.
At the end, all components of a domain (Domain definition, TPM, NVRAM,
VirtioFS, ZFS snapshots of the underlying storage volumes) are captured
as a set of consistent ZFS snapshots.
%prep
%setup -q
%build
# Nothing to build for a shell script
%install
make PREFIX=%{buildroot}%{_prefix} install
%files
%{_bindir}/zvirt
%{_prefix}/lib/zvirt/core.sh
%dir %{_prefix}/lib/zvirt
%changelog
* Mon Nov 24 2025 Nicolas Massé <nicolas.masse@itix.fr> - 0.0.1-1
- Initial package release

13
src/zvirt → src/bin/zvirt

@@ -30,8 +30,8 @@ export LANG=C
export LC_ALL=C
# Load core library
script_dir="$(realpath "$(dirname "${BASH_SOURCE[0]}")")"
source "$script_dir/lib/core.sh"
script_dir="$(realpath "$(dirname "${BASH_SOURCE[0]}")/../")"
source "$script_dir/lib/zvirt/core.sh"
# Parse command line arguments and act accordingly
init_global_variables
@@ -51,12 +51,13 @@ case "$action" in
revert_snapshots || fatal "Failed to revert snapshots."
;;
list)
if [ ${#domains[@]} -eq 0 ]; then
# Get all domains
mapfile -t domains < <(virsh list --all --name | grep -v '^$')
fi
preflight_checks "$action" "${domains[@]}" || fatal "Pre-flight checks failed."
list_snapshots "${domains[@]}" || fatal "Failed to list snapshots."
;;
prune)
preflight_checks "$action" "${domains[@]}" || fatal "Pre-flight checks failed."
prune_snapshots "${domains[@]}" || fatal "Failed to prune snapshots."
;;
*)
fatal "Unknown action '$action'."
;;

202
src/lib/core.sh → src/lib/zvirt/core.sh

@@ -1,3 +1,9 @@
#!/bin/bash
##
## zvirt core library - Provides functions for taking and reverting snapshots of libvirt domains using ZFS.
##
# Reports a verbose message to stdout if verbose mode is enabled.
function log_verbose () {
if [ "$verbose" -eq 1 ]; then
@@ -7,13 +13,13 @@ function log_verbose () {
# Reports a fatal error message to stderr and exits with a non-zero exit code.
function fatal () {
echo "Error: $@" 2>&1
echo "Error: $*" 2>&1
exit 1
}
# Reports an error message to stderr.
function error () {
echo "Error: $@" 2>&1
echo "Error: $*" 2>&1
}
function show_help () {
@@ -27,11 +33,13 @@ Options:
-d DOMAIN specify domain name (you can specify multiple -d options)
-s SNAPSHOT specify snapshot name
-b batch mode (pause all domains, take snapshots, then resume all domains)
-k N keep at most N snapshots per domain (used with 'prune' action)
Actions:
snapshot take a snapshot of the specified domain(s)
revert revert to a snapshot of the specified domain(s)
list list snapshots of the specified domain(s) (or all domains if none specified)
prune prune old snapshots of the specified domain(s) according to retention policy
Examples:
Take a crash-consistent snapshot of domain 'vm1' named 'backup1':
@@ -48,6 +56,9 @@ Examples:
List snapshots of all domains:
${0##*/} list
Prune snapshots of all domains, keeping at most 5 snapshots:
${0##*/} prune -k 5
EOF
}
@@ -60,9 +71,10 @@ function init_global_variables () {
action=""
batch=0
live=0
keep=0
# Cache for domain parameters to avoid redundant calls to the zfs command
declare -A domain_params_cache=( )
declare -gA domain_params_cache=( )
}
# Parses the command-line arguments.
@@ -77,7 +89,7 @@ function parse_args () {
OPTIND=1 # Reset in case getopts has been used previously in the shell.
while getopts "h?blvd:s:" opt; do
while getopts "h?blvd:s:k:" opt; do
case "$opt" in
h|\?)
show_help
@@ -93,6 +105,8 @@ function parse_args () {
;;
l) live=1
;;
k) keep="$OPTARG"
;;
*) show_help >&2
exit 1
;;
@@ -108,6 +122,11 @@ function parse_args () {
should_exit=1
fi
if [ ${#domains[@]} -eq 0 ]; then
# Get all domains
mapfile -t domains < <(virsh list --all --name | grep -v '^$')
fi
case "$action" in
snapshot)
if [ ${#domains[@]} -eq 0 ] || [ -z "$snapshot_name" ]; then
@@ -115,11 +134,6 @@ function parse_args () {
should_exit=1
fi
if [ "$batch" -eq 1 ] && [ "$live" -ne 1 ]; then
echo "Error: Batch mode requires live snapshot mode."
should_exit=1
fi
if [[ ! "$snapshot_name" =~ ^[a-zA-Z0-9._-]+$ ]]; then
echo "Error: Snapshot name '$snapshot_name' contains invalid characters. Only alphanumeric characters, dots (.), underscores (_) and hyphens (-) are allowed."
should_exit=1
@@ -138,6 +152,12 @@ function parse_args () {
;;
list)
;;
prune)
if [ "$keep" -le 0 ]; then
echo "Error: The -k option with a positive integer value must be specified for the 'prune' action."
should_exit=1
fi
;;
*)
echo "Error: Unsupported action '$action'."
should_exit=1
@@ -163,7 +183,10 @@ function domain_exists () {
function domain_checks () {
local action="$1"
local domain="$2"
local snapshot_name="$3"
local snapshot_name
if [ "$action" == "snapshot" ] || [ "$action" == "revert" ]; then
snapshot_name="$3"
fi
local error=0
local state=""
@@ -193,12 +216,17 @@ function domain_checks () {
if [ -z "$zfs_mountpoint" ] || [[ ! "$zfs_mountpoint" =~ ^/ ]]; then
error "$domain: Wrong ZFS mountpoint for dataset '$zfs_dataset': '$zfs_mountpoint'." ; error=1
# elif [ ! -d "$zfs_mountpoint" ]; then
# error "$domain: ZFS mountpoint '$zfs_mountpoint' does not exist." ; error=1
fi
state=$(domain_state "$domain")
# Store those values in cache for later use
domain_params_cache["$domain/state"]="${state}"
domain_params_cache["$domain/dataset"]="${zfs_dataset}"
domain_params_cache["$domain/mountpoint"]="${zfs_mountpoint}"
domain_params_cache["$domain/zvols"]="${zfs_zvols[*]}"
domain_params_cache["$domain/snapshots"]="${zfs_dataset_snapshots[*]}"
case "$action" in
snapshot)
# Check domain state
@@ -223,7 +251,7 @@ function domain_checks () {
done
# Check if save file already exists for live snapshot
if [ -f "${zfs_mountpoint}/domain.save" ]; then
if [ "$live" -eq 1 ] && has_save_file "$domain"; then
error "$domain: Save file '${zfs_mountpoint}/domain.save' already exists." ; error=1
fi
;;
@@ -244,7 +272,15 @@ function domain_checks () {
fi
done
;;
list)
;;
prune)
if [ ${#zfs_dataset_snapshots[@]} -le "$keep" ]; then
log_verbose "$domain: No snapshots to prune (total: ${#zfs_dataset_snapshots[@]}, keep: $keep)."
fi
;;
*)
# Should not reach here due to prior validation
error "$domain: Unknown action '$action'."
;;
esac
@@ -254,16 +290,10 @@ function domain_checks () {
return 1
fi
# Store those values in cache for later use
declare -A domain_params_cache
domain_params_cache["$domain/state"]="${state}"
domain_params_cache["$domain/dataset"]="${zfs_dataset}"
domain_params_cache["$domain/mountpoint"]="${zfs_mountpoint}"
domain_params_cache["$domain/zvols"]="${zfs_zvols[*]}"
return 0
}
# Gets the mountpoint of the specified ZFS dataset.
function get_zfs_dataset_mountpoint () {
local zfs_dataset="$1"
zfs get -H -o value mountpoint "${zfs_dataset}"
@@ -340,7 +370,7 @@ function restore_domain () {
else
virsh_restore_opts+=( "--running" )
fi
virsh restore "${zfs_mountpoint}/domain.save" --verbose "${virsh_restore_opts[@]}"
virsh restore "${zfs_mountpoint}/domain.save" "${virsh_restore_opts[@]}"
}
# Pauses all domains in the list.
@@ -362,7 +392,7 @@ function resume_all_domains () {
for domain in "${domains[@]}"; do
log_verbose "$domain: Resuming domain..."
state="${domain_params_cache["$domain/state"]}"
state="$(domain_state "$domain")"
case "$state" in
paused)
virsh resume "$domain" || true
@@ -380,13 +410,20 @@ function resume_all_domains () {
# Performs pre-flight checks for all specified domains according to the action.
function preflight_checks () {
local action="$1" ; shift
local snapshot_name="$1" ; shift
local snapshot_name
if [ "$action" == "snapshot" ] || [ "$action" == "revert" ]; then
snapshot_name="$1" ; shift
fi
local error=0
local domains=( "$@" )
for domain in "${domains[@]}"; do
log_verbose "$domain: Performing domain pre-flight checks for $action..."
if ! domain_checks "$action" "$domain" "$snapshot_name"; then
local -a domain_checks_args=( "$action" "$domain" )
if [ "$action" == "snapshot" ] || [ "$action" == "revert" ]; then
domain_checks_args+=( "$snapshot_name" )
fi
if ! domain_checks "${domain_checks_args[@]}"; then
error=1
fi
done
@@ -394,10 +431,72 @@ function preflight_checks () {
return $error
}
# Removes the save file for the specified domain.
function remove_save_file () {
local domain="$1"
zfs_mountpoint="${domain_params_cache["$domain/mountpoint"]}"
if [ -f "${zfs_mountpoint}/domain.save" ]; then
log_verbose "$domain: Removing save file '${zfs_mountpoint}/domain.save'..."
rm -f "${zfs_mountpoint}/domain.save"
fi
}
# Checks if the save file exists for the specified domain.
function has_save_file () {
local domain="$1"
zfs_mountpoint="${domain_params_cache["$domain/mountpoint"]}"
if [ -f "${zfs_mountpoint}/domain.save" ]; then
return 0
else
return 1
fi
}
# Thaws the specified domain filesystem.
function fsthaw_domain () {
local domain="$1"
virsh domfsthaw "$domain"
}
# Freezes the specified domain filesystem.
function fsfreeze_domain () {
local domain="$1"
virsh domfsfreeze "$domain"
}
# Thaws all domains in the list.
function fsthaw_all_domains () {
local domains=( "$@" )
for domain in "${domains[@]}"; do
log_verbose "$domain: Thawing domain..."
state="${domain_params_cache["$domain/state"]}"
if [ "$state" == "running" ]; then
fsthaw_domain "$domain"
fi
done
}
# Freezes all domains in the list.
function fsfreeze_all_domains () {
local domains=( "$@" )
for domain in "${domains[@]}"; do
log_verbose "$domain: Freezing domain..."
state="${domain_params_cache["$domain/state"]}"
if [ "$state" == "running" ]; then
fsfreeze_domain "$domain"
fi
done
}
# Takes snapshots for all specified domains.
function take_snapshots () {
if [ "$batch" -eq 1 ]; then
if [ "$batch" -eq 1 ] && [ "$live" -eq 1 ]; then
pause_all_domains "${domains[@]}"
elif [ "$batch" -eq 1 ] && [ "$live" -eq 0 ]; then
fsfreeze_all_domains "${domains[@]}"
fi
for domain in "${domains[@]}"; do
@@ -405,13 +504,24 @@ function take_snapshots () {
if [ "$live" -eq 1 ] && [ "$state" == "running" ]; then
take_live_snapshot "$domain" "$snapshot_name"
restore_domain "$domain"
if [ "$batch" -eq 1 ]; then
remove_save_file "$domain"
fi
else
if [ "$batch" -eq 0 ] && [ "$state" == "running" ]; then
fsfreeze_domain "$domain"
fi
take_crash_consistent_snapshot "$domain" "$snapshot_name"
if [ "$batch" -eq 0 ] && [ "$state" == "running" ]; then
fsthaw_domain "$domain"
fi
fi
done
if [ "$batch" -eq 1 ]; then
if [ "$batch" -eq 1 ] && [ "$live" -eq 1 ]; then
resume_all_domains "${domains[@]}"
elif [ "$batch" -eq 1 ] && [ "$live" -eq 0 ]; then
fsthaw_all_domains "${domains[@]}"
fi
return 0
@@ -421,7 +531,12 @@ function take_snapshots () {
function revert_snapshots () {
for domain in "${domains[@]}"; do
revert_snapshot "$domain" "$snapshot_name"
if has_save_file "$domain"; then
restore_domain "$domain"
if [ "$batch" -eq 1 ]; then
remove_save_file "$domain"
fi
fi
done
if [ "$batch" -eq 1 ]; then
@@ -432,18 +547,35 @@ function revert_snapshots () {
# Lists snapshots for all specified domains.
function list_snapshots () {
local domains=( "$@" )
local zfs_datasets
local zfs_dataset
local domain
local snapshot
for domain in "${domains[@]}"; do
zfs_datasets=( $(get_zfs_datasets_from_domain "$domain") )
if [ ${#zfs_datasets[@]} -ne 1 ]; then
error "$domain: Wrong number of ZFS datasets (${#zfs_datasets[@]}) found." ; return 1
fi
zfs_dataset="${zfs_datasets[0]:-}"
echo "Snapshots for domain '$domain':"
get_zfs_snapshots_from_dataset "$zfs_dataset" | sed 's/^/ - /'
for snapshot in ${domain_params_cache["$domain/snapshots"]}; do
echo " - $snapshot"
done
done
}
# Prunes old snapshots for all specified domains according to the retention policy.
function prune_snapshots () {
local domains=( "$@" )
local dataset
local snapshots
local domain
for domain in "${domains[@]}"; do
snapshots=( ${domain_params_cache["$domain/snapshots"]} )
dataset="${domain_params_cache["$domain/dataset"]}"
if [ "${#snapshots[@]}" -le "$keep" ]; then
continue
fi
local first_to_delete_idx=$(( ${#snapshots[@]} - keep - 1 ))
local first_to_delete="${snapshots[$first_to_delete_idx]}"
if [ -z "$first_to_delete" ]; then
continue
fi
zfs destroy -r "${dataset}@%${first_to_delete}"
done
}

16
test/e2e/cloud-init/standard-user-data

@@ -0,0 +1,16 @@
#cloud-config
bootcmd:
- setsebool -P virt_qemu_ga_run_unconfined on
- setsebool -P virt_qemu_ga_read_nonsecurity_files on
- setsebool -P virt_rw_qemu_ga_data on
- install -o root -g root -m 0777 --context=system_u:object_r:virt_qemu_ga_data_t:s0 -d /test/rootfs
users:
- name: e2e
gecos: End-to-End Test User
sudo: ALL=(ALL) NOPASSWD:ALL
groups: wheel
lock_passwd: false
# echo -n test | mkpasswd -m bcrypt -s
passwd: $2b$05$Oh13XsRSrGrL/iSvV0Rax.w7rQMx/6lyBTCuaEVXrdh/qiagci9bS

19
test/e2e/cloud-init/with-fs-user-data

@@ -0,0 +1,19 @@
#cloud-config
mounts:
- [ data, /test/virtiofs, virtiofs, "defaults,context=system_u:object_r:virt_qemu_ga_data_t:s0", "0", "0" ]
bootcmd:
- setsebool -P virt_qemu_ga_run_unconfined on
- setsebool -P virt_qemu_ga_read_nonsecurity_files on
- setsebool -P virt_rw_qemu_ga_data on
- install -o root -g root -d /test/virtiofs
users:
- name: e2e
gecos: End-to-End Test User
sudo: ALL=(ALL) NOPASSWD:ALL
groups: wheel
lock_passwd: false
# echo -n test | mkpasswd -m bcrypt -s
passwd: $2b$05$Oh13XsRSrGrL/iSvV0Rax.w7rQMx/6lyBTCuaEVXrdh/qiagci9bS

31
test/e2e/cloud-init/with-zvol-user-data

@@ -0,0 +1,31 @@
#cloud-config
disk_setup:
/dev/vdb:
table_type: gpt
layout: true
overwrite: true
fs_setup:
- label: zvol
filesystem: xfs
device: /dev/vdb
partition: auto
mounts:
- [ LABEL=zvol, /test/zvol, xfs, "defaults,context=system_u:object_r:virt_qemu_ga_data_t:s0", "0", "2" ]
bootcmd:
- setsebool -P virt_qemu_ga_run_unconfined on
- setsebool -P virt_qemu_ga_read_nonsecurity_files on
- setsebool -P virt_rw_qemu_ga_data on
- mkdir -p /test/zvol
users:
- name: e2e
gecos: End-to-End Test User
sudo: ALL=(ALL) NOPASSWD:ALL
groups: wheel
lock_passwd: false
# echo -n test | mkpasswd -m bcrypt -s
passwd: $2b$05$Oh13XsRSrGrL/iSvV0Rax.w7rQMx/6lyBTCuaEVXrdh/qiagci9bS

836
test/e2e/zvirt.bats

@@ -0,0 +1,836 @@
#!/usr/bin/env bats
setup() {
bats_load_library 'bats-support'
bats_load_library 'bats-assert'
set -Eeuo pipefail
export LANG=C LC_ALL=C
zvirt () {
"${BATS_TEST_DIRNAME}/../../src/bin/zvirt" "$@"
}
declare -g e2e_test_enable_debug=1
e2e_test_debug_log(){
if [ "$e2e_test_enable_debug" -eq 1 ]; then
echo "$@" >&3
fi
}
qemu_exec() {
domain="$1"
shift || true
local json_args=""
for arg in "${@:2}"; do
if [ -n "$json_args" ]; then
json_args+=", "
fi
json_args+="\"$arg\""
done
local command="{\"execute\": \"guest-exec\", \"arguments\": {\"path\": \"$1\", \"arg\": [ $json_args ], \"capture-output\": true }}"
output="$(virsh qemu-agent-command "$domain" "$command")"
#e2e_test_debug_log "qemu_exec: command output: $output"
pid="$(echo "$output" | jq -r '.return.pid')"
if [ -z "$pid" ] || [ "$pid" == "null" ]; then
e2e_test_debug_log "qemu_exec: failed to get pid from command output"
return 1
fi
sleep .25
while true; do
local status_command="{\"execute\": \"guest-exec-status\", \"arguments\": {\"pid\": $pid}}"
status_output="$(virsh qemu-agent-command "$domain" "$status_command")"
#e2e_test_debug_log "qemu_exec: status output: $status_output"
exited="$(echo "$status_output" | jq -r '.return.exited')"
if [ "$exited" == "true" ]; then
stdout_base64="$(echo "$status_output" | jq -r '.return["out-data"]')"
if [ "$stdout_base64" != "null" ]; then
echo "$stdout_base64" | base64 --decode
fi
stderr_base64="$(echo "$status_output" | jq -r '.return["err-data"]')"
if [ "$stderr_base64" != "null" ]; then
echo "$stderr_base64" | base64 --decode >&2
fi
exit_code="$(echo "$status_output" | jq -r '.return.exitcode')"
return $exit_code
fi
sleep 1
done
}
create_cloud_init_iso () {
local domain="$1"
local iso_path="/var/lib/libvirt/images/${domain}/cloud-init.iso"
local user_data_path="/var/lib/libvirt/images/${domain}/cloud-init/user-data"
local meta_data_path="/var/lib/libvirt/images/${domain}/cloud-init/meta-data"
# Create cloud-init user-data and meta-data files
mkdir -p "/var/lib/libvirt/images/${domain}/cloud-init"
cp "${BATS_TEST_DIRNAME}/cloud-init/${domain}-user-data" "$user_data_path"
cat > "$meta_data_path" <<EOF
instance-id: ${domain}
local-hostname: ${domain}
EOF
# Create ISO image
genisoimage -output "$iso_path" -volid cidata -joliet -rock "$user_data_path" "$meta_data_path"
}
convert_cloud_image() {
local src="$1"
local dest="$2"
# Convert qcow2 to raw and resize to 20G
qemu-img convert -f qcow2 -O raw "$src" "$dest"
qemu-img resize -f raw "$dest" 20G
}
cleanup() {
e2e_test_debug_log "teardown: Cleaning up created domains and images..."
for domain in standard with-fs with-zvol; do
state="$(virsh domstate "$domain" 2>/dev/null || true)"
if [[ -n "$state" && "$state" != "shut off" ]]; then
virsh destroy "$domain"
fi
if virsh dominfo "$domain" &>/dev/null; then
virsh undefine "$domain" --nvram
fi
done
sleep 1
sync
sleep 1
for domain in standard with-fs with-zvol; do
if zfs list data/domains/"$domain" &>/dev/null; then
zfs destroy -rR data/domains/"$domain"
fi
sleep .2
rm -rf "/var/lib/libvirt/images/${domain}"
done
}
create_domains() {
# Create the standard VM
e2e_test_debug_log "setup: Creating the standard VM..."
mkdir -p /var/lib/libvirt/images/standard
zfs create -p data/domains/standard -o mountpoint=/var/lib/libvirt/images/standard
convert_cloud_image "$fedora_img" "/var/lib/libvirt/images/standard/root.img"
create_cloud_init_iso "standard"
virt-install --noautoconsole \
--name=standard \
--cpu=host-passthrough \
--vcpus=1 \
--ram=4096 \
--os-variant=fedora-rawhide \
--disk=path=/var/lib/libvirt/images/standard/root.img,target.dev=vda,bus=virtio,driver.discard=unmap,driver.io=io_uring,format=raw,sparse=True,blockio.logical_block_size=512,blockio.physical_block_size=512,serial=root,format=raw \
--network=none \
--console=pty,target.type=virtio \
--serial=pty \
--disk=path=/var/lib/libvirt/images/standard/cloud-init.iso,readonly=True \
--import \
--sysinfo=system.serial=ds=nocloud \
--boot=uefi
# Create the with-fs VM
e2e_test_debug_log "setup: Creating the with-fs VM..."
mkdir -p /var/lib/libvirt/images/with-fs /srv/with-fs
chmod 0777 /srv/with-fs
zfs create -p data/domains/with-fs -o mountpoint=/var/lib/libvirt/images/with-fs
zfs create -p data/domains/with-fs/virtiofs -o mountpoint=/srv/with-fs
convert_cloud_image "$fedora_img" "/var/lib/libvirt/images/with-fs/root.img"
create_cloud_init_iso "with-fs"
virt-install --noautoconsole \
--name=with-fs \
--cpu=host-passthrough \
--vcpus=1 \
--ram=4096 \
--os-variant=fedora-rawhide \
--disk=path=/var/lib/libvirt/images/with-fs/root.img,target.dev=vda,bus=virtio,driver.discard=unmap,driver.io=io_uring,format=raw,sparse=True,blockio.logical_block_size=512,blockio.physical_block_size=512,serial=root,format=raw \
--network=none \
--console=pty,target.type=virtio \
--serial=pty \
--disk=path=/var/lib/libvirt/images/with-fs/cloud-init.iso,readonly=True \
--import \
--sysinfo=system.serial=ds=nocloud \
--boot=uefi \
--memorybacking=access.mode=shared,source.type=memfd \
--filesystem=type=mount,accessmode=passthrough,driver.type=virtiofs,driver.queue=1024,source.dir=/srv/with-fs,target.dir=data
# Create the with-zvol VM
e2e_test_debug_log "setup: Creating the with-zvol VM..."
mkdir -p /var/lib/libvirt/images/with-zvol
zfs create -p data/domains/with-zvol -o mountpoint=/var/lib/libvirt/images/with-zvol
zfs create -V 10G data/domains/with-zvol/data
convert_cloud_image "$fedora_img" "/var/lib/libvirt/images/with-zvol/root.img"
create_cloud_init_iso "with-zvol"
virt-install --noautoconsole \
--name=with-zvol \
--cpu=host-passthrough \
--vcpus=1 \
--ram=4096 \
--os-variant=fedora-rawhide \
--disk=path=/var/lib/libvirt/images/with-zvol/root.img,target.dev=vda,bus=virtio,driver.discard=unmap,driver.io=io_uring,format=raw,sparse=True,blockio.logical_block_size=512,blockio.physical_block_size=512,serial=root,format=raw \
--disk=path=/dev/zvol/data/domains/with-zvol/data,target.dev=vdb,bus=virtio,cache=directsync,blockio.logical_block_size=4096,blockio.physical_block_size=4096,driver.discard=unmap,driver.io=io_uring,serial=zvol \
--network=none \
--console=pty,target.type=virtio \
--serial=pty \
--disk=path=/var/lib/libvirt/images/with-zvol/cloud-init.iso,readonly=True \
--import \
--sysinfo=system.serial=ds=nocloud \
--boot=uefi
}
readiness_wait() {
e2e_test_debug_log "setup: Waiting for VMs to become ready..."
for domain in standard with-fs with-zvol; do
e2e_test_debug_log "setup: Waiting for qemu guest agent to be running in domain '$domain'..."
until virsh qemu-agent-command "$domain" '{"execute":"guest-ping"}' &>/dev/null; do
sleep 2
done
done
e2e_test_debug_log "setup: all VMs started successfully"
for domain in standard with-fs with-zvol; do
e2e_test_debug_log "setup: Waiting for cloud-init to complete in domain '$domain'..."
until qemu_exec "$domain" test -f /var/lib/cloud/instance/boot-finished; do
sleep 2
done
done
if ! qemu_exec with-fs grep -q /test/virtiofs /proc/mounts; then
e2e_test_debug_log "setup: virtiofs not mounted in 'with-fs' domain"
return 1
fi
if ! qemu_exec with-zvol grep -q /test/zvol /proc/mounts; then
e2e_test_debug_log "setup: zvol not mounted in 'with-zvol' domain"
return 1
fi
e2e_test_debug_log "setup: VMs are ready"
}
local fedora_url="https://download.fedoraproject.org/pub/fedora/linux/releases/42/Cloud/x86_64/images/Fedora-Cloud-Base-Generic-42-1.1.x86_64.qcow2"
local fedora_img="/var/lib/libvirt/images/$(basename "$fedora_url")"
if [ ! -f "$fedora_img" ]; then
e2e_test_debug_log "setup: downloading Fedora Cloud image to $fedora_img"
mkdir -p /var/lib/libvirt/images/library
curl -sSfL -o "$fedora_img" "$fedora_url"
fi
e2e_test_debug_log "setup: Fedora Cloud image is at $fedora_img"
# Cleanup any leftover artifacts from previous runs
cleanup
create_domains
readiness_wait
}
teardown() {
cleanup
}
@test "zvirt: setup selftest" {
e2e_test_debug_log "setup: provisioning completed"
}
@test "zvirt: prune snapshots" {
# Take five snapshots in a row, each time creating and deleting a witness file
for snap in s1 s2 s3 s4 s5; do
# Create witness files in all three domains before taking snapshots
qemu_exec standard touch /test/rootfs/witness-file.$snap
qemu_exec with-fs touch /test/virtiofs/witness-file.$snap
qemu_exec with-zvol touch /test/zvol/witness-file.$snap
# Verify that the witness files exist in the virtiofs host mount
run test -f /srv/with-fs/witness-file.$snap
assert_success
# Take crash-consistent snapshots for all three domains
run zvirt snapshot -d standard -d with-zvol -d with-fs -s $snap
assert_success
# Verify that the domains are still running
run virsh domstate standard
assert_success
assert_output "running"
run virsh domstate with-fs
assert_success
assert_output "running"
run virsh domstate with-zvol
assert_success
assert_output "running"
# Assert that the files created before the snapshot exist
run qemu_exec standard ls -1 /test/rootfs
assert_success
assert_output "witness-file.$snap"
run qemu_exec with-fs ls -1 /test/virtiofs
assert_success
assert_output "witness-file.$snap"
run qemu_exec with-zvol ls -1 /test/zvol
assert_success
assert_output "witness-file.$snap"
# Delete the witness files
run qemu_exec standard rm /test/rootfs/witness-file.$snap
assert_success
run qemu_exec with-fs rm /test/virtiofs/witness-file.$snap
assert_success
run qemu_exec with-zvol rm /test/zvol/witness-file.$snap
assert_success
# Sync all filesystems
run qemu_exec standard sync
assert_success
run qemu_exec with-fs sync
assert_success
run qemu_exec with-zvol sync
assert_success
# Wait a moment to ensure all writes are flushed
sleep 2
# Verify that the witness files have been deleted in the virtiofs host mount
run test -f /srv/with-fs/witness-file.$snap
assert_failure
done
# List snapshots and verify their existence
run zvirt list -d standard -d with-zvol -d with-fs
assert_success
assert_output "Snapshots for domain 'standard':
- s1
- s2
- s3
- s4
- s5
Snapshots for domain 'with-zvol':
- s1
- s2
- s3
- s4
- s5
Snapshots for domain 'with-fs':
- s1
- s2
- s3
- s4
- s5"
# Prune snapshots to keep only the latest two
run zvirt prune -k 2 -d standard -d with-zvol -d with-fs
assert_success
# List snapshots and verify their existence
run zvirt list -d standard -d with-zvol -d with-fs
assert_success
assert_output "Snapshots for domain 'standard':
- s4
- s5
Snapshots for domain 'with-zvol':
- s4
- s5
Snapshots for domain 'with-fs':
- s4
- s5"
# Stop all domains
run virsh destroy standard
assert_success
run virsh destroy with-fs
assert_success
run virsh destroy with-zvol
assert_success
# Revert snapshots in batch mode
run zvirt revert -d standard -d with-zvol -d with-fs -s s4
assert_success
# Check all domains have been shut off
run virsh domstate standard
assert_success
assert_output "shut off"
run virsh domstate with-fs
assert_success
assert_output "shut off"
run virsh domstate with-zvol
assert_success
assert_output "shut off"
# Start all domains
run virsh start standard
assert_success
run virsh start with-fs
assert_success
run virsh start with-zvol
assert_success
# Wait for all domains to be fully ready
readiness_wait
# Verify that the witness files still exist after revert
run qemu_exec standard ls -1 /test/rootfs
assert_success
assert_output "witness-file.s4"
run qemu_exec with-fs ls -1 /test/virtiofs
assert_success
assert_output "witness-file.s4"
run qemu_exec with-zvol ls -1 /test/zvol
assert_success
assert_output "witness-file.s4"
}
@test "zvirt: take live snapshot in batch mode" {
  # Batch live flow: snapshots are taken with -b -l; a batch revert (-b)
  # restores the saved live state, so domains come back "running".
  # Create witness files in all three domains before taking snapshots
  qemu_exec standard touch /test/rootfs/witness-file
  qemu_exec with-fs touch /test/virtiofs/witness-file
  qemu_exec with-zvol touch /test/zvol/witness-file
  # Verify that the witness files exist in the virtiofs host mount
  run test -f /srv/with-fs/witness-file
  assert_success
  # Take live snapshots for all three domains
  run zvirt snapshot -b -d standard -d with-zvol -d with-fs -s backup1 -l
  assert_success
  # Verify that the domains are still running
  run virsh domstate standard
  assert_success
  assert_output "running"
  run virsh domstate with-fs
  assert_success
  assert_output "running"
  run virsh domstate with-zvol
  assert_success
  assert_output "running"
  # Assert that the files created before the snapshot exist
  run qemu_exec standard ls -1 /test/rootfs
  assert_success
  assert_output "witness-file"
  run qemu_exec with-fs ls -1 /test/virtiofs
  assert_success
  assert_output "witness-file"
  run qemu_exec with-zvol ls -1 /test/zvol
  assert_success
  assert_output "witness-file"
  # List snapshots and verify their existence
  run zvirt list -d standard -d with-zvol -d with-fs
  assert_success
  assert_output "Snapshots for domain 'standard':
- backup1
Snapshots for domain 'with-zvol':
- backup1
Snapshots for domain 'with-fs':
- backup1"
  # Attempt to take the same snapshot again and expect failure
  run zvirt snapshot -b -d standard -d with-zvol -d with-fs -s backup1 -l
  assert_failure
  assert_output --partial "Snapshot 'backup1' already exists."
  assert_output --partial "standard:"
  assert_output --partial "with-zvol:"
  assert_output --partial "with-fs:"
  assert_output --partial "Pre-flight checks failed."
  # Delete the witness files
  run qemu_exec standard rm /test/rootfs/witness-file
  assert_success
  run qemu_exec with-fs rm /test/virtiofs/witness-file
  assert_success
  run qemu_exec with-zvol rm /test/zvol/witness-file
  assert_success
  # Sync all filesystems
  run qemu_exec standard sync
  assert_success
  run qemu_exec with-fs sync
  assert_success
  run qemu_exec with-zvol sync
  assert_success
  # Wait a moment to ensure all writes are flushed
  # (matches the crash-consistent tests; virtiofs deletion propagation can lag)
  sleep 2
  # Verify that the witness files have been deleted in the virtiofs host mount
  run test -f /srv/with-fs/witness-file
  assert_failure
  # Stop all domains
  run virsh destroy standard
  assert_success
  run virsh destroy with-fs
  assert_success
  run virsh destroy with-zvol
  assert_success
  # Revert snapshots in batch mode
  run zvirt revert -b -d standard -d with-zvol -d with-fs -s backup1
  assert_success
  # Check all domains are running again
  run virsh domstate standard
  assert_success
  assert_output "running"
  run virsh domstate with-fs
  assert_success
  assert_output "running"
  run virsh domstate with-zvol
  assert_success
  assert_output "running"
  # Verify that the witness files still exist after revert
  run qemu_exec standard ls -1 /test/rootfs
  assert_success
  assert_output "witness-file"
  run qemu_exec with-fs ls -1 /test/virtiofs
  assert_success
  assert_output "witness-file"
  run qemu_exec with-zvol ls -1 /test/zvol
  assert_success
  assert_output "witness-file"
}
@test "zvirt: take live snapshot without batch mode" {
  # Non-batch live flow: same as the batch variant but without -b; a live
  # revert still restores the saved state, so domains come back "running".
  # Create witness files in all three domains before taking snapshots
  qemu_exec standard touch /test/rootfs/witness-file
  qemu_exec with-fs touch /test/virtiofs/witness-file
  qemu_exec with-zvol touch /test/zvol/witness-file
  # Verify that the witness files exist in the virtiofs host mount
  run test -f /srv/with-fs/witness-file
  assert_success
  # Take live snapshots for all three domains
  run zvirt snapshot -d standard -d with-zvol -d with-fs -s backup1 -l
  assert_success
  # Verify that the domains are still running
  run virsh domstate standard
  assert_success
  assert_output "running"
  run virsh domstate with-fs
  assert_success
  assert_output "running"
  run virsh domstate with-zvol
  assert_success
  assert_output "running"
  # Assert that the files created before the snapshot exist
  run qemu_exec standard ls -1 /test/rootfs
  assert_success
  assert_output "witness-file"
  run qemu_exec with-fs ls -1 /test/virtiofs
  assert_success
  assert_output "witness-file"
  run qemu_exec with-zvol ls -1 /test/zvol
  assert_success
  assert_output "witness-file"
  # List snapshots and verify their existence
  run zvirt list -d standard -d with-zvol -d with-fs
  assert_success
  assert_output "Snapshots for domain 'standard':
- backup1
Snapshots for domain 'with-zvol':
- backup1
Snapshots for domain 'with-fs':
- backup1"
  # Attempt to take the same snapshot again and expect failure
  run zvirt snapshot -d standard -d with-zvol -d with-fs -s backup1 -l
  assert_failure
  assert_output --partial "Snapshot 'backup1' already exists."
  assert_output --partial "standard:"
  assert_output --partial "with-zvol:"
  assert_output --partial "with-fs:"
  assert_output --partial "Pre-flight checks failed."
  # Delete the witness files
  run qemu_exec standard rm /test/rootfs/witness-file
  assert_success
  run qemu_exec with-fs rm /test/virtiofs/witness-file
  assert_success
  run qemu_exec with-zvol rm /test/zvol/witness-file
  assert_success
  # Sync all filesystems
  run qemu_exec standard sync
  assert_success
  run qemu_exec with-fs sync
  assert_success
  run qemu_exec with-zvol sync
  assert_success
  # Wait a moment to ensure all writes are flushed
  # (matches the crash-consistent tests; virtiofs deletion propagation can lag)
  sleep 2
  # Verify that the witness files have been deleted in the virtiofs host mount
  run test -f /srv/with-fs/witness-file
  assert_failure
  # Stop all domains
  run virsh destroy standard
  assert_success
  run virsh destroy with-fs
  assert_success
  run virsh destroy with-zvol
  assert_success
  # Revert snapshots without batch mode
  run zvirt revert -d standard -d with-zvol -d with-fs -s backup1
  assert_success
  # Check all domains are running again
  run virsh domstate standard
  assert_success
  assert_output "running"
  run virsh domstate with-fs
  assert_success
  assert_output "running"
  run virsh domstate with-zvol
  assert_success
  assert_output "running"
  # Verify that the witness files still exist after revert
  run qemu_exec standard ls -1 /test/rootfs
  assert_success
  assert_output "witness-file"
  run qemu_exec with-fs ls -1 /test/virtiofs
  assert_success
  assert_output "witness-file"
  run qemu_exec with-zvol ls -1 /test/zvol
  assert_success
  assert_output "witness-file"
}
@test "zvirt: take crash-consistent snapshot without batch mode" {
# Non-batch crash-consistent flow: snapshots are taken while the guests keep
# running, but a revert leaves the domains shut off, so they are started
# manually afterwards and readiness_wait is needed before guest commands.
# Create witness files in all three domains before taking snapshots
qemu_exec standard touch /test/rootfs/witness-file
qemu_exec with-fs touch /test/virtiofs/witness-file
qemu_exec with-zvol touch /test/zvol/witness-file
# Verify that the witness files exist in the virtiofs host mount
run test -f /srv/with-fs/witness-file
assert_success
# Take crash-consistent snapshots for all three domains
run zvirt snapshot -d standard -d with-zvol -d with-fs -s backup1
assert_success
# Verify that the domains are still running
run virsh domstate standard
assert_success
assert_output "running"
run virsh domstate with-fs
assert_success
assert_output "running"
run virsh domstate with-zvol
assert_success
assert_output "running"
# Assert that the files created before the snapshot exist
run qemu_exec standard ls -1 /test/rootfs
assert_success
assert_output "witness-file"
run qemu_exec with-fs ls -1 /test/virtiofs
assert_success
assert_output "witness-file"
run qemu_exec with-zvol ls -1 /test/zvol
assert_success
assert_output "witness-file"
# List snapshots and verify their existence
run zvirt list -d standard -d with-zvol -d with-fs
assert_success
assert_output "Snapshots for domain 'standard':
- backup1
Snapshots for domain 'with-zvol':
- backup1
Snapshots for domain 'with-fs':
- backup1"
# Attempt to take the same snapshot again and expect failure
run zvirt snapshot -d standard -d with-zvol -d with-fs -s backup1
assert_failure
assert_output --partial "Snapshot 'backup1' already exists."
assert_output --partial "standard:"
assert_output --partial "with-zvol:"
assert_output --partial "with-fs:"
assert_output --partial "Pre-flight checks failed."
# Delete the witness files
run qemu_exec standard rm /test/rootfs/witness-file
assert_success
run qemu_exec with-fs rm /test/virtiofs/witness-file
assert_success
run qemu_exec with-zvol rm /test/zvol/witness-file
assert_success
# Sync all filesystems
run qemu_exec standard sync
assert_success
run qemu_exec with-fs sync
assert_success
run qemu_exec with-zvol sync
assert_success
# Wait a moment to ensure all writes are flushed
sleep 2
# Verify that the witness files have been deleted in the virtiofs host mount
run test -f /srv/with-fs/witness-file
assert_failure
# Stop all domains
run virsh destroy standard
assert_success
run virsh destroy with-fs
assert_success
run virsh destroy with-zvol
assert_success
# Revert snapshots without batch mode
run zvirt revert -d standard -d with-zvol -d with-fs -s backup1
assert_success
# Check all domains have been shut off
run virsh domstate standard
assert_success
assert_output "shut off"
run virsh domstate with-fs
assert_success
assert_output "shut off"
run virsh domstate with-zvol
assert_success
assert_output "shut off"
# Start all domains
run virsh start standard
assert_success
run virsh start with-fs
assert_success
run virsh start with-zvol
assert_success
# Wait for all domains to be fully ready
readiness_wait
# Verify that the witness files still exist after revert
run qemu_exec standard ls -1 /test/rootfs
assert_success
assert_output "witness-file"
run qemu_exec with-fs ls -1 /test/virtiofs
assert_success
assert_output "witness-file"
run qemu_exec with-zvol ls -1 /test/zvol
assert_success
assert_output "witness-file"
}
@test "zvirt: take crash-consistent snapshot with batch mode" {
# Batch crash-consistent flow: after a batch revert (-b) the domains are
# started again as part of the revert, hence the "running" checks and the
# readiness_wait before running guest commands (the guests boot fresh).
# Create witness files in all three domains before taking snapshots
qemu_exec standard touch /test/rootfs/witness-file
qemu_exec with-fs touch /test/virtiofs/witness-file
qemu_exec with-zvol touch /test/zvol/witness-file
# Verify that the witness files exist in the virtiofs host mount
run test -f /srv/with-fs/witness-file
assert_success
# Take crash-consistent snapshots for all three domains
run zvirt snapshot -b -d standard -d with-zvol -d with-fs -s backup1
assert_success
# Verify that the domains are still running
run virsh domstate standard
assert_success
assert_output "running"
run virsh domstate with-fs
assert_success
assert_output "running"
run virsh domstate with-zvol
assert_success
assert_output "running"
# Assert that the files created before the snapshot exist
run qemu_exec standard ls -1 /test/rootfs
assert_success
assert_output "witness-file"
run qemu_exec with-fs ls -1 /test/virtiofs
assert_success
assert_output "witness-file"
run qemu_exec with-zvol ls -1 /test/zvol
assert_success
assert_output "witness-file"
# List snapshots and verify their existence
run zvirt list -d standard -d with-zvol -d with-fs
assert_success
assert_output "Snapshots for domain 'standard':
- backup1
Snapshots for domain 'with-zvol':
- backup1
Snapshots for domain 'with-fs':
- backup1"
# Attempt to take the same snapshot again and expect failure
run zvirt snapshot -b -d standard -d with-zvol -d with-fs -s backup1
assert_failure
assert_output --partial "Snapshot 'backup1' already exists."
assert_output --partial "standard:"
assert_output --partial "with-zvol:"
assert_output --partial "with-fs:"
assert_output --partial "Pre-flight checks failed."
# Delete the witness files
run qemu_exec standard rm /test/rootfs/witness-file
assert_success
run qemu_exec with-fs rm /test/virtiofs/witness-file
assert_success
run qemu_exec with-zvol rm /test/zvol/witness-file
assert_success
# Sync all filesystems
run qemu_exec standard sync
assert_success
run qemu_exec with-fs sync
assert_success
run qemu_exec with-zvol sync
assert_success
# Wait a moment to ensure all writes are flushed
sleep 2
# Verify that the witness files have been deleted in the virtiofs host mount
run test -f /srv/with-fs/witness-file
assert_failure
# Stop all domains
run virsh destroy standard
assert_success
run virsh destroy with-fs
assert_success
run virsh destroy with-zvol
assert_success
# Revert snapshots in batch mode
run zvirt revert -b -d standard -d with-zvol -d with-fs -s backup1
assert_success
# Check all domains are running again
run virsh domstate standard
assert_success
assert_output "running"
run virsh domstate with-fs
assert_success
assert_output "running"
run virsh domstate with-zvol
assert_success
assert_output "running"
# Wait for all domains to be fully ready
readiness_wait
# Verify that the witness files still exist after revert
run qemu_exec standard ls -1 /test/rootfs
assert_success
assert_output "witness-file"
run qemu_exec with-fs ls -1 /test/virtiofs
assert_success
assert_output "witness-file"
run qemu_exec with-zvol ls -1 /test/zvol
assert_success
assert_output "witness-file"
}

338
test/unit/core.bats

@ -8,7 +8,7 @@ setup() {
# Load the core library and export its functions
local fn_before="$(declare -F | cut -d ' ' -f 3 | sort)"
set -Eeuo pipefail
source "${BATS_TEST_DIRNAME}/../../src/lib/core.sh"
source "${BATS_TEST_DIRNAME}/../../src/lib/zvirt/core.sh"
local fn_after="$(declare -F | cut -d ' ' -f 3 | sort)"
declare -a zvirt_fn=( $(comm -13 <(echo "$fn_before") <(echo "$fn_after")) )
for fn in "${zvirt_fn[@]}"; do
@ -19,7 +19,7 @@ setup() {
# and with access to the domain_params_cache associative array
in_bash() {
local vars=""
for var in domain_params_cache snapshot_name domains verbose action batch live; do
for var in domain_params_cache snapshot_name domains verbose action batch live keep; do
if declare -p "${var}" &>/dev/null; then
vars+="$(declare -p "${var}") ; "
fi
@ -165,6 +165,23 @@ snapshot2"
assert_failure
}
@test "has_save_file: nominal case" {
# has_save_file should succeed only when <mountpoint>/domain.save exists,
# with the mountpoint looked up in domain_params_cache.
# Temporary directory for save files
local temp_dir="$(mktemp -d)"
mkdir -p "$temp_dir/foo" "$temp_dir/bar"
# Only foo has a save file
touch "$temp_dir/foo/domain.save"
# Fill up the cache
declare -A domain_params_cache=( ["foo/mountpoint"]="$temp_dir/foo" ["bar/mountpoint"]="$temp_dir/bar" )
# Run the test
run in_bash has_save_file foo
assert_success
run in_bash has_save_file bar
assert_failure
}
@test "take_live_snapshot: nominal case" {
# Mock the underlying tools
declare -A domain_params_cache=( ["foo/state"]="running" ["foo/dataset"]="data/domains/foo" ["foo/mountpoint"]="/var/lib/libvirt/images/foo" ["foo/zvols"]="" )
@ -246,7 +263,7 @@ data/domains/baz/virtiofs"
declare -A domain_params_cache=( ["foo/state"]="running" ["foo/dataset"]="data/domains/foo" ["foo/mountpoint"]="/var/lib/libvirt/images/foo" ["foo/zvols"]="" )
virsh_mock="$(mock_create)"
virsh() {
if [[ "$*" == "restore /var/lib/libvirt/images/foo/domain.save --verbose --paused" ]]; then
if [[ "$*" == "restore /var/lib/libvirt/images/foo/domain.save --paused" ]]; then
$virsh_mock "$@"
return $?
fi
@ -267,7 +284,7 @@ data/domains/baz/virtiofs"
declare -A domain_params_cache=( ["foo/state"]="running" ["foo/dataset"]="data/domains/foo" ["foo/mountpoint"]="/var/lib/libvirt/images/foo" ["foo/zvols"]="" )
virsh_mock="$(mock_create)"
virsh() {
if [[ "$*" == "restore /var/lib/libvirt/images/foo/domain.save --verbose --running" ]]; then
if [[ "$*" == "restore /var/lib/libvirt/images/foo/domain.save --running" ]]; then
$virsh_mock "$@"
return $?
fi
@ -306,13 +323,19 @@ data/domains/baz/virtiofs"
@test "resume_all_domains: nominal case" {
# Mock the underlying tools
local domains=( "foo" "bar" )
declare -A domain_params_cache=( ["foo/state"]="paused" ["bar/state"]="shut off" )
virsh_mock="$(mock_create)"
virsh() {
if [[ "$*" == "resume foo" ]] || [[ "$*" == "start bar" ]]; then
$virsh_mock "$@"
return $?
fi
if [[ "$*" == "domstate foo" ]]; then
echo "paused"
return 0
elif [[ "$*" == "domstate bar" ]]; then
echo "shut off"
return 0
fi
return 1
}
export -f virsh
@ -324,6 +347,78 @@ data/domains/baz/virtiofs"
[[ "$(mock_get_call_num ${virsh_mock})" -eq 2 ]]
}
@test "fsthaw_all_domains: nominal case" {
# Only running domains should be thawed: 'bar' is shut off, so exactly one
# fsthaw_domain call (for 'foo') is expected.
# Mock the underlying tools
local domains=( "foo" "bar" )
declare -A domain_params_cache=( ["foo/state"]="running" ["bar/state"]="shut off" )
fsthaw_mock="$(mock_create)"
fsthaw_domain() {
if [[ "$*" == "foo" ]]; then
$fsthaw_mock "$@"
return $?
fi
return 1
}
export -f fsthaw_domain
export fsthaw_mock
# Run the test
run in_bash fsthaw_all_domains "${domains[@]}"
assert_success
[[ "$(mock_get_call_num ${fsthaw_mock})" -eq 1 ]]
}
@test "fsfreeze_all_domains: nominal case" {
# Only running domains should be frozen: 'bar' is shut off, so exactly one
# fsfreeze_domain call (for 'foo') is expected.
# Mock the underlying tools
local domains=( "foo" "bar" )
declare -A domain_params_cache=( ["foo/state"]="running" ["bar/state"]="shut off" )
fsfreeze_mock="$(mock_create)"
fsfreeze_domain() {
if [[ "$*" == "foo" ]]; then
$fsfreeze_mock "$@"
return $?
fi
return 1
}
export -f fsfreeze_domain
export fsfreeze_mock
# Run the test
run in_bash fsfreeze_all_domains "${domains[@]}"
assert_success
[[ "$(mock_get_call_num ${fsfreeze_mock})" -eq 1 ]]
}
@test "fsthaw_domain: nominal case" {
  # Mock the underlying tools: virsh only accepts "domfsthaw foo".
  virsh() {
    [[ "$*" == "domfsthaw foo" ]] && return 0
    return 1
  }
  export -f virsh
  # Run the test: exercise fsthaw_domain itself rather than calling the
  # virsh mock directly (the original test only verified its own mock and
  # never invoked the function under test).
  run in_bash fsthaw_domain "foo"
  assert_success
  run in_bash fsthaw_domain "bar"
  assert_failure
}
@test "fsfreeze_domain: nominal case" {
  # Mock the underlying tools: virsh only accepts "domfsfreeze foo".
  virsh() {
    [[ "$*" == "domfsfreeze foo" ]] && return 0
    return 1
  }
  export -f virsh
  # Run the test: exercise fsfreeze_domain itself rather than calling the
  # virsh mock directly (the original test only verified its own mock and
  # never invoked the function under test).
  run in_bash fsfreeze_domain "foo"
  assert_success
  run in_bash fsfreeze_domain "bar"
  assert_failure
}
@test "domain_checks: nominal case" {
# Mock the underlying tools
domain_exists() {
@ -387,26 +482,29 @@ data/domains/baz/virtiofs"
assert_success
run in_bash domain_checks revert bar backup1
assert_success
}
@test "list_snapshots: nominal case" {
# Mock the underlying tools
get_zfs_datasets_from_domain() {
if [[ "$*" == "foo" ]]; then
echo "data/domains/foo"
# Live mode with existing save file
has_save_file() {
return 0
fi
return 1
}
get_zfs_snapshots_from_dataset() {
if [[ "$*" == "data/domains/foo" ]]; then
echo "snapshot1
snapshot2"
return 0
fi
export -f has_save_file
live=1
run in_bash domain_checks snapshot foo backup2
assert_failure
# Live mode with non-existing save file
has_save_file() {
return 1
}
export -f get_zfs_datasets_from_domain get_zfs_snapshots_from_dataset
export -f has_save_file
live=1
run in_bash domain_checks snapshot foo backup2
assert_success
}
@test "list_snapshots: nominal case" {
# Mock the underlying tools
declare -A domain_params_cache=( ["foo/snapshots"]="snapshot1 snapshot2" ["bar/snapshots"]="snapshot3 snapshot4" )
# Run the test
run in_bash list_snapshots foo
@ -416,6 +514,42 @@ snapshot2"
- snapshot2"
}
@test "prune_snapshots: nominal case" {
# Pruning keeps the newest $keep snapshots and destroys the rest with a
# single ZFS percent-range destroy.
# Mock the underlying tools
declare -A domain_params_cache=( ["foo/snapshots"]="s1 s2 s3 s4 s5" ["bar/snapshots"]="s1 s2 s3 s4 s5" ["baz/snapshots"]="s1" ["foo/dataset"]="data/domains/foo" ["bar/dataset"]="data/domains/bar" ["baz/dataset"]="data/domains/baz" )
zfs_destroy_mock="$(mock_create)"
# "dataset@%snap" is ZFS percent-range syntax: destroy every snapshot from
# the oldest up to and including 'snap' — see zfs-destroy(8).
zfs() {
if [[ "$*" == "destroy -r data/domains/foo@%s3" ]] || [[ "$*" == "destroy -r data/domains/bar@%s2" ]]; then
$zfs_destroy_mock "$@"
return $?
fi
return 1
}
export -f zfs
export zfs_destroy_mock
# Run the test
keep=2
run in_bash prune_snapshots foo
assert_success
[[ "$(mock_get_call_num ${zfs_destroy_mock})" -eq 1 ]] # Deletion up to s3
keep=3
run in_bash prune_snapshots bar
assert_success
[[ "$(mock_get_call_num ${zfs_destroy_mock})" -eq 2 ]] # Deletion up to s2
keep=5
run in_bash prune_snapshots bar
assert_success
[[ "$(mock_get_call_num ${zfs_destroy_mock})" -eq 2 ]] # No deletion should occur
keep=1
run in_bash prune_snapshots baz
assert_success
[[ "$(mock_get_call_num ${zfs_destroy_mock})" -eq 2 ]] # No deletion should occur
}
@test "preflight_checks: nominal case" {
# Mock the underlying tools
domain_checks() {
@ -444,7 +578,22 @@ snapshot2"
take_live_snapshot() { return 1; }
restore_domain() { return 1; }
resume_all_domains() { return 1; }
export -f take_crash_consistent_snapshot pause_all_domains take_live_snapshot restore_domain resume_all_domains
remove_save_file() { return 1; }
fsfreeze_all_domains() { return 1; }
fsthaw_all_domains() { return 1; }
fsfreeze_domain() {
if [[ "$*" == "foo" ]]; then
return 0
fi
return 1
}
fsthaw_domain() {
if [[ "$*" == "foo" ]]; then
return 0
fi
return 1
}
export -f take_crash_consistent_snapshot pause_all_domains take_live_snapshot restore_domain resume_all_domains remove_save_file fsfreeze_all_domains fsthaw_all_domains fsfreeze_domain fsthaw_domain
declare -A domain_params_cache=( ["foo/state"]="running" ["bar/state"]="shut off" )
@ -471,22 +620,26 @@ snapshot2"
fi
return 1
}
pause_all_domains() {
pause_all_domains() { return 1; }
take_live_snapshot() { return 1; }
restore_domain() { return 1; }
resume_all_domains() { return 1; }
remove_save_file() { return 1; }
fsfreeze_all_domains() {
if [[ "$*" == "foo bar" ]]; then
return 0
fi
return 1
}
take_live_snapshot() { return 1; }
restore_domain() { return 1; }
resume_all_domains() {
fsthaw_all_domains() {
if [[ "$*" == "foo bar" ]]; then
return 0
fi
return 1
}
export -f take_crash_consistent_snapshot pause_all_domains take_live_snapshot restore_domain resume_all_domains
fsfreeze_domain() { return 1; }
fsthaw_domain() { return 1; }
export -f take_crash_consistent_snapshot pause_all_domains take_live_snapshot restore_domain resume_all_domains remove_save_file fsfreeze_all_domains fsthaw_all_domains fsfreeze_domain fsthaw_domain
declare -A domain_params_cache=( ["foo/state"]="running" ["bar/state"]="shut off" )
@ -526,7 +679,87 @@ snapshot2"
return 1
}
resume_all_domains() { return 1; }
export -f take_crash_consistent_snapshot pause_all_domains take_live_snapshot restore_domain resume_all_domains
remove_save_file() { return 1; }
fsfreeze_all_domains() { return 1; }
fsthaw_all_domains() { return 1; }
fsfreeze_domain() {
if [[ "$*" == "bar" ]]; then
return 0
fi
return 1
}
fsthaw_domain() {
if [[ "$*" == "bar" ]]; then
return 0
fi
return 1
}
export -f take_crash_consistent_snapshot pause_all_domains take_live_snapshot restore_domain resume_all_domains remove_save_file fsfreeze_all_domains fsthaw_all_domains fsfreeze_domain fsthaw_domain
declare -A domain_params_cache=( ["foo/state"]="running" ["bar/state"]="shut off" )
# Run the test
domains=( "foo" "bar" )
snapshot_name="backup"
batch=0
live=1
run in_bash take_snapshots
assert_success
# Add a non-existing domain to the list
domains+=( "baz" )
run in_bash take_snapshots
assert_failure
}
@test "take_snapshots: batch=1, live=1" {
# Mock the underlying tools
take_crash_consistent_snapshot() {
if [[ "$*" == "bar backup" ]]; then
return 0
fi
return 1
}
pause_all_domains() {
if [[ "$*" == "foo bar" ]]; then
return 0
fi
return 1
}
take_live_snapshot() {
if [[ "$*" == "foo backup" ]]; then
return 0
fi
return 1
}
restore_domain() {
if [[ "$*" == "foo" ]]; then
return 0
fi
return 1
}
resume_all_domains() {
if [[ "$*" == "foo bar" ]]; then
return 0
fi
return 1
}
remove_save_file() { return 1; }
fsfreeze_all_domains() { return 1; }
fsthaw_all_domains() { return 1; }
fsfreeze_domain() {
if [[ "$*" == "bar" ]]; then
return 0
fi
return 1
}
fsthaw_domain() {
if [[ "$*" == "bar" ]]; then
return 0
fi
return 1
}
export -f take_crash_consistent_snapshot pause_all_domains take_live_snapshot restore_domain resume_all_domains remove_save_file fsfreeze_all_domains fsthaw_all_domains fsfreeze_domain fsthaw_domain
declare -A domain_params_cache=( ["foo/state"]="running" ["bar/state"]="shut off" )
@ -554,14 +787,30 @@ snapshot2"
return 1
}
restore_domain() {
regex="^(foo|bar)$"
if [[ "$*" =~ $regex ]]; then
if [[ "$*" == "foo" ]]; then
return 0
fi
return 1
}
resume_all_domains() { return 1; }
export -f revert_snapshot restore_domain resume_all_domains
has_save_file() {
if [[ "$*" == "foo" ]]; then
return 0
fi
return 1
}
remove_save_file() { return 1; }
domain_state() {
if [[ "$*" == "foo" ]]; then
echo "paused"
return 0
elif [[ "$*" == "bar" ]]; then
echo "shut off"
return 0
fi
return 1
}
export -f revert_snapshot restore_domain resume_all_domains has_save_file remove_save_file domain_state
# Run the test
domains=( "foo" "bar" )
@ -586,8 +835,7 @@ snapshot2"
return 1
}
restore_domain() {
regex="^(foo|bar)$"
if [[ "$*" =~ $regex ]]; then
if [[ "$*" == "foo" ]]; then
return 0
fi
return 1
@ -598,7 +846,29 @@ snapshot2"
fi
return 1
}
export -f revert_snapshot restore_domain resume_all_domains
has_save_file() {
if [[ "$*" == "foo" ]]; then
return 0
fi
return 1
}
remove_save_file() {
if [[ "$*" == "foo" ]]; then
return 0
fi
return 1
}
domain_state() {
if [[ "$*" == "foo" ]]; then
echo "paused"
return 0
elif [[ "$*" == "bar" ]]; then
echo "shut off"
return 0
fi
return 1
}
export -f revert_snapshot restore_domain resume_all_domains has_save_file remove_save_file domain_state
# Run the test
domains=( "foo" "bar" )

25
test/unit/usage.bats

@ -1,19 +1,17 @@
#!/usr/bin/env bats
bats_require_minimum_version 1.5.0
setup() {
bats_load_library 'bats-support'
bats_load_library 'bats-assert'
set -Eeuo pipefail
source "${BATS_TEST_DIRNAME}/../../src/lib/core.sh"
source "${BATS_TEST_DIRNAME}/../../src/lib/zvirt/core.sh"
function call_parse_args () {
init_global_variables
parse_args "$@"
ret=$?
declare -p action batch live verbose domains snapshot_name
declare -p action batch live verbose domains snapshot_name keep
return $ret
}
}
@ -67,3 +65,22 @@ setup() {
assert_output --partial 'live="0"'
}
@test "call_parse_args: prune snapshots for all domains" {
# When no -d option is given, the domain list is taken from libvirt;
# mock virsh so "list --all --name" returns a fixed set of domains.
virsh() {
if [[ "$*" == "list --all --name" ]]; then
echo -e "foo\nbar"
return 0
fi
return 1
}
# Nominal case: prune with an explicit -k retention count
run call_parse_args prune -k 5
assert_success
assert_output --partial 'action="prune"'
assert_output --partial 'domains=([0]="foo" [1]="bar")'
assert_output --partial 'keep="5"'
# Error case: prune without -k must be rejected
run call_parse_args prune
assert_failure
assert_output --partial "The -k option with a positive integer value must be specified for the 'prune' action"
}

Loading…
Cancel
Save