diff --git a/Makefile b/Makefile index f905549..36436a5 100644 --- a/Makefile +++ b/Makefile @@ -1,4 +1,4 @@ -.PHONY: all test unit-test syntax-test integration-test lint clean +.PHONY: all test unit-test syntax-test e2e-test lint clean prerequisites all: syntax-test unit-test lint @@ -7,10 +7,19 @@ syntax-test: @/bin/bash -nv src/zvirt @/bin/bash -nv src/lib/core.sh -unit-test: +prerequisites: + @echo "Installing prerequisites..." + @/bin/bash -Eeuo pipefail -c 'if ! bats --version &>/dev/null; then dnf install -y bats; fi' + @/bin/bash -Eeuo pipefail -c 'if ! yq --version &>/dev/null; then dnf install -y yq; fi' + +unit-test: prerequisites @echo "Running unit tests..." @LANG=LC_ALL=C BATS_LIB_PATH=$(PWD)/test/test_helper bats test/unit +e2e-test: prerequisites + @echo "Running end-to-end tests..." + @LANG=LC_ALL=C BATS_LIB_PATH=$(PWD)/test/test_helper bats test/e2e + clean: lint: @echo "Linting..." diff --git a/src/lib/core.sh b/src/lib/core.sh index b092a1b..8835ba9 100644 --- a/src/lib/core.sh +++ b/src/lib/core.sh @@ -62,7 +62,7 @@ function init_global_variables () { live=0 # Cache for domain parameters to avoid redundant calls to the zfs command - declare -A domain_params_cache=( ) + declare -gA domain_params_cache=( ) } # Parses the command-line arguments. @@ -255,7 +255,6 @@ function domain_checks () { fi # Store those values in cache for later use - declare -A domain_params_cache domain_params_cache["$domain/state"]="${state}" domain_params_cache["$domain/dataset"]="${zfs_dataset}" domain_params_cache["$domain/mountpoint"]="${zfs_mountpoint}" @@ -340,7 +339,7 @@ function restore_domain () { else virsh_restore_opts+=( "--running" ) fi - virsh restore "${zfs_mountpoint}/domain.save" --verbose "${virsh_restore_opts[@]}" + virsh restore "${zfs_mountpoint}/domain.save" "${virsh_restore_opts[@]}" } # Pauses all domains in the list. @@ -362,7 +361,7 @@ function resume_all_domains () { for domain in "${domains[@]}"; do log_verbose "$domain: Resuming domain..." 
- state="${domain_params_cache["$domain/state"]}" + state="$(domain_state "$domain")" case "$state" in paused) virsh resume "$domain" || true @@ -394,6 +393,15 @@ function preflight_checks () { return $error } +function remove_save_file () { + local domain="$1" + zfs_mountpoint="${domain_params_cache["$domain/mountpoint"]}" + if [ -f "${zfs_mountpoint}/domain.save" ]; then + log_verbose "$domain: Removing save file '${zfs_mountpoint}/domain.save'..." + rm -f "${zfs_mountpoint}/domain.save" + fi +} + # Takes snapshots for all specified domains. function take_snapshots () { if [ "$batch" -eq 1 ]; then @@ -405,6 +413,9 @@ function take_snapshots () { if [ "$live" -eq 1 ] && [ "$state" == "running" ]; then take_live_snapshot "$domain" "$snapshot_name" restore_domain "$domain" + if [ "$batch" -eq 1 ]; then + remove_save_file "$domain" + fi else take_crash_consistent_snapshot "$domain" "$snapshot_name" fi diff --git a/test/e2e/cloud-init/standard-user-data b/test/e2e/cloud-init/standard-user-data new file mode 100644 index 0000000..6f37128 --- /dev/null +++ b/test/e2e/cloud-init/standard-user-data @@ -0,0 +1,17 @@ +#cloud-config + +bootcmd: +- setsebool -P virt_qemu_ga_run_unconfined on +- setsebool -P virt_qemu_ga_read_nonsecurity_files on + +runcmd: +- install -o root -g root -m 0777 -d /test/rootfs + +users: +- name: e2e + gecos: End-to-End Test User + sudo: ALL=(ALL) NOPASSWD:ALL + groups: wheel + lock_passwd: false + # echo -n test | mkpasswd -m bcrypt -s + passwd: $2b$05$Oh13XsRSrGrL/iSvV0Rax.w7rQMx/6lyBTCuaEVXrdh/qiagci9bS diff --git a/test/e2e/cloud-init/with-fs-user-data b/test/e2e/cloud-init/with-fs-user-data new file mode 100644 index 0000000..43cbabe --- /dev/null +++ b/test/e2e/cloud-init/with-fs-user-data @@ -0,0 +1,21 @@ +#cloud-config + +mounts: +- [ data, /test/virtiofs, virtiofs, "defaults,nofail", "0", "0" ] + +bootcmd: +- setsebool -P virt_qemu_ga_run_unconfined on +- setsebool -P virt_qemu_ga_read_nonsecurity_files on + +runcmd: +- install -o root -g 
root -m 0777 -d /test/virtiofs +- mount -a + +users: +- name: e2e + gecos: End-to-End Test User + sudo: ALL=(ALL) NOPASSWD:ALL + groups: wheel + lock_passwd: false + # echo -n test | mkpasswd -m bcrypt -s + passwd: $2b$05$Oh13XsRSrGrL/iSvV0Rax.w7rQMx/6lyBTCuaEVXrdh/qiagci9bS diff --git a/test/e2e/cloud-init/with-zvol-user-data b/test/e2e/cloud-init/with-zvol-user-data new file mode 100644 index 0000000..a805a7f --- /dev/null +++ b/test/e2e/cloud-init/with-zvol-user-data @@ -0,0 +1,34 @@ +#cloud-config + +disk_setup: + /dev/vdb: + table_type: gpt + layout: true + overwrite: true + +fs_setup: +- label: zvol + filesystem: xfs + device: /dev/vdb + partition: auto + +mounts: + - [ LABEL=zvol, /test/zvol, xfs, "defaults,nofail", "0", "2" ] + +bootcmd: +- setsebool -P virt_qemu_ga_run_unconfined on +- setsebool -P virt_qemu_ga_read_nonsecurity_files on + +runcmd: +- install -o root -g root -m 0777 -d /test/zvol +- chmod 0777 /test/zvol +- mount -a + +users: +- name: e2e + gecos: End-to-End Test User + sudo: ALL=(ALL) NOPASSWD:ALL + groups: wheel + lock_passwd: false + # echo -n test | mkpasswd -m bcrypt -s + passwd: $2b$05$Oh13XsRSrGrL/iSvV0Rax.w7rQMx/6lyBTCuaEVXrdh/qiagci9bS diff --git a/test/e2e/zvirt.bats b/test/e2e/zvirt.bats new file mode 100644 index 0000000..3d40318 --- /dev/null +++ b/test/e2e/zvirt.bats @@ -0,0 +1,277 @@ +#!/usr/bin/env bats + +setup() { + bats_load_library 'bats-support' + bats_load_library 'bats-assert' + + set -Eeuo pipefail + export LANG=C LC_ALL=C + + zvirt () { + "${BATS_TEST_DIRNAME}/../../src/zvirt" "$@" + } + + qemu_exec() { + domain="$1" + shift || true + local json_args="" + for arg in "${@:2}"; do + if [ -n "$json_args" ]; then + json_args+=", " + fi + json_args+="\"$arg\"" + done + local command="{\"execute\": \"guest-exec\", \"arguments\": {\"path\": \"$1\", \"arg\": [ $json_args ], \"capture-output\": true }}" + output="$(virsh qemu-agent-command "$domain" "$command")" + #echo "qemu_exec: command output: $output" >&3 + pid="$(echo 
"$output" | jq -r '.return.pid')" + if [ -z "$pid" ] || [ "$pid" == "null" ]; then + echo "qemu_exec: failed to get pid from command output" >&3 + return 1 + fi + sleep .25 + while true; do + local status_command="{\"execute\": \"guest-exec-status\", \"arguments\": {\"pid\": $pid}}" + status_output="$(virsh qemu-agent-command "$domain" "$status_command")" + #echo "qemu_exec: status output: $status_output" >&3 + exited="$(echo "$status_output" | jq -r '.return.exited')" + if [ "$exited" == "true" ]; then + stdout_base64="$(echo "$status_output" | jq -r '.return["out-data"]')" + if [ "$stdout_base64" != "null" ]; then + echo "$stdout_base64" | base64 --decode + fi + stderr_base64="$(echo "$status_output" | jq -r '.return["err-data"]')" + if [ "$stderr_base64" != "null" ]; then + echo "$stderr_base64" | base64 --decode >&2 + fi + exit_code="$(echo "$status_output" | jq -r '.return.exitcode')" + return $exit_code + fi + sleep 1 + done + } + + create_cloud_init_iso () { + local domain="$1" + local iso_path="/var/lib/libvirt/images/${domain}/cloud-init.iso" + local user_data_path="/var/lib/libvirt/images/${domain}/cloud-init/user-data" + local meta_data_path="/var/lib/libvirt/images/${domain}/cloud-init/meta-data" + + # Create cloud-init user-data and meta-data files + mkdir -p "/var/lib/libvirt/images/${domain}/cloud-init" + cp "${BATS_TEST_DIRNAME}/cloud-init/${domain}-user-data" "$user_data_path" + cat > "$meta_data_path" <&3 + for domain in standard with-fs with-zvol; do + if virsh dominfo "$domain" &>/dev/null; then + virsh destroy "$domain" || true + virsh undefine "$domain" --nvram || true + fi + zfs destroy -r data/domains/"$domain" || true + rm -rf "/var/lib/libvirt/images/${domain}" + done + } + + create_domains() { + # Create the standard VM + echo "setup: Creating the standard VM..." 
>&3 + mkdir -p /var/lib/libvirt/images/standard + zfs create -p data/domains/standard -o mountpoint=/var/lib/libvirt/images/standard + convert_cloud_image "$fedora_img" "/var/lib/libvirt/images/standard/root.img" + create_cloud_init_iso "standard" + virt-install --noautoconsole \ + --name=standard \ + --cpu=host-passthrough \ + --vcpus=1 \ + --ram=4096 \ + --os-variant=fedora-rawhide \ + --disk=path=/var/lib/libvirt/images/standard/root.img,target.dev=vda,bus=virtio,driver.discard=unmap,driver.io=io_uring,format=raw,sparse=True,blockio.logical_block_size=512,blockio.physical_block_size=512,serial=root,format=raw \ + --network=none \ + --console=pty,target.type=virtio \ + --serial=pty \ + --disk=path=/var/lib/libvirt/images/standard/cloud-init.iso,readonly=True \ + --import \ + --sysinfo=system.serial=ds=nocloud \ + --boot=uefi + + # Create the with-fs VM + echo "setup: Creating the with-fs VM..." >&3 + mkdir -p /var/lib/libvirt/images/with-fs /srv/with-fs + chmod 0777 /srv/with-fs + zfs create -p data/domains/with-fs -o mountpoint=/var/lib/libvirt/images/with-fs + zfs create -p data/domains/with-fs/virtiofs -o mountpoint=/srv/with-fs + convert_cloud_image "$fedora_img" "/var/lib/libvirt/images/with-fs/root.img" + create_cloud_init_iso "with-fs" + virt-install --noautoconsole \ + --name=with-fs \ + --cpu=host-passthrough \ + --vcpus=1 \ + --ram=4096 \ + --os-variant=fedora-rawhide \ + --disk=path=/var/lib/libvirt/images/with-fs/root.img,target.dev=vda,bus=virtio,driver.discard=unmap,driver.io=io_uring,format=raw,sparse=True,blockio.logical_block_size=512,blockio.physical_block_size=512,serial=root,format=raw \ + --network=none \ + --console=pty,target.type=virtio \ + --serial=pty \ + --disk=path=/var/lib/libvirt/images/with-fs/cloud-init.iso,readonly=True \ + --import \ + --sysinfo=system.serial=ds=nocloud \ + --boot=uefi \ + --memorybacking=access.mode=shared,source.type=memfd \ + 
--filesystem=type=mount,accessmode=passthrough,driver.type=virtiofs,driver.queue=1024,source.dir=/srv/with-fs,target.dir=data + + # Create the with-zvol VM + echo "setup: Creating the with-zvol VM..." >&3 + mkdir -p /var/lib/libvirt/images/with-zvol + zfs create -p data/domains/with-zvol -o mountpoint=/var/lib/libvirt/images/with-zvol + zfs create -V 10G data/domains/with-zvol/data + convert_cloud_image "$fedora_img" "/var/lib/libvirt/images/with-zvol/root.img" + create_cloud_init_iso "with-zvol" + virt-install --noautoconsole \ + --name=with-zvol \ + --cpu=host-passthrough \ + --vcpus=1 \ + --ram=4096 \ + --os-variant=fedora-rawhide \ + --disk=path=/var/lib/libvirt/images/with-zvol/root.img,target.dev=vda,bus=virtio,driver.discard=unmap,driver.io=io_uring,format=raw,sparse=True,blockio.logical_block_size=512,blockio.physical_block_size=512,serial=root,format=raw \ + --disk=path=/dev/zvol/data/domains/with-zvol/data,target.dev=vdb,bus=virtio,cache=directsync,blockio.logical_block_size=4096,blockio.physical_block_size=4096,driver.discard=unmap,driver.io=io_uring,serial=zvol \ + --network=none \ + --console=pty,target.type=virtio \ + --serial=pty \ + --disk=path=/var/lib/libvirt/images/with-zvol/cloud-init.iso,readonly=True \ + --import \ + --sysinfo=system.serial=ds=nocloud \ + --boot=uefi + } + + readiness_wait() { + echo "setup: Waiting for VMs to become ready..." >&3 + for domain in standard with-fs with-zvol; do + echo "setup: Waiting for qemu guest agent to be running in domain '$domain'..." >&3 + until virsh qemu-agent-command "$domain" '{"execute":"guest-ping"}' &>/dev/null; do + sleep 2 + done + done + echo "setup: all VMs started successfully" >&3 + for domain in standard with-fs with-zvol; do + echo "setup: Waiting for cloud-init to complete in domain '$domain'..." 
>&3 + until qemu_exec "$domain" test -f /var/lib/cloud/instance/boot-finished; do + sleep 2 + done + done + echo "setup: VMs are ready" >&3 + } + + local fedora_url="https://download.fedoraproject.org/pub/fedora/linux/releases/42/Cloud/x86_64/images/Fedora-Cloud-Base-Generic-42-1.1.x86_64.qcow2" + local fedora_img="/var/lib/libvirt/images/$(basename "$fedora_url")" + if [ ! -f "$fedora_img" ]; then + echo "setup: downloading Fedora Cloud image to $fedora_img" >&3 + mkdir -p /var/lib/libvirt/images/library + curl -sSfL -o "$fedora_img" "$fedora_url" + fi + echo "setup: Fedora Cloud image is at $fedora_img" >&3 + + # Cleanup any leftover artifacts from previous runs + cleanup + create_domains + readiness_wait +} + +teardown() { + cleanup +} + +@test "zvirt: setup selftest" { + echo "setup: provisioning completed" >&3 +} + +@test "zvirt: take live snapshot in batch mode" { + # Create witness files in all three domains before taking snapshots + qemu_exec standard touch /test/rootfs/before-backup1 + qemu_exec with-fs touch /test/virtiofs/before-backup1 + qemu_exec with-zvol touch /test/zvol/before-backup1 + [[ -f /srv/with-fs/before-backup1 ]] + + # Take live snapshots for all three domains + run zvirt snapshot -b -d standard -d with-zvol -d with-fs -s backup1 -l + assert_success + + # Verify that the domains are still running + run virsh domstate standard + assert_success + assert_output "running" + run virsh domstate with-fs + assert_success + assert_output "running" + run virsh domstate with-zvol + assert_success + assert_output "running" + + # Assert that the files created before the snapshot exist + run qemu_exec standard ls -1 /test/rootfs + assert_success + assert_output "before-backup1" + run qemu_exec with-fs ls -1 /test/virtiofs + assert_success + assert_output "before-backup1" + run qemu_exec with-zvol ls -1 /test/zvol + assert_success + assert_output "before-backup1" + + # List snapshots and verify their existence + run zvirt list -d standard -d with-zvol -d 
with-fs + assert_success + assert_output "Snapshots for domain 'standard': + - backup1 +Snapshots for domain 'with-zvol': + - backup1 +Snapshots for domain 'with-fs': + - backup1" + + # Attempt to take the same snapshot again and expect failure + run zvirt snapshot -b -d standard -d with-zvol -d with-fs -s backup1 -l + assert_failure + assert_output --partial "Snapshot 'backup1' already exists." + assert_output --partial "standard:" + assert_output --partial "with-zvol:" + assert_output --partial "with-fs:" + assert_output --partial "Pre-flight checks failed." +} + + +# @test "call_parse_args: take a crash-consistent snapshot for two domains" { +# run zvirt snapshot -d standard -d with-zvol -d with-fs backup2 +# assert_success +# } + +# @test "call_parse_args: revert snapshot for a domain" { +# virsh destroy standard || true +# run zvirt revert -d standard -s backup2 +# assert_success +# } + +# @test "call_parse_args: revert snapshot for all domains in batch mode" { +# virsh destroy standard || true +# virsh destroy with-zvol || true +# virsh destroy with-fs || true +# run zvirt revert -b -d standard -d with-zvol -d with-fs -s backup1 +# assert_success +# } + diff --git a/test/unit/core.bats b/test/unit/core.bats index b3dea45..0f313b9 100644 --- a/test/unit/core.bats +++ b/test/unit/core.bats @@ -246,7 +246,7 @@ data/domains/baz/virtiofs" declare -A domain_params_cache=( ["foo/state"]="running" ["foo/dataset"]="data/domains/foo" ["foo/mountpoint"]="/var/lib/libvirt/images/foo" ["foo/zvols"]="" ) virsh_mock="$(mock_create)" virsh() { - if [[ "$*" == "restore /var/lib/libvirt/images/foo/domain.save --verbose --paused" ]]; then + if [[ "$*" == "restore /var/lib/libvirt/images/foo/domain.save --paused" ]]; then $virsh_mock "$@" return $? 
fi @@ -267,7 +267,7 @@ data/domains/baz/virtiofs" declare -A domain_params_cache=( ["foo/state"]="running" ["foo/dataset"]="data/domains/foo" ["foo/mountpoint"]="/var/lib/libvirt/images/foo" ["foo/zvols"]="" ) virsh_mock="$(mock_create)" virsh() { - if [[ "$*" == "restore /var/lib/libvirt/images/foo/domain.save --verbose --running" ]]; then + if [[ "$*" == "restore /var/lib/libvirt/images/foo/domain.save --running" ]]; then $virsh_mock "$@" return $? fi @@ -306,13 +306,19 @@ data/domains/baz/virtiofs" @test "resume_all_domains: nominal case" { # Mock the underlying tools local domains=( "foo" "bar" ) - declare -A domain_params_cache=( ["foo/state"]="paused" ["bar/state"]="shut off" ) virsh_mock="$(mock_create)" virsh() { if [[ "$*" == "resume foo" ]] || [[ "$*" == "start bar" ]]; then $virsh_mock "$@" return $? fi + if [[ "$*" == "domstate foo" ]]; then + echo "paused" + return 0 + elif [[ "$*" == "domstate bar" ]]; then + echo "shut off" + return 0 + fi return 1 } export -f virsh @@ -444,7 +450,8 @@ snapshot2" take_live_snapshot() { return 1; } restore_domain() { return 1; } resume_all_domains() { return 1; } - export -f take_crash_consistent_snapshot pause_all_domains take_live_snapshot restore_domain resume_all_domains + remove_save_file() { return 1; } + export -f take_crash_consistent_snapshot pause_all_domains take_live_snapshot restore_domain resume_all_domains remove_save_file declare -A domain_params_cache=( ["foo/state"]="running" ["bar/state"]="shut off" ) @@ -486,7 +493,14 @@ snapshot2" return 1 } - export -f take_crash_consistent_snapshot pause_all_domains take_live_snapshot restore_domain resume_all_domains + remove_save_file() { + regex="^(foo|bar) backup$" + if [[ "$*" =~ $regex ]]; then + return 0 + fi + return 1 + } + export -f take_crash_consistent_snapshot pause_all_domains take_live_snapshot restore_domain resume_all_domains remove_save_file declare -A domain_params_cache=( ["foo/state"]="running" ["bar/state"]="shut off" ) @@ -526,7 +540,8 @@ 
snapshot2" return 1 } resume_all_domains() { return 1; } - export -f take_crash_consistent_snapshot pause_all_domains take_live_snapshot restore_domain resume_all_domains + remove_save_file() { return 1; } + export -f take_crash_consistent_snapshot pause_all_domains take_live_snapshot restore_domain resume_all_domains remove_save_file declare -A domain_params_cache=( ["foo/state"]="running" ["bar/state"]="shut off" ) diff --git a/test/unit/usage.bats b/test/unit/usage.bats index 14baf4f..e22dc35 100644 --- a/test/unit/usage.bats +++ b/test/unit/usage.bats @@ -1,7 +1,5 @@ #!/usr/bin/env bats -bats_require_minimum_version 1.5.0 - setup() { bats_load_library 'bats-support' bats_load_library 'bats-assert'