From 0537de5bb669c3ec9ab63c464411779443cfda5a Mon Sep 17 00:00:00 2001
From: Christoph Klaffl
Date: Mon, 20 May 2019 18:19:03 +0200
Subject: [PATCH] test case for resetting the resume state if it's invalid

---
 tests/syncoid/6_reset_resume_state2/run.sh | 55 ++++++++++++++++++++++
 1 file changed, 55 insertions(+)
 create mode 100755 tests/syncoid/6_reset_resume_state2/run.sh

diff --git a/tests/syncoid/6_reset_resume_state2/run.sh b/tests/syncoid/6_reset_resume_state2/run.sh
new file mode 100755
index 0000000..e227223
--- /dev/null
+++ b/tests/syncoid/6_reset_resume_state2/run.sh
@@ -0,0 +1,55 @@
+#!/bin/bash
+
+# test resumable replication where the original snapshot doesn't exist anymore
+
+set -x
+set -e
+
+. ../../common/lib.sh
+
+POOL_IMAGE="/tmp/syncoid-test-6.zpool"
+MOUNT_TARGET="/tmp/syncoid-test-6.mount"
+POOL_SIZE="1000M"
+POOL_NAME="syncoid-test-6"
+
+truncate -s "${POOL_SIZE}" "${POOL_IMAGE}"
+
+zpool create -m none -f "${POOL_NAME}" "${POOL_IMAGE}"
+
+function cleanUp {
+    zpool export "${POOL_NAME}"
+}
+
+# export pool in any case
+trap cleanUp EXIT
+
+zfs create "${POOL_NAME}"/src -o mountpoint="${MOUNT_TARGET}"
+../../../syncoid --debug --compress=none "${POOL_NAME}"/src "${POOL_NAME}"/dst
+
+dd if=/dev/urandom of="${MOUNT_TARGET}"/big_file bs=1M count=200
+
+zfs snapshot "${POOL_NAME}"/src@big
+../../../syncoid --debug --no-sync-snap --compress=none --source-bwlimit=2m "${POOL_NAME}"/src "${POOL_NAME}"/dst &
+syncoid_pid=$!
+sleep 5
+list_descendants ()
+{
+    local children=$(ps -o pid= --ppid "$1")
+
+    for pid in $children
+    do
+        list_descendants "$pid"
+    done
+
+    echo "$children"
+}
+
+kill $(list_descendants $$) || true
+wait
+sleep 1
+
+zfs destroy "${POOL_NAME}"/src@big
+../../../syncoid --debug --compress=none "${POOL_NAME}"/src "${POOL_NAME}"/dst # | grep "reset partial receive state of syncoid"
+../../../syncoid --debug --compress=none "${POOL_NAME}"/src "${POOL_NAME}"/dst
+
+exit $?