From 03c3db3d9aa30f5354ec433b7528f9f9385faec9 Mon Sep 17 00:00:00 2001 From: Steffen Dettmer Date: Sat, 20 Apr 2024 12:55:44 +0200 Subject: [PATCH 01/14] sanoid #912: sanoid --prune-snapshots performance boost by removing unneeded iszfsbusy() --- README.md | 4 ---- sanoid | 62 +++++++++++++++---------------------------------------- 2 files changed, 17 insertions(+), 49 deletions(-) diff --git a/README.md b/README.md index f2ce10c..85539d5 100644 --- a/README.md +++ b/README.md @@ -80,10 +80,6 @@ For more full details on sanoid.conf settings see [Wiki page](https://github.com This will process your sanoid.conf file, it will NOT create snapshots, but it will purge expired ones. -+ --force-prune - - Purges expired snapshots even if a send/recv is in progress - + --monitor-snapshots This option is designed to be run by a Nagios monitoring system. It reports on the health of your snapshots. diff --git a/sanoid b/sanoid index 295957b..22f630e 100755 --- a/sanoid +++ b/sanoid @@ -25,7 +25,7 @@ my %args = ( GetOptions(\%args, "verbose", "debug", "cron", "readonly", "quiet", "configdir=s", "cache-dir=s", "run-dir=s", "monitor-health", "force-update", - "monitor-snapshots", "take-snapshots", "prune-snapshots", "force-prune", + "monitor-snapshots", "take-snapshots", "prune-snapshots", "monitor-capacity" ) or pod2usage(2); @@ -40,7 +40,7 @@ my $cacheTTL = 900; # 15 minutes # Allow a much older snapshot cache file than default if _only_ "--monitor-*" action commands are given # (ignore "--verbose", "--configdir" etc) -if (($args{'monitor-snapshots'} || $args{'monitor-health'} || $args{'monitor-capacity'}) && ! ($args{'cron'} || $args{'force-update'} || $args{'take-snapshots'} || $args{'prune-snapshots'} || $args{'force-prune'})) { +if (($args{'monitor-snapshots'} || $args{'monitor-health'} || $args{'monitor-capacity'}) && ! ($args{'cron'} || $args{'force-update'} || $args{'take-snapshots'} || $args{'prune-snapshots'})) { # The command combination above must not assert true for any command that takes or prunes snapshots $cacheTTL = 18000; # 5 hours if ($args{'debug'}) { print "DEBUG: command combo means that the cache file (provided it exists) will be allowed to be older than default.\n"; } @@ -349,26 +349,23 @@ sub prune_snapshots { } if ($args{'verbose'}) { print "INFO: pruning $snap ... \n"; } - if (!$args{'force-prune'} && iszfsbusy($path)) { - if ($args{'verbose'}) { print "INFO: deferring pruning of $snap - $path is currently in zfs send or receive.\n"; } - } else { - if (! $args{'readonly'}) { - if (system($zfs, "destroy", $snap) == 0) { - $pruned{$snap} = 1; - if ($config{$dataset}{'pruning_script'}) { - $ENV{'SANOID_TARGET'} = $dataset; - $ENV{'SANOID_SNAPNAME'} = $snapname; - $ENV{'SANOID_SCRIPT'} = 'prune'; - if ($args{'verbose'}) { print "executing pruning_script '".$config{$dataset}{'pruning_script'}."' on dataset '$dataset'\n"; } - my $ret = runscript('pruning_script',$dataset); - delete $ENV{'SANOID_TARGET'}; - delete $ENV{'SANOID_SNAPNAME'}; - delete $ENV{'SANOID_SCRIPT'}; - } - } else { - warn "could not remove $snap : $?"; + if (! 
$args{'readonly'}) { + if (system($zfs, "destroy", $snap) == 0) { + $pruned{$snap} = 1; + if ($config{$dataset}{'pruning_script'}) { + $ENV{'SANOID_TARGET'} = $dataset; + $ENV{'SANOID_SNAPNAME'} = $snapname; + $ENV{'SANOID_SCRIPT'} = 'prune'; + if ($args{'verbose'}) { print "executing pruning_script '".$config{$dataset}{'pruning_script'}."' on dataset '$dataset'\n"; } + my $ret = runscript('pruning_script',$dataset); + + delete $ENV{'SANOID_TARGET'}; + delete $ENV{'SANOID_SNAPNAME'}; + delete $ENV{'SANOID_SCRIPT'}; } + } else { + warn "could not remove $snap : $?"; } } } @@ -1557,30 +1554,6 @@ sub writelock { close FH; } -sub iszfsbusy { - # check to see if ZFS filesystem passed in as argument currently has a zfs send or zfs receive process referencing it. - # return true if busy (currently being sent or received), return false if not. - - my $fs = shift; - # if (args{'debug'}) { print "DEBUG: checking to see if $fs on is already in zfs receive using $pscmd -Ao args= ...\n"; } - - open PL, "$pscmd -Ao args= |"; - my @processes = ; - close PL; - - foreach my $process (@processes) { - # if ($args{'debug'}) { print "DEBUG: checking process $process...\n"; } - if ($process =~ /zfs *(send|receive|recv).*$fs/) { - # there's already a zfs send/receive process for our target filesystem - return true - # if ($args{'debug'}) { print "DEBUG: process $process matches target $fs!\n"; } - return 1; - } - } - - # no zfs receive processes for our target filesystem found - return false - return 0; -} - #######################################################################################################################3 #######################################################################################################################3 #######################################################################################################################3 @@ -1752,7 +1725,6 @@ Options: --monitor-snapshots Reports on snapshot "health", in a Nagios compatible format --take-snapshots Creates snapshots as specified in sanoid.conf --prune-snapshots Purges expired snapshots as specified in sanoid.conf - --force-prune Purges expired snapshots even if a send/recv is in progress --help Prints this helptext --version Prints the version number From fab4b4076cb10b4f4722e827e0fad3d940fc4a5b Mon Sep 17 00:00:00 2001 From: Nick Liu Date: Wed, 24 Apr 2024 13:08:21 -0500 Subject: [PATCH 02/14] fix(syncoid): `zfs send` arg allowlist when sendsource is receivetoken The `runsynccmd` subroutine was not matching the `$sendsource` when a receive resume token is passed in. All usages that pass in the receive resume token do not begin with a space; instead, they start with `-t `. Fixes: https://github.com/jimsalterjrs/sanoid/issues/918 --- syncoid | 2 +- tests/syncoid/012_receive_resume_token/run.sh | 55 +++++++++++++++++++ 2 files changed, 56 insertions(+), 1 deletion(-) create mode 100755 tests/syncoid/012_receive_resume_token/run.sh diff --git a/syncoid b/syncoid index 79ad45f..94bdb03 100755 --- a/syncoid +++ b/syncoid @@ -898,7 +898,7 @@ sub runsynccmd { my $disp_pvsize = $pvsize == 0 ? 
'UNKNOWN' : readablebytes($pvsize); my $sendoptions; - if ($sendsource =~ / -t /) { + if ($sendsource =~ /^-t /) { $sendoptions = getoptionsline(\@sendoptions, ('P','V','e','v')); } elsif ($sendsource =~ /#/) { $sendoptions = getoptionsline(\@sendoptions, ('L','V','c','e','w')); diff --git a/tests/syncoid/012_receive_resume_token/run.sh b/tests/syncoid/012_receive_resume_token/run.sh new file mode 100755 index 0000000..a28becc --- /dev/null +++ b/tests/syncoid/012_receive_resume_token/run.sh @@ -0,0 +1,55 @@ +#!/bin/bash + +# test verifying syncoid behavior with partial transfers + +set -x + +. ../../common/lib.sh + +POOL_IMAGE="/tmp/syncoid-test-012.zpool" +POOL_SIZE="128M" +POOL_NAME="syncoid-test-012" +MOUNT_TARGET="/tmp/syncoid-test-012.mount" + +truncate -s "${POOL_SIZE}" "${POOL_IMAGE}" + +zpool create -O mountpoint="${MOUNT_TARGET}" -f "${POOL_NAME}" "${POOL_IMAGE}" + +function cleanUp { + zpool destroy "${POOL_NAME}" + rm -f "${POOL_IMAGE}" +} + +# Clean up the pool and image file on exit +trap cleanUp EXIT + +zfs create "${POOL_NAME}/source" +zfs snap "${POOL_NAME}/source@empty" +dd if=/dev/urandom of="${MOUNT_TARGET}/source/garbage.bin" bs=1M count=16 +zfs snap "${POOL_NAME}/source@something" + +# Simulate interrupted transfer +zfs send -pwR "${POOL_NAME}/source@something" | head --bytes=8M | zfs recv -s "${POOL_NAME}/destination" + +# Using syncoid to continue interrupted transfer +../../../syncoid --sendoptions="pw" "${POOL_NAME}/source" "${POOL_NAME}/destination" + +# Check if syncoid succeeded in handling the interrupted transfer +if [ $? -eq 0 ]; then + echo "Syncoid resumed transfer successfully." + + # Verify data integrity with sha256sum comparison + original_sum=$(sha256sum "${MOUNT_TARGET}/source/garbage.bin" | cut -d ' ' -f 1) + received_sum=$(sha256sum "${MOUNT_TARGET}/destination/garbage.bin" | cut -d ' ' -f 1) + + if [ "${original_sum}" == "${received_sum}" ]; then + echo "Data integrity verified." + exit 0 + else + echo "Data integrity check failed." + exit 1 + fi +else + echo "Regression detected: syncoid did not handle the resuming correctly." 
+ exit 1 +fi From 4d39e3921768fac5f5dc6cb7ce24b3cc804b5c8c Mon Sep 17 00:00:00 2001 From: Christoph Klaffl Date: Mon, 3 Jun 2024 23:46:54 +0200 Subject: [PATCH 03/14] fix debian packaging with debian 12 and ubuntu 24.04 --- packages/debian/postinst | 2 ++ packages/debian/rules | 6 ++---- .../{sanoid-prune.service => sanoid.sanoid-prune.service} | 0 3 files changed, 4 insertions(+), 4 deletions(-) rename packages/debian/{sanoid-prune.service => sanoid.sanoid-prune.service} (100%) diff --git a/packages/debian/postinst b/packages/debian/postinst index 0d6142f..646a461 100755 --- a/packages/debian/postinst +++ b/packages/debian/postinst @@ -2,3 +2,5 @@ # remove old cache file [ -f /var/cache/sanoidsnapshots.txt ] && rm /var/cache/sanoidsnapshots.txt || true +[ -f /var/cache/sanoid/snapshots.txt ] && rm /var/cache/sanoid/snapshots.txt || true +[ -f /var/cache/sanoid/datasets.txt ] && rm /var/cache/sanoid/datasets.txt || true diff --git a/packages/debian/rules b/packages/debian/rules index 51e52af..05a3754 100755 --- a/packages/debian/rules +++ b/packages/debian/rules @@ -12,10 +12,6 @@ override_dh_auto_install: install -d $(DESTDIR)/etc/sanoid install -m 664 sanoid.defaults.conf $(DESTDIR)/etc/sanoid - install -d $(DESTDIR)/lib/systemd/system - install -m 664 debian/sanoid-prune.service debian/sanoid.timer \ - $(DESTDIR)/lib/systemd/system - install -d $(DESTDIR)/usr/sbin install -m 775 \ findoid sanoid sleepymutex syncoid \ @@ -25,6 +21,8 @@ override_dh_auto_install: install -m 664 sanoid.conf \ $(DESTDIR)/usr/share/doc/sanoid/sanoid.conf.example + dh_installsystemd --name sanoid-prune + override_dh_installinit: dh_installinit --noscripts diff --git a/packages/debian/sanoid-prune.service b/packages/debian/sanoid.sanoid-prune.service similarity index 100% rename from packages/debian/sanoid-prune.service rename to packages/debian/sanoid.sanoid-prune.service From cf0ecb30ae2fa62ba57910efe73cb91ccbcd1510 Mon Sep 17 00:00:00 2001 From: Christoph Klaffl Date: Tue, 4 Jun 2024 08:40:41 +0200 Subject: [PATCH 04/14] added deprecation warning for removed force-prune --- sanoid | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/sanoid b/sanoid index 4bffdf6..3585846 100755 --- a/sanoid +++ b/sanoid @@ -25,7 +25,7 @@ my %args = ( GetOptions(\%args, "verbose", "debug", "cron", "readonly", "quiet", "configdir=s", "cache-dir=s", "run-dir=s", "monitor-health", "force-update", - "monitor-snapshots", "take-snapshots", "prune-snapshots", + "monitor-snapshots", "take-snapshots", "prune-snapshots", "force-prune", "monitor-capacity" ) or pod2usage(2); @@ -54,6 +54,10 @@ make_path($run_dir); my $cacheTTL = 1200; # 20 minutes +if ($args{'force-prune'}) { + warn "WARN: --force-prune argument is deprecated and its behavior is now standard"; +} + # Allow a much older snapshot cache file than default if _only_ "--monitor-*" action commands are given # (ignore "--verbose", "--configdir" etc) if ( From b27b120c1974e22f9204809334edb26b1ca2b0f3 Mon Sep 17 00:00:00 2001 From: Ossi A Date: Tue, 25 Jun 2024 11:11:29 +0300 Subject: [PATCH 05/14] syncoid: add -X send option in special options --- syncoid | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/syncoid b/syncoid index 79ad45f..3213195 100755 --- a/syncoid +++ b/syncoid @@ -2175,7 +2175,7 @@ sub parsespecialoptions { return undef; } - if ($char eq 'o' || $char eq 'x') { + if ($char eq 'o' || $char eq 'x' || $char eq 'X') { $lastOption = $char; $optionValue = 1; } else { From 3942254e30e915f57d1496def3d58a29b054314e Mon Sep 17 00:00:00 
2001 From: Christoph Klaffl Date: Fri, 20 Sep 2024 07:38:12 +0200 Subject: [PATCH 06/14] ignore duplicate template keys --- sanoid | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/sanoid b/sanoid index f74f731..c2d2ebd 100755 --- a/sanoid +++ b/sanoid @@ -1008,6 +1008,12 @@ sub init { } if ($args{'debug'}) { print "DEBUG: overriding $key on $section with value from user-defined template $template.\n"; } $config{$section}{$key} = $ini{$template}{$key}; + + my $value = $config{$section}{$key}; + if (ref($value) eq 'ARRAY') { + # handle duplicates silently (warning was already printed above) + $config{$section}{$key} = $value->[0]; + } } } } From 19f8877dcb1d17637c86c1556e5f86ff7863713b Mon Sep 17 00:00:00 2001 From: Alex Garel Date: Tue, 5 Nov 2024 11:20:31 +0100 Subject: [PATCH 07/14] docs: clarify that scripts are run only if autosnap or autoprune are set --- README.md | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/README.md b/README.md index f2ce10c..b46e36f 100644 --- a/README.md +++ b/README.md @@ -126,7 +126,9 @@ For more full details on sanoid.conf settings see [Wiki page](https://github.com ### Sanoid script hooks -There are three script types which can optionally be executed at various stages in the lifecycle of a snapshot: +There are three script types which can optionally be executed at various stages in the lifecycle of a snapshot. + +**Note** that snapshots related script are triggered only if you have `autosnap = yes` and pruning scripts are triggered only if you have `autoprune = yes`. #### `pre_snapshot_script` From f4e425d6827ae01c271f4738d2fba570f24de072 Mon Sep 17 00:00:00 2001 From: Christopher Morrow Date: Wed, 27 Nov 2024 18:24:08 -0800 Subject: [PATCH 08/14] Add Install instructions for EL9 systems Added to INSTALL.md the command to add the `crb` repo for Rocky Linux 9 and AlmaLinux 9. Necessary for perl-Capture-Tiny package. 
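For reference, a minimal sketch of the documented steps on a stock Rocky Linux 9 or AlmaLinux 9 host (the exact commands are the ones from the INSTALL.md hunk below):

    # enable the CodeReady Builder repo, labelled `crb` on EL9 rebuilds
    sudo dnf config-manager --set-enabled crb
    # then install the Perl modules and helper tools Sanoid depends on
    sudo yum install -y perl-Config-IniFiles perl-Data-Dumper perl-Capture-Tiny perl-Getopt-Long lzop mbuffer mhash pv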
--- INSTALL.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/INSTALL.md b/INSTALL.md index 12b6734..6aa4347 100644 --- a/INSTALL.md +++ b/INSTALL.md @@ -59,6 +59,8 @@ sudo yum config-manager --set-enabled powertools sudo dnf config-manager --set-enabled powertools # On RHEL, instead of PowerTools, we need to enable the CodeReady Builder repo: sudo subscription-manager repos --enable=codeready-builder-for-rhel-8-x86_64-rpms +# For Rocky Linux 9 or AlmaLinux 9 you need the CodeReady Builder repo, and it is labelled `crb` +sudo dnf config-manager --set-enabled crb # Install the packages that Sanoid depends on: sudo yum install -y perl-Config-IniFiles perl-Data-Dumper perl-Capture-Tiny perl-Getopt-Long lzop mbuffer mhash pv # The repositories above should contain all the relevant Perl modules, but if you From 4a9db9541d7a6df23aad363c6cf25cbe8341c962 Mon Sep 17 00:00:00 2001 From: Christoph Klaffl Date: Tue, 3 Dec 2024 19:32:47 +0100 Subject: [PATCH 09/14] fix warning in edge cases ("Use of uninitialized value in numeric comparison") --- syncoid | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/syncoid b/syncoid index 79ad45f..f3cac71 100755 --- a/syncoid +++ b/syncoid @@ -1535,7 +1535,12 @@ sub sortsnapshots { if (defined $snaps->{'source'}{$left}{'createtxg'} && defined $snaps->{'source'}{$right}{'createtxg'}) { return $snaps->{'source'}{$left}{'createtxg'} <=> $snaps->{'source'}{$right}{'createtxg'}; } - return $snaps->{'source'}{$left}{'creation'} <=> $snaps->{'source'}{$right}{'creation'}; + + if (defined $snaps->{'source'}{$left}{'creation'} && defined $snaps->{'source'}{$right}{'creation'}) { + return $snaps->{'source'}{$left}{'creation'} <=> $snaps->{'source'}{$right}{'creation'}; + } + + return 0; } sub getoldestsnapshot { From 7062b7347eea637e68b915bf8a65bb0a85a67f88 Mon Sep 17 00:00:00 2001 From: Christoph Klaffl Date: Fri, 24 Jan 2025 14:01:59 +0100 Subject: [PATCH 10/14] blacklist encryption property from preserving --- syncoid | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/syncoid b/syncoid index 79ad45f..9dce246 100755 --- a/syncoid +++ b/syncoid @@ -1501,7 +1501,8 @@ sub getlocalzfsvalues { "receive_resume_token", "redact_snaps", "referenced", "refcompressratio", "snapshot_count", "type", "used", "usedbychildren", "usedbydataset", "usedbyrefreservation", "usedbysnapshots", "userrefs", "snapshots_changed", "volblocksize", "written", - "version", "volsize", "casesensitivity", "normalization", "utf8only" + "version", "volsize", "casesensitivity", "normalization", "utf8only", + "encryption" ); my %blacklisthash = map {$_ => 1} @blacklist; From 27fc1794907c95570cd0c8d3d6cd11301d5e5a53 Mon Sep 17 00:00:00 2001 From: Christoph Klaffl Date: Thu, 5 Jun 2025 21:59:30 +0200 Subject: [PATCH 11/14] implemented adding of taken snapshot to the cache file and a new parameter for setting an custom cache expire time --- README.md | 4 +++ sanoid | 86 +++++++++++++++++++++++++++++++++++++++++++++++++++++-- 2 files changed, 87 insertions(+), 3 deletions(-) diff --git a/README.md b/README.md index f2ce10c..fadffb4 100644 --- a/README.md +++ b/README.md @@ -100,6 +100,10 @@ For more full details on sanoid.conf settings see [Wiki page](https://github.com This clears out sanoid's zfs snapshot listing cache. This is normally not needed. ++ --cache-ttl=SECONDS + + Set custom cache expire time in seconds (default: 20 minutes). + + --version This prints the version number, and exits. 
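Illustrative usage of the new flag (values are examples only; assumes sanoid is on the PATH): --cache-ttl overrides the default 20-minute TTL of the zfs snapshot listing cache, so a cron run that may reuse an hour-old cache could be invoked as

    # allow the snapshot listing cache to be up to one hour old before refreshing it
    sanoid --cron --verbose --cache-ttl=3600

The test scripts updated later in this series pass --cache-ttl=2592000 (30 days) to their simulated cron runs.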
diff --git a/sanoid b/sanoid index c3242a1..57a942f 100755 --- a/sanoid +++ b/sanoid @@ -12,6 +12,7 @@ use warnings; use Config::IniFiles; # read samba-style conf file use Data::Dumper; # debugging - print contents of hash use File::Path 'make_path'; +use File::Copy; use Getopt::Long qw(:config auto_version auto_help); use Pod::Usage; # pod2usage use Time::Local; # to parse dates in reverse @@ -26,7 +27,7 @@ GetOptions(\%args, "verbose", "debug", "cron", "readonly", "quiet", "configdir=s", "cache-dir=s", "run-dir=s", "monitor-health", "force-update", "monitor-snapshots", "take-snapshots", "prune-snapshots", "force-prune", - "monitor-capacity" + "monitor-capacity", "cache-ttl=i" ) or pod2usage(2); # If only config directory (or nothing) has been specified, default to --cron --verbose @@ -54,6 +55,13 @@ make_path($run_dir); my $cacheTTL = 1200; # 20 minutes +if ($args{'cache-ttl'}) { + if ($args{'cache-ttl'} < 0) { + die "ERROR: cache-ttl needs to be positive!\n"; + } + $cacheTTL = $args{'cache-ttl'}; +} + # Allow a much older snapshot cache file than default if _only_ "--monitor-*" action commands are given # (ignore "--verbose", "--configdir" etc) if ( @@ -67,6 +75,7 @@ if ( || $args{'take-snapshots'} || $args{'prune-snapshots'} || $args{'force-prune'} + || $args{'cache-ttl'} ) ) { # The command combination above must not assert true for any command that takes or prunes snapshots @@ -86,6 +95,7 @@ my %config = init($conf_file,$default_conf_file); my %pruned; my %capacitycache; +my %taken; my %snaps; my %snapsbytype; @@ -592,6 +602,7 @@ sub take_snapshots { } if (%newsnapsgroup) { + $forcecacheupdate = 0; while ((my $path, my $snapData) = each(%newsnapsgroup)) { my $recursiveFlag = $snapData->{recursive}; my $dstHandling = $snapData->{handleDst}; @@ -662,9 +673,17 @@ sub take_snapshots { } }; + if ($exit == 0) { + $taken{$snap} = { + 'time' => time(), + 'recursive' => $recursiveFlag + }; + } + $exit == 0 or do { if ($dstHandling) { if ($stderr =~ /already exists/) { + $forcecacheupdate = 1; $exit = 0; $snap =~ s/_([a-z]+)$/dst_$1/g; if ($args{'verbose'}) { print "taking dst snapshot $snap$extraMessage\n"; } @@ -714,8 +733,8 @@ sub take_snapshots { } } } - $forcecacheupdate = 1; - %snaps = getsnaps(%config,$cacheTTL,$forcecacheupdate); + addcachedsnapshots(); + %snaps = getsnaps(\%config,$cacheTTL,$forcecacheupdate); } } @@ -1740,6 +1759,11 @@ sub removecachedsnapshots { print FH $snapline unless ( exists($pruned{$snap}) ); } close FH; + + # preserve mtime of cache for expire check + my ($dev, $ino, $mode, $nlink, $uid, $gid, $rdev, $size, $atime, $mtime, $ctime, $blksize, $blocks) = stat($cache); + utime($atime, $mtime, "$cache.tmp"); + rename("$cache.tmp", "$cache") or die "Could not rename to $cache!\n"; removelock('sanoid_cacheupdate'); @@ -1753,6 +1777,61 @@ sub removecachedsnapshots { #######################################################################################################################3 #######################################################################################################################3 +sub addcachedsnapshots { + if (not %taken) { + return; + } + + my $unlocked = checklock('sanoid_cacheupdate'); + + # wait until we can get a lock to do our cache changes + while (not $unlocked) { + if ($args{'verbose'}) { print "INFO: waiting for cache update lock held by another sanoid process.\n"; } + sleep(10); + $unlocked = checklock('sanoid_cacheupdate'); + } + + writelock('sanoid_cacheupdate'); + + if ($args{'verbose'}) { + print "INFO: adding taken snapshots 
to cache.\n"; + } + + copy($cache, "$cache.tmp") or die "Could not copy to $cache.tmp!\n"; + + open FH, ">> $cache.tmp" or die "Could not write to $cache.tmp!\n"; + while((my $snap, my $details) = each(%taken)) { + my @parts = split("@", $snap, 2); + + my $suffix = $parts[1] . "\tcreation\t" . $details->{time} . "\t-"; + my $dataset = $parts[0]; + + print FH "${dataset}\@${suffix}\n"; + + if ($details->{recursive}) { + my @datasets = getchilddatasets($dataset); + + foreach my $dataset(@datasets) { + print FH "${dataset}\@${suffix}\n"; + } + } + } + + close FH; + + # preserve mtime of cache for expire check + my ($dev, $ino, $mode, $nlink, $uid, $gid, $rdev, $size, $atime, $mtime, $ctime, $blksize, $blocks) = stat($cache); + utime($atime, $mtime, "$cache.tmp"); + + rename("$cache.tmp", "$cache") or die "Could not rename to $cache!\n"; + + removelock('sanoid_cacheupdate'); +} + +#######################################################################################################################3 +#######################################################################################################################3 +#######################################################################################################################3 + sub runscript { my $key=shift; my $dataset=shift; @@ -1851,6 +1930,7 @@ Options: --take-snapshots Creates snapshots as specified in sanoid.conf --prune-snapshots Purges expired snapshots as specified in sanoid.conf --force-prune Purges expired snapshots even if a send/recv is in progress + --cache-ttl=SECONDS Set custom cache expire time in seconds (default: 20 minutes) --help Prints this helptext --version Prints the version number From b794da6f145fdeb8ff1e48d267629cc51193470c Mon Sep 17 00:00:00 2001 From: Christoph Klaffl Date: Thu, 5 Jun 2025 23:23:30 +0200 Subject: [PATCH 12/14] Revert "Merge pull request #818 from Deltik/fix/815" This reverts commit 7c225a1d7b2350817468c97d0e33a599b9d02344, reversing changes made to acdc0938c9cf33878787244d194e9379ee8d5bb1. --- syncoid | 304 +++++++++++------- tests/run-tests.sh | 2 +- .../011_sync_out-of-order_snapshots/run.sh | 50 --- tests/syncoid/run-tests.sh | 2 +- 4 files changed, 186 insertions(+), 172 deletions(-) delete mode 100755 tests/syncoid/011_sync_out-of-order_snapshots/run.sh diff --git a/syncoid b/syncoid index 5dc6436..bd041ae 100755 --- a/syncoid +++ b/syncoid @@ -415,10 +415,10 @@ sub syncdataset { if (!defined($receivetoken)) { # build hashes of the snaps on the source and target filesystems. - %snaps = getsnaps('source',$sourcehost,$sourcefs,$sourceisroot,0); + %snaps = getsnaps('source',$sourcehost,$sourcefs,$sourceisroot); if ($targetexists) { - my %targetsnaps = getsnaps('target',$targethost,$targetfs,$targetisroot,0); + my %targetsnaps = getsnaps('target',$targethost,$targetfs,$targetisroot); my %sourcesnaps = %snaps; %snaps = (%sourcesnaps, %targetsnaps); } @@ -438,7 +438,7 @@ sub syncdataset { # Don't send the sync snap if it's filtered out by --exclude-snaps or # --include-snaps if (!snapisincluded($newsyncsnap)) { - $newsyncsnap = getnewestsnapshot(\%snaps); + $newsyncsnap = getnewestsnapshot($sourcehost,$sourcefs,$sourceisroot); if ($newsyncsnap eq 0) { writelog('WARN', "CRITICAL: no snapshots exist on source $sourcefs, and you asked for --no-sync-snap."); if ($exitcode < 1) { $exitcode = 1; } @@ -447,7 +447,7 @@ sub syncdataset { } } else { # we don't want sync snapshots created, so use the newest snapshot we can find. 
- $newsyncsnap = getnewestsnapshot(\%snaps); + $newsyncsnap = getnewestsnapshot($sourcehost,$sourcefs,$sourceisroot); if ($newsyncsnap eq 0) { writelog('WARN', "CRITICAL: no snapshots exist on source $sourcefs, and you asked for --no-sync-snap."); if ($exitcode < 1) { $exitcode = 1; } @@ -575,7 +575,8 @@ sub syncdataset { my $targetsize = getzfsvalue($targethost,$targetfs,$targetisroot,'-p used'); - my %bookmark = (); + my $bookmark = 0; + my $bookmarkcreation = 0; $matchingsnap = getmatchingsnapshot($sourcefs, $targetfs, \%snaps); if (! $matchingsnap) { @@ -583,18 +584,19 @@ sub syncdataset { my %bookmarks = getbookmarks($sourcehost,$sourcefs,$sourceisroot); # check for matching guid of source bookmark and target snapshot (oldest first) - foreach my $snap ( sort { sortsnapshots(\%snaps, $b, $a) } keys %{ $snaps{'target'} }) { + foreach my $snap ( sort { $snaps{'target'}{$b}{'creation'}<=>$snaps{'target'}{$a}{'creation'} } keys %{ $snaps{'target'} }) { my $guid = $snaps{'target'}{$snap}{'guid'}; if (defined $bookmarks{$guid}) { # found a match - %bookmark = %{ $bookmarks{$guid} }; + $bookmark = $bookmarks{$guid}{'name'}; + $bookmarkcreation = $bookmarks{$guid}{'creation'}; $matchingsnap = $snap; last; } } - if (! %bookmark) { + if (! $bookmark) { # force delete is not possible for the root dataset if ($args{'force-delete'} && index($targetfs, '/') != -1) { writelog('INFO', "Removing $targetfs because no matching snapshots were found"); @@ -667,18 +669,15 @@ sub syncdataset { my $nextsnapshot = 0; - if (%bookmark) { + if ($bookmark) { + my $bookmarkescaped = escapeshellparam($bookmark); if (!defined $args{'no-stream'}) { # if intermediate snapshots are needed we need to find the next oldest snapshot, # do an replication to it and replicate as always from oldest to newest # because bookmark sends doesn't support intermediates directly - foreach my $snap ( sort { sortsnapshots(\%snaps, $a, $b) } keys %{ $snaps{'source'} }) { - my $comparisonkey = 'creation'; - if (defined $snaps{'source'}{$snap}{'createtxg'} && defined $bookmark{'createtxg'}) { - $comparisonkey = 'createtxg'; - } - if ($snaps{'source'}{$snap}{$comparisonkey} >= $bookmark{$comparisonkey}) { + foreach my $snap ( sort { $snaps{'source'}{$a}{'creation'}<=>$snaps{'source'}{$b}{'creation'} } keys %{ $snaps{'source'} }) { + if ($snaps{'source'}{$snap}{'creation'} >= $bookmarkcreation) { $nextsnapshot = $snap; last; } @@ -686,13 +685,13 @@ sub syncdataset { } if ($nextsnapshot) { - ($exit, $stdout) = syncbookmark($sourcehost, $sourcefs, $targethost, $targetfs, $bookmark{'name'}, $nextsnapshot); + ($exit, $stdout) = syncbookmark($sourcehost, $sourcefs, $targethost, $targetfs, $bookmark, $nextsnapshot); $exit == 0 or do { if (!$resume && $stdout =~ /\Qcontains partially-complete state\E/) { writelog('WARN', "resetting partially receive state"); resetreceivestate($targethost,$targetfs,$targetisroot); - (my $ret) = syncbookmark($sourcehost, $sourcefs, $targethost, $targetfs, $bookmark{'name'}, $nextsnapshot); + (my $ret) = syncbookmark($sourcehost, $sourcefs, $targethost, $targetfs, $bookmark, $nextsnapshot); $ret == 0 or do { if ($exitcode < 2) { $exitcode = 2; } return 0; @@ -706,13 +705,13 @@ sub syncdataset { $matchingsnap = $nextsnapshot; $matchingsnapescaped = escapeshellparam($matchingsnap); } else { - ($exit, $stdout) = syncbookmark($sourcehost, $sourcefs, $targethost, $targetfs, $bookmark{'name'}, $newsyncsnap); + ($exit, $stdout) = syncbookmark($sourcehost, $sourcefs, $targethost, $targetfs, $bookmark, $newsyncsnap); $exit == 0 
or do { if (!$resume && $stdout =~ /\Qcontains partially-complete state\E/) { writelog('WARN', "resetting partially receive state"); resetreceivestate($targethost,$targetfs,$targetisroot); - (my $ret) = syncbookmark($sourcehost, $sourcefs, $targethost, $targetfs, $bookmark{'name'}, $newsyncsnap); + (my $ret) = syncbookmark($sourcehost, $sourcefs, $targethost, $targetfs, $bookmark, $newsyncsnap); $ret == 0 or do { if ($exitcode < 2) { $exitcode = 2; } return 0; @@ -727,7 +726,7 @@ sub syncdataset { # do a normal replication if bookmarks aren't used or if previous # bookmark replication was only done to the next oldest snapshot - if (!%bookmark || $nextsnapshot) { + if (!$bookmark || $nextsnapshot) { if ($matchingsnap eq $newsyncsnap) { # edge case: bookmark replication used the latest snapshot return 0; @@ -854,19 +853,9 @@ sub syncdataset { if (defined $args{'delete-target-snapshots'}) { # Find the snapshots that exist on the target, filter with # those that exist on the source. Remaining are the snapshots - # that are only on the target. Then sort to remove the oldest - # snapshots first. - - # regather snapshots on source and target - %snaps = getsnaps('source',$sourcehost,$sourcefs,$sourceisroot,0); - - if ($targetexists) { - my %targetsnaps = getsnaps('target',$targethost,$targetfs,$targetisroot,0); - my %sourcesnaps = %snaps; - %snaps = (%sourcesnaps, %targetsnaps); - } - - my @to_delete = sort { sortsnapshots(\%snaps, $a, $b) } grep {!exists $snaps{'source'}{$_}} keys %{ $snaps{'target'} }; + # that are only on the target. Then sort by creation date, as + # to remove the oldest snapshots first. + my @to_delete = sort { $snaps{'target'}{$a}{'creation'}<=>$snaps{'target'}{$b}{'creation'} } grep {!exists $snaps{'source'}{$_}} keys %{ $snaps{'target'} }; while (@to_delete) { # Create batch of snapshots to remove my $snaps = join ',', splice(@to_delete, 0, 50); @@ -1531,22 +1520,9 @@ sub readablebytes { return $disp; } -sub sortsnapshots { - my ($snaps, $left, $right) = @_; - if (defined $snaps->{'source'}{$left}{'createtxg'} && defined $snaps->{'source'}{$right}{'createtxg'}) { - return $snaps->{'source'}{$left}{'createtxg'} <=> $snaps->{'source'}{$right}{'createtxg'}; - } - - if (defined $snaps->{'source'}{$left}{'creation'} && defined $snaps->{'source'}{$right}{'creation'}) { - return $snaps->{'source'}{$left}{'creation'} <=> $snaps->{'source'}{$right}{'creation'}; - } - - return 0; -} - sub getoldestsnapshot { my $snaps = shift; - foreach my $snap (sort { sortsnapshots($snaps, $a, $b) } keys %{ $snaps{'source'} }) { + foreach my $snap ( sort { $snaps{'source'}{$a}{'creation'}<=>$snaps{'source'}{$b}{'creation'} } keys %{ $snaps{'source'} }) { # return on first snap found - it's the oldest return $snap; } @@ -1560,7 +1536,7 @@ sub getoldestsnapshot { sub getnewestsnapshot { my $snaps = shift; - foreach my $snap (sort { sortsnapshots($snaps, $b, $a) } keys %{ $snaps{'source'} }) { + foreach my $snap ( sort { $snaps{'source'}{$b}{'creation'}<=>$snaps{'source'}{$a}{'creation'} } keys %{ $snaps{'source'} }) { # return on first snap found - it's the newest writelog('DEBUG', "NEWEST SNAPSHOT: $snap"); return $snap; @@ -1739,7 +1715,7 @@ sub pruneoldsyncsnaps { sub getmatchingsnapshot { my ($sourcefs, $targetfs, $snaps) = @_; - foreach my $snap ( sort { sortsnapshots($snaps, $b, $a) } keys %{ $snaps{'source'} }) { + foreach my $snap ( sort { $snaps{'source'}{$b}{'creation'}<=>$snaps{'source'}{$a}{'creation'} } keys %{ $snaps{'source'} }) { if (defined $snaps{'target'}{$snap}) { if 
($snaps{'source'}{$snap}{'guid'} == $snaps{'target'}{$snap}{'guid'}) { return $snap; @@ -1874,8 +1850,88 @@ sub dumphash() { writelog('INFO', Dumper($hash)); } -sub getsnaps { - my ($type,$rhost,$fs,$isroot,$use_fallback,%snaps) = @_; +sub getsnaps() { + my ($type,$rhost,$fs,$isroot,%snaps) = @_; + my $mysudocmd; + my $fsescaped = escapeshellparam($fs); + if ($isroot) { $mysudocmd = ''; } else { $mysudocmd = $sudocmd; } + + my $rhostOriginal = $rhost; + + if ($rhost ne '') { + $rhost = "$sshcmd $rhost"; + # double escaping needed + $fsescaped = escapeshellparam($fsescaped); + } + + my $getsnapcmd = "$rhost $mysudocmd $zfscmd get -Hpd 1 -t snapshot guid,creation $fsescaped"; + if ($debug) { + $getsnapcmd = "$getsnapcmd |"; + writelog('DEBUG', "getting list of snapshots on $fs using $getsnapcmd..."); + } else { + $getsnapcmd = "$getsnapcmd 2>/dev/null |"; + } + open FH, $getsnapcmd; + my @rawsnaps = ; + close FH or do { + # fallback (solaris for example doesn't support the -t option) + return getsnapsfallback($type,$rhostOriginal,$fs,$isroot,%snaps); + }; + + # this is a little obnoxious. get guid,creation returns guid,creation on two separate lines + # as though each were an entirely separate get command. + + my %creationtimes=(); + + foreach my $line (@rawsnaps) { + $line =~ /\Q$fs\E\@(\S*)/; + my $snapname = $1; + + if (!snapisincluded($snapname)) { next; } + + # only import snap guids from the specified filesystem + if ($line =~ /\Q$fs\E\@.*\tguid/) { + chomp $line; + my $guid = $line; + $guid =~ s/^.*\tguid\t*(\d*).*/$1/; + my $snap = $line; + $snap =~ s/^.*\@(.*)\tguid.*$/$1/; + $snaps{$type}{$snap}{'guid'}=$guid; + } + # only import snap creations from the specified filesystem + elsif ($line =~ /\Q$fs\E\@.*\tcreation/) { + chomp $line; + my $creation = $line; + $creation =~ s/^.*\tcreation\t*(\d*).*/$1/; + my $snap = $line; + $snap =~ s/^.*\@(.*)\tcreation.*$/$1/; + + # the accuracy of the creation timestamp is only for a second, but + # snapshots in the same second are highly likely. The list command + # has an ordered output so we append another three digit running number + # to the creation timestamp and make sure those are ordered correctly + # for snapshot with the same creation timestamp + my $counter = 0; + my $creationsuffix; + while ($counter < 999) { + $creationsuffix = sprintf("%s%03d", $creation, $counter); + if (!defined $creationtimes{$creationsuffix}) { + $creationtimes{$creationsuffix} = 1; + last; + } + $counter += 1; + } + + $snaps{$type}{$snap}{'creation'}=$creationsuffix; + } + } + + return %snaps; +} + +sub getsnapsfallback() { + # fallback (solaris for example doesn't support the -t option) + my ($type,$rhost,$fs,$isroot,%snaps) = @_; my $mysudocmd; my $fsescaped = escapeshellparam($fs); if ($isroot) { $mysudocmd = ''; } else { $mysudocmd = $sudocmd; } @@ -1886,67 +1942,73 @@ sub getsnaps { $fsescaped = escapeshellparam($fsescaped); } - my $getsnapcmd = $use_fallback - ? 
"$rhost $mysudocmd $zfscmd get -Hpd 1 all $fsescaped" - : "$rhost $mysudocmd $zfscmd get -Hpd 1 -t snapshot all $fsescaped"; - - if ($debug) { - $getsnapcmd = "$getsnapcmd |"; - writelog('DEBUG', "getting list of snapshots on $fs using $getsnapcmd..."); - } else { - $getsnapcmd = "$getsnapcmd 2>/dev/null |"; - } + my $getsnapcmd = "$rhost $mysudocmd $zfscmd get -Hpd 1 type,guid,creation $fsescaped |"; + writelog('WARN', "snapshot listing failed, trying fallback command"); + writelog('DEBUG', "FALLBACK, getting list of snapshots on $fs using $getsnapcmd..."); open FH, $getsnapcmd; my @rawsnaps = ; - close FH or do { - if (!$use_fallback) { - writelog('WARN', "snapshot listing failed, trying fallback command"); - return getsnaps($type, $rhost, $fs, $isroot, 1, %snaps); + close FH or die "CRITICAL ERROR: snapshots couldn't be listed for $fs (exit code $?)"; + + my %creationtimes=(); + + my $state = 0; + foreach my $line (@rawsnaps) { + if ($state < 0) { + $state++; + next; } - die "CRITICAL ERROR: snapshots couldn't be listed for $fs (exit code $?)"; - }; - my %snap_data; - my %creationtimes; + if ($state eq 0) { + if ($line !~ /\Q$fs\E\@.*\ttype\s*snapshot/) { + # skip non snapshot type object + $state = -2; + next; + } + } elsif ($state eq 1) { + if ($line !~ /\Q$fs\E\@.*\tguid/) { + die "CRITICAL ERROR: snapshots couldn't be listed for $fs (guid parser error)"; + } - for my $line (@rawsnaps) { - chomp $line; - my ($dataset, $property, $value) = split /\t/, $line; - die "CRITICAL ERROR: Unexpected line format in $line" unless defined $value; + chomp $line; + my $guid = $line; + $guid =~ s/^.*\tguid\t*(\d*).*/$1/; + my $snap = $line; + $snap =~ s/^.*\@(.*)\tguid.*$/$1/; + if (!snapisincluded($snap)) { next; } + $snaps{$type}{$snap}{'guid'}=$guid; + } elsif ($state eq 2) { + if ($line !~ /\Q$fs\E\@.*\tcreation/) { + die "CRITICAL ERROR: snapshots couldn't be listed for $fs (creation parser error)"; + } - my (undef, $snap) = split /@/, $dataset; - die "CRITICAL ERROR: Unexpected dataset format in $line" unless $snap; + chomp $line; + my $creation = $line; + $creation =~ s/^.*\tcreation\t*(\d*).*/$1/; + my $snap = $line; + $snap =~ s/^.*\@(.*)\tcreation.*$/$1/; + if (!snapisincluded($snap)) { next; } - if (!snapisincluded($snap)) { next; } - - $snap_data{$snap}{$property} = $value; - - # the accuracy of the creation timestamp is only for a second, but - # snapshots in the same second are highly likely. The list command - # has an ordered output so we append another three digit running number - # to the creation timestamp and make sure those are ordered correctly - # for snapshot with the same creation timestamp - if ($property eq 'creation') { + # the accuracy of the creation timestamp is only for a second, but + # snapshots in the same second are highly likely. 
The list command + # has an ordered output so we append another three digit running number + # to the creation timestamp and make sure those are ordered correctly + # for snapshot with the same creation timestamp my $counter = 0; my $creationsuffix; while ($counter < 999) { - $creationsuffix = sprintf("%s%03d", $value, $counter); + $creationsuffix = sprintf("%s%03d", $creation, $counter); if (!defined $creationtimes{$creationsuffix}) { $creationtimes{$creationsuffix} = 1; last; } $counter += 1; } - $snap_data{$snap}{'creation'} = $creationsuffix; - } - } - for my $snap (keys %snap_data) { - if (!$use_fallback || $snap_data{$snap}{'type'} eq 'snapshot') { - $snaps{$type}{$snap}{'guid'} = $snap_data{$snap}{'guid'}; - $snaps{$type}{$snap}{'createtxg'} = $snap_data{$snap}{'createtxg'}; - $snaps{$type}{$snap}{'creation'} = $snap_data{$snap}{'creation'}; + $snaps{$type}{$snap}{'creation'}=$creationsuffix; + $state = -1; } + + $state++; } return %snaps; @@ -1965,7 +2027,7 @@ sub getbookmarks() { } my $error = 0; - my $getbookmarkcmd = "$rhost $mysudocmd $zfscmd get -Hpd 1 -t bookmark all $fsescaped 2>&1 |"; + my $getbookmarkcmd = "$rhost $mysudocmd $zfscmd get -Hpd 1 -t bookmark guid,creation $fsescaped 2>&1 |"; writelog('DEBUG', "getting list of bookmarks on $fs using $getbookmarkcmd..."); open FH, $getbookmarkcmd; my @rawbookmarks = ; @@ -1980,44 +2042,46 @@ sub getbookmarks() { die "CRITICAL ERROR: bookmarks couldn't be listed for $fs (exit code $?)"; } - my %bookmark_data; - my %creationtimes; + # this is a little obnoxious. get guid,creation returns guid,creation on two separate lines + # as though each were an entirely separate get command. - for my $line (@rawbookmarks) { - chomp $line; - my ($dataset, $property, $value) = split /\t/, $line; - die "CRITICAL ERROR: Unexpected line format in $line" unless defined $value; + my $lastguid; + my %creationtimes=(); - my (undef, $bookmark) = split /#/, $dataset; - die "CRITICAL ERROR: Unexpected dataset format in $line" unless $bookmark; + foreach my $line (@rawbookmarks) { + # only import bookmark guids, creation from the specified filesystem + if ($line =~ /\Q$fs\E\#.*\tguid/) { + chomp $line; + $lastguid = $line; + $lastguid =~ s/^.*\tguid\t*(\d*).*/$1/; + my $bookmark = $line; + $bookmark =~ s/^.*\#(.*)\tguid.*$/$1/; + $bookmarks{$lastguid}{'name'}=$bookmark; + } elsif ($line =~ /\Q$fs\E\#.*\tcreation/) { + chomp $line; + my $creation = $line; + $creation =~ s/^.*\tcreation\t*(\d*).*/$1/; + my $bookmark = $line; + $bookmark =~ s/^.*\#(.*)\tcreation.*$/$1/; - $bookmark_data{$bookmark}{$property} = $value; - - # the accuracy of the creation timestamp is only for a second, but - # bookmarks in the same second are possible. The list command - # has an ordered output so we append another three digit running number - # to the creation timestamp and make sure those are ordered correctly - # for bookmarks with the same creation timestamp - if ($property eq 'creation') { + # the accuracy of the creation timestamp is only for a second, but + # bookmarks in the same second are possible. 
The list command + # has an ordered output so we append another three digit running number + # to the creation timestamp and make sure those are ordered correctly + # for bookmarks with the same creation timestamp my $counter = 0; my $creationsuffix; while ($counter < 999) { - $creationsuffix = sprintf("%s%03d", $value, $counter); + $creationsuffix = sprintf("%s%03d", $creation, $counter); if (!defined $creationtimes{$creationsuffix}) { $creationtimes{$creationsuffix} = 1; last; } $counter += 1; } - $bookmark_data{$bookmark}{'creation'} = $creationsuffix; - } - } - for my $bookmark (keys %bookmark_data) { - my $guid = $bookmark_data{$bookmark}{'guid'}; - $bookmarks{$guid}{'name'} = $bookmark; - $bookmarks{$guid}{'creation'} = $bookmark_data{$bookmark}{'creation'}; - $bookmarks{$guid}{'createtxg'} = $bookmark_data{$bookmark}{'createtxg'}; + $bookmarks{$lastguid}{'creation'}=$creationsuffix; + } } return %bookmarks; diff --git a/tests/run-tests.sh b/tests/run-tests.sh index ec14721..34813d1 100755 --- a/tests/run-tests.sh +++ b/tests/run-tests.sh @@ -2,7 +2,7 @@ # run's all the available tests -for test in $(find . -mindepth 1 -maxdepth 1 -type d -printf "%P\n" | sort -g); do +for test in */; do if [ ! -x "${test}/run.sh" ]; then continue fi diff --git a/tests/syncoid/011_sync_out-of-order_snapshots/run.sh b/tests/syncoid/011_sync_out-of-order_snapshots/run.sh deleted file mode 100755 index bb96ad0..0000000 --- a/tests/syncoid/011_sync_out-of-order_snapshots/run.sh +++ /dev/null @@ -1,50 +0,0 @@ -#!/bin/bash - -# test verifying snapshots with out-of-order snapshot creation datetimes - -set -x -set -e - -. ../../common/lib.sh - -if [ -z "$ALLOW_INVASIVE_TESTS" ]; then - exit 130 -fi - -POOL_IMAGE="/tmp/syncoid-test-11.zpool" -POOL_SIZE="64M" -POOL_NAME="syncoid-test-11" - -truncate -s "${POOL_SIZE}" "${POOL_IMAGE}" - -zpool create -m none -f "${POOL_NAME}" "${POOL_IMAGE}" - -function cleanUp { - zpool export "${POOL_NAME}" - rm -f "${POOL_IMAGE}" -} - -# export pool and remove the image in any case -trap cleanUp EXIT - -zfs create "${POOL_NAME}"/before -zfs snapshot "${POOL_NAME}"/before@this-snapshot-should-make-it-into-the-after-dataset - -disableTimeSync -setdate 1155533696 -zfs snapshot "${POOL_NAME}"/before@oldest-snapshot - -zfs snapshot "${POOL_NAME}"/before@another-snapshot-does-not-matter -../../../syncoid --sendoptions="Lec" "${POOL_NAME}"/before "${POOL_NAME}"/after - -# verify -saveSnapshotList "${POOL_NAME}" "snapshot-list.txt" - -grep "${POOL_NAME}/before@this-snapshot-should-make-it-into-the-after-dataset" "snapshot-list.txt" || exit $? -grep "${POOL_NAME}/after@this-snapshot-should-make-it-into-the-after-dataset" "snapshot-list.txt" || exit $? -grep "${POOL_NAME}/before@oldest-snapshot" "snapshot-list.txt" || exit $? -grep "${POOL_NAME}/after@oldest-snapshot" "snapshot-list.txt" || exit $? -grep "${POOL_NAME}/before@another-snapshot-does-not-matter" "snapshot-list.txt" || exit $? -grep "${POOL_NAME}/after@another-snapshot-does-not-matter" "snapshot-list.txt" || exit $? - -exit 0 diff --git a/tests/syncoid/run-tests.sh b/tests/syncoid/run-tests.sh index 8307413..0e7570e 100755 --- a/tests/syncoid/run-tests.sh +++ b/tests/syncoid/run-tests.sh @@ -2,7 +2,7 @@ # run's all the available tests -for test in $(find . -mindepth 1 -maxdepth 1 -type d -printf "%P\n" | sort -g); do +for test in */; do if [ ! 
-x "${test}/run.sh" ]; then continue fi From aa2c693e622036c6d6c472b115dea1273b3ca41a Mon Sep 17 00:00:00 2001 From: Adam Fulton Date: Mon, 1 Apr 2024 11:53:45 -0500 Subject: [PATCH 13/14] fix(syncoid): regather $snaps on --delete-target-snapshots flag --- syncoid | 14 ++++++++++++-- 1 file changed, 12 insertions(+), 2 deletions(-) diff --git a/syncoid b/syncoid index bd041ae..b7a1c13 100755 --- a/syncoid +++ b/syncoid @@ -853,8 +853,18 @@ sub syncdataset { if (defined $args{'delete-target-snapshots'}) { # Find the snapshots that exist on the target, filter with # those that exist on the source. Remaining are the snapshots - # that are only on the target. Then sort by creation date, as - # to remove the oldest snapshots first. + # that are only on the target. Then sort to remove the oldest + # snapshots first. + + # regather snapshots on source and target + %snaps = getsnaps('source',$sourcehost,$sourcefs,$sourceisroot); + + if ($targetexists) { + my %targetsnaps = getsnaps('target',$targethost,$targetfs,$targetisroot); + my %sourcesnaps = %snaps; + %snaps = (%sourcesnaps, %targetsnaps); + } + my @to_delete = sort { $snaps{'target'}{$a}{'creation'}<=>$snaps{'target'}{$b}{'creation'} } grep {!exists $snaps{'source'}{$_}} keys %{ $snaps{'target'} }; while (@to_delete) { # Create batch of snapshots to remove From becec663207b792f15e5f47a16438899664dd560 Mon Sep 17 00:00:00 2001 From: Christoph Klaffl Date: Mon, 9 Jun 2025 23:21:44 +0200 Subject: [PATCH 14/14] prepare v2.3.0 --- CHANGELIST | 20 ++++++++++++++++++++ VERSION | 2 +- findoid | 2 +- packages/debian/changelog | 24 ++++++++++++++++++++++++ packages/rhel/sanoid.spec | 4 +++- sanoid | 2 +- syncoid | 2 +- tests/1_one_year/run.sh | 2 +- tests/2_dst_handling/run.sh | 2 +- tests/common/lib.sh | 10 +++++++++- 10 files changed, 62 insertions(+), 8 deletions(-) diff --git a/CHANGELIST b/CHANGELIST index 314e431..de00f66 100644 --- a/CHANGELIST +++ b/CHANGELIST @@ -1,3 +1,23 @@ +2.3.0 [overall] documentation updates, small fixes (@thecatontheflat, @mjeanson, @jiawen, @EchterAgo, @jan-krieg, @dlangille, @rightaditya, @MynaITLabs, @ossimoi, @alexgarel, @TopherIsSwell, @jimsalterjrs, @phreaker0) + [sanoid] implemented adding of taken snapshots to the cache file and a new parameter for setting an custom cache expire time (@phreaker0) + [sanoid] ignore duplicate template keys (@phreaker0) + [packaging] fix debian packaging with debian 12 and ubuntu 24.04 (@phreaker0) + [syncoid] fix typo preventing resumed transfer with --sendoptions (@Deltik) + [sanoid] remove iszfsbusy check to boost performance (@sdettmer) + [sanoid] write cache files in an atomic way to prevent race conditions (@phreaker0) + [sanoid] improve performance (especially for monitor commands) by caching the dataset list (@phreaker0) + [syncoid] add zstdmt compress options (@0xFelix) + [syncoid] added missing status information about what is done and provide more details (@phreaker0) + [syncoid] rename ssh control socket to avoid problem with length limits and conflicts (@phreaker0) + [syncoid] support relative paths (@phreaker0) + [syncoid] regather snapshots on --delete-target-snapshots flag (@Adam Fulton) + [sanoid] allow monitor commands to be run without root by using only the cache file (@Pajkastare) + [syncoid] add --include-snaps and --exclude-snaps options (@mr-vinn, @phreaker0) + [syncoid] escape property key and value pair in case of property preservation (@phreaker0) + [syncoid] prevent destroying of root dataset which leads to infinite loop because it can't be destroyed 
(@phreaker0) + [syncoid] modify zfs-get argument order for portability (@Rantherhin) + [sanoid] trim config values (@phreaker0) + 2.2.0 [overall] documentation updates, small fixes (@azmodude, @deviantintegral, @jimsalterjrs, @alexhaydock, @cbreak-black, @kd8bny, @JavaScriptDude, @veeableful, @rsheasby, @Topslakr, @mavhc, @adam-stamand, @joelishness, @jsoref, @dodexahedron, @phreaker0) [syncoid] implemented flag for preserving properties without the zfs -p flag (@phreaker0) [syncoid] implemented target snapshot deletion (@mat813) diff --git a/VERSION b/VERSION index ccbccc3..276cbf9 100644 --- a/VERSION +++ b/VERSION @@ -1 +1 @@ -2.2.0 +2.3.0 diff --git a/findoid b/findoid index 2561246..adda5cc 100755 --- a/findoid +++ b/findoid @@ -4,7 +4,7 @@ # from http://www.gnu.org/licenses/gpl-3.0.html on 2014-11-17. A copy should also be available in this # project's Git repository at https://github.com/jimsalterjrs/sanoid/blob/master/LICENSE. -$::VERSION = '2.2.0'; +$::VERSION = '2.3.0'; use strict; use warnings; diff --git a/packages/debian/changelog b/packages/debian/changelog index 00c0c07..ba369a1 100644 --- a/packages/debian/changelog +++ b/packages/debian/changelog @@ -1,3 +1,27 @@ +sanoid (2.3.0) unstable; urgency=medium + + [overall] documentation updates, small fixes (@thecatontheflat, @mjeanson, @jiawen, @EchterAgo, @jan-krieg, @dlangille, @rightaditya, @MynaITLabs, @ossimoi, @alexgarel, @TopherIsSwell, @jimsalterjrs, @phreaker0) + [sanoid] implemented adding of taken snapshots to the cache file and a new parameter for setting an custom cache expire time (@phreaker0) + [sanoid] ignore duplicate template keys (@phreaker0) + [packaging] fix debian packaging with debian 12 and ubuntu 24.04 (@phreaker0) + [syncoid] fix typo preventing resumed transfer with --sendoptions (@Deltik) + [sanoid] remove iszfsbusy check to boost performance (@sdettmer) + [sanoid] write cache files in an atomic way to prevent race conditions (@phreaker0) + [sanoid] improve performance (especially for monitor commands) by caching the dataset list (@phreaker0) + [syncoid] add zstdmt compress options (@0xFelix) + [syncoid] added missing status information about what is done and provide more details (@phreaker0) + [syncoid] rename ssh control socket to avoid problem with length limits and conflicts (@phreaker0) + [syncoid] support relative paths (@phreaker0) + [syncoid] regather snapshots on --delete-target-snapshots flag (@Adam Fulton) + [sanoid] allow monitor commands to be run without root by using only the cache file (@Pajkastare) + [syncoid] add --include-snaps and --exclude-snaps options (@mr-vinn, @phreaker0) + [syncoid] escape property key and value pair in case of property preservation (@phreaker0) + [syncoid] prevent destroying of root dataset which leads to infinite loop because it can't be destroyed (@phreaker0) + [syncoid] modify zfs-get argument order for portability (@Rantherhin) + [sanoid] trim config values (@phreaker0) + + -- Jim Salter Tue, 05 Jun 2025 22:47:00 +0200 + sanoid (2.2.0) unstable; urgency=medium [overall] documentation updates, small fixes (@azmodude, @deviantintegral, @jimsalterjrs, @alexhaydock, @cbreak-black, @kd8bny, @JavaScriptDude, @veeableful, @rsheasby, @Topslakr, @mavhc, @adam-stamand, @joelishness, @jsoref, @dodexahedron, @phreaker0) diff --git a/packages/rhel/sanoid.spec b/packages/rhel/sanoid.spec index 218f52d..ce48247 100644 --- a/packages/rhel/sanoid.spec +++ b/packages/rhel/sanoid.spec @@ -1,4 +1,4 @@ -%global version 2.2.0 +%global version 2.3.0 %global git_tag 
v%{version} # Enable with systemctl "enable sanoid.timer" @@ -111,6 +111,8 @@ echo "* * * * * root %{_sbindir}/sanoid --cron" > %{buildroot}%{_docdir}/%{name} %endif %changelog +* Tue Jun 05 2025 Christoph Klaffl - 2.3.0 +- Bump to 2.3.0 * Tue Jul 18 2023 Christoph Klaffl - 2.2.0 - Bump to 2.2.0 * Tue Nov 24 2020 Christoph Klaffl - 2.1.0 diff --git a/sanoid b/sanoid index c33451c..4a71319 100755 --- a/sanoid +++ b/sanoid @@ -4,7 +4,7 @@ # from http://www.gnu.org/licenses/gpl-3.0.html on 2014-11-17. A copy should also be available in this # project's Git repository at https://github.com/jimsalterjrs/sanoid/blob/master/LICENSE. -$::VERSION = '2.2.0'; +$::VERSION = '2.3.0'; my $MINIMUM_DEFAULTS_VERSION = 2; use strict; diff --git a/syncoid b/syncoid index b7a1c13..956f3e7 100755 --- a/syncoid +++ b/syncoid @@ -4,7 +4,7 @@ # from http://www.gnu.org/licenses/gpl-3.0.html on 2014-11-17. A copy should also be available in this # project's Git repository at https://github.com/jimsalterjrs/sanoid/blob/master/LICENSE. -$::VERSION = '2.2.0'; +$::VERSION = '2.3.0'; use strict; use warnings; diff --git a/tests/1_one_year/run.sh b/tests/1_one_year/run.sh index fe76946..48b3c7b 100755 --- a/tests/1_one_year/run.sh +++ b/tests/1_one_year/run.sh @@ -39,7 +39,7 @@ function cleanUp { trap cleanUp EXIT while [ $timestamp -le $END ]; do - setdate $timestamp; date; "${SANOID}" --cron --verbose + setdate $timestamp; date; "${SANOID}" --cron --verbose --cache-ttl=2592000 timestamp=$((timestamp+3600)) done diff --git a/tests/2_dst_handling/run.sh b/tests/2_dst_handling/run.sh index 3231631..2c3fd3e 100755 --- a/tests/2_dst_handling/run.sh +++ b/tests/2_dst_handling/run.sh @@ -42,7 +42,7 @@ function cleanUp { trap cleanUp EXIT while [ $timestamp -le $END ]; do - setdate $timestamp; date; "${SANOID}" --cron --verbose + setdate $timestamp; date; "${SANOID}" --cron --verbose --cache-ttl=2592000 timestamp=$((timestamp+900)) done diff --git a/tests/common/lib.sh b/tests/common/lib.sh index 9c88eff..84b2c63 100644 --- a/tests/common/lib.sh +++ b/tests/common/lib.sh @@ -10,7 +10,10 @@ function setup { export SANOID="../../sanoid" # make sure that there is no cache file - rm -f /var/cache/sanoidsnapshots.txt + rm -f /var/cache/sanoid/snapshots.txt + rm -f /var/cache/sanoid/datasets.txt + + mkdir -p /etc/sanoid # install needed sanoid configuration files [ -f sanoid.conf ] && cp sanoid.conf /etc/sanoid/sanoid.conf @@ -51,6 +54,11 @@ function disableTimeSync { if [ $? -eq 0 ]; then timedatectl set-ntp 0 fi + + which systemctl > /dev/null + if [ $? -eq 0 ]; then + systemctl is-active virtualbox-guest-utils.service && systemctl stop virtualbox-guest-utils.service + fi } function saveSnapshotList {