From 44f7cae115e5e16d593d771bb6997dffb66f1212 Mon Sep 17 00:00:00 2001 From: Christoph Klaffl Date: Thu, 13 Feb 2020 01:01:09 +0100 Subject: [PATCH 001/130] implemented pre pruning script hook --- sanoid | 21 +++++++++++++++++++-- sanoid.conf | 2 ++ sanoid.defaults.conf | 1 + 3 files changed, 22 insertions(+), 2 deletions(-) diff --git a/sanoid b/sanoid index c280d46..f9dfe33 100755 --- a/sanoid +++ b/sanoid @@ -316,6 +316,25 @@ sub prune_snapshots { if (checklock('sanoid_pruning')) { writelock('sanoid_pruning'); foreach my $snap( @prunesnaps ){ + my $dataset = (split '@', $snap)[0]; + my $snapname = (split '@', $snap)[1]; + + if (! $args{'readonly'} && $config{$dataset}{'pre_pruning_script'}) { + $ENV{'SANOID_TARGET'} = $dataset; + $ENV{'SANOID_SNAPNAME'} = $snapname; + if ($args{'verbose'}) { print "executing pre_pruning_script '".$config{$dataset}{'pre_pruning_script'}."' on dataset '$dataset'\n"; } + my $ret = runscript('pre_pruning_script', $dataset); + + delete $ENV{'SANOID_TARGET'}; + delete $ENV{'SANOID_SNAPNAME'}; + + if ($ret != 0) { + # warning was already thrown by runscript function + # skip pruning if pre snapshot script returns non zero exit code + next; + } + } + if ($args{'verbose'}) { print "INFO: pruning $snap ... \n"; } if (!$args{'force-prune'} && iszfsbusy($path)) { if ($args{'verbose'}) { print "INFO: deferring pruning of $snap - $path is currently in zfs send or receive.\n"; } @@ -323,8 +342,6 @@ sub prune_snapshots { if (! $args{'readonly'}) { if (system($zfs, "destroy", $snap) == 0) { $pruned{$snap} = 1; - my $dataset = (split '@', $snap)[0]; - my $snapname = (split '@', $snap)[1]; if ($config{$dataset}{'pruning_script'}) { $ENV{'SANOID_TARGET'} = $dataset; $ENV{'SANOID_SNAPNAME'} = $snapname; diff --git a/sanoid.conf b/sanoid.conf index 6bd5c62..6d982bc 100644 --- a/sanoid.conf +++ b/sanoid.conf @@ -97,6 +97,8 @@ pre_snapshot_script = /path/to/script.sh ### run script after snapshot post_snapshot_script = /path/to/script.sh + ### run script before pruning snapshot + pre_pruning_script = /path/to/script.sh ### run script after pruning snapshot pruning_script = /path/to/script.sh ### don't take an inconsistent snapshot (skip if pre script fails) diff --git a/sanoid.defaults.conf b/sanoid.defaults.conf index a9ca382..d65601c 100644 --- a/sanoid.defaults.conf +++ b/sanoid.defaults.conf @@ -21,6 +21,7 @@ skip_children = pre_snapshot_script = post_snapshot_script = +pre_pruning_script = pruning_script = script_timeout = 5 no_inconsistent_snapshot = From 84bc3164f0414da416b13e9902dc8680631add9d Mon Sep 17 00:00:00 2001 From: Christoph Klaffl Date: Fri, 21 Feb 2020 20:34:28 +0100 Subject: [PATCH 002/130] implemented option for direct connection via socat and busybox nc --- README.md | 5 +++++ syncoid | 44 +++++++++++++++++++++++++++++++++++++++++++- 2 files changed, 48 insertions(+), 1 deletion(-) diff --git a/README.md b/README.md index c2b502b..2afe3b0 100644 --- a/README.md +++ b/README.md @@ -262,6 +262,11 @@ As of 1.4.18, syncoid also automatically supports and enables resume of interrup Use specified identity file as per ssh -i. ++ --insecure-direct-connection=IP:PORT[,IP:PORT] + + WARNING: This is an insecure option as the data is not encrypted while being sent over the network. Only use if you trust the complete network path. + Use a direct tcp connection (with socat and busybox nc) for the actual zfs send/recv stream. All control commands are still executed via the ssh connection. 
The first address pair is used for connecting to the target host from the source host and the second pair is for listening on the target host. If the later isn't provided the same as the former is used. This can be used for saturating high throughput connection like >= 10GBe network which isn't easy with the overhead off ssh. It can also be useful for encrypted datasets to lower the cpu usage needed for replication but be aware that metadata is NOT ENCRYPTED in this case. (This option can't be used for relaying between two remote hosts) + + --quiet Supress non-error output. diff --git a/syncoid b/syncoid index 2eef326..cef0e3a 100755 --- a/syncoid +++ b/syncoid @@ -25,7 +25,7 @@ GetOptions(\%args, "no-command-checks", "monitor-version", "compress=s", "dumpsn "source-bwlimit=s", "target-bwlimit=s", "sshkey=s", "sshport=i", "sshcipher|c=s", "sshoption|o=s@", "debug", "quiet", "no-stream", "no-sync-snap", "no-resume", "exclude=s@", "skip-parent", "identifier=s", "no-clone-handling", "no-privilege-elevation", "force-delete", "no-clone-rollback", "no-rollback", - "create-bookmark", "pv-options=s" => \$pvoptions, + "create-bookmark", "pv-options=s" => \$pvoptions, "insecure-direct-connection=s", "mbuffer-size=s" => \$mbuffer_size) or pod2usage(2); my %compressargs = %{compressargset($args{'compress'} || 'default')}; # Can't be done with GetOptions arg, as default still needs to be set @@ -120,6 +120,31 @@ if ($debug) { print "DEBUG: SSHCMD: $sshcmd\n"; } my ($sourcehost,$sourcefs,$sourceisroot) = getssh($rawsourcefs); my ($targethost,$targetfs,$targetisroot) = getssh($rawtargetfs); +# handle insecure direct connection arguments +my $directconnect = ""; +my $directlisten = ""; + +if (length $args{'insecure-direct-connection'}) { + if ($sourcehost ne '' && $targethost ne '') { + print("CRITICAL: relaying between remote hosts is not supported with insecure direct connection!\n"); + pod2usage(2); + exit 127; + } + + my @parts = split(',', $args{'insecure-direct-connection'}); + if (scalar @parts > 2) { + print("CRITICAL: invalid insecure-direct-connection argument!\n"); + pod2usage(2); + exit 127; + } elsif (scalar @parts == 2) { + $directconnect = $parts[0]; + $directlisten = $parts[1]; + } else { + $directconnect = $args{'insecure-direct-connection'}; + $directlisten = $args{'insecure-direct-connection'}; + } +} + my $sourcesudocmd = $sourceisroot ? '' : $sudocmd; my $targetsudocmd = $targetisroot ? '' : $sudocmd; @@ -1250,9 +1275,17 @@ sub buildsynccmd { if ($avail{'localpv'} && !$quiet) { $synccmd .= " $pvcmd $pvoptions -s $pvsize |"; } if ($avail{'compress'}) { $synccmd .= " $compressargs{'cmd'} |"; } if ($avail{'sourcembuffer'}) { $synccmd .= " $mbuffercmd $args{'source-bwlimit'} $mbufferoptions |"; } + if (length $directconnect) { + # try 10 times over 10 seconds to connect + $synccmd .= " socat - TCP:" . $directconnect . ",retry=10,interval=1 |"; + } $synccmd .= " $sshcmd $targethost "; my $remotecmd = ""; + if (length $directlisten) { + # wait up to 10 seconds for a connection or error out + $remotecmd .= " busybox nc -l " . $directlisten . 
" -w 10 |"; + } if ($avail{'targetmbuffer'}) { $remotecmd .= " $mbuffercmd $args{'target-bwlimit'} $mbufferoptions |"; } if ($avail{'compress'}) { $remotecmd .= " $compressargs{'decomcmd'} |"; } $remotecmd .= " $recvcmd"; @@ -1265,9 +1298,17 @@ sub buildsynccmd { my $remotecmd = $sendcmd; if ($avail{'compress'}) { $remotecmd .= " | $compressargs{'cmd'}"; } if ($avail{'sourcembuffer'}) { $remotecmd .= " | $mbuffercmd $args{'source-bwlimit'} $mbufferoptions"; } + if (length $directconnect) { + # try 10 times over 10 seconds to connect + $remotecmd .= " | socat - TCP:" . $directconnect . ",retry=10,interval=1"; + } $synccmd = "$sshcmd $sourcehost " . escapeshellparam($remotecmd); $synccmd .= " | "; + if (length $directlisten) { + # wait up to 10 seconds for a connection or error out + $synccmd .= " busybox nc -l " . $directlisten . " -w 10 |"; + } if ($avail{'targetmbuffer'}) { $synccmd .= "$mbuffercmd $args{'target-bwlimit'} $mbufferoptions | "; } if ($avail{'compress'}) { $synccmd .= "$compressargs{'decomcmd'} | "; } if ($avail{'localpv'} && !$quiet) { $synccmd .= "$pvcmd $pvoptions -s $pvsize | "; } @@ -1911,6 +1952,7 @@ Options: --sshport=PORT Connects to remote on a particular port --sshcipher|c=CIPHER Passes CIPHER to ssh to use a particular cipher set --sshoption|o=OPTION Passes OPTION to ssh for remote usage. Can be specified multiple times + --insecure-direct-connection=IP:PORT[,IP:PORT] WARNING: DATA IS NOT ENCRYPTED. First address pair is for connecting to the target and the second for listening at the target --help Prints this helptext --version Prints the version number From 2e6abddb5e04b1acd18dba4c2be32b6838d0e49d Mon Sep 17 00:00:00 2001 From: Christoph Klaffl Date: Mon, 30 Mar 2020 19:01:54 +0200 Subject: [PATCH 003/130] fixed uninitialized value warning --- syncoid | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/syncoid b/syncoid index cef0e3a..554f5dd 100755 --- a/syncoid +++ b/syncoid @@ -120,6 +120,14 @@ if ($debug) { print "DEBUG: SSHCMD: $sshcmd\n"; } my ($sourcehost,$sourcefs,$sourceisroot) = getssh($rawsourcefs); my ($targethost,$targetfs,$targetisroot) = getssh($rawtargetfs); +my $sourcesudocmd = $sourceisroot ? '' : $sudocmd; +my $targetsudocmd = $targetisroot ? '' : $sudocmd; + +# figure out whether compression, mbuffering, pv +# are available on source, target, local machines. +# warn user of anything missing, then continue with sync. +my %avail = checkcommands(); + # handle insecure direct connection arguments my $directconnect = ""; my $directlisten = ""; @@ -145,14 +153,6 @@ if (length $args{'insecure-direct-connection'}) { } } -my $sourcesudocmd = $sourceisroot ? '' : $sudocmd; -my $targetsudocmd = $targetisroot ? '' : $sudocmd; - -# figure out whether compression, mbuffering, pv -# are available on source, target, local machines. -# warn user of anything missing, then continue with sync. -my %avail = checkcommands(); - my %snaps; my $exitcode = 0; From f0dfd46c9f4410ef1923bb0147771152b061598a Mon Sep 17 00:00:00 2001 From: Christoph Klaffl Date: Mon, 30 Mar 2020 20:15:09 +0200 Subject: [PATCH 004/130] use mbuffer tcp listen socket instead of busybox nc --- syncoid | 12 ++++++++---- 1 file changed, 8 insertions(+), 4 deletions(-) diff --git a/syncoid b/syncoid index 554f5dd..ede92e0 100755 --- a/syncoid +++ b/syncoid @@ -1284,9 +1284,11 @@ sub buildsynccmd { my $remotecmd = ""; if (length $directlisten) { # wait up to 10 seconds for a connection or error out - $remotecmd .= " busybox nc -l " . $directlisten . 
" -w 10 |"; + $remotecmd .= " $mbuffercmd $args{'target -bwlimit'} -W 10 -I " . $directlisten . " $mbufferoptions |"; + } else { + if ($avail{'targetmbuffer'}) { $remotecmd .= " $mbuffercmd $args{'target-bwlimit'} $mbufferoptions |"; } } - if ($avail{'targetmbuffer'}) { $remotecmd .= " $mbuffercmd $args{'target-bwlimit'} $mbufferoptions |"; } + if ($avail{'compress'}) { $remotecmd .= " $compressargs{'decomcmd'} |"; } $remotecmd .= " $recvcmd"; @@ -1307,9 +1309,11 @@ sub buildsynccmd { $synccmd .= " | "; if (length $directlisten) { # wait up to 10 seconds for a connection or error out - $synccmd .= " busybox nc -l " . $directlisten . " -w 10 |"; + $synccmd .= "$mbuffercmd $args{'target-bwlimit'} -W 10 -I " . $directlisten . " $mbufferoptions | "; + } else { + if ($avail{'targetmbuffer'}) { $synccmd .= "$mbuffercmd $args{'target-bwlimit'} $mbufferoptions | "; } } - if ($avail{'targetmbuffer'}) { $synccmd .= "$mbuffercmd $args{'target-bwlimit'} $mbufferoptions | "; } + if ($avail{'compress'}) { $synccmd .= "$compressargs{'decomcmd'} | "; } if ($avail{'localpv'} && !$quiet) { $synccmd .= "$pvcmd $pvoptions -s $pvsize | "; } $synccmd .= "$recvcmd"; From 807f6fa1bba3ab37ec30323a0bd6d4fcebee49b0 Mon Sep 17 00:00:00 2001 From: Christoph Klaffl Date: Tue, 31 Mar 2020 09:27:17 +0200 Subject: [PATCH 005/130] check if socat and mbuffer is available for insecure direct connection --- syncoid | 16 ++++++++++++++-- 1 file changed, 14 insertions(+), 2 deletions(-) diff --git a/syncoid b/syncoid index ede92e0..1f3de38 100755 --- a/syncoid +++ b/syncoid @@ -86,6 +86,7 @@ my $pscmd = 'ps'; my $pvcmd = 'pv'; my $mbuffercmd = 'mbuffer'; +my $socatcmd = 'socat'; my $sudocmd = 'sudo'; my $mbufferoptions = "-q -s 128k -m $mbuffer_size 2>/dev/null"; # currently using POSIX compatible command to check for program existence because we aren't depending on perl @@ -1032,6 +1033,14 @@ sub checkcommands { $avail{'compress'} = 0; } + if (length $args{'insecure-direct-connection'}) { + if ($debug) { print "DEBUG: checking availability of $socatcmd on source...\n"; } + my $socatAvailable = `$sourcessh $checkcmd $socatcmd 2>/dev/null`; + if ($socatAvailable eq '') { + die "CRIT: $socatcmd is needed on source for insecure direct connection!\n"; + } + } + if ($debug) { print "DEBUG: checking availability of $mbuffercmd on source...\n"; } $avail{'sourcembuffer'} = `$sourcessh $checkcmd $mbuffercmd 2>/dev/null`; if ($avail{'sourcembuffer'} eq '') { @@ -1044,6 +1053,9 @@ sub checkcommands { if ($debug) { print "DEBUG: checking availability of $mbuffercmd on target...\n"; } $avail{'targetmbuffer'} = `$targetssh $checkcmd $mbuffercmd 2>/dev/null`; if ($avail{'targetmbuffer'} eq '') { + if (length $args{'insecure-direct-connection'}) { + die "CRIT: $mbuffercmd is needed on target for insecure direct connection!\n"; + } if (!$quiet) { print "WARN: $mbuffercmd not available on target $t - sync will continue without target buffering.\n"; } $avail{'targetmbuffer'} = 0; } else { @@ -1277,7 +1289,7 @@ sub buildsynccmd { if ($avail{'sourcembuffer'}) { $synccmd .= " $mbuffercmd $args{'source-bwlimit'} $mbufferoptions |"; } if (length $directconnect) { # try 10 times over 10 seconds to connect - $synccmd .= " socat - TCP:" . $directconnect . ",retry=10,interval=1 |"; + $synccmd .= " $socatcmd - TCP:" . $directconnect . 
",retry=10,interval=1 |"; } $synccmd .= " $sshcmd $targethost "; @@ -1302,7 +1314,7 @@ sub buildsynccmd { if ($avail{'sourcembuffer'}) { $remotecmd .= " | $mbuffercmd $args{'source-bwlimit'} $mbufferoptions"; } if (length $directconnect) { # try 10 times over 10 seconds to connect - $remotecmd .= " | socat - TCP:" . $directconnect . ",retry=10,interval=1"; + $remotecmd .= " | $socatcmd - TCP:" . $directconnect . ",retry=10,interval=1"; } $synccmd = "$sshcmd $sourcehost " . escapeshellparam($remotecmd); From fcae918ffafb87f67aa5fa15f13193cb8709e184 Mon Sep 17 00:00:00 2001 From: Christoph Klaffl Date: Tue, 31 Mar 2020 09:40:36 +0200 Subject: [PATCH 006/130] updated readme regarding busybox nc -> mbuffer switch --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index 2afe3b0..7a3db82 100644 --- a/README.md +++ b/README.md @@ -265,7 +265,7 @@ As of 1.4.18, syncoid also automatically supports and enables resume of interrup + --insecure-direct-connection=IP:PORT[,IP:PORT] WARNING: This is an insecure option as the data is not encrypted while being sent over the network. Only use if you trust the complete network path. - Use a direct tcp connection (with socat and busybox nc) for the actual zfs send/recv stream. All control commands are still executed via the ssh connection. The first address pair is used for connecting to the target host from the source host and the second pair is for listening on the target host. If the later isn't provided the same as the former is used. This can be used for saturating high throughput connection like >= 10GBe network which isn't easy with the overhead off ssh. It can also be useful for encrypted datasets to lower the cpu usage needed for replication but be aware that metadata is NOT ENCRYPTED in this case. (This option can't be used for relaying between two remote hosts) + Use a direct tcp connection (with socat and mbuffer) for the actual zfs send/recv stream. All control commands are still executed via the ssh connection. The first address pair is used for connecting to the target host from the source host and the second pair is for listening on the target host. If the later isn't provided the same as the former is used. This can be used for saturating high throughput connection like >= 10GBe network which isn't easy with the overhead off ssh. It can also be useful for encrypted datasets to lower the cpu usage needed for replication but be aware that metadata is NOT ENCRYPTED in this case. (This option can't be used for relaying between two remote hosts) + --quiet From e1a6507455f1e422a22a79be0c05543ba6205d26 Mon Sep 17 00:00:00 2001 From: Christoph Klaffl Date: Thu, 2 Apr 2020 21:37:06 +0200 Subject: [PATCH 007/130] direct connection will default to busybox nc again but can be switched to mbuffer --- README.md | 4 ++-- syncoid | 66 +++++++++++++++++++++++++++++++++++-------------------- 2 files changed, 44 insertions(+), 26 deletions(-) diff --git a/README.md b/README.md index 7a3db82..58c5a1a 100644 --- a/README.md +++ b/README.md @@ -262,10 +262,10 @@ As of 1.4.18, syncoid also automatically supports and enables resume of interrup Use specified identity file as per ssh -i. -+ --insecure-direct-connection=IP:PORT[,IP:PORT] ++ --insecure-direct-connection=IP:PORT[,IP:PORT,[TIMEOUT,[mbuffer]]] WARNING: This is an insecure option as the data is not encrypted while being sent over the network. Only use if you trust the complete network path. 
- Use a direct tcp connection (with socat and mbuffer) for the actual zfs send/recv stream. All control commands are still executed via the ssh connection. The first address pair is used for connecting to the target host from the source host and the second pair is for listening on the target host. If the later isn't provided the same as the former is used. This can be used for saturating high throughput connection like >= 10GBe network which isn't easy with the overhead off ssh. It can also be useful for encrypted datasets to lower the cpu usage needed for replication but be aware that metadata is NOT ENCRYPTED in this case. (This option can't be used for relaying between two remote hosts)
+ Use a direct tcp connection (with socat and busybox nc/mbuffer) for the actual zfs send/recv stream. All control commands are still executed via the ssh connection. The first address pair is used for connecting to the target host from the source host and the second pair is for listening on the target host. If the latter isn't provided, the same as the former is used. This can be used for saturating a high-throughput connection like a >= 10GbE network, which isn't easy with the overhead of ssh. It can also be useful for encrypted datasets to lower the CPU usage needed for replication, but be aware that metadata is NOT ENCRYPTED in this case. The default timeout is 60 seconds and can be overridden by providing it as the third argument. By default busybox nc is used for the listening tcp socket; if mbuffer is preferred, specify its name as the fourth argument, but be aware that mbuffer listens on all interfaces and uses an optionally provided IP address for access restriction. (This option can't be used for relaying between two remote hosts)

 + --quiet

diff --git a/syncoid b/syncoid
index 1f3de38..caaba44 100755
--- a/syncoid
+++ b/syncoid
@@ -124,14 +124,14 @@ my ($targethost,$targetfs,$targetisroot) = getssh($rawtargetfs);
 my $sourcesudocmd = $sourceisroot ? '' : $sudocmd;
 my $targetsudocmd = $targetisroot ? '' : $sudocmd;

-# figure out whether compression, mbuffering, pv
-# are available on source, target, local machines.
-# warn user of anything missing, then continue with sync.
-my %avail = checkcommands();
+if (!defined $sourcehost) { $sourcehost = ''; }
+if (!defined $targethost) { $targethost = ''; }

 # handle insecure direct connection arguments
 my $directconnect = "";
 my $directlisten = "";
+my $directtimeout = 60;
+my $directmbuffer = 0;

 if (length $args{'insecure-direct-connection'}) {
     if ($sourcehost ne '' && $targethost ne '') {
@@ -141,19 +141,34 @@ if (length $args{'insecure-direct-connection'}) {
     }

     my @parts = split(',', $args{'insecure-direct-connection'});
-    if (scalar @parts > 2) {
+    if (scalar @parts > 4) {
         print("CRITICAL: invalid insecure-direct-connection argument!\n");
         pod2usage(2);
         exit 127;
-    } elsif (scalar @parts == 2) {
+    } elsif (scalar @parts >= 2) {
         $directconnect = $parts[0];
         $directlisten = $parts[1];
     } else {
         $directconnect = $args{'insecure-direct-connection'};
         $directlisten = $args{'insecure-direct-connection'};
     }
+
+    if (scalar @parts == 3) {
+        $directtimeout = $parts[2];
+    }
+
+    if (scalar @parts == 4) {
+        if ($parts[3] eq "mbuffer") {
+            $directmbuffer = 1;
+        }
+    }
 }

+# figure out whether compression, mbuffering, pv
+# are available on source, target, local machines.
+# warn user of anything missing, then continue with sync.
+my %avail = checkcommands(); + my %snaps; my $exitcode = 0; @@ -965,9 +980,6 @@ sub checkcommands { return %avail; } - if (!defined $sourcehost) { $sourcehost = ''; } - if (!defined $targethost) { $targethost = ''; } - if ($sourcehost ne '') { $sourcessh = "$sshcmd $sourcehost"; } else { $sourcessh = ''; } if ($targethost ne '') { $targetssh = "$sshcmd $targethost"; } else { $targetssh = ''; } @@ -1039,6 +1051,14 @@ sub checkcommands { if ($socatAvailable eq '') { die "CRIT: $socatcmd is needed on source for insecure direct connection!\n"; } + + if (!$directmbuffer) { + if ($debug) { print "DEBUG: checking availability of busybox (for nc) on target...\n"; } + my $busyboxAvailable = `$targetssh $checkcmd busybox 2>/dev/null`; + if ($busyboxAvailable eq '') { + die "CRIT: busybox is needed on target for insecure direct connection!\n"; + } + } } if ($debug) { print "DEBUG: checking availability of $mbuffercmd on source...\n"; } @@ -1053,7 +1073,7 @@ sub checkcommands { if ($debug) { print "DEBUG: checking availability of $mbuffercmd on target...\n"; } $avail{'targetmbuffer'} = `$targetssh $checkcmd $mbuffercmd 2>/dev/null`; if ($avail{'targetmbuffer'} eq '') { - if (length $args{'insecure-direct-connection'}) { + if ($directmbuffer) { die "CRIT: $mbuffercmd is needed on target for insecure direct connection!\n"; } if (!$quiet) { print "WARN: $mbuffercmd not available on target $t - sync will continue without target buffering.\n"; } @@ -1288,19 +1308,18 @@ sub buildsynccmd { if ($avail{'compress'}) { $synccmd .= " $compressargs{'cmd'} |"; } if ($avail{'sourcembuffer'}) { $synccmd .= " $mbuffercmd $args{'source-bwlimit'} $mbufferoptions |"; } if (length $directconnect) { - # try 10 times over 10 seconds to connect - $synccmd .= " $socatcmd - TCP:" . $directconnect . ",retry=10,interval=1 |"; + $synccmd .= " $socatcmd - TCP:" . $directconnect . ",retry=$directtimeout,interval=1 |"; } $synccmd .= " $sshcmd $targethost "; my $remotecmd = ""; - if (length $directlisten) { - # wait up to 10 seconds for a connection or error out - $remotecmd .= " $mbuffercmd $args{'target -bwlimit'} -W 10 -I " . $directlisten . " $mbufferoptions |"; - } else { - if ($avail{'targetmbuffer'}) { $remotecmd .= " $mbuffercmd $args{'target-bwlimit'} $mbufferoptions |"; } + if ($directmbuffer) { + $remotecmd .= " $mbuffercmd $args{'target -bwlimit'} -W $directtimeout -I " . $directlisten . " $mbufferoptions |"; + } elsif (length $directlisten) { + $remotecmd .= " busybox nc -l " . $directlisten . " -w $directtimeout |"; } + if ($avail{'targetmbuffer'} && !$directmbuffer) { $remotecmd .= " $mbuffercmd $args{'target-bwlimit'} $mbufferoptions |"; } if ($avail{'compress'}) { $remotecmd .= " $compressargs{'decomcmd'} |"; } $remotecmd .= " $recvcmd"; @@ -1313,19 +1332,18 @@ sub buildsynccmd { if ($avail{'compress'}) { $remotecmd .= " | $compressargs{'cmd'}"; } if ($avail{'sourcembuffer'}) { $remotecmd .= " | $mbuffercmd $args{'source-bwlimit'} $mbufferoptions"; } if (length $directconnect) { - # try 10 times over 10 seconds to connect - $remotecmd .= " | $socatcmd - TCP:" . $directconnect . ",retry=10,interval=1"; + $remotecmd .= " | $socatcmd - TCP:" . $directconnect . ",retry=$directtimeout,interval=1"; } $synccmd = "$sshcmd $sourcehost " . escapeshellparam($remotecmd); $synccmd .= " | "; - if (length $directlisten) { - # wait up to 10 seconds for a connection or error out - $synccmd .= "$mbuffercmd $args{'target-bwlimit'} -W 10 -I " . $directlisten . 
" $mbufferoptions | "; - } else { - if ($avail{'targetmbuffer'}) { $synccmd .= "$mbuffercmd $args{'target-bwlimit'} $mbufferoptions | "; } + if ($directmbuffer) { + $synccmd .= "$mbuffercmd $args{'target-bwlimit'} -W $directtimeout -I " . $directlisten . " $mbufferoptions | "; + } elsif (length $directlisten) { + $synccmd .= " busybox nc -l " . $directlisten . " -w $directtimeout | "; } + if ($avail{'targetmbuffer'} && !$directmbuffer) { $synccmd .= "$mbuffercmd $args{'target-bwlimit'} $mbufferoptions | "; } if ($avail{'compress'}) { $synccmd .= "$compressargs{'decomcmd'} | "; } if ($avail{'localpv'} && !$quiet) { $synccmd .= "$pvcmd $pvoptions -s $pvsize | "; } $synccmd .= "$recvcmd"; From 6bc210deb3bcffd831b73136d94e3b8d5a75fd24 Mon Sep 17 00:00:00 2001 From: James Bunton Date: Sun, 3 May 2020 23:33:51 +1000 Subject: [PATCH 008/130] fix stream sync from bookmark syncoid fails in the following case: - performing a stream sync (default) - there are no matching snapshots between source and target - there is a bookmark on the source matching a snapshot on the target - there is an unrelated snapshot on the source older than this bookmark This happens because the units for snapshots and bookmarks were different. syncoid tells `zfs send` to generate an incremental stream starting from the bookmark and going to the unrelated older snapshot. ZFS refuses to do this because it doesn't make sense. This fix is a bit of hack. Ideally we should share the counter code between getsnaps and getbookmarks. --- syncoid | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/syncoid b/syncoid index 2eef326..1d7150a 100755 --- a/syncoid +++ b/syncoid @@ -1664,7 +1664,7 @@ sub getbookmarks() { $creation =~ s/^.*\tcreation\t*(\d*).*/$1/; my $bookmark = $line; $bookmark =~ s/^.*\#(.*)\tcreation.*$/$1/; - $bookmarks{$lastguid}{'creation'}=$creation; + $bookmarks{$lastguid}{'creation'}=$creation . "000"; } } From 5092ff10001a3868d5a2e16a8e84728d5168efb9 Mon Sep 17 00:00:00 2001 From: Jim Perkins Date: Wed, 5 Aug 2020 11:37:48 -0500 Subject: [PATCH 009/130] typo 'target -bwlimit' Use of uninitialized value in concatenation (.) or string at ./syncoid line 1317. just an asuumption on my part considering the rest of the variables with same name dont have the space in that location --- syncoid | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/syncoid b/syncoid index caaba44..ec40733 100755 --- a/syncoid +++ b/syncoid @@ -1314,7 +1314,7 @@ sub buildsynccmd { my $remotecmd = ""; if ($directmbuffer) { - $remotecmd .= " $mbuffercmd $args{'target -bwlimit'} -W $directtimeout -I " . $directlisten . " $mbufferoptions |"; + $remotecmd .= " $mbuffercmd $args{'target-bwlimit'} -W $directtimeout -I " . $directlisten . " $mbufferoptions |"; } elsif (length $directlisten) { $remotecmd .= " busybox nc -l " . $directlisten . 
" -w $directtimeout |"; } From d7bf12623d0d16713c5b5a7d42065e752e3c4753 Mon Sep 17 00:00:00 2001 From: Leo Winter Date: Tue, 6 Oct 2020 13:48:34 +0000 Subject: [PATCH 010/130] Add git to apt install on Debian --- INSTALL.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/INSTALL.md b/INSTALL.md index 57f225b..dcc7e34 100644 --- a/INSTALL.md +++ b/INSTALL.md @@ -21,7 +21,7 @@ Install prerequisite software: ```bash -apt install debhelper libcapture-tiny-perl libconfig-inifiles-perl pv lzop mbuffer +apt install debhelper libcapture-tiny-perl libconfig-inifiles-perl pv lzop mbuffer git ``` From 309c0866fa8dc7793846e607015cc627dcd98350 Mon Sep 17 00:00:00 2001 From: Christoph Klaffl Date: Mon, 7 Dec 2020 22:40:28 +0100 Subject: [PATCH 011/130] implemented removal of conflicting snapshots with force-delete option --- README.md | 4 +- syncoid | 32 ++++++++++++- tests/syncoid/8_force_delete_snapshot/run.sh | 48 ++++++++++++++++++++ 3 files changed, 81 insertions(+), 3 deletions(-) create mode 100755 tests/syncoid/8_force_delete_snapshot/run.sh diff --git a/README.md b/README.md index b4558c0..5a052e9 100644 --- a/README.md +++ b/README.md @@ -336,11 +336,11 @@ As of 1.4.18, syncoid also automatically supports and enables resume of interrup + --force-delete - Remove target datasets recursively (WARNING: this will also affect child datasets with matching snapshots/bookmarks), if there are no matching snapshots/bookmarks. + Remove target datasets recursively (WARNING: this will also affect child datasets with matching snapshots/bookmarks), if there are no matching snapshots/bookmarks. Also removes conflicting snapshots if the replication would fail because of a snapshot which has the same name between source and target but different contents. + --no-clone-handling - This argument tells syncoid to not recreate clones on the targe on initial sync and doing a normal replication instead. + This argument tells syncoid to not recreate clones on the target on initial sync and doing a normal replication instead. 
+ --dumpsnaps diff --git a/syncoid b/syncoid index b771d8f..7d8329b 100755 --- a/syncoid +++ b/syncoid @@ -813,6 +813,36 @@ sub syncdataset { if ($exitcode < 2) { $exitcode = 2; } return 0; } + } elsif ($args{'force-delete'} && $stdout =~ /\Qdestination already exists\E/) { + (my $existing) = $stdout =~ m/^cannot restore to ([^:]*): destination already exists$/g; + if ($existing eq "") { + warn "CRITICAL ERROR: $synccmd failed: $?"; + if ($exitcode < 2) { $exitcode = 2; } + return 0; + } + + if (!$quiet) { print "WARN: removing existing destination: $existing\n"; } + my $rcommand = ''; + my $mysudocmd = ''; + my $existingescaped = escapeshellparam($existing); + + if ($targethost ne '') { $rcommand = "$sshcmd $targethost"; } + if (!$targetisroot) { $mysudocmd = $sudocmd; } + + my $prunecmd = "$mysudocmd $zfscmd destroy $existingescaped; "; + if ($targethost ne '') { + $prunecmd = escapeshellparam($prunecmd); + } + + my $ret = system("$rcommand $prunecmd"); + if ($ret != 0) { + warn "CRITICAL ERROR: $rcommand $prunecmd failed: $?"; + if ($exitcode < 2) { $exitcode = 2; } + return 0; + } else { + # redo sync and skip snapshot creation (already taken) + return syncdataset($sourcehost, $sourcefs, $targethost, $targetfs, undef, 1); + } } else { warn "CRITICAL ERROR: $synccmd failed: $?"; if ($exitcode < 2) { $exitcode = 2; } @@ -1994,4 +2024,4 @@ Options: --no-clone-handling Don't try to recreate clones on target --no-privilege-elevation Bypass the root check, for use with ZFS permission delegation - --force-delete Remove target datasets recursively, if there are no matching snapshots/bookmarks + --force-delete Remove target datasets recursively, if there are no matching snapshots/bookmarks (also overwrites conflicting named snapshots) diff --git a/tests/syncoid/8_force_delete_snapshot/run.sh b/tests/syncoid/8_force_delete_snapshot/run.sh new file mode 100755 index 0000000..899092a --- /dev/null +++ b/tests/syncoid/8_force_delete_snapshot/run.sh @@ -0,0 +1,48 @@ +#!/bin/bash + +# test replication with deletion of conflicting snapshot on target + +set -x +set -e + +. 
../../common/lib.sh

POOL_IMAGE="/tmp/syncoid-test-8.zpool"
POOL_SIZE="200M"
POOL_NAME="syncoid-test-8"
TARGET_CHECKSUM="ee439200c9fa54fc33ce301ef64d4240a6c5587766bfeb651c5cf358e11ec89d  -"

truncate -s "${POOL_SIZE}" "${POOL_IMAGE}"

zpool create -m none -f "${POOL_NAME}" "${POOL_IMAGE}"

function cleanUp {
  zpool export "${POOL_NAME}"
}

# export pool in any case
trap cleanUp EXIT

zfs create "${POOL_NAME}"/src
zfs snapshot "${POOL_NAME}"/src@duplicate

# initial replication
../../../syncoid -r --debug --compress=none "${POOL_NAME}"/src "${POOL_NAME}"/dst
# recreate snapshot with the same name on src
zfs destroy "${POOL_NAME}"/src@duplicate
zfs snapshot "${POOL_NAME}"/src@duplicate
sleep 1
../../../syncoid -r --force-delete --debug --compress=none "${POOL_NAME}"/src "${POOL_NAME}"/dst || exit 1

# verify
output1=$(zfs list -t snapshot -r -H -o guid,name "${POOL_NAME}"/src | sed 's/@syncoid_.*$'/@syncoid_/)
checksum1=$(echo "${output1}" | shasum -a 256)

output2=$(zfs list -t snapshot -r -H -o guid,name "${POOL_NAME}"/dst | sed 's/@syncoid_.*$'/@syncoid_/ | sed 's/dst/src/')
checksum2=$(echo "${output2}" | shasum -a 256)

if [ "${checksum1}" != "${checksum2}" ]; then
  exit 1
fi

exit 0

From 316b01e9c2e0cc92a826178dbd39136f0d54dded Mon Sep 17 00:00:00 2001
From: Christoph Klaffl 
Date: Wed, 9 Dec 2020 09:46:54 +0100
Subject: [PATCH 012/130] fix duplicate key definitions by only using the first occurrence and printing a warning

---
 sanoid | 7 +++++++
 1 file changed, 7 insertions(+)

diff --git a/sanoid b/sanoid
index 041445c..56d11bc 100755
--- a/sanoid
+++ b/sanoid
@@ -881,6 +881,13 @@ sub init {
             if (! defined ($defaults{'template_default'}{$key})) {
                 die "FATAL ERROR: I don't understand the setting $key you've set in \[$section\] in $conf_file.\n";
             }
+
+            # in case of duplicate lines we will end up with an array of all values
+            my $value = $ini{$section}{$key};
+            if (ref($value) eq 'ARRAY') {
+                warn "duplicate key '$key' in section '$section', using the value from the first occurrence and ignoring the others.\n";
+                $ini{$section}{$key} = $value->[0];
+            }
         }

         if ($section =~ /^template_/) { next; } # don't process templates directly

From 48eefd21e85dfdf8f41193295ee3175099c199c2 Mon Sep 17 00:00:00 2001
From: Christoph Klaffl 
Date: Mon, 14 Dec 2020 20:57:34 +0100
Subject: [PATCH 013/130] don't try to parse stats for spares which are in use as there are none

---
 sanoid | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/sanoid b/sanoid
index 041445c..1972bda 100755
--- a/sanoid
+++ b/sanoid
@@ -1239,7 +1239,7 @@ sub check_zpool() {
       ## no display for verbose level 1
       next if ($verbose==1);
       ## don't display working devices for verbose level 2
-      if ($verbose==2 && ($state eq "OK" || $sta eq "ONLINE" || $sta eq "AVAIL" || $sta eq "INUSE")) {
+      if ($verbose==2 && ($state eq "OK" || $sta eq "ONLINE" || $sta eq "AVAIL")) {

           # check for io/checksum errors
           my @vdeverr = ();

From 3bae6baf3d68efcc451d2db366c4692c854b3677 Mon Sep 17 00:00:00 2001
From: rbike <46862457+rbike@users.noreply.github.com>
Date: Tue, 2 Mar 2021 16:03:16 +0100
Subject: [PATCH 014/130] syncoid hold

Added hold feature to syncoid. Parameter "--use-hold" sets a hold for newsyncsnap and removes the hold from matchingsnap both on source and target. 
Hold name is "syncoid" + identiifer + host name --- syncoid | 41 +++++++++++++++++++++++++++++++++++++---- 1 file changed, 37 insertions(+), 4 deletions(-) diff --git a/syncoid b/syncoid index b771d8f..22d1c65 100755 --- a/syncoid +++ b/syncoid @@ -3,6 +3,9 @@ # this software is licensed for use under the Free Software Foundation's GPL v3.0 license, as retrieved # from http://www.gnu.org/licenses/gpl-3.0.html on 2014-11-17. A copy should also be available in this # project's Git repository at https://github.com/jimsalterjrs/sanoid/blob/master/LICENSE. +# +# 2021-03-ß2: redbike +# support f0r zfs holds added $::VERSION = '2.0.3'; @@ -25,7 +28,7 @@ GetOptions(\%args, "no-command-checks", "monitor-version", "compress=s", "dumpsn "source-bwlimit=s", "target-bwlimit=s", "sshkey=s", "sshport=i", "sshcipher|c=s", "sshoption|o=s@", "debug", "quiet", "no-stream", "no-sync-snap", "no-resume", "exclude=s@", "skip-parent", "identifier=s", "no-clone-handling", "no-privilege-elevation", "force-delete", "no-clone-rollback", "no-rollback", - "create-bookmark", "pv-options=s" => \$pvoptions, "keep-sync-snap", "preserve-recordsize", + "create-bookmark", "use-hold", "pv-options=s" => \$pvoptions, "keep-sync-snap", "preserve-recordsize", "mbuffer-size=s" => \$mbuffer_size) or pod2usage(2); my %compressargs = %{compressargset($args{'compress'} || 'default')}; # Can't be done with GetOptions arg, as default still needs to be set @@ -355,6 +358,7 @@ sub syncdataset { } my $newsyncsnap; + my $matchingsnap; # skip snapshot checking/creation in case of resumed receive if (!defined($receivetoken)) { @@ -602,7 +606,7 @@ sub syncdataset { my $bookmark = 0; my $bookmarkcreation = 0; - my $matchingsnap = getmatchingsnapshot($sourcefs, $targetfs, \%snaps); + $matchingsnap = getmatchingsnapshot($sourcefs, $targetfs, \%snaps); if (! $matchingsnap) { # no matching snapshots, check for bookmarks as fallback my %bookmarks = getbookmarks($sourcehost,$sourcefs,$sourceisroot); @@ -827,7 +831,36 @@ sub syncdataset { #setzfsvalue($targethost,$targetfs,$targetisroot,'readonly',$originaltargetreadonly); } } - +# if "--use-hold" parameter is used set hold on newsync snapshot and remove hold on matching snapshot both on source and target + # hold name: "syncoid" + identifier + hostname -> in case of replication to multiple targets separate holds can be set for each target by assinging different identifiers to each target. Only if all targets have been replicated all syncoid holds are removed from the matching snapshot and it can be removed + if (defined $args{'use-hold'}) { + my $holdcmd; + my $holdreleasecmd; + my $hostid = hostname(); + my $matchingsnapescaped = escapeshellparam($matchingsnap); + my $holdname = "syncoid\_$identifier$hostid"; + if ($sourcehost ne '') { + $holdcmd = "$sshcmd $sourcehost " . escapeshellparam("$sourcesudocmd $zfscmd hold $holdname $sourcefsescaped\@$newsyncsnapescaped"); + $holdreleasecmd = "$sshcmd $sourcehost " . 
escapeshellparam("$sourcesudocmd $zfscmd release $holdname $sourcefsescaped\@$matchingsnapescaped"); + } else { + $holdcmd = "$sourcesudocmd $zfscmd hold $holdname $sourcefsescaped\@$newsyncsnapescaped"; + $holdreleasecmd = "$sourcesudocmd $zfscmd release $holdname $sourcefsescaped\@$matchingsnapescaped"; + }; + if ($debug) { print "DEBUG: Set new hold on source: $holdcmd\n"; } + system($holdcmd) == 0 or warn "WARNING: $holdcmd failed: $?"; + if ($debug) { print "DEBUG: Release old hold on source: $holdreleasecmd\n"; } + system($holdreleasecmd) == 0 or warn "WARNING: $holdreleasecmd failed: $?"; + if ($targethost ne '') { + $holdcmd = "$sshcmd $targethost " . escapeshellparam("$targetsudocmd $zfscmd hold $holdname $targetfsescaped\@$newsyncsnapescaped"); + $holdreleasecmd = "$sshcmd $targethost " . escapeshellparam("$targetsudocmd $zfscmd release $holdname $targetfsescaped\@$matchingsnapescaped"); + } else { + $holdcmd = "$targetsudocmd $zfscmd hold $holdname $targetfsescaped\@$newsyncsnapescaped"; $holdreleasecmd = "$targetsudocmd $zfscmd release $holdname $targetfsescaped\@$matchingsnapescaped"; + }; + if ($debug) { print "DEBUG: Set new hold on target: $holdcmd\n"; } + system($holdcmd) == 0 or warn "WARNING: $holdcmd failed: $?"; + if ($debug) { print "DEBUG: Release old hold on target: $holdreleasecmd\n"; } + system($holdreleasecmd) == 0 or warn "WARNING: $holdreleasecmd failed: $?"; + } if (defined $args{'no-sync-snap'}) { if (defined $args{'create-bookmark'}) { my $bookmarkcmd; @@ -1325,7 +1358,7 @@ sub buildsynccmd { sub pruneoldsyncsnaps { my ($rhost,$fs,$newsyncsnap,$isroot,@snaps) = @_; - my $fsescaped = escapeshellparam($fs); + my $fsescaped = escapeshellparam($fs); if ($rhost ne '') { $rhost = "$sshcmd $rhost"; } From 2858fbe907680f04f0cd449bf676a2b33e13343b Mon Sep 17 00:00:00 2001 From: rbike <46862457+rbike@users.noreply.github.com> Date: Wed, 3 Mar 2021 13:49:13 +0100 Subject: [PATCH 015/130] Docs update + error handling updated in-program docs and readme, check if matchingsnap exists before releasing hold --- README.md | 5 ++++- syncoid | 23 +++++++++++++++-------- 2 files changed, 19 insertions(+), 9 deletions(-) diff --git a/README.md b/README.md index b4558c0..3048b7f 100644 --- a/README.md +++ b/README.md @@ -272,7 +272,7 @@ As of 1.4.18, syncoid also automatically supports and enables resume of interrup + --identifier= - Adds the given identifier to the snapshot name after "syncoid_" prefix and before the hostname. This enables the use case of reliable replication to multiple targets from the same host. The following chars are allowed: a-z, A-Z, 0-9, _, -, : and . . + Adds the given identifier to the snapshot and hold name after "syncoid_" prefix and before the hostname. This enables the use case of reliable replication to multiple targets from the same host. The following chars are allowed: a-z, A-Z, 0-9, _, -, : and . . + -r --recursive @@ -314,6 +314,9 @@ As of 1.4.18, syncoid also automatically supports and enables resume of interrup This argument tells syncoid to create a zfs bookmark for the newest snapshot after it got replicated successfully. The bookmark name will be equal to the snapshot name. Only works in combination with the --no-sync-snap option. This can be very useful for irregular replication where the last matching snapshot on the source was already deleted but the bookmark remains so a replication is still possible. 
+ --use-hold

+  This argument tells syncoid to add a hold to the newest snapshot on the source and target after replication succeeds and to remove the hold after the next successful replication. Setting a hold prevents the snapshots from being destroyed. The hold name includes the identifier if set. This allows for separate holds in case of replication to multiple targets.
+
 + --preserve-recordsize

   This argument tells syncoid to set the recordsize on the target before writing any data to it matching the one set on the replication src. This only applies to initial sends.

diff --git a/syncoid b/syncoid
index 22d1c65..84510fb 100755
--- a/syncoid
+++ b/syncoid
@@ -4,8 +4,8 @@
 # from http://www.gnu.org/licenses/gpl-3.0.html on 2014-11-17. A copy should also be available in this
 # project's Git repository at https://github.com/jimsalterjrs/sanoid/blob/master/LICENSE.
 #
-# 2021-03-ß2: redbike
-# support f0r zfs holds added
+# 2021-03-03: redbike
+# support for zfs holds added

 $::VERSION = '2.0.3';

@@ -845,21 +845,27 @@ sub syncdataset {
         } else {
             $holdcmd = "$sourcesudocmd $zfscmd hold $holdname $sourcefsescaped\@$newsyncsnapescaped";
             $holdreleasecmd = "$sourcesudocmd $zfscmd release $holdname $sourcefsescaped\@$matchingsnapescaped";
-        };
+        }
         if ($debug) { print "DEBUG: Set new hold on source: $holdcmd\n"; }
         system($holdcmd) == 0 or warn "WARNING: $holdcmd failed: $?";
-        if ($debug) { print "DEBUG: Release old hold on source: $holdreleasecmd\n"; }
-        system($holdreleasecmd) == 0 or warn "WARNING: $holdreleasecmd failed: $?";
+        # Do hold release only if matchingsnap exists
+        if ($matchingsnap) {
+            if ($debug) { print "DEBUG: Release old hold on source: $holdreleasecmd\n"; }
+            system($holdreleasecmd) == 0 or warn "WARNING: $holdreleasecmd failed: $?";
+        }
         if ($targethost ne '') {
             $holdcmd = "$sshcmd $targethost " . escapeshellparam("$targetsudocmd $zfscmd hold $holdname $targetfsescaped\@$newsyncsnapescaped");
             $holdreleasecmd = "$sshcmd $targethost " . escapeshellparam("$targetsudocmd $zfscmd release $holdname $targetfsescaped\@$matchingsnapescaped");
         } else {
             $holdcmd = "$targetsudocmd $zfscmd hold $holdname $targetfsescaped\@$newsyncsnapescaped"; $holdreleasecmd = "$targetsudocmd $zfscmd release $holdname $targetfsescaped\@$matchingsnapescaped";
-        };
+        }
         if ($debug) { print "DEBUG: Set new hold on target: $holdcmd\n"; }
         system($holdcmd) == 0 or warn "WARNING: $holdcmd failed: $?";
-        if ($debug) { print "DEBUG: Release old hold on target: $holdreleasecmd\n"; }
-        system($holdreleasecmd) == 0 or warn "WARNING: $holdreleasecmd failed: $?";
+        # Do hold release only if matchingsnap exists
+        if ($matchingsnap) {
+            if ($debug) { print "DEBUG: Release old hold on target: $holdreleasecmd\n"; }
+            system($holdreleasecmd) == 0 or warn "WARNING: $holdreleasecmd failed: $?";
+        }
     }
     if (defined $args{'no-sync-snap'}) {
         if (defined $args{'create-bookmark'}) {
             my $bookmarkcmd;
@@ -2005,6 +2011,7 @@ Options:
     --no-sync-snap        Does not create new snapshot, only transfers existing
     --keep-sync-snap      Don't destroy created sync snapshots
     --create-bookmark     Creates a zfs bookmark for the newest snapshot on the source after replication succeeds (only works with --no-sync-snap)
+    --use-hold            Adds a hold to the newest snapshot on the source and target after replication succeeds and removes the hold after the next successful replication. The hold name includes the identifier if set. 
This allows for separate holds in case of multiple targets --preserve-recordsize Preserves the recordsize on initial sends to the target --no-clone-rollback Does not rollback clones on target --no-rollback Does not rollback clones or snapshots on target (it probably requires a readonly target) From 21eee41fdb20a6bc3b2068c18a3061723f52435a Mon Sep 17 00:00:00 2001 From: rbike <46862457+rbike@users.noreply.github.com> Date: Thu, 1 Apr 2021 09:23:48 +0200 Subject: [PATCH 016/130] Final revision comments from @phreaker0 implemented --- syncoid | 7 ++----- 1 file changed, 2 insertions(+), 5 deletions(-) diff --git a/syncoid b/syncoid index 84510fb..a3708c6 100755 --- a/syncoid +++ b/syncoid @@ -3,9 +3,6 @@ # this software is licensed for use under the Free Software Foundation's GPL v3.0 license, as retrieved # from http://www.gnu.org/licenses/gpl-3.0.html on 2014-11-17. A copy should also be available in this # project's Git repository at https://github.com/jimsalterjrs/sanoid/blob/master/LICENSE. -# -# 2021-03-03: redbike -# support for zfs holds added $::VERSION = '2.0.3'; @@ -831,7 +828,7 @@ sub syncdataset { #setzfsvalue($targethost,$targetfs,$targetisroot,'readonly',$originaltargetreadonly); } } -# if "--use-hold" parameter is used set hold on newsync snapshot and remove hold on matching snapshot both on source and target + # if "--use-hold" parameter is used set hold on newsync snapshot and remove hold on matching snapshot both on source and target # hold name: "syncoid" + identifier + hostname -> in case of replication to multiple targets separate holds can be set for each target by assinging different identifiers to each target. Only if all targets have been replicated all syncoid holds are removed from the matching snapshot and it can be removed if (defined $args{'use-hold'}) { my $holdcmd; @@ -1364,7 +1361,7 @@ sub buildsynccmd { sub pruneoldsyncsnaps { my ($rhost,$fs,$newsyncsnap,$isroot,@snaps) = @_; - my $fsescaped = escapeshellparam($fs); + my $fsescaped = escapeshellparam($fs); if ($rhost ne '') { $rhost = "$sshcmd $rhost"; } From c151703837ba618944fb3779dccaa43d2d869d42 Mon Sep 17 00:00:00 2001 From: "git@end.re" Date: Thu, 8 Apr 2021 15:01:45 +0200 Subject: [PATCH 017/130] Implementing support for ssh_config(5) files --- syncoid | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) diff --git a/syncoid b/syncoid index 3f112de..c0f98e8 100755 --- a/syncoid +++ b/syncoid @@ -20,9 +20,9 @@ my $pvoptions = "-p -t -e -r -b"; # Blank defaults to use ssh client's default # TODO: Merge into a single "sshflags" option? 
-my %args = ('sshkey' => '', 'sshport' => '', 'sshcipher' => '', 'sshoption' => [], 'target-bwlimit' => '', 'source-bwlimit' => ''); +my %args = ('sshconfig' => '', 'sshkey' => '', 'sshport' => '', 'sshcipher' => '', 'sshoption' => [], 'target-bwlimit' => '', 'source-bwlimit' => ''); GetOptions(\%args, "no-command-checks", "monitor-version", "compress=s", "dumpsnaps", "recursive|r", "sendoptions=s", "recvoptions=s", - "source-bwlimit=s", "target-bwlimit=s", "sshkey=s", "sshport=i", "sshcipher|c=s", "sshoption|o=s@", + "source-bwlimit=s", "target-bwlimit=s", "sshconfig=s", "sshkey=s", "sshport=i", "sshcipher|c=s", "sshoption|o=s@", "debug", "quiet", "no-stream", "no-sync-snap", "no-resume", "exclude=s@", "skip-parent", "identifier=s", "no-clone-handling", "no-privilege-elevation", "force-delete", "no-clone-rollback", "no-rollback", "create-bookmark", "pv-options=s" => \$pvoptions, "keep-sync-snap", "preserve-recordsize", @@ -108,6 +108,9 @@ if (length $args{'sshcipher'}) { if (length $args{'sshport'}) { $args{'sshport'} = "-p $args{'sshport'}"; } +if (length $args{'sshconfig'}) { + $args{'sshconfig'} = "-F $args{'sshconfig'}"; +} if (length $args{'sshkey'}) { $args{'sshkey'} = "-i $args{'sshkey'}"; } @@ -125,7 +128,7 @@ if (length $args{'identifier'}) { } # figure out if source and/or target are remote. -$sshcmd = "$sshcmd $args{'sshcipher'} $sshoptions $args{'sshport'} $args{'sshkey'}"; +$sshcmd = "$sshcmd $args{'sshconfig'} $args{'sshcipher'} $sshoptions $args{'sshport'} $args{'sshkey'}"; if ($debug) { print "DEBUG: SSHCMD: $sshcmd\n"; } my ($sourcehost,$sourcefs,$sourceisroot) = getssh($rawsourcefs); my ($targethost,$targetfs,$targetisroot) = getssh($rawtargetfs); @@ -1978,6 +1981,7 @@ Options: --exclude=REGEX Exclude specific datasets which match the given regular expression. Can be specified multiple times --sendoptions=OPTIONS Use advanced options for zfs send (the arguments are filtered as needed), e.g. syncoid --sendoptions="Lc e" sets zfs send -L -c -e ... --recvoptions=OPTIONS Use advanced options for zfs receive (the arguments are filtered as needed), e.g. syncoid --recvoptions="ux recordsize o compression=lz4" sets zfs receive -u -x recordsize -o compression=lz4 ... + --sshconfig=FILE Specifies an ssh_config(5) file to be used --sshkey=FILE Specifies a ssh key to use to connect --sshport=PORT Connects to remote on a particular port --sshcipher|c=CIPHER Passes CIPHER to ssh to use a particular cipher set From 3954008697333592226a7c6b817b22d522e60b04 Mon Sep 17 00:00:00 2001 From: Gordon Schulz Date: Sat, 10 Apr 2021 12:59:31 +0200 Subject: [PATCH 018/130] Fix bogus dates in %changelog to satisfy rpmlint - 11/24/2020 -> Tuesday - 12/04/2018 -> Tuesday Prior to changes: ``` $ rpmlint sanoid.spec sanoid.spec: E: specfile-error warning: bogus date in %changelog: Wed Nov 24 2020 Christoph Klaffl - 2.1.0 sanoid.spec: E: specfile-error warning: bogus date in %changelog: Wed Dec 04 2018 Christoph Klaffl - 2.0.0 0 packages and 1 specfiles checked; 2 errors, 0 warnings. ``` After changes: ``` $ rpmlint sanoid.spec 0 packages and 1 specfiles checked; 0 errors, 0 warnings. 
``` --- packages/rhel/sanoid.spec | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/packages/rhel/sanoid.spec b/packages/rhel/sanoid.spec index b4452e8..376f58a 100644 --- a/packages/rhel/sanoid.spec +++ b/packages/rhel/sanoid.spec @@ -111,13 +111,13 @@ echo "* * * * * root %{_sbindir}/sanoid --cron" > %{buildroot}%{_docdir}/%{name} %endif %changelog -* Wed Nov 24 2020 Christoph Klaffl - 2.1.0 +* Tue Nov 24 2020 Christoph Klaffl - 2.1.0 - Bump to 2.1.0 * Wed Oct 02 2019 Christoph Klaffl - 2.0.3 - Bump to 2.0.3 * Wed Sep 25 2019 Christoph Klaffl - 2.0.2 - Bump to 2.0.2 -* Wed Dec 04 2018 Christoph Klaffl - 2.0.0 +* Tue Dec 04 2018 Christoph Klaffl - 2.0.0 - Bump to 2.0.0 * Sat Apr 28 2018 Dominic Robinson - 1.4.18-1 - Bump to 1.4.18 From c2f3f5b6b16bdba642ce6f2a7b3d73ceab1b59a9 Mon Sep 17 00:00:00 2001 From: Andrew Berry Date: Thu, 15 Apr 2021 07:05:03 -0400 Subject: [PATCH 019/130] Fix missing t on target --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index b4558c0..9a2425a 100644 --- a/README.md +++ b/README.md @@ -340,7 +340,7 @@ As of 1.4.18, syncoid also automatically supports and enables resume of interrup + --no-clone-handling - This argument tells syncoid to not recreate clones on the targe on initial sync and doing a normal replication instead. + This argument tells syncoid to not recreate clones on the target on initial sync and doing a normal replication instead. + --dumpsnaps From e0862dff5234e8cad075782dc293de39ff19a15c Mon Sep 17 00:00:00 2001 From: Alex Haydock Date: Sun, 2 May 2021 15:08:28 +0100 Subject: [PATCH 020/130] Improve documentation for RHEL-family distros --- INSTALL.md | 18 +++++++++++++----- 1 file changed, 13 insertions(+), 5 deletions(-) diff --git a/INSTALL.md b/INSTALL.md index c8411d3..4891b3c 100644 --- a/INSTALL.md +++ b/INSTALL.md @@ -46,20 +46,28 @@ sudo systemctl enable sanoid.timer sudo systemctl start sanoid.timer ``` -## CentOS +## CentOS/RHEL Install prerequisite software: ```bash -# Install and enable epel if we don't already have it, and git too +# Install and enable EPEL if we don't already have it, and git too: +# (Note that on RHEL we cannot enable EPEL with the epel-release +# package, so you should follow the instructions on the main EPEL site.) 
sudo yum install -y epel-release git +# On CentOS, we also need to enable the PowerTools repo: +sudo yum config-manager --set-enabled powertools +# On RHEL, instead of PowerTools, we need to enable the CodeReady Builder repo: +sudo subscription-manager repos --enable=codeready-builder-for-rhel-8-x86_64-rpms # Install the packages that Sanoid depends on: -sudo yum install -y perl-Config-IniFiles perl-Data-Dumper perl-Capture-Tiny lzop mbuffer mhash pv -# if the perl dependencies can't be found in the configured repositories you can install them from CPAN manually: +sudo yum install -y perl-Config-IniFiles perl-Data-Dumper perl-Capture-Tiny perl-Getopt-Long lzop mbuffer mhash pv +# The repositories above should contain all the relevant Perl modules, but if you +# still cannot find them then you can install them from CPAN manually: sudo dnf install perl-CPAN perl-CPAN -cpan # answer the questions and past the following lines +cpan # answer the questions and paste the following lines: # install Capture::Tiny # install Config::IniFiles +# install Getopt::Long ``` Clone this repo, then put the executables and config files into the appropriate directories: From c125835e4433c6d5e852b1e06b0fa710dcb695f9 Mon Sep 17 00:00:00 2001 From: Gerhard Roethlin Date: Fri, 23 Jul 2021 17:39:28 +0200 Subject: [PATCH 021/130] MacOS Install Information Installation information for running sanoid on MacOS via launchd. A launchd example plist file is provided in the same way equivalent files are created on other platforms. --- INSTALL.md | 45 +++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 45 insertions(+) diff --git a/INSTALL.md b/INSTALL.md index c8411d3..5e4a6e3 100644 --- a/INSTALL.md +++ b/INSTALL.md @@ -171,6 +171,51 @@ For Alpine Linux this can be done with: `apk --no-cache add procps` +## MacOS + +Install prerequisite software: + +``` +perl -MCPAN -e install Config::IniFiles +``` + +The crontab can be used as on a normal unix. To use launchd instead, this example config file can be use can be used. Modify it for your needs. In particular, adjust the sanoid path. +It will start sanoid once per hour, at minute 51. Missed invocations due to standby will be merged into a single invocation at the next wakeup. + +```bash +cat << "EOF" | sudo tee /Library/LaunchDaemons/net.openoid.Sanoid.plist + + + + + Label + net.openoid.Sanoid + ProgramArguments + + /usr/local/sanoid/sanoid + --cron + + EnvironmentVariables + + TZ + UTC + PATH + /usr/local/zfs/bin:$PATH:/usr/local/bin + + StartCalendarInterval + + + Minute + 51 + + + + +EOF + +sudo launchctl load /Library/LaunchDaemons/net.openoid.Sanoid.plist +``` + ## Other OSes **Sanoid** depends on the Perl module Config::IniFiles and will not operate without it. Config::IniFiles may be installed from CPAN, though the project strongly recommends using your distribution's repositories instead. From a9ece1c7bfc934e689cf87cf4dfb56e4e13a9287 Mon Sep 17 00:00:00 2001 From: Daryl Bennett Date: Mon, 23 Aug 2021 20:22:35 -0400 Subject: [PATCH 022/130] Include syncoid remote user requirment Add requirement for syncoid w/ remote hosts to be able to sudo without a password. Recommends a least privilege configuration option for enabling `NOPASSWD` --- INSTALL.md | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/INSTALL.md b/INSTALL.md index c8411d3..68b2fa9 100644 --- a/INSTALL.md +++ b/INSTALL.md @@ -206,3 +206,12 @@ Adapt the timer interval to the lowest configured snapshot interval. 
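As an illustrative sketch (not part of the patch), one way to adapt the interval on a systemd host is a drop-in override; the 15-minute value below is a placeholder matching a typical "frequently" period:

```bash
# Run sanoid every 15 minutes via a drop-in for the packaged sanoid.timer.
sudo mkdir -p /etc/systemd/system/sanoid.timer.d
cat <<'EOF' | sudo tee /etc/systemd/system/sanoid.timer.d/interval.conf
[Timer]
OnCalendar=
OnCalendar=*:0/15
EOF
sudo systemctl daemon-reload
sudo systemctl restart sanoid.timer
```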
Take a look at the files `sanoid.defaults.conf` and `sanoid.conf` for all possible configuration options. Also have a look at the README.md for a simpler suggestion for `sanoid.conf`.
+
+## Syncoid
+If you are pushing or pulling from a remote host, create a user with privileges to `ssh` as well as `sudo`. To ensure that `zfs send/receive` can execute, adjust the privileges of the user to execute `sudo` **without** a password for only the `zfs` binary. Modify `/etc/sudoers` by running `# visudo`. Add the following line for your user.
+
+```
+...
+<user> ALL=NOPASSWD: /usr/local/sbin/zfs
+...
+```

From 523d5006092157094efaea1669ea24d208cdcfde Mon Sep 17 00:00:00 2001
From: Lilis Iskandar 
Date: Wed, 29 Sep 2021 18:19:05 +0800
Subject: [PATCH 023/130] Update README.md

Signed-off-by: Lilis Iskandar 
---
 README.md | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/README.md b/README.md
index 21432ac..3859ed9 100644
--- a/README.md
+++ b/README.md
@@ -4,12 +4,12 @@

sanoid rollback demo
(Real time demo: rolling back a full-scale cryptomalware infection in seconds!)

-More prosaically, you can use Sanoid to create, automatically thin, and monitor snapshots and pool health from a single eminently human-readable TOML config file at /etc/sanoid/sanoid.conf. (Sanoid also requires a "defaults" file located at /etc/sanoid/sanoid.defaults.conf, which is not user-editable.) A typical Sanoid system would have a single cron job but see INSTALL.md fore more details:
+More prosaically, you can use Sanoid to create, automatically thin, and monitor snapshots and pool health from a single eminently human-readable TOML config file at /etc/sanoid/sanoid.conf. (Sanoid also requires a "defaults" file located at /etc/sanoid/sanoid.defaults.conf, which is not user-editable.) A typical Sanoid system would have a single cron job but see INSTALL.md for more details:

 ```
 * * * * * TZ=UTC /usr/local/bin/sanoid --cron
 ```

-`Note`: Using UTC as timezone is recommend to prevent problems with daylight saving times
+`Note`: Using UTC as timezone is recommended to prevent problems with daylight saving times

 And its /etc/sanoid/sanoid.conf might look something like this:

From 864ab7f95b67cf92a34731e08de61fd59cf3a5bc Mon Sep 17 00:00:00 2001
From: Ryan David Sheasby 
Date: Tue, 2 Nov 2021 11:36:13 +0200
Subject: [PATCH 024/130] Update debian installation instructions

Previous instructions couldn't actually be copied and pasted. This will update the instructions to work correctly when copied as-is.
---
 INSTALL.md | 10 ++++------
 1 file changed, 4 insertions(+), 6 deletions(-)

diff --git a/INSTALL.md b/INSTALL.md
index c8411d3..338cddb 100644
--- a/INSTALL.md
+++ b/INSTALL.md
@@ -22,28 +22,26 @@
 Install prerequisite software:

 ```bash
-apt install debhelper libcapture-tiny-perl libconfig-inifiles-perl pv lzop mbuffer build-essential
+apt install debhelper libcapture-tiny-perl libconfig-inifiles-perl pv lzop mbuffer build-essential git
 ```

 Clone this repo, build the debian package and install it (alternatively you can skip the package and do it manually like described below for CentOS):

 ```bash
-# Download the repo as root to avoid changing permissions later
-sudo git clone https://github.com/jimsalterjrs/sanoid.git
+git clone https://github.com/jimsalterjrs/sanoid.git
 cd sanoid
 # checkout latest stable release or stay on master for bleeding edge stuff (but expect bugs!)
 git checkout $(git tag | grep "^v" | tail -n 1)
 ln -s packages/debian .
 dpkg-buildpackage -uc -us
-apt install ../sanoid_*_all.deb
+sudo apt install ../sanoid_*_all.deb
 ```

 Enable sanoid timer:
 ```bash
 # enable and start the sanoid timer
-sudo systemctl enable sanoid.timer
-sudo systemctl start sanoid.timer
+sudo systemctl enable --now sanoid.timer
 ```

From 1fcf34800138833b1efbcb64276047f7b6fe042c Mon Sep 17 00:00:00 2001
From: Topslakr <67913692+Topslakr@users.noreply.github.com>
Date: Mon, 6 Dec 2021 13:57:18 -0500
Subject: [PATCH 025/130] Update Install.md for Centos 8

Updated the install language, and commands, to better serve CentOS 8 and its derivatives. Instead of using CPAN, enabling the 'PowerTools' repo makes those modules available for installation and keeps them current with normal OS updates.

Updated the command to enable _and_ start sanoid.timer, instead of using two commands. 
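A hypothetical end-to-end check of the revised steps (assumes a CentOS 8 host with dnf-plugins-core available; not part of the original change):

```bash
sudo dnf config-manager --set-enabled powertools   # expose the Perl module packages
sudo dnf install -y perl-Config-IniFiles           # now resolves without falling back to CPAN
sudo systemctl enable --now sanoid.timer           # enable and start in a single command
systemctl list-timers sanoid.timer                 # confirm the timer is scheduled
```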
---
 INSTALL.md | 7 ++++---
 1 file changed, 4 insertions(+), 3 deletions(-)

diff --git a/INSTALL.md b/INSTALL.md
index c8411d3..e4724d8 100644
--- a/INSTALL.md
+++ b/INSTALL.md
@@ -53,9 +53,11 @@
 Install prerequisite software:

 ```bash
 # Install and enable epel if we don't already have it, and git too
 sudo yum install -y epel-release git
+# For CentOS 8 you need to enable the PowerTools repo to make all the needed Perl modules available (Recommended)
+sudo dnf config-manager --set-enabled powertools
 # Install the packages that Sanoid depends on:
 sudo yum install -y perl-Config-IniFiles perl-Data-Dumper perl-Capture-Tiny lzop mbuffer mhash pv
-# if the perl dependencies can't be found in the configured repositories you can install them from CPAN manually:
+# if you don't want to enable the powertools repo, the perl dependencies can be installed from CPAN manually:
 sudo dnf install perl-CPAN
 perl-CPAN
 cpan
 # answer the questions and paste the following lines
 # install Capture::Tiny
@@ -142,8 +144,7 @@
 sudo systemctl daemon-reload
 # Enable sanoid-prune.service to allow it to be triggered by sanoid.service
 sudo systemctl enable sanoid-prune.service
 # Enable and start the Sanoid timer
-sudo systemctl enable sanoid.timer
-sudo systemctl start sanoid.timer
+sudo systemctl enable --now sanoid.timer
 ```

 Now, proceed to configure [**Sanoid**](#configuration)

From b1c2684c63fe2268bb12f09ffb177d3fd7fee009 Mon Sep 17 00:00:00 2001
From: Gryd3
Date: Mon, 4 Apr 2022 15:00:31 -0700
Subject: [PATCH 026/130] append a rand int to the socket name.

Running more than one 'syncoid' at the same time to the same host resulted in two jobs referencing the same socket. It often results in "already exists, disabling multiplexing" but has on more than one occasion failed with the following:

ControlSocket /tmp/syncoid-zfsbackup-zfsbackup@10.0.0.1-1649107066 already exists, disabling multiplexing
lzop: Inappropriate ioctl for device:
CRITICAL ERROR: ssh -S /tmp/syncoid-zfsbackup-zfsbackup@10.0.0.1-1649107066 zfsbackup@10.0.0.1 ' zfs send -I '"'"'pool/office'"'"'@'"'"'autosnap_2022-04-04_21:00:00_frequently'"'"' '"'"'pool/office'"'"'@'"'"'autosnap_2022-04-04_21:15:00_frequently'"'"' | lzop | mbuffer -R 5m -q -s 128k -m 16M 2>/dev/null' | mbuffer -q -s 128k -m 16M 2>/dev/null | lzop -dfc | pv -s 18356312 | zfs receive -s -F 'zfs-pool/vault/office' 2>&1 failed: 256 at /usr/sbin/syncoid line 786.

Sample use-case: Using Monit, Cron, or some other scheduler to trigger more than one syncoid to the same host to sync two datasets. Stagger the sync so that no two jobs get started at the same time, or add some form of randomization to the socket name so that two jobs may start at the same time.
---
 syncoid | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/syncoid b/syncoid
index 3f112de..d792e6f 100755
--- a/syncoid
+++ b/syncoid
@@ -1488,7 +1488,7 @@ sub getssh {
     if ($rhost ne "") {
         if ($remoteuser eq 'root' || $args{'no-privilege-elevation'}) { $isroot = 1; } else { $isroot = 0; }
         # now we need to establish a persistent master SSH connection
-        $socket = "/tmp/syncoid-$rhost-" . time();
+        $socket = "/tmp/syncoid-$rhost-" . time() . "-" . 
int(rand(10000)); open FH, "$sshcmd -M -S $socket -o ControlPersist=1m $args{'sshport'} $rhost exit |"; close FH; From ec7858c1c7db24a445212b007bc84b52eae664b4 Mon Sep 17 00:00:00 2001 From: Jim Salter Date: Fri, 8 Apr 2022 20:57:53 +0000 Subject: [PATCH 027/130] don't use resume tokens for purely local replication --- syncoid | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/syncoid b/syncoid index 3f112de..a6d2c3e 100755 --- a/syncoid +++ b/syncoid @@ -340,7 +340,10 @@ sub syncdataset { my $receiveextraargs = ""; my $receivetoken; - if ($resume) { + + # we need to make sure this isn't a local-only replication before trying to use resume tokens. + # trying to receive using -s on a stream sent from another dataset on the same host produces ZFS errors. (Discovered on 2022-04-08 on an Ubuntu 20.04 system) + if ($resume && ($sourcehost ne $targethost)) { # save state of interrupted receive stream $receiveextraargs = "-s"; From ef2b4f887a7cd1e139a500f1cac44e476b9ea198 Mon Sep 17 00:00:00 2001 From: Jim Salter Date: Fri, 8 Apr 2022 21:09:51 +0000 Subject: [PATCH 028/130] no more rolling targets back, receive -F only --- syncoid | 28 ++++------------------------ 1 file changed, 4 insertions(+), 24 deletions(-) diff --git a/syncoid b/syncoid index a6d2c3e..539c0f8 100755 --- a/syncoid +++ b/syncoid @@ -24,9 +24,9 @@ my %args = ('sshkey' => '', 'sshport' => '', 'sshcipher' => '', 'sshoption' => [ GetOptions(\%args, "no-command-checks", "monitor-version", "compress=s", "dumpsnaps", "recursive|r", "sendoptions=s", "recvoptions=s", "source-bwlimit=s", "target-bwlimit=s", "sshkey=s", "sshport=i", "sshcipher|c=s", "sshoption|o=s@", "debug", "quiet", "no-stream", "no-sync-snap", "no-resume", "exclude=s@", "skip-parent", "identifier=s", - "no-clone-handling", "no-privilege-elevation", "force-delete", "no-clone-rollback", "no-rollback", - "create-bookmark", "pv-options=s" => \$pvoptions, "keep-sync-snap", "preserve-recordsize", - "mbuffer-size=s" => \$mbuffer_size) or pod2usage(2); + "no-clone-handling", "no-privilege-elevation", "force-delete", "create-bookmark", + "pv-options=s" => \$pvoptions, "keep-sync-snap", "preserve-recordsize", "mbuffer-size=s" => \$mbuffer_size) + or pod2usage(2); my %compressargs = %{compressargset($args{'compress'} || 'default')}; # Can't be done with GetOptions arg, as default still needs to be set @@ -288,11 +288,8 @@ sub syncdataset { my $sourcefsescaped = escapeshellparam($sourcefs); my $targetfsescaped = escapeshellparam($targetfs); - # if no rollbacks are allowed, disable forced receive + # keep forcedrecv as a variable to allow us to disable it with an optional argument later if necessary my $forcedrecv = "-F"; - if (defined $args{'no-rollback'}) { - $forcedrecv = ""; - } if ($debug) { print "DEBUG: syncing source $sourcefs to target $targetfs.\n"; } @@ -684,21 +681,6 @@ sub syncdataset { return 0; } else { my $matchingsnapescaped = escapeshellparam($matchingsnap); - # rollback target to matchingsnap - if (!defined $args{'no-rollback'}) { - my $rollbacktype = "-R"; - if (defined $args{'no-clone-rollback'}) { - $rollbacktype = "-r"; - } - if ($debug) { print "DEBUG: rolling back target to $targetfs\@$matchingsnap...\n"; } - if ($targethost ne '') { - if ($debug) { print "$sshcmd $targethost $targetsudocmd $zfscmd rollback $rollbacktype $targetfsescaped\@$matchingsnapescaped\n"; } - system ("$sshcmd $targethost " . 
escapeshellparam("$targetsudocmd $zfscmd rollback $rollbacktype $targetfsescaped\@$matchingsnapescaped")); - } else { - if ($debug) { print "$targetsudocmd $zfscmd rollback $rollbacktype $targetfsescaped\@$matchingsnapescaped\n"; } - system ("$targetsudocmd $zfscmd rollback $rollbacktype $targetfsescaped\@$matchingsnapescaped"); - } - } my $nextsnapshot = 0; @@ -1976,8 +1958,6 @@ Options: --keep-sync-snap Don't destroy created sync snapshots --create-bookmark Creates a zfs bookmark for the newest snapshot on the source after replication succeeds (only works with --no-sync-snap) --preserve-recordsize Preserves the recordsize on initial sends to the target - --no-clone-rollback Does not rollback clones on target - --no-rollback Does not rollback clones or snapshots on target (it probably requires a readonly target) --exclude=REGEX Exclude specific datasets which match the given regular expression. Can be specified multiple times --sendoptions=OPTIONS Use advanced options for zfs send (the arguments are filtered as needed), e.g. syncoid --sendoptions="Lc e" sets zfs send -L -c -e ... --recvoptions=OPTIONS Use advanced options for zfs receive (the arguments are filtered as needed), e.g. syncoid --recvoptions="ux recordsize o compression=lz4" sets zfs receive -u -x recordsize -o compression=lz4 ... From ee8c884460bab5fa3655a5bcff574a62e6114aad Mon Sep 17 00:00:00 2001 From: Jim Salter Date: Fri, 8 Apr 2022 21:34:01 +0000 Subject: [PATCH 029/130] put local resume token support back in --- syncoid | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/syncoid b/syncoid index 539c0f8..89f16ea 100755 --- a/syncoid +++ b/syncoid @@ -338,9 +338,7 @@ sub syncdataset { my $receiveextraargs = ""; my $receivetoken; - # we need to make sure this isn't a local-only replication before trying to use resume tokens. - # trying to receive using -s on a stream sent from another dataset on the same host produces ZFS errors. (Discovered on 2022-04-08 on an Ubuntu 20.04 system) - if ($resume && ($sourcehost ne $targethost)) { + if ($resume) { # save state of interrupted receive stream $receiveextraargs = "-s"; From 59c59e2b000553c439fa9785aeb17d5d1822adaa Mon Sep 17 00:00:00 2001 From: Mark Scholes Date: Thu, 19 May 2022 11:54:16 +0100 Subject: [PATCH 030/130] Removed unneeded 2>/dev/null from mbufferoptions --- syncoid | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/syncoid b/syncoid index ec6ae9d..ac8f74f 100755 --- a/syncoid +++ b/syncoid @@ -97,7 +97,7 @@ my $pscmd = 'ps'; my $pvcmd = 'pv'; my $mbuffercmd = 'mbuffer'; my $sudocmd = 'sudo'; -my $mbufferoptions = "-q -s 128k -m $mbuffer_size 2>/dev/null"; +my $mbufferoptions = "-q -s 128k -m $mbuffer_size"; # currently using POSIX compatible command to check for program existence because we aren't depending on perl # being present on remote machines. my $checkcmd = 'command -v'; From c408bddab4e0eb11b16628d9cd52240dd323cec7 Mon Sep 17 00:00:00 2001 From: Daryl Bennett Date: Mon, 23 Aug 2021 20:22:35 -0400 Subject: [PATCH 031/130] Include syncoid remote user requirment Add requirement for syncoid w/ remote hosts to be able to sudo without a password. Recommends a least privilege configuration option for enabling `NOPASSWD` --- INSTALL.md | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/INSTALL.md b/INSTALL.md index c8411d3..ec7645b 100644 --- a/INSTALL.md +++ b/INSTALL.md @@ -206,3 +206,12 @@ Adapt the timer interval to the lowest configured snapshot interval. 
Take a look at the files `sanoid.defaults.conf` and `sanoid.conf` for all possible configuration options. Also have a look at the README.md for a simpler suggestion for `sanoid.conf`.
+
+## Syncoid
+If you are pushing or pulling from a remote host, create a user with privileges to `ssh` as well as `sudo`. To ensure that `zfs send/receive` can execute, adjust the privileges of the user to execute `sudo` **without** a password for only the `zfs` binary (run `which zfs` to find the path of the `zfs` binary). Modify `/etc/sudoers` by running `# visudo`. Add the following line for your user.
+
+```
+...
+<user> ALL=NOPASSWD: <full path to zfs binary>
+...
+```

From fa67fbe845977ea136c724c4c8680bf1102b30b7 Mon Sep 17 00:00:00 2001
From: Christoph Klaffl
Date: Mon, 12 Sep 2022 23:44:52 +0200
Subject: [PATCH 032/130] bring back no-rollback option

---
 syncoid | 8 ++++++--
 1 file changed, 6 insertions(+), 2 deletions(-)

diff --git a/syncoid b/syncoid
index ec6ae9d..1746009 100755
--- a/syncoid
+++ b/syncoid
@@ -24,7 +24,7 @@ my %args = ('sshkey' => '', 'sshport' => '', 'sshcipher' => '', 'sshoption' => [
 GetOptions(\%args, "no-command-checks", "monitor-version", "compress=s", "dumpsnaps", "recursive|r", "sendoptions=s", "recvoptions=s",
     "source-bwlimit=s", "target-bwlimit=s", "sshkey=s", "sshport=i", "sshcipher|c=s", "sshoption|o=s@",
     "debug", "quiet", "no-stream", "no-sync-snap", "no-resume", "exclude=s@", "skip-parent", "identifier=s",
-    "no-clone-handling", "no-privilege-elevation", "force-delete", "create-bookmark",
+    "no-clone-handling", "no-privilege-elevation", "force-delete", "no-rollback", "create-bookmark",
     "pv-options=s" => \$pvoptions, "keep-sync-snap", "preserve-recordsize", "mbuffer-size=s" => \$mbuffer_size)
     or pod2usage(2);

@@ -288,8 +288,11 @@ sub syncdataset {
     my $sourcefsescaped = escapeshellparam($sourcefs);
     my $targetfsescaped = escapeshellparam($targetfs);

-    # keep forcedrecv as a variable to allow us to disable it with an optional argument later if necessary
+    # if no rollbacks are allowed, disable forced receive
     my $forcedrecv = "-F";
+    if (defined $args{'no-rollback'}) {
+        $forcedrecv = "";
+    }

     if ($debug) { print "DEBUG: syncing source $sourcefs to target $targetfs.\n"; }

@@ -1956,6 +1959,7 @@ Options:
   --keep-sync-snap      Don't destroy created sync snapshots
   --create-bookmark     Creates a zfs bookmark for the newest snapshot on the source after replication succeeds (only works with --no-sync-snap)
   --preserve-recordsize Preserves the recordsize on initial sends to the target
+  --no-rollback         Does not rollback snapshots on target (it probably requires a readonly target)
   --exclude=REGEX       Exclude specific datasets which match the given regular expression. Can be specified multiple times
   --sendoptions=OPTIONS Use advanced options for zfs send (the arguments are filtered as needed), e.g. syncoid --sendoptions="Lc e" sets zfs send -L -c -e ...
   --recvoptions=OPTIONS Use advanced options for zfs receive (the arguments are filtered as needed), e.g. syncoid --recvoptions="ux recordsize o compression=lz4" sets zfs receive -u -x recordsize -o compression=lz4 ...
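
A minimal sketch of what the restored switch in the patch above does, for readers skimming the series: the `no-rollback` key in `%args` is the real option added by the patch, while the dataset name and the printed command are illustrative only (syncoid builds a much longer pipeline around `zfs receive`).

```perl
#!/usr/bin/perl
use strict;
use warnings;

# Pretend GetOptions already ran and the user passed --no-rollback.
my %args = ('no-rollback' => 1);

# Default: force the receive (-F), which rolls the target back to the most
# recent matching snapshot before the incremental stream is applied.
my $forcedrecv = "-F";
if (defined $args{'no-rollback'}) {
    $forcedrecv = "";
}

# 'tank/backup' is a made-up target dataset, purely for demonstration.
print "zfs receive $forcedrecv tank/backup\n";
```

Without -F, zfs receive fails if the target has been modified since the last common snapshot, which is why the help text added above notes that --no-rollback probably requires a readonly target.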
From 4f150ec6ec90844c9b8d6aa4eaee6202734bc993 Mon Sep 17 00:00:00 2001
From: Christoph Klaffl
Date: Tue, 13 Sep 2022 00:02:03 +0200
Subject: [PATCH 033/130] fix snapshot listing fallback

---
 syncoid | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)

diff --git a/syncoid b/syncoid
index ec6ae9d..23271d5 100755
--- a/syncoid
+++ b/syncoid
@@ -1503,6 +1503,8 @@ sub getsnaps() {
     my $fsescaped = escapeshellparam($fs);
     if ($isroot) { $mysudocmd = ''; } else { $mysudocmd = $sudocmd; }

+    my $rhostOriginal = $rhost;
+
     if ($rhost ne '') {
         $rhost = "$sshcmd $rhost";
         # double escaping needed
@@ -1520,7 +1522,7 @@
     my @rawsnaps = <FH>;
     close FH or do {
         # fallback (solaris for example doesn't support the -t option)
-        return getsnapsfallback($type,$rhost,$fs,$isroot,%snaps);
+        return getsnapsfallback($type,$rhostOriginal,$fs,$isroot,%snaps);
     };

     # this is a little obnoxious. get guid,creation returns guid,creation on two separate lines

From a5d7896c0fe5ad6feb8c2969bbecb8d28155fab0 Mon Sep 17 00:00:00 2001
From: Christoph Klaffl
Date: Tue, 13 Sep 2022 00:16:49 +0200
Subject: [PATCH 034/130] improve accuracy of zfs receive check

---
 syncoid | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/syncoid b/syncoid
index ec6ae9d..1a945f9 100755
--- a/syncoid
+++ b/syncoid
@@ -1113,7 +1113,7 @@ sub iszfsbusy {
     foreach my $process (@processes) {
         # if ($debug) { print "DEBUG: checking process $process...\n"; }
-        if ($process =~ /zfs *(receive|recv).*\Q$fs\E\Z/) {
+        if ($process =~ /zfs *(receive|recv)[^\/]*\Q$fs\E\Z/) {
             # there's already a zfs receive process for our target filesystem - return true
             if ($debug) { print "DEBUG: process $process matches target $fs!\n"; }
             return 1;

From 0808575874875ee202d4a035744db75ca9d54162 Mon Sep 17 00:00:00 2001
From: Christoph Klaffl
Date: Tue, 13 Sep 2022 17:13:01 +0200
Subject: [PATCH 035/130] syncoid should exit with an error if the specified
 src dataset doesn't exist (and only skip child datasets which vanished)

---
 syncoid | 6 +++++-
 1 file changed, 5 insertions(+), 1 deletion(-)

diff --git a/syncoid b/syncoid
index ec6ae9d..167fc58 100755
--- a/syncoid
+++ b/syncoid
@@ -141,6 +141,8 @@
 my %avail = checkcommands();
 my %snaps;
 my $exitcode = 0;
+my $replicationCount = 0;
+
 ## break here to call replication individually so that we ##
 ## can loop across children separately, for recursive     ##
 ## replication                                            ##
@@ -297,7 +299,7 @@ sub syncdataset {
     if (!defined $sync) {
         # zfs already printed the corresponding error
-        if ($error =~ /\bdataset does not exist\b/) {
+        if ($error =~ /\bdataset does not exist\b/ && $replicationCount > 0) {
             if (!$quiet) { print "WARN Skipping dataset (dataset no longer exists): $sourcefs...\n"; }
             return 0;
         }
@@ -811,6 +813,8 @@ sub syncdataset {
         }
     }

+    $replicationCount++;
+
     if (defined $args{'no-sync-snap'}) {
         if (defined $args{'create-bookmark'}) {
             my $bookmarkcmd;

From 5d469c4bb2ec5e6e18073d32e99d5d1bde77a948 Mon Sep 17 00:00:00 2001
From: joelishness
Date: Mon, 3 Oct 2022 21:36:26 -0700
Subject: [PATCH 036/130] Update README.md

Fixed some typos
---
 README.md | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/README.md b/README.md
index 21432ac..eaa8b38 100644
--- a/README.md
+++ b/README.md
@@ -342,7 +342,7 @@ As of 1.4.18, syncoid also automatically supports and enables resume of interrup

 + --no-clone-handling

-   This argument tells syncoid to not recreate clones on the targe on initial sync and doing a normal replication instead. 
+ This argument tells syncoid to not recreate clones on the target on initial sync, and do a normal replication instead. + --dumpsnaps From c4e70280225f7b3ec03cc075daf218dde468412f Mon Sep 17 00:00:00 2001 From: Vinnie Okada Date: Sat, 27 Nov 2021 16:10:02 -0700 Subject: [PATCH 037/130] Refactor terminal output Replace `print` and `warn` statements with a logging function. --- syncoid | 299 ++++++++++++++++++++++++++++++-------------------------- 1 file changed, 159 insertions(+), 140 deletions(-) diff --git a/syncoid b/syncoid index ec6ae9d..fa6feff 100755 --- a/syncoid +++ b/syncoid @@ -34,7 +34,7 @@ my @sendoptions = (); if (length $args{'sendoptions'}) { @sendoptions = parsespecialoptions($args{'sendoptions'}); if (! defined($sendoptions[0])) { - warn "invalid send options!"; + writelog('WARN', "invalid send options!"); pod2usage(2); exit 127; } @@ -42,7 +42,7 @@ if (length $args{'sendoptions'}) { if (defined $args{'recursive'}) { foreach my $option(@sendoptions) { if ($option->{option} eq 'R') { - warn "invalid argument combination, zfs send -R and --recursive aren't compatible!"; + writelog('WARN', "invalid argument combination, zfs send -R and --recursive aren't compatible!"); pod2usage(2); exit 127; } @@ -54,7 +54,7 @@ my @recvoptions = (); if (length $args{'recvoptions'}) { @recvoptions = parsespecialoptions($args{'recvoptions'}); if (! defined($recvoptions[0])) { - warn "invalid receive options!"; + writelog('WARN', "invalid receive options!"); pod2usage(2); exit 127; } @@ -63,7 +63,7 @@ if (length $args{'recvoptions'}) { # TODO Expand to accept multiple sources? if (scalar(@ARGV) != 2) { - print("Source or target not found!\n"); + writelog('WARN', "Source or target not found!"); pod2usage(2); exit 127; } else { @@ -117,7 +117,7 @@ my $identifier = ""; if (length $args{'identifier'}) { if ($args{'identifier'} !~ /^[a-zA-Z0-9-_:.]+$/) { # invalid extra identifier - print("CRITICAL: extra identifier contains invalid chars!\n"); + writelog('WARN', "extra identifier contains invalid chars!"); pod2usage(2); exit 127; } @@ -126,7 +126,7 @@ if (length $args{'identifier'}) { # figure out if source and/or target are remote. $sshcmd = "$sshcmd $args{'sshcipher'} $sshoptions $args{'sshport'} $args{'sshkey'}"; -if ($debug) { print "DEBUG: SSHCMD: $sshcmd\n"; } +writelog('DEBUG', "SSHCMD: $sshcmd"); my ($sourcehost,$sourcefs,$sourceisroot) = getssh($rawsourcefs); my ($targethost,$targetfs,$targetisroot) = getssh($rawtargetfs); @@ -148,11 +148,11 @@ my $exitcode = 0; if (!defined $args{'recursive'}) { syncdataset($sourcehost, $sourcefs, $targethost, $targetfs, undef); } else { - if ($debug) { print "DEBUG: recursive sync of $sourcefs.\n"; } + writelog('DEBUG', "recursive sync of $sourcefs."); my @datasets = getchilddatasets($sourcehost, $sourcefs, $sourceisroot); if (!@datasets) { - warn "CRITICAL ERROR: no datasets found"; + writelog('CRITICAL', "no datasets found"); @datasets = (); $exitcode = 2; } @@ -191,7 +191,6 @@ if (!defined $args{'recursive'}) { chomp $dataset; my $childsourcefs = $sourcefs . $dataset; my $childtargetfs = $targetfs . 
$dataset; - # print "syncdataset($sourcehost, $childsourcefs, $targethost, $childtargetfs); \n"; syncdataset($sourcehost, $childsourcefs, $targethost, $childtargetfs, $origin); } @@ -238,7 +237,7 @@ sub getchilddatasets { } my $getchildrencmd = "$rhost $mysudocmd $zfscmd list -o name,origin -t filesystem,volume -Hr $fsescaped |"; - if ($debug) { print "DEBUG: getting list of child datasets on $fs using $getchildrencmd...\n"; } + writelog('DEBUG', "getting list of child datasets on $fs using $getchildrencmd..."); if (! open FH, $getchildrencmd) { die "ERROR: list command failed!\n"; } @@ -261,7 +260,7 @@ sub getchilddatasets { my $excludes = $args{'exclude'}; foreach (@$excludes) { if ($dataset =~ /$_/) { - if ($debug) { print "DEBUG: excluded $dataset because of $_\n"; } + writelog('DEBUG', "excluded $dataset because of $_"); next DATASETS; } } @@ -291,19 +290,19 @@ sub syncdataset { # keep forcedrecv as a variable to allow us to disable it with an optional argument later if necessary my $forcedrecv = "-F"; - if ($debug) { print "DEBUG: syncing source $sourcefs to target $targetfs.\n"; } + writelog('DEBUG', "syncing source $sourcefs to target $targetfs."); my ($sync, $error) = getzfsvalue($sourcehost,$sourcefs,$sourceisroot,'syncoid:sync'); if (!defined $sync) { # zfs already printed the corresponding error if ($error =~ /\bdataset does not exist\b/) { - if (!$quiet) { print "WARN Skipping dataset (dataset no longer exists): $sourcefs...\n"; } + writelog('WARN', "Skipping dataset (dataset no longer exists): $sourcefs..."); return 0; } else { # print the error out and set exit code - print "ERROR: $error\n"; + writelog('CRITICAL', "$error"); if ($exitcode < 2) { $exitcode = 2 } } @@ -314,20 +313,20 @@ sub syncdataset { # empty is handled the same as unset (aka: '-') # definitely sync this dataset - if a host is called 'true' or '-', then you're special } elsif ($sync eq 'false') { - if (!$quiet) { print "INFO: Skipping dataset (syncoid:sync=false): $sourcefs...\n"; } + writelog('INFO', "Skipping dataset (syncoid:sync=false): $sourcefs..."); return 0; } else { my $hostid = hostname(); my @hosts = split(/,/,$sync); if (!(grep $hostid eq $_, @hosts)) { - if (!$quiet) { print "INFO: Skipping dataset (syncoid:sync doesn't include $hostid): $sourcefs...\n"; } + writelog('INFO', "Skipping dataset (syncoid:sync doesn't include $hostid): $sourcefs..."); return 0; } } # make sure target is not currently in receive. if (iszfsbusy($targethost,$targetfs,$targetisroot)) { - warn "Cannot sync now: $targetfs is already target of a zfs receive process.\n"; + writelog('WARN', "Cannot sync now: $targetfs is already target of a zfs receive process."); if ($exitcode < 1) { $exitcode = 1; } return 0; } @@ -346,8 +345,8 @@ sub syncdataset { # check remote dataset for receive resume token (interrupted receive) $receivetoken = getreceivetoken($targethost,$targetfs,$targetisroot); - if ($debug && defined($receivetoken)) { - print "DEBUG: got receive resume token: $receivetoken: \n"; + if (defined($receivetoken)) { + writelog('DEBUG', "got receive resume token: $receivetoken: "); } } } @@ -367,9 +366,8 @@ sub syncdataset { } if (defined $args{'dumpsnaps'}) { - print "merged snapshot list of $targetfs: \n"; + writelog('INFO', "merged snapshot list of $targetfs: "); dumphash(\%snaps); - print "\n\n\n"; } if (!defined $args{'no-sync-snap'} && !defined $skipsnapshot) { @@ -383,7 +381,7 @@ sub syncdataset { # we don't want sync snapshots created, so use the newest snapshot we can find. 
$newsyncsnap = getnewestsnapshot($sourcehost,$sourcefs,$sourceisroot); if ($newsyncsnap eq 0) { - warn "CRITICAL: no snapshots exist on source $sourcefs, and you asked for --no-sync-snap.\n"; + writelog('WARN', "CRITICAL: no snapshots exist on source $sourcefs, and you asked for --no-sync-snap."); if ($exitcode < 1) { $exitcode = 1; } return 0; } @@ -407,12 +405,10 @@ sub syncdataset { if (! $targetexists) { # do an initial sync from the oldest source snapshot # THEN do an -I to the newest - if ($debug) { - if (!defined ($args{'no-stream'}) ) { - print "DEBUG: target $targetfs does not exist. Finding oldest available snapshot on source $sourcefs ...\n"; - } else { - print "DEBUG: target $targetfs does not exist, and --no-stream selected. Finding newest available snapshot on source $sourcefs ...\n"; - } + if (!defined ($args{'no-stream'}) ) { + writelog('DEBUG', "target $targetfs does not exist. Finding oldest available snapshot on source $sourcefs ..."); + } else { + writelog('DEBUG', "target $targetfs does not exist, and --no-stream selected. Finding newest available snapshot on source $sourcefs ..."); } my $oldestsnap = getoldestsnapshot(\%snaps); if (! $oldestsnap) { @@ -422,7 +418,7 @@ sub syncdataset { } # getoldestsnapshot() returned false, so use new sync snapshot - if ($debug) { print "DEBUG: getoldestsnapshot() returned false, so using $newsyncsnap.\n"; } + writelog('DEBUG', "getoldestsnapshot() returned false, so using $newsyncsnap."); $oldestsnap = $newsyncsnap; } @@ -462,32 +458,30 @@ sub syncdataset { my $disp_pvsize = readablebytes($pvsize); if ($pvsize == 0) { $disp_pvsize = 'UNKNOWN'; } my $synccmd = buildsynccmd($sendcmd,$recvcmd,$pvsize,$sourceisroot,$targetisroot); - if (!$quiet) { - if (defined $origin) { - print "INFO: Clone is recreated on target $targetfs based on $origin\n"; - } - if (!defined ($args{'no-stream'}) ) { - print "INFO: Sending oldest full snapshot $sourcefs\@$oldestsnap (~ $disp_pvsize) to new target filesystem:\n"; - } else { - print "INFO: --no-stream selected; sending newest full snapshot $sourcefs\@$oldestsnap (~ $disp_pvsize) to new target filesystem:\n"; - } + if (defined $origin) { + writelog('INFO', "Clone is recreated on target $targetfs based on $origin"); } - if ($debug) { print "DEBUG: $synccmd\n"; } + if (!defined ($args{'no-stream'}) ) { + writelog('INFO', "Sending oldest full snapshot $sourcefs\@$oldestsnap (~ $disp_pvsize) to new target filesystem:"); + } else { + writelog('INFO', "--no-stream selected; sending newest full snapshot $sourcefs\@$oldestsnap (~ $disp_pvsize) to new target filesystem:"); + } + writelog('DEBUG', "$synccmd"); # make sure target is (still) not currently in receive. if (iszfsbusy($targethost,$targetfs,$targetisroot)) { - warn "Cannot sync now: $targetfs is already target of a zfs receive process.\n"; + writelog('WARN', "Cannot sync now: $targetfs is already target of a zfs receive process."); if ($exitcode < 1) { $exitcode = 1; } return 0; } system($synccmd) == 0 or do { if (defined $origin) { - print "INFO: clone creation failed, trying ordinary replication as fallback\n"; + writelog('INFO', "clone creation failed, trying ordinary replication as fallback"); syncdataset($sourcehost, $sourcefs, $targethost, $targetfs, undef, 1); return 0; } - warn "CRITICAL ERROR: $synccmd failed: $?"; + writelog('CRITICAL', "$synccmd failed: $?"); if ($exitcode < 2) { $exitcode = 2; } return 0; }; @@ -512,23 +506,23 @@ sub syncdataset { # make sure target is (still) not currently in receive. 
if (iszfsbusy($targethost,$targetfs,$targetisroot)) { - warn "Cannot sync now: $targetfs is already target of a zfs receive process.\n"; + writelog('WARN', "Cannot sync now: $targetfs is already target of a zfs receive process."); if ($exitcode < 1) { $exitcode = 1; } return 0; } - if (!$quiet) { print "INFO: Updating new target filesystem with incremental $sourcefs\@$oldestsnap ... $newsyncsnap (~ $disp_pvsize):\n"; } - if ($debug) { print "DEBUG: $synccmd\n"; } + writelog('INFO', "Updating new target filesystem with incremental $sourcefs\@$oldestsnap ... $newsyncsnap (~ $disp_pvsize):"); + writelog('DEBUG', "$synccmd"); if ($oldestsnap ne $newsyncsnap) { my $ret = system($synccmd); if ($ret != 0) { - warn "CRITICAL ERROR: $synccmd failed: $?"; + writelog('CRITICAL', "$synccmd failed: $?"); if ($exitcode < 1) { $exitcode = 1; } return 0; } } else { - if (!$quiet) { print "INFO: no incremental sync needed; $oldestsnap is already the newest available snapshot.\n"; } + writelog('INFO', "no incremental sync needed; $oldestsnap is already the newest available snapshot."); } # restore original readonly value to target after sync complete @@ -549,8 +543,8 @@ sub syncdataset { if ($pvsize == 0) { $disp_pvsize = "UNKNOWN"; } my $synccmd = buildsynccmd($sendcmd,$recvcmd,$pvsize,$sourceisroot,$targetisroot); - if (!$quiet) { print "Resuming interrupted zfs send/receive from $sourcefs to $targetfs (~ $disp_pvsize remaining):\n"; } - if ($debug) { print "DEBUG: $synccmd\n"; } + writelog('INFO', "Resuming interrupted zfs send/receive from $sourcefs to $targetfs (~ $disp_pvsize remaining):"); + writelog('DEBUG', "$synccmd"); if ($pvsize == 0) { # we need to capture the error of zfs send, this will render pv useless but in this case @@ -570,12 +564,12 @@ sub syncdataset { $stdout =~ /\Qused in the initial send no longer exists\E/ || $stdout =~ /incremental source [0-9xa-f]+ no longer exists/ ) { - if (!$quiet) { print "WARN: resetting partially receive state because the snapshot source no longer exists\n"; } + writelog('WARN', "resetting partially receive state because the snapshot source no longer exists"); resetreceivestate($targethost,$targetfs,$targetisroot); # do an normal sync cycle return syncdataset($sourcehost, $sourcefs, $targethost, $targetfs, $origin); } else { - warn "CRITICAL ERROR: $synccmd failed: $?"; + writelog('CRITICAL', "$synccmd failed: $?"); if ($exitcode < 2) { $exitcode = 2; } return 0; } @@ -620,7 +614,7 @@ sub syncdataset { if (! $bookmark) { if ($args{'force-delete'}) { - if (!$quiet) { print "Removing $targetfs because no matching snapshots were found\n"; } + writelog('INFO', "Removing $targetfs because no matching snapshots were found"); my $rcommand = ''; my $mysudocmd = ''; @@ -636,7 +630,7 @@ sub syncdataset { my $ret = system("$rcommand $prunecmd"); if ($ret != 0) { - warn "WARNING: $rcommand $prunecmd failed: $?"; + writelog('WARN', "$rcommand $prunecmd failed: $?"); } else { # redo sync and skip snapshot creation (already taken) return syncdataset($sourcehost, $sourcefs, $targethost, $targetfs, undef, 1); @@ -646,19 +640,27 @@ sub syncdataset { # if we got this far, we failed to find a matching snapshot/bookmark. if ($exitcode < 2) { $exitcode = 2; } - print "\n"; - print "CRITICAL ERROR: Target $targetfs exists but has no snapshots matching with $sourcefs!\n"; - print " Replication to target would require destroying existing\n"; - print " target. 
Cowardly refusing to destroy your existing target.\n\n"; + my $msg = <<~"EOT"; + + Target $targetfs exists but has no snapshots matching with $sourcefs! + Replication to target would require destroying existing + target. Cowardly refusing to destroy your existing target. + + EOT + + writelog('CRITICAL', $msg); # experience tells me we need a mollyguard for people who try to # zfs create targetpool/targetsnap ; syncoid sourcepool/sourcesnap targetpool/targetsnap ... if ( $targetsize < (64*1024*1024) ) { - print " NOTE: Target $targetfs dataset is < 64MB used - did you mistakenly run\n"; - print " \`zfs create $args{'target'}\` on the target? ZFS initial\n"; - print " replication must be to a NON EXISTENT DATASET, which will\n"; - print " then be CREATED BY the initial replication process.\n\n"; + $msg = <<~"EOT"; + NOTE: Target $targetfs dataset is < 64MB used - did you mistakenly run + `zfs create $args{'target'}` on the target? ZFS initial + replication must be to a NON EXISTENT DATASET, which will + then be CREATED BY the initial replication process. + + EOT } # return false now in case more child datasets need replication. @@ -668,14 +670,14 @@ sub syncdataset { # make sure target is (still) not currently in receive. if (iszfsbusy($targethost,$targetfs,$targetisroot)) { - warn "Cannot sync now: $targetfs is already target of a zfs receive process.\n"; + writelog('WARN', "Cannot sync now: $targetfs is already target of a zfs receive process."); if ($exitcode < 1) { $exitcode = 1; } return 0; } if ($matchingsnap eq $newsyncsnap) { # barf some text but don't touch the filesystem - if (!$quiet) { print "INFO: no snapshots on source newer than $newsyncsnap on target. Nothing to do, not syncing.\n"; } + writelog('INFO', "no snapshots on source newer than $newsyncsnap on target. Nothing to do, not syncing."); return 0; } else { my $matchingsnapescaped = escapeshellparam($matchingsnap); @@ -708,8 +710,8 @@ sub syncdataset { my $recvcmd = "$targetsudocmd $zfscmd receive $recvoptions $receiveextraargs $forcedrecv $targetfsescaped 2>&1"; my $synccmd = buildsynccmd($sendcmd,$recvcmd,$pvsize,$sourceisroot,$targetisroot); - if (!$quiet) { print "Sending incremental $sourcefs#$bookmarkescaped ... $nextsnapshot (~ $disp_pvsize):\n"; } - if ($debug) { print "DEBUG: $synccmd\n"; } + writelog('INFO', "Sending incremental $sourcefs#$bookmarkescaped ... $nextsnapshot (~ $disp_pvsize):"); + writelog('DEBUG', "$synccmd"); ($stdout, $exit) = tee_stdout { system("$synccmd") @@ -717,15 +719,15 @@ sub syncdataset { $exit == 0 or do { if (!$resume && $stdout =~ /\Qcontains partially-complete state\E/) { - if (!$quiet) { print "WARN: resetting partially receive state\n"; } + writelog('WARN', "resetting partially receive state"); resetreceivestate($targethost,$targetfs,$targetisroot); system("$synccmd") == 0 or do { - warn "CRITICAL ERROR: $synccmd failed: $?"; + writelog('CRITICAL', "$synccmd failed: $?"); if ($exitcode < 2) { $exitcode = 2; } return 0; } } else { - warn "CRITICAL ERROR: $synccmd failed: $?"; + writelog('CRITICAL', "$synccmd failed: $?"); if ($exitcode < 2) { $exitcode = 2; } return 0; } @@ -738,8 +740,8 @@ sub syncdataset { my $recvcmd = "$targetsudocmd $zfscmd receive $recvoptions $receiveextraargs $forcedrecv $targetfsescaped 2>&1"; my $synccmd = buildsynccmd($sendcmd,$recvcmd,$pvsize,$sourceisroot,$targetisroot); - if (!$quiet) { print "Sending incremental $sourcefs#$bookmarkescaped ... 
$newsyncsnap (~ $disp_pvsize):\n"; } - if ($debug) { print "DEBUG: $synccmd\n"; } + writelog('INFO', "Sending incremental $sourcefs#$bookmarkescaped ... $newsyncsnap (~ $disp_pvsize):"); + writelog('DEBUG', "$synccmd"); ($stdout, $exit) = tee_stdout { system("$synccmd") @@ -747,15 +749,15 @@ sub syncdataset { $exit == 0 or do { if (!$resume && $stdout =~ /\Qcontains partially-complete state\E/) { - if (!$quiet) { print "WARN: resetting partially receive state\n"; } + writelog('WARN', "resetting partially receive state"); resetreceivestate($targethost,$targetfs,$targetisroot); system("$synccmd") == 0 or do { - warn "CRITICAL ERROR: $synccmd failed: $?"; + writelog('CRITICAL', "$synccmd failed: $?"); if ($exitcode < 2) { $exitcode = 2; } return 0; } } else { - warn "CRITICAL ERROR: $synccmd failed: $?"; + writelog('CRITICAL', "$synccmd failed: $?"); if ($exitcode < 2) { $exitcode = 2; } return 0; } @@ -779,8 +781,8 @@ sub syncdataset { if ($pvsize == 0) { $disp_pvsize = "UNKNOWN"; } my $synccmd = buildsynccmd($sendcmd,$recvcmd,$pvsize,$sourceisroot,$targetisroot); - if (!$quiet) { print "Sending incremental $sourcefs\@$matchingsnap ... $newsyncsnap (~ $disp_pvsize):\n"; } - if ($debug) { print "DEBUG: $synccmd\n"; } + writelog('INFO', "Sending incremental $sourcefs\@$matchingsnap ... $newsyncsnap (~ $disp_pvsize):"); + writelog('DEBUG', "$synccmd"); ($stdout, $exit) = tee_stdout { system("$synccmd") @@ -789,15 +791,15 @@ sub syncdataset { $exit == 0 or do { # FreeBSD reports "dataset is busy" instead of "contains partially-complete state" if (!$resume && ($stdout =~ /\Qcontains partially-complete state\E/ || $stdout =~ /\Qdataset is busy\E/)) { - if (!$quiet) { print "WARN: resetting partially receive state\n"; } + writelog('WARN', "resetting partially receive state"); resetreceivestate($targethost,$targetfs,$targetisroot); system("$synccmd") == 0 or do { - warn "CRITICAL ERROR: $synccmd failed: $?"; + writelog('CRITICAL', "$synccmd failed: $?"); if ($exitcode < 2) { $exitcode = 2; } return 0; } } else { - warn "CRITICAL ERROR: $synccmd failed: $?"; + writelog('CRITICAL', "$synccmd failed: $?"); if ($exitcode < 2) { $exitcode = 2; } return 0; } @@ -819,22 +821,22 @@ sub syncdataset { } else { $bookmarkcmd = "$sourcesudocmd $zfscmd bookmark $sourcefsescaped\@$newsyncsnapescaped $sourcefsescaped\#$newsyncsnapescaped"; } - if ($debug) { print "DEBUG: $bookmarkcmd\n"; } + writelog('DEBUG', "$bookmarkcmd"); system($bookmarkcmd) == 0 or do { # fallback: assume nameing conflict and try again with guid based suffix my $guid = $snaps{'source'}{$newsyncsnap}{'guid'}; $guid = substr($guid, 0, 6); - if (!$quiet) { print "INFO: bookmark creation failed, retrying with guid based suffix ($guid)...\n"; } + writelog('INFO', "bookmark creation failed, retrying with guid based suffix ($guid)..."); if ($sourcehost ne '') { $bookmarkcmd = "$sshcmd $sourcehost " . 
escapeshellparam("$sourcesudocmd $zfscmd bookmark $sourcefsescaped\@$newsyncsnapescaped $sourcefsescaped\#$newsyncsnapescaped$guid"); } else { $bookmarkcmd = "$sourcesudocmd $zfscmd bookmark $sourcefsescaped\@$newsyncsnapescaped $sourcefsescaped\#$newsyncsnapescaped$guid"; } - if ($debug) { print "DEBUG: $bookmarkcmd\n"; } + writelog('DEBUG', "$bookmarkcmd"); system($bookmarkcmd) == 0 or do { - warn "CRITICAL ERROR: $bookmarkcmd failed: $?"; + writelog('CRITICAL', "$bookmarkcmd failed: $?"); if ($exitcode < 2) { $exitcode = 2; } return 0; } @@ -913,7 +915,7 @@ sub compressargset { if ($value eq 'default') { $value = $DEFAULT_COMPRESSION; } elsif (!(grep $value eq $_, ('gzip', 'pigz-fast', 'pigz-slow', 'zstd-fast', 'zstd-slow', 'lz4', 'xz', 'lzo', 'default', 'none'))) { - warn "Unrecognised compression value $value, defaulting to $DEFAULT_COMPRESSION"; + writelog('WARN', "Unrecognised compression value $value, defaulting to $DEFAULT_COMPRESSION"); $value = $DEFAULT_COMPRESSION; } @@ -934,7 +936,7 @@ sub checkcommands { # if --nocommandchecks then assume everything's available and return if ($args{'nocommandchecks'}) { - if ($debug) { print "DEBUG: not checking for command availability due to --nocommandchecks switch.\n"; } + writelog('DEBUG', "not checking for command availability due to --nocommandchecks switch."); $avail{'compress'} = 1; $avail{'localpv'} = 1; $avail{'localmbuffer'} = 1; @@ -954,13 +956,13 @@ sub checkcommands { # if raw compress command is null, we must have specified no compression. otherwise, # make sure that compression is available everywhere we need it if ($compressargs{'compress'} eq 'none') { - if ($debug) { print "DEBUG: compression forced off from command line arguments.\n"; } + writelog('DEBUG', "compression forced off from command line arguments."); } else { - if ($debug) { print "DEBUG: checking availability of $compressargs{'rawcmd'} on source...\n"; } + writelog('DEBUG', "checking availability of $compressargs{'rawcmd'} on source..."); $avail{'sourcecompress'} = `$sourcessh $checkcmd $compressargs{'rawcmd'} 2>/dev/null`; - if ($debug) { print "DEBUG: checking availability of $compressargs{'rawcmd'} on target...\n"; } + writelog('DEBUG', "checking availability of $compressargs{'rawcmd'} on target..."); $avail{'targetcompress'} = `$targetssh $checkcmd $compressargs{'rawcmd'} 2>/dev/null`; - if ($debug) { print "DEBUG: checking availability of $compressargs{'rawcmd'} on local machine...\n"; } + writelog('DEBUG', "checking availability of $compressargs{'rawcmd'} on local machine..."); $avail{'localcompress'} = `$checkcmd $compressargs{'rawcmd'} 2>/dev/null`; } @@ -989,13 +991,13 @@ sub checkcommands { if ($avail{'sourcecompress'} eq '') { if ($compressargs{'rawcmd'} ne '') { - print "WARN: $compressargs{'rawcmd'} not available on source $s- sync will continue without compression.\n"; + writelog('WARN', "$compressargs{'rawcmd'} not available on source $s- sync will continue without compression."); } $avail{'compress'} = 0; } if ($avail{'targetcompress'} eq '') { if ($compressargs{'rawcmd'} ne '') { - print "WARN: $compressargs{'rawcmd'} not available on target $t - sync will continue without compression.\n"; + writelog('WARN', "$compressargs{'rawcmd'} not available on target $t - sync will continue without compression."); } $avail{'compress'} = 0; } @@ -1008,24 +1010,24 @@ sub checkcommands { # corner case - if source AND target are BOTH remote, we have to check for local compress too if ($sourcehost ne '' && $targethost ne '' && $avail{'localcompress'} eq '') { 
if ($compressargs{'rawcmd'} ne '') { - print "WARN: $compressargs{'rawcmd'} not available on local machine - sync will continue without compression.\n"; + writelog('WARN', "$compressargs{'rawcmd'} not available on local machine - sync will continue without compression."); } $avail{'compress'} = 0; } - if ($debug) { print "DEBUG: checking availability of $mbuffercmd on source...\n"; } + writelog('DEBUG', "checking availability of $mbuffercmd on source..."); $avail{'sourcembuffer'} = `$sourcessh $checkcmd $mbuffercmd 2>/dev/null`; if ($avail{'sourcembuffer'} eq '') { - if (!$quiet) { print "WARN: $mbuffercmd not available on source $s - sync will continue without source buffering.\n"; } + writelog('WARN', "$mbuffercmd not available on source $s - sync will continue without source buffering."); $avail{'sourcembuffer'} = 0; } else { $avail{'sourcembuffer'} = 1; } - if ($debug) { print "DEBUG: checking availability of $mbuffercmd on target...\n"; } + writelog('DEBUG', "checking availability of $mbuffercmd on target..."); $avail{'targetmbuffer'} = `$targetssh $checkcmd $mbuffercmd 2>/dev/null`; if ($avail{'targetmbuffer'} eq '') { - if (!$quiet) { print "WARN: $mbuffercmd not available on target $t - sync will continue without target buffering.\n"; } + writelog('WARN', "$mbuffercmd not available on target $t - sync will continue without target buffering."); $avail{'targetmbuffer'} = 0; } else { $avail{'targetmbuffer'} = 1; @@ -1033,18 +1035,18 @@ sub checkcommands { # if we're doing remote source AND remote target, check for local mbuffer as well if ($sourcehost ne '' && $targethost ne '') { - if ($debug) { print "DEBUG: checking availability of $mbuffercmd on local machine...\n"; } + writelog('DEBUG', "checking availability of $mbuffercmd on local machine..."); $avail{'localmbuffer'} = `$checkcmd $mbuffercmd 2>/dev/null`; if ($avail{'localmbuffer'} eq '') { $avail{'localmbuffer'} = 0; - if (!$quiet) { print "WARN: $mbuffercmd not available on local machine - sync will continue without local buffering.\n"; } + writelog('WARN', "$mbuffercmd not available on local machine - sync will continue without local buffering."); } } - if ($debug) { print "DEBUG: checking availability of $pvcmd on local machine...\n"; } + writelog('DEBUG', "checking availability of $pvcmd on local machine..."); $avail{'localpv'} = `$checkcmd $pvcmd 2>/dev/null`; if ($avail{'localpv'} eq '') { - if (!$quiet) { print "WARN: $pvcmd not available on local machine - sync will continue without progress bar.\n"; } + writelog('WARN', "$pvcmd not available on local machine - sync will continue without progress bar."); $avail{'localpv'} = 0; } else { $avail{'localpv'} = 1; @@ -1072,11 +1074,11 @@ sub checkcommands { my $resumechkcmd = "$zpoolcmd get -o value -H feature\@extensible_dataset"; - if ($debug) { print "DEBUG: checking availability of zfs resume feature on source...\n"; } + writelog('DEBUG', "checking availability of zfs resume feature on source..."); $avail{'sourceresume'} = system("$sourcessh $sourcesudocmd $resumechkcmd $srcpool 2>/dev/null | grep '\\(active\\|enabled\\)' >/dev/null 2>&1"); $avail{'sourceresume'} = $avail{'sourceresume'} == 0 ? 1 : 0; - if ($debug) { print "DEBUG: checking availability of zfs resume feature on target...\n"; } + writelog('DEBUG', "checking availability of zfs resume feature on target..."); $avail{'targetresume'} = system("$targetssh $targetsudocmd $resumechkcmd $dstpool 2>/dev/null | grep '\\(active\\|enabled\\)' >/dev/null 2>&1"); $avail{'targetresume'} = $avail{'targetresume'} == 0 ? 
1 : 0;

@@ -1092,7 +1094,7 @@ sub checkcommands {
             push @hosts, 'target';
         }
         my $affected = join(" and ", @hosts);
-        print "WARN: ZFS resume feature not available on $affected machine - sync will continue without resume support.\n";
+        writelog('WARN', "ZFS resume feature not available on $affected machine - sync will continue without resume support.");
     }
 } else {
     $avail{'sourceresume'} = 0;
@@ -1105,17 +1107,16 @@
 sub iszfsbusy {
     my ($rhost,$fs,$isroot) = @_;
     if ($rhost ne '') { $rhost = "$sshcmd $rhost"; }
-    if ($debug) { print "DEBUG: checking to see if $fs on $rhost is already in zfs receive using $rhost $pscmd -Ao args= ...\n"; }
+    writelog('DEBUG', "checking to see if $fs on $rhost is already in zfs receive using $rhost $pscmd -Ao args= ...");
     open PL, "$rhost $pscmd -Ao args= |";
     my @processes = <PL>;
     close PL;

     foreach my $process (@processes) {
-        # if ($debug) { print "DEBUG: checking process $process...\n"; }
         if ($process =~ /zfs *(receive|recv).*\Q$fs\E\Z/) {
             # there's already a zfs receive process for our target filesystem - return true
-            if ($debug) { print "DEBUG: process $process matches target $fs!\n"; }
+            writelog('DEBUG', "process $process matches target $fs!");
             return 1;
         }
     }
@@ -1135,12 +1136,12 @@ sub setzfsvalue {
         $fsescaped = escapeshellparam($fsescaped);
     }

-    if ($debug) { print "DEBUG: setting $property to $value on $fs...\n"; }
+    writelog('DEBUG', "setting $property to $value on $fs...");
     my $mysudocmd;
     if ($isroot) { $mysudocmd = ''; } else { $mysudocmd = $sudocmd; }
-    if ($debug) { print "$rhost $mysudocmd $zfscmd set $property=$value $fsescaped\n"; }
+    writelog('DEBUG', "$rhost $mysudocmd $zfscmd set $property=$value $fsescaped");
     system("$rhost $mysudocmd $zfscmd set $property=$value $fsescaped") == 0
-        or warn "WARNING: $rhost $mysudocmd $zfscmd set $property=$value $fsescaped died: $?, proceeding anyway.\n";
+        or writelog('WARN', "$rhost $mysudocmd $zfscmd set $property=$value $fsescaped died: $?, proceeding anyway.");
     return;
 }

@@ -1155,10 +1156,10 @@ sub getzfsvalue {
         $fsescaped = escapeshellparam($fsescaped);
     }

-    if ($debug) { print "DEBUG: getting current value of $property on $fs...\n"; }
+    writelog('DEBUG', "getting current value of $property on $fs...");
     my $mysudocmd;
     if ($isroot) { $mysudocmd = ''; } else { $mysudocmd = $sudocmd; }
-    if ($debug) { print "$rhost $mysudocmd $zfscmd get -H $property $fsescaped\n"; }
+    writelog('DEBUG', "$rhost $mysudocmd $zfscmd get -H $property $fsescaped");
     my ($value, $error, $exit) = capture {
         system("$rhost $mysudocmd $zfscmd get -H $property $fsescaped");
     };
@@ -1171,7 +1172,7 @@
     # If we are in scalar context and there is an error, print it out.
     # Otherwise we assume the caller will deal with it.
     if (!$wantarray and $error) {
-        print "ERROR getzfsvalue $fs $property: $error\n";
+        writelog('CRITICAL', "getzfsvalue $fs $property: $error");
     }

     return $wantarray ? ($value, $error) : $value;
@@ -1200,7 +1201,7 @@ sub getoldestsnapshot {
     # must not have had any snapshots on source - luckily, we already made one, amirite?
     if (defined ($args{'no-sync-snap'}) ) {
         # well, actually we set --no-sync-snap, so no we *didn't* already make one. Whoops. 
- warn "CRIT: --no-sync-snap is set, and getoldestsnapshot() could not find any snapshots on source!\n"; + writelog('CRITICAL', "--no-sync-snap is set, and getoldestsnapshot() could not find any snapshots on source!"); } return 0; } @@ -1209,7 +1210,7 @@ sub getnewestsnapshot { my $snaps = shift; foreach my $snap ( sort { $snaps{'source'}{$b}{'creation'}<=>$snaps{'source'}{$a}{'creation'} } keys %{ $snaps{'source'} }) { # return on first snap found - it's the newest - if (!$quiet) { print "NEWEST SNAPSHOT: $snap\n"; } + writelog('INFO', "NEWEST SNAPSHOT: $snap"); return $snap; } # must not have had any snapshots on source - looks like we'd better create one! @@ -1221,7 +1222,7 @@ sub getnewestsnapshot { # fixme: we need to output WHAT the current dataset IS if we encounter this WARN condition. # we also probably need an argument to mute this WARN, for people who deliberately exclude # datasets from recursive replication this way. - warn "WARN: --no-sync-snap is set, and getnewestsnapshot() could not find any snapshots on source for current dataset. Continuing.\n"; + writelog('WARN', "--no-sync-snap is set, and getnewestsnapshot() could not find any snapshots on source for current dataset. Continuing."); if ($exitcode < 2) { $exitcode = 2; } } return 0; @@ -1340,13 +1341,13 @@ sub pruneoldsyncsnaps { $prunecmd .= "$mysudocmd $zfscmd destroy $fsescaped\@$snap; "; if ($counter > $maxsnapspercmd) { $prunecmd =~ s/\; $//; - if ($debug) { print "DEBUG: pruning up to $maxsnapspercmd obsolete sync snapshots...\n"; } - if ($debug) { print "DEBUG: $rhost $prunecmd\n"; } + writelog('DEBUG', "pruning up to $maxsnapspercmd obsolete sync snapshots..."); + writelog('DEBUG', "$rhost $prunecmd"); if ($rhost ne '') { $prunecmd = escapeshellparam($prunecmd); } system("$rhost $prunecmd") == 0 - or warn "WARNING: $rhost $prunecmd failed: $?"; + or writelog('WARN', "$rhost $prunecmd failed: $?"); $prunecmd = ''; $counter = 0; } @@ -1355,13 +1356,13 @@ sub pruneoldsyncsnaps { # the loop, commit 'em now if ($counter) { $prunecmd =~ s/\; $//; - if ($debug) { print "DEBUG: pruning up to $maxsnapspercmd obsolete sync snapshots...\n"; } - if ($debug) { print "DEBUG: $rhost $prunecmd\n"; } + writelog('DEBUG', "pruning up to $maxsnapspercmd obsolete sync snapshots..."); + writelog('DEBUG', "$rhost $prunecmd"); if ($rhost ne '') { $prunecmd = escapeshellparam($prunecmd); } system("$rhost $prunecmd") == 0 - or warn "WARNING: $rhost $prunecmd failed: $?"; + or writelog('WARN', "$rhost $prunecmd failed: $?"); } return; } @@ -1393,9 +1394,9 @@ sub newsyncsnap { my %date = getdate(); my $snapname = "syncoid\_$identifier$hostid\_$date{'stamp'}"; my $snapcmd = "$rhost $mysudocmd $zfscmd snapshot $fsescaped\@$snapname\n"; - if ($debug) { print "DEBUG: creating sync snapshot using \"$snapcmd\"...\n"; } + writelog('DEBUG', "creating sync snapshot using \"$snapcmd\"..."); system($snapcmd) == 0 or do { - warn "CRITICAL ERROR: $snapcmd failed: $?"; + writelog('CRITICAL', "$snapcmd failed: $?"); if ($exitcode < 2) { $exitcode = 2; } return 0; }; @@ -1414,7 +1415,7 @@ sub targetexists { my $mysudocmd; if ($isroot) { $mysudocmd = ''; } else { $mysudocmd = $sudocmd; } my $checktargetcmd = "$rhost $mysudocmd $zfscmd get -H name $fsescaped"; - if ($debug) { print "DEBUG: checking to see if target filesystem exists using \"$checktargetcmd 2>&1 |\"...\n"; } + writelog('DEBUG', "checking to see if target filesystem exists using \"$checktargetcmd 2>&1 |\"..."); open FH, "$checktargetcmd 2>&1 |"; my $targetexists = ; close FH; @@ -1451,7 
+1452,7 @@ sub getssh { }; $rhost = $fs; if ($exit != 0) { - warn "Unable to enumerate pools (is zfs available?)"; + writelog('WARN', "Unable to enumerate pools (is zfs available?)"); } else { foreach (split(/\n/,$pools)) { if ($_ eq $pool) { @@ -1478,7 +1479,7 @@ sub getssh { system("$sshcmd -S $socket $rhost echo -n") == 0 or do { my $code = $? >> 8; - warn "CRITICAL ERROR: ssh connection echo test failed for $rhost with exit code $code"; + writelog('CRITICAL', "ssh connection echo test failed for $rhost with exit code $code"); exit(2); }; @@ -1487,14 +1488,13 @@ sub getssh { my $localuid = $<; if ($localuid == 0 || $args{'no-privilege-elevation'}) { $isroot = 1; } else { $isroot = 0; } } - # if ($isroot) { print "this user is root.\n"; } else { print "this user is not root.\n"; } return ($rhost,$fs,$isroot); } sub dumphash() { my $hash = shift; $Data::Dumper::Sortkeys = 1; - print Dumper($hash); + writelog('INFO', Dumper($hash)); } sub getsnaps() { @@ -1512,7 +1512,7 @@ sub getsnaps() { my $getsnapcmd = "$rhost $mysudocmd $zfscmd get -Hpd 1 -t snapshot guid,creation $fsescaped"; if ($debug) { $getsnapcmd = "$getsnapcmd |"; - print "DEBUG: getting list of snapshots on $fs using $getsnapcmd...\n"; + writelog('DEBUG', "getting list of snapshots on $fs using $getsnapcmd..."); } else { $getsnapcmd = "$getsnapcmd 2>/dev/null |"; } @@ -1586,8 +1586,8 @@ sub getsnapsfallback() { } my $getsnapcmd = "$rhost $mysudocmd $zfscmd get -Hpd 1 type,guid,creation $fsescaped |"; - warn "snapshot listing failed, trying fallback command"; - if ($debug) { print "DEBUG: FALLBACK, getting list of snapshots on $fs using $getsnapcmd...\n"; } + writelog('WARN', "snapshot listing failed, trying fallback command"); + writelog('DEBUG', "FALLBACK, getting list of snapshots on $fs using $getsnapcmd..."); open FH, $getsnapcmd; my @rawsnaps = ; close FH or die "CRITICAL ERROR: snapshots couldn't be listed for $fs (exit code $?)"; @@ -1669,7 +1669,7 @@ sub getbookmarks() { my $error = 0; my $getbookmarkcmd = "$rhost $mysudocmd $zfscmd get -Hpd 1 -t bookmark guid,creation $fsescaped 2>&1 |"; - if ($debug) { print "DEBUG: getting list of bookmarks on $fs using $getbookmarkcmd...\n"; } + writelog('DEBUG', "getting list of bookmarks on $fs using $getbookmarkcmd..."); open FH, $getbookmarkcmd; my @rawbookmarks = ; close FH or $error = 1; @@ -1750,7 +1750,7 @@ sub getsendsize { $sendoptions = getoptionsline(\@sendoptions, ('D','L','R','c','e','h','p','w')); } my $getsendsizecmd = "$sourcessh $mysudocmd $zfscmd send $sendoptions -nvP $snaps"; - if ($debug) { print "DEBUG: getting estimated transfer size from source $sourcehost using \"$getsendsizecmd 2>&1 |\"...\n"; } + writelog('DEBUG', "getting estimated transfer size from source $sourcehost using \"$getsendsizecmd 2>&1 |\"..."); open FH, "$getsendsizecmd 2>&1 |"; my @rawsize = ; @@ -1777,7 +1777,7 @@ sub getsendsize { # to avoid confusion with a zero size pv, give sendsize # a minimum 4K value - or if empty, make sure it reads UNKNOWN - if ($debug) { print "DEBUG: sendsize = $sendsize\n"; } + writelog('DEBUG', "sendsize = $sendsize"); if ($sendsize eq '' || $exit != 0) { $sendsize = '0'; } elsif ($sendsize < 4096) { @@ -1836,9 +1836,7 @@ sub getreceivetoken() { return $token; } - if ($debug) { - print "DEBUG: no receive token found \n"; - } + writelog('DEBUG', "no receive token found"); return } @@ -1905,8 +1903,7 @@ sub getoptionsline { return $line; } -sub resetreceivestate { - my ($rhost,$fs,$isroot) = @_; +sub resetreceivestate { my ($rhost,$fs,$isroot) = @_; my 
$fsescaped = escapeshellparam($fs); @@ -1916,15 +1913,37 @@ sub resetreceivestate { $fsescaped = escapeshellparam($fsescaped); } - if ($debug) { print "DEBUG: reset partial receive state of $fs...\n"; } + writelog('DEBUG', "reset partial receive state of $fs..."); my $mysudocmd; if ($isroot) { $mysudocmd = ''; } else { $mysudocmd = $sudocmd; } my $resetcmd = "$rhost $mysudocmd $zfscmd receive -A $fsescaped"; - if ($debug) { print "$resetcmd\n"; } + writelog('DEBUG', "$resetcmd"); system("$resetcmd") == 0 or die "CRITICAL ERROR: $resetcmd failed: $?"; } +# $loglevel can be one of: +# - CRITICAL +# - WARN +# - INFO +# - DEBUG +sub writelog { + my ($loglevel, $msg) = @_; + + my $header; + chomp($msg); + + if ($loglevel eq 'CRITICAL') { + warn("CRITICAL ERROR: $msg\n"); + } elsif ($loglevel eq 'WARN') { + if (!$quiet) { warn("WARNING: $msg\n"); } + } elsif ($loglevel eq 'INFO') { + if (!$quiet) { print("INFO: $msg\n"); } + } elsif ($loglevel eq 'DEBUG') { + if ($debug) { print("DEBUG: $msg\n"); } + } +} + __END__ =head1 NAME From 09b42d6ade2843171be810db7df3b189954b7992 Mon Sep 17 00:00:00 2001 From: Vinnie Okada Date: Sun, 28 Nov 2021 20:50:46 -0700 Subject: [PATCH 038/130] Refactor system calls Build the zfs send and receive commands in a new subroutine, and implement other subroutines that can be called instead of building a zfs command and running it with system(); --- syncoid | 341 ++++++++++++++++++++++++++++---------------------------- 1 file changed, 172 insertions(+), 169 deletions(-) diff --git a/syncoid b/syncoid index fa6feff..1ceed0c 100755 --- a/syncoid +++ b/syncoid @@ -284,12 +284,6 @@ sub syncdataset { my $stdout; my $exit; - my $sourcefsescaped = escapeshellparam($sourcefs); - my $targetfsescaped = escapeshellparam($targetfs); - - # keep forcedrecv as a variable to allow us to disable it with an optional argument later if necessary - my $forcedrecv = "-F"; - writelog('DEBUG', "syncing source $sourcefs to target $targetfs."); my ($sync, $error) = getzfsvalue($sourcehost,$sourcefs,$sourceisroot,'syncoid:sync'); @@ -334,13 +328,9 @@ sub syncdataset { # does the target filesystem exist yet? my $targetexists = targetexists($targethost,$targetfs,$targetisroot); - my $receiveextraargs = ""; my $receivetoken; if ($resume) { - # save state of interrupted receive stream - $receiveextraargs = "-s"; - if ($targetexists) { # check remote dataset for receive resume token (interrupted receive) $receivetoken = getreceivetoken($targethost,$targetfs,$targetisroot); @@ -398,9 +388,6 @@ sub syncdataset { # with ZFS on Linux (possibly OpenZFS in general) when setting/unsetting readonly. #my $originaltargetreadonly; - my $sendoptions = getoptionsline(\@sendoptions, ('D','L','P','R','c','e','h','p','v','w')); - my $recvoptions = getoptionsline(\@recvoptions, ('h','o','x','u','v')); - # sync 'em up. if (! 
$targetexists) { # do an initial sync from the oldest source snapshot @@ -430,61 +417,30 @@ sub syncdataset { $oldestsnap = $newsyncsnap; } } - my $oldestsnapescaped = escapeshellparam($oldestsnap); - if (defined $args{'preserve-recordsize'}) { - my $type = getzfsvalue($sourcehost,$sourcefs,$sourceisroot,'type'); - if ($type eq "filesystem") { - my $recordsize = getzfsvalue($sourcehost,$sourcefs,$sourceisroot,'recordsize'); - $recvoptions .= "-o recordsize=$recordsize" - } - } - - my $sendcmd = "$sourcesudocmd $zfscmd send $sendoptions $sourcefsescaped\@$oldestsnapescaped"; - my $recvcmd = "$targetsudocmd $zfscmd receive $recvoptions $receiveextraargs $forcedrecv $targetfsescaped"; - - my $pvsize; - if (defined $origin) { - my $originescaped = escapeshellparam($origin); - $sendcmd = "$sourcesudocmd $zfscmd send $sendoptions -i $originescaped $sourcefsescaped\@$oldestsnapescaped"; - my $streamargBackup = $args{'streamarg'}; - $args{'streamarg'} = "-i"; - $pvsize = getsendsize($sourcehost,$origin,"$sourcefs\@$oldestsnap",$sourceisroot); - $args{'streamarg'} = $streamargBackup; - } else { - $pvsize = getsendsize($sourcehost,"$sourcefs\@$oldestsnap",0,$sourceisroot); - } - - my $disp_pvsize = readablebytes($pvsize); - if ($pvsize == 0) { $disp_pvsize = 'UNKNOWN'; } - my $synccmd = buildsynccmd($sendcmd,$recvcmd,$pvsize,$sourceisroot,$targetisroot); + my $ret; if (defined $origin) { writelog('INFO', "Clone is recreated on target $targetfs based on $origin"); - } - if (!defined ($args{'no-stream'}) ) { - writelog('INFO', "Sending oldest full snapshot $sourcefs\@$oldestsnap (~ $disp_pvsize) to new target filesystem:"); - } else { - writelog('INFO', "--no-stream selected; sending newest full snapshot $sourcefs\@$oldestsnap (~ $disp_pvsize) to new target filesystem:"); - } - writelog('DEBUG', "$synccmd"); - - # make sure target is (still) not currently in receive. 
- if (iszfsbusy($targethost,$targetfs,$targetisroot)) { - writelog('WARN', "Cannot sync now: $targetfs is already target of a zfs receive process."); - if ($exitcode < 1) { $exitcode = 1; } - return 0; - } - system($synccmd) == 0 or do { - if (defined $origin) { + ($ret, $stdout) = syncclone($sourcehost, $sourcefs, $origin, $targethost, $targetfs, $oldestsnap); + if ($ret) { writelog('INFO', "clone creation failed, trying ordinary replication as fallback"); syncdataset($sourcehost, $sourcefs, $targethost, $targetfs, undef, 1); return 0; } + } else { + if (!defined ($args{'no-stream'}) ) { + writelog('INFO', "Sending oldest full snapshot $sourcefs\@$oldestsnap to new target filesystem:"); + } else { + writelog('INFO', "--no-stream selected; sending newest full snapshot $sourcefs\@$oldestsnap to new target filesystem:"); + } - writelog('CRITICAL', "$synccmd failed: $?"); + ($ret, $stdout) = syncfull($sourcehost, $sourcefs, $targethost, $targetfs, $oldestsnap); + } + + if ($ret) { if ($exitcode < 2) { $exitcode = 2; } return 0; - }; + } # now do an -I to the new sync snapshot, assuming there were any snapshots # other than the new sync snapshot to begin with, of course - and that we @@ -498,33 +454,15 @@ sub syncdataset { # $originaltargetreadonly = getzfsvalue($targethost,$targetfs,$targetisroot,'readonly'); # setzfsvalue($targethost,$targetfs,$targetisroot,'readonly','on'); - $sendcmd = "$sourcesudocmd $zfscmd send $sendoptions $args{'streamarg'} $sourcefsescaped\@$oldestsnapescaped $sourcefsescaped\@$newsyncsnapescaped"; - $pvsize = getsendsize($sourcehost,"$sourcefs\@$oldestsnap","$sourcefs\@$newsyncsnap",$sourceisroot); - $disp_pvsize = readablebytes($pvsize); - if ($pvsize == 0) { $disp_pvsize = "UNKNOWN"; } - $synccmd = buildsynccmd($sendcmd,$recvcmd,$pvsize,$sourceisroot,$targetisroot); + writelog('INFO', "Updating new target filesystem with incremental $sourcefs\@$oldestsnap ... $newsyncsnap:"); - # make sure target is (still) not currently in receive. - if (iszfsbusy($targethost,$targetfs,$targetisroot)) { - writelog('WARN', "Cannot sync now: $targetfs is already target of a zfs receive process."); + (my $ret, $stdout) = syncincremental($sourcehost, $sourcefs, $targethost, $targetfs, $oldestsnap, $newsyncsnap, 0); + + if ($ret != 0) { if ($exitcode < 1) { $exitcode = 1; } return 0; } - writelog('INFO', "Updating new target filesystem with incremental $sourcefs\@$oldestsnap ... $newsyncsnap (~ $disp_pvsize):"); - writelog('DEBUG', "$synccmd"); - - if ($oldestsnap ne $newsyncsnap) { - my $ret = system($synccmd); - if ($ret != 0) { - writelog('CRITICAL', "$synccmd failed: $?"); - if ($exitcode < 1) { $exitcode = 1; } - return 0; - } - } else { - writelog('INFO', "no incremental sync needed; $oldestsnap is already the newest available snapshot."); - } - # restore original readonly value to target after sync complete # dyking this functionality out for the time being due to buggy mount/unmount behavior # with ZFS on Linux (possibly OpenZFS in general) when setting/unsetting readonly. 
@@ -535,29 +473,7 @@ sub syncdataset {
     # and because this will only resume the receive to the next
     # snapshot, do a normal sync after that
     if (defined($receivetoken)) {
-        $sendoptions = getoptionsline(\@sendoptions, ('P','e','v','w'));
-        my $sendcmd = "$sourcesudocmd $zfscmd send $sendoptions -t $receivetoken";
-        my $recvcmd = "$targetsudocmd $zfscmd receive $recvoptions $receiveextraargs $forcedrecv $targetfsescaped 2>&1";
-        my $pvsize = getsendsize($sourcehost,"","",$sourceisroot,$receivetoken);
-        my $disp_pvsize = readablebytes($pvsize);
-        if ($pvsize == 0) { $disp_pvsize = "UNKNOWN"; }
-        my $synccmd = buildsynccmd($sendcmd,$recvcmd,$pvsize,$sourceisroot,$targetisroot);
-
-        writelog('INFO', "Resuming interrupted zfs send/receive from $sourcefs to $targetfs (~ $disp_pvsize remaining):");
-        writelog('DEBUG', "$synccmd");
-
-        if ($pvsize == 0) {
-            # we need to capture the error of zfs send, this will render pv useless but in this case
-            # it doesn't matter because we don't know the estimated send size (probably because
-            # the initial snapshot used for resumed send doesn't exist anymore)
-            ($stdout, $exit) = tee_stderr {
-                system("$synccmd")
-            };
-        } else {
-            ($stdout, $exit) = tee_stdout {
-                system("$synccmd")
-            };
-        }
+        ($exit, $stdout) = syncresume($sourcehost, $sourcefs, $targethost, $targetfs, $receivetoken);

         $exit == 0 or do {
             if (
@@ -569,7 +485,6 @@ sub syncdataset {
                 # do a normal sync cycle
                 return syncdataset($sourcehost, $sourcefs, $targethost, $targetfs, $origin);
             } else {
-                writelog('CRITICAL', "$synccmd failed: $?");
                 if ($exitcode < 2) { $exitcode = 2; }
                 return 0;
             }
@@ -699,35 +614,19 @@ sub syncdataset {
         }
     }

-    # bookmark stream size can't be determined
-    my $pvsize = 0;
-    my $disp_pvsize = "UNKNOWN";
-
-    $sendoptions = getoptionsline(\@sendoptions, ('L','c','e','w'));
     if ($nextsnapshot) {
-        my $nextsnapshotescaped = escapeshellparam($nextsnapshot);
-        my $sendcmd = "$sourcesudocmd $zfscmd send $sendoptions -i $sourcefsescaped#$bookmarkescaped $sourcefsescaped\@$nextsnapshotescaped";
-        my $recvcmd = "$targetsudocmd $zfscmd receive $recvoptions $receiveextraargs $forcedrecv $targetfsescaped 2>&1";
-        my $synccmd = buildsynccmd($sendcmd,$recvcmd,$pvsize,$sourceisroot,$targetisroot);
-
-        writelog('INFO', "Sending incremental $sourcefs#$bookmarkescaped ... $nextsnapshot (~ $disp_pvsize):");
$nextsnapshot (~ $disp_pvsize):"); - writelog('DEBUG', "$synccmd"); - - ($stdout, $exit) = tee_stdout { - system("$synccmd") - }; + ($exit, $stdout) = syncbookmark($sourcehost, $sourcefs, $targethost, $targetfs, $bookmark, $nextsnapshot); $exit == 0 or do { if (!$resume && $stdout =~ /\Qcontains partially-complete state\E/) { writelog('WARN', "resetting partially receive state"); resetreceivestate($targethost,$targetfs,$targetisroot); - system("$synccmd") == 0 or do { - writelog('CRITICAL', "$synccmd failed: $?"); + (my $ret) = syncbookmark($sourcehost, $sourcefs, $targethost, $targetfs, $bookmark, $nextsnapshot); + $ret == 0 or do { if ($exitcode < 2) { $exitcode = 2; } return 0; } } else { - writelog('CRITICAL', "$synccmd failed: $?"); if ($exitcode < 2) { $exitcode = 2; } return 0; } @@ -736,28 +635,18 @@ sub syncdataset { $matchingsnap = $nextsnapshot; $matchingsnapescaped = escapeshellparam($matchingsnap); } else { - my $sendcmd = "$sourcesudocmd $zfscmd send $sendoptions -i $sourcefsescaped#$bookmarkescaped $sourcefsescaped\@$newsyncsnapescaped"; - my $recvcmd = "$targetsudocmd $zfscmd receive $recvoptions $receiveextraargs $forcedrecv $targetfsescaped 2>&1"; - my $synccmd = buildsynccmd($sendcmd,$recvcmd,$pvsize,$sourceisroot,$targetisroot); - - writelog('INFO', "Sending incremental $sourcefs#$bookmarkescaped ... $newsyncsnap (~ $disp_pvsize):"); - writelog('DEBUG', "$synccmd"); - - ($stdout, $exit) = tee_stdout { - system("$synccmd") - }; + ($exit, $stdout) = syncbookmark($sourcehost, $sourcefs, $targethost, $targetfs, $bookmark, $newsyncsnap); $exit == 0 or do { if (!$resume && $stdout =~ /\Qcontains partially-complete state\E/) { writelog('WARN', "resetting partially receive state"); resetreceivestate($targethost,$targetfs,$targetisroot); - system("$synccmd") == 0 or do { - writelog('CRITICAL', "$synccmd failed: $?"); + (my $ret) = syncbookmark($sourcehost, $sourcefs, $targethost, $targetfs, $bookmark, $newsyncsnap); + $ret == 0 or do { if ($exitcode < 2) { $exitcode = 2; } return 0; } } else { - writelog('CRITICAL', "$synccmd failed: $?"); if ($exitcode < 2) { $exitcode = 2; } return 0; } @@ -773,33 +662,19 @@ sub syncdataset { return 0; } - $sendoptions = getoptionsline(\@sendoptions, ('D','L','P','R','c','e','h','p','v','w')); - my $sendcmd = "$sourcesudocmd $zfscmd send $sendoptions $args{'streamarg'} $sourcefsescaped\@$matchingsnapescaped $sourcefsescaped\@$newsyncsnapescaped"; - my $recvcmd = "$targetsudocmd $zfscmd receive $recvoptions $receiveextraargs $forcedrecv $targetfsescaped 2>&1"; - my $pvsize = getsendsize($sourcehost,"$sourcefs\@$matchingsnap","$sourcefs\@$newsyncsnap",$sourceisroot); - my $disp_pvsize = readablebytes($pvsize); - if ($pvsize == 0) { $disp_pvsize = "UNKNOWN"; } - my $synccmd = buildsynccmd($sendcmd,$recvcmd,$pvsize,$sourceisroot,$targetisroot); - - writelog('INFO', "Sending incremental $sourcefs\@$matchingsnap ... 
$newsyncsnap (~ $disp_pvsize):"); - writelog('DEBUG', "$synccmd"); - - ($stdout, $exit) = tee_stdout { - system("$synccmd") - }; + ($exit, $stdout) = syncincremental($sourcehost, $sourcefs, $targethost, $targetfs, $matchingsnap, $newsyncsnap, defined($args{'no-stream'})); $exit == 0 or do { # FreeBSD reports "dataset is busy" instead of "contains partially-complete state" if (!$resume && ($stdout =~ /\Qcontains partially-complete state\E/ || $stdout =~ /\Qdataset is busy\E/)) { writelog('WARN', "resetting partially receive state"); resetreceivestate($targethost,$targetfs,$targetisroot); - system("$synccmd") == 0 or do { - writelog('CRITICAL', "$synccmd failed: $?"); + (my $ret) = syncincremental($sourcehost, $sourcefs, $targethost, $targetfs, $matchingsnap, $newsyncsnap, defined($args{'no-stream'})); + $ret == 0 or do { if ($exitcode < 2) { $exitcode = 2; } return 0; } } else { - writelog('CRITICAL', "$synccmd failed: $?"); if ($exitcode < 2) { $exitcode = 2; } return 0; } @@ -815,28 +690,16 @@ sub syncdataset { if (defined $args{'no-sync-snap'}) { if (defined $args{'create-bookmark'}) { - my $bookmarkcmd; - if ($sourcehost ne '') { - $bookmarkcmd = "$sshcmd $sourcehost " . escapeshellparam("$sourcesudocmd $zfscmd bookmark $sourcefsescaped\@$newsyncsnapescaped $sourcefsescaped\#$newsyncsnapescaped"); - } else { - $bookmarkcmd = "$sourcesudocmd $zfscmd bookmark $sourcefsescaped\@$newsyncsnapescaped $sourcefsescaped\#$newsyncsnapescaped"; - } - writelog('DEBUG', "$bookmarkcmd"); - system($bookmarkcmd) == 0 or do { + my $ret = createbookmark($sourcehost, $sourcefs, $newsyncsnap, $newsyncsnap); + $ret == 0 or do { # fallback: assume nameing conflict and try again with guid based suffix my $guid = $snaps{'source'}{$newsyncsnap}{'guid'}; $guid = substr($guid, 0, 6); writelog('INFO', "bookmark creation failed, retrying with guid based suffix ($guid)..."); - if ($sourcehost ne '') { - $bookmarkcmd = "$sshcmd $sourcehost " . escapeshellparam("$sourcesudocmd $zfscmd bookmark $sourcefsescaped\@$newsyncsnapescaped $sourcefsescaped\#$newsyncsnapescaped$guid"); - } else { - $bookmarkcmd = "$sourcesudocmd $zfscmd bookmark $sourcefsescaped\@$newsyncsnapescaped $sourcefsescaped\#$newsyncsnapescaped$guid"; - } - writelog('DEBUG', "$bookmarkcmd"); - system($bookmarkcmd) == 0 or do { - writelog('CRITICAL', "$bookmarkcmd failed: $?"); + my $ret = createbookmark($sourcehost, $sourcefs, $newsyncsnap, "$newsyncsnap$guid"); + $ret == 0 or do { if ($exitcode < 2) { $exitcode = 2; } return 0; } @@ -852,6 +715,146 @@ sub syncdataset { } # end syncdataset() +# Return codes: +# 0 - ZFS send/receive completed without errors +# 1 - ZFS target is currently in receive +# 2 - Critical error encountered when running the ZFS send/receive command +sub runsynccmd { + my ($sourcehost, $sourcefs, $sendsource, $targethost, $targetfs, $pvsize) = @_; + + my $sourcefsescaped = escapeshellparam($sourcefs); + my $targetfsescaped = escapeshellparam($targetfs); + + my $disp_pvsize = $pvsize == 0 ? 
+
+    my $sendoptions;
+    if ($sendsource =~ / -t /) {
+        writelog('INFO', "Resuming interrupted zfs send/receive from $sourcefs to $targetfs (~ $disp_pvsize remaining):");
+        $sendoptions = getoptionsline(\@sendoptions, ('P','e','v','w'));
+    } elsif ($sendsource =~ /#/) {
+        $sendoptions = getoptionsline(\@sendoptions, ('L','c','e','w'));
+    } else {
+        $sendoptions = getoptionsline(\@sendoptions, ('D','L','P','R','c','e','h','p','v','w'));
+    }
+
+    my $recvoptions = getoptionsline(\@recvoptions, ('h','o','x','u','v'));
+
+    # save state of interrupted receive stream
+    if ($resume) { $recvoptions .= ' -s'; }
+    # if no rollbacks are allowed, disable forced receive
+    if (!defined $args{'no-rollback'}) { $recvoptions .= ' -F'; }
+
+    if (defined $args{'preserve-recordsize'}) {
+        my $type = getzfsvalue($sourcehost,$sourcefs,$sourceisroot,'type');
+        if ($type eq "filesystem") {
+            my $recordsize = getzfsvalue($sourcehost,$sourcefs,$sourceisroot,'recordsize');
+            $recvoptions .= " -o recordsize=$recordsize";
+        }
+    }
+
+    my $sendcmd = "$sourcesudocmd $zfscmd send $sendoptions $sendsource";
+    my $recvcmd = "$targetsudocmd $zfscmd receive $recvoptions $targetfsescaped";
+
+    my $synccmd = buildsynccmd($sendcmd,$recvcmd,$pvsize,$sourceisroot,$targetisroot);
+    writelog('INFO', "Sync size: ~$disp_pvsize");
+    writelog('DEBUG', "$synccmd");
+
+    # make sure target is (still) not currently in receive.
+    if (iszfsbusy($targethost,$targetfs,$targetisroot)) {
+        writelog('WARN', "Cannot sync now: $targetfs is already target of a zfs receive process.");
+        return (1, '');
+    }
+
+    my $stdout;
+    my $ret;
+    if ($pvsize == 0) {
+        ($stdout, $ret) = tee_stderr {
+            system("$synccmd");
+        };
+    } else {
+        ($stdout, $ret) = tee_stdout {
+            system("$synccmd");
+        };
+    }
+
+    if ($ret != 0) {
+        writelog('CRITICAL', "$synccmd failed: $?");
+        return (2, $stdout);
+    } else {
+        return (0, $stdout);
+    }
+} # end runsynccmd()
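+
+# A minimal sketch of the calling convention shared by the sync* wrappers
+# below (illustrative only; the handling shown for the non-zero codes is a
+# hypothetical caller, not part of this refactor):
+#
+#   my ($ret, $stdout) = syncfull($sourcehost, $sourcefs, $targethost, $targetfs, $snapname);
+#   if ($ret == 1) { ... }  # target was busy in a zfs receive; a caller may retry later
+#   if ($ret == 2) { ... }  # send/receive failed; $stdout holds the captured output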
+
+sub syncfull {
+    my ($sourcehost, $sourcefs, $targethost, $targetfs, $snapname) = @_;
+
+    my $sourcefsescaped = escapeshellparam($sourcefs);
+    my $snapescaped = escapeshellparam($snapname);
+    my $sendsource = "$sourcefsescaped\@$snapescaped";
+    my $pvsize = getsendsize($sourcehost,"$sourcefs\@$snapname",0,$sourceisroot);
+
+    return runsynccmd($sourcehost, $sourcefs, $sendsource, $targethost, $targetfs, $pvsize);
+} # end syncfull()
+
+sub syncincremental {
+    my ($sourcehost, $sourcefs, $targethost, $targetfs, $fromsnap, $tosnap, $skipintermediate) = @_;
+
+    my $streamarg = ($skipintermediate == 1 ? '-i' : '-I');
+    my $sourcefsescaped = escapeshellparam($sourcefs);
+    my $fromsnapescaped = escapeshellparam($fromsnap);
+    my $tosnapescaped = escapeshellparam($tosnap);
+    my $sendsource = "$streamarg $sourcefsescaped\@$fromsnapescaped $sourcefsescaped\@$tosnapescaped";
+    my $pvsize = getsendsize($sourcehost,"$sourcefs\@$fromsnap","$sourcefs\@$tosnap",$sourceisroot);
+
+    return runsynccmd($sourcehost, $sourcefs, $sendsource, $targethost, $targetfs, $pvsize);
+} # end syncincremental()
+
+sub syncclone {
+    my ($sourcehost, $sourcefs, $origin, $targethost, $targetfs, $tosnap) = @_;
+
+    my $sourcefsescaped = escapeshellparam($sourcefs);
+    my $originescaped = escapeshellparam($origin);
+    my $tosnapescaped = escapeshellparam($tosnap);
+    my $sendsource = "-i $originescaped $sourcefsescaped\@$tosnapescaped";
+    my $pvsize = getsendsize($sourcehost,$origin,"$sourcefs\@$tosnap",$sourceisroot);
+
+    return runsynccmd($sourcehost, $sourcefs, $sendsource, $targethost, $targetfs, $pvsize);
+} # end syncclone()
+
+sub syncresume {
+    my ($sourcehost, $sourcefs, $targethost, $targetfs, $receivetoken) = @_;
+
+    my $sendsource = "-t $receivetoken";
+    my $pvsize = getsendsize($sourcehost,"","",$sourceisroot,$receivetoken);
+
+    return runsynccmd($sourcehost, $sourcefs, $sendsource, $targethost, $targetfs, $pvsize);
+} # end syncresume()
+
+sub syncbookmark {
+    my ($sourcehost, $sourcefs, $targethost, $targetfs, $bookmark, $tosnap) = @_;
+
+    my $sourcefsescaped = escapeshellparam($sourcefs);
+    my $bookmarkescaped = escapeshellparam($bookmark);
+    my $tosnapescaped = escapeshellparam($tosnap);
+    my $sendsource = "-i $sourcefsescaped#$bookmarkescaped $sourcefsescaped\@$tosnapescaped";
+
+    return runsynccmd($sourcehost, $sourcefs, $sendsource, $targethost, $targetfs, 0);
+} # end syncbookmark()
+
+sub createbookmark {
+    my ($sourcehost, $sourcefs, $snapname, $bookmark) = @_;
+
+    my $sourcefsescaped = escapeshellparam($sourcefs);
+    my $bookmarkescaped = escapeshellparam($bookmark);
+    my $snapnameescaped = escapeshellparam($snapname);
+    my $cmd = "$sourcesudocmd $zfscmd bookmark $sourcefsescaped\@$snapnameescaped $sourcefsescaped\#$bookmarkescaped";
+    if ($sourcehost ne '') {
+        $cmd = "$sshcmd $sourcehost " . escapeshellparam($cmd);
+    }
+
+    writelog('DEBUG', "$cmd");
+    return system($cmd);
+} # end createbookmark()
+
 sub compressargset {
     my ($value) = @_;
     my $DEFAULT_COMPRESSION = 'lzo';

From 603c286b50128a3ceb061f0e2c03310a00220977 Mon Sep 17 00:00:00 2001
From: Vinnie Okada
Date: Sat, 20 Nov 2021 13:09:10 -0700
Subject: [PATCH 039/130] Don't iterate over snaps twice

Process snapshots in one pass rather than looping separately for both
guid and create time.
---
 syncoid | 5 +----
 1 file changed, 1 insertion(+), 4 deletions(-)

diff --git a/syncoid b/syncoid
index 1ceed0c..d0afe10 100755
--- a/syncoid
+++ b/syncoid
@@ -1541,11 +1541,8 @@ sub getsnaps() {
             $snap =~ s/^.*\@(.*)\tguid.*$/$1/;
             $snaps{$type}{$snap}{'guid'}=$guid;
         }
-    }
-
-    foreach my $line (@rawsnaps) {
         # only import snap creations from the specified filesystem
-        if ($line =~ /\Q$fs\E\@.*creation/) {
+        elsif ($line =~ /\Q$fs\E\@.*creation/) {
             chomp $line;
             my $creation = $line;
             $creation =~ s/^.*\tcreation\t*(\d*).*/$1/;

From 9a067729a9b1c71f3656129f177c371bd5c6b43d Mon Sep 17 00:00:00 2001
From: Vinnie Okada
Date: Sat, 20 Nov 2021 20:16:21 -0700
Subject: [PATCH 040/130] Implement include-snaps and exclude-snaps

Add --include-snaps and --exclude-snaps options to filter the snapshots
that syncoid uses.
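
A usage sketch (the dataset names and patterns here are hypothetical, not part
of this patch): to replicate only daily and monthly snapshots while skipping
hourlies, either filter direction works:

    syncoid --no-sync-snap --exclude-snaps='hourly' pool/data backup/data
    syncoid --no-sync-snap --include-snaps='daily|monthly' pool/data backup/data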
---
 syncoid | 96 +++++++++++++++++++++++++++++++++++++++++++++++++++++++--
 1 file changed, 93 insertions(+), 3 deletions(-)

diff --git a/syncoid b/syncoid
index d0afe10..8f8db31 100755
--- a/syncoid
+++ b/syncoid
@@ -25,8 +25,8 @@ GetOptions(\%args, "no-command-checks", "monitor-version", "compress=s", "dumpsn
     "source-bwlimit=s", "target-bwlimit=s", "sshkey=s", "sshport=i", "sshcipher|c=s", "sshoption|o=s@",
     "debug", "quiet", "no-stream", "no-sync-snap", "no-resume", "exclude=s@",
     "skip-parent", "identifier=s", "no-clone-handling", "no-privilege-elevation", "force-delete", "create-bookmark",
-    "pv-options=s" => \$pvoptions, "keep-sync-snap", "preserve-recordsize", "mbuffer-size=s" => \$mbuffer_size)
-    or pod2usage(2);
+    "pv-options=s" => \$pvoptions, "keep-sync-snap", "preserve-recordsize", "mbuffer-size=s" => \$mbuffer_size,
+    "include-snaps=s@", "exclude-snaps=s@") or pod2usage(2);

 my %compressargs = %{compressargset($args{'compress'} || 'default')}; # Can't be done with GetOptions arg, as default still needs to be set

@@ -367,6 +367,16 @@ sub syncdataset {
             # we already whined about the error
             return 0;
         }
+        # Don't send the sync snap if it's filtered out by --exclude-snaps or
+        # --include-snaps
+        if (!snapisincluded($newsyncsnap)) {
+            $newsyncsnap = getnewestsnapshot($sourcehost,$sourcefs,$sourceisroot);
+            if ($newsyncsnap eq 0) {
+                writelog('WARN', "no snapshots on source $sourcefs match the snapshot filters; nothing to send.");
+                if ($exitcode < 1) { $exitcode = 1; }
+                return 0;
+            }
+        }
     } else {
         # we don't want sync snapshots created, so use the newest snapshot we can find.
         $newsyncsnap = getnewestsnapshot($sourcehost,$sourcefs,$sourceisroot);
@@ -798,7 +808,49 @@ sub syncfull {
 sub syncincremental {
     my ($sourcehost, $sourcefs, $targethost, $targetfs, $fromsnap, $tosnap, $skipintermediate) = @_;

-    my $streamarg = ($skipintermediate == 1 ? '-i' : '-I');
+    my $streamarg = '-I';
+
+    if ($skipintermediate) {
+        $streamarg = '-i';
+    }
+
+    # If this is an -I sync but we're filtering snaps, then we should do a series
+    # of -i syncs instead.
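+    # A worked example (hypothetical snapshot names): with source snapshots
+    # daily1, hourly1, daily2, daily3 and --exclude-snaps='hourly', an -I sync
+    # from daily1 to daily3 is simulated as the two -i syncs daily1 -> daily2
+    # and daily2 -> daily3; hourly1 is never sent.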
+    if (!$skipintermediate) {
+        if (defined($args{'exclude-snaps'}) || defined($args{'include-snaps'})) {
+            writelog('INFO', '--no-stream is omitted but snaps are filtered. Simulating -I with filtered snaps');
+
+            # Get the snap names between $fromsnap and $tosnap
+            my @intsnaps = ();
+            my $inrange = 0;
+            foreach my $testsnap (sort { $snaps{'source'}{$a}{'creation'}<=>$snaps{'source'}{$b}{'creation'} } keys %{ $snaps{'source'} }) {
+                if ($testsnap eq $fromsnap) { $inrange = 1; }
+
+                if ($inrange) { push(@intsnaps, $testsnap); }
+
+                if ($testsnap eq $tosnap) { last; }
+            }
+
+            # If we created a new sync snap, it won't be in @intsnaps yet
+            if ($intsnaps[-1] ne $tosnap) {
+                # Make sure that the sync snap isn't filtered out by --include-snaps or --exclude-snaps
+                if (snapisincluded($tosnap)) {
+                    push(@intsnaps, $tosnap);
+                }
+            }
+
+            foreach my $i (0..(scalar(@intsnaps) - 2)) {
+                my $snapa = $intsnaps[$i];
+                my $snapb = $intsnaps[$i + 1];
+                writelog('INFO', "Performing an incremental sync between '$snapa' and '$snapb'");
+                my ($iret) = syncincremental($sourcehost, $sourcefs, $targethost, $targetfs, $snapa, $snapb, 1);
+                if ($iret != 0) { return ($iret, ''); }
+            }
+
+            # Return after finishing the -i syncs so that we don't try to do another -I
+            return 0;
+        }
+    }

     my $sourcefsescaped = escapeshellparam($sourcefs);
     my $fromsnapescaped = escapeshellparam($fromsnap);
     my $tosnapescaped = escapeshellparam($tosnap);
     my $sendsource = "$streamarg $sourcefsescaped\@$fromsnapescaped $sourcefsescaped\@$tosnapescaped";
     my $pvsize = getsendsize($sourcehost,"$sourcefs\@$fromsnap","$sourcefs\@$tosnap",$sourceisroot);
@@ -1532,6 +1584,11 @@ sub getsnaps() {
     my %creationtimes=();

     foreach my $line (@rawsnaps) {
+        if ($line !~ /\Q$fs\E\@(\S*)/) { next; }
+        my $snapname = $1;
+
+        if (!snapisincluded($snapname)) { next; }
+
         # only import snap guids from the specified filesystem
         if ($line =~ /\Q$fs\E\@.*guid/) {
             chomp $line;
@@ -1944,6 +2001,37 @@ sub writelog {
     }
 }

+sub snapisincluded {
+    my ($snapname) = @_;
+
+    # Return false if the snapshot matches an exclude-snaps pattern
+    if (defined $args{'exclude-snaps'}) {
+        my $excludes = $args{'exclude-snaps'};
+        foreach (@$excludes) {
+            if ($snapname =~ /$_/) {
+                writelog('DEBUG', "excluded $snapname because of exclude pattern /$_/");
+                return 0;
+            }
+        }
+    }
+
+    # Return true if the snapshot matches an include-snaps pattern
+    if (defined $args{'include-snaps'}) {
+        my $includes = $args{'include-snaps'};
+        foreach (@$includes) {
+            if ($snapname =~ /$_/) {
+                writelog('DEBUG', "included $snapname because of include pattern /$_/");
+                return 1;
+            }
+        }
+
+        # Return false if the snapshot didn't match any inclusion patterns
+        return 0;
+    }
+
+    return 1;
+}
+
 __END__

 =head1 NAME
@@ -1976,6 +2064,8 @@ Options:
   --create-bookmark     Creates a zfs bookmark for the newest snapshot on the source after replication succeeds (only works with --no-sync-snap)
   --preserve-recordsize Preserves the recordsize on initial sends to the target
   --exclude=REGEX       Exclude specific datasets which match the given regular expression. Can be specified multiple times
+  --exclude-snaps=REGEX Exclude specific snapshots that match the given regular expression. Can be specified multiple times. If a snapshot matches both the exclude-snaps and include-snaps patterns, then it will be excluded.
+  --include-snaps=REGEX Only include snapshots that match the given regular expression. Can be specified multiple times. If a snapshot matches both the exclude-snaps and include-snaps patterns, then it will be excluded.
   --sendoptions=OPTIONS Use advanced options for zfs send (the arguments are filtered as needed), e.g. syncoid --sendoptions="Lc e" sets zfs send -L -c -e ...
   --recvoptions=OPTIONS Use advanced options for zfs receive (the arguments are filtered as needed), e.g. syncoid --recvoptions="ux recordsize o compression=lz4" sets zfs receive -u -x recordsize -o compression=lz4 ...
  --sshkey=FILE         Specifies an ssh key to use to connect

From 3a1b1b006ffbe44afdb45c77172493fabcf8235c Mon Sep 17 00:00:00 2001
From: Vinnie Okada
Date: Tue, 30 Nov 2021 19:46:41 -0700
Subject: [PATCH 041/130] Add new syncoid options to the README

Update the README with the new --include-snaps and --exclude-snaps
syncoid options.
---
 README.md | 8 ++++++++
 1 file changed, 8 insertions(+)

diff --git a/README.md b/README.md
index 21432ac..385f66f 100644
--- a/README.md
+++ b/README.md
@@ -332,6 +332,14 @@ As of 1.4.18, syncoid also automatically supports and enables resume of interrup

   The given regular expression will be matched against all datasets which would be synced by this run and excludes them. This argument can be specified multiple times.

++ --exclude-snaps=REGEX
+
+  Exclude specific snapshots that match the given regular expression. Can be specified multiple times. If a snapshot matches both the exclude-snaps and include-snaps patterns, then it will be excluded.
+
++ --include-snaps=REGEX
+
+  Only include snapshots that match the given regular expression. Can be specified multiple times. If a snapshot matches both the exclude-snaps and include-snaps patterns, then it will be excluded.
+
 + --no-resume

   This argument tells syncoid to not use resumeable zfs send/receive streams.

From 8e867c6f142bda4bcb0b7151246c2b549f2e3ab8 Mon Sep 17 00:00:00 2001
From: Vinnie Okada
Date: Mon, 29 Nov 2021 20:48:12 -0700
Subject: [PATCH 042/130] Add new syncoid tests

Test the new --include-snaps and --exclude-snaps options for syncoid.
---
 tests/syncoid/8_filter_snaps/run.sh | 142 ++++++++++++++++++++++++++++
 1 file changed, 142 insertions(+)
 create mode 100755 tests/syncoid/8_filter_snaps/run.sh

diff --git a/tests/syncoid/8_filter_snaps/run.sh b/tests/syncoid/8_filter_snaps/run.sh
new file mode 100755
index 0000000..1b91ff4
--- /dev/null
+++ b/tests/syncoid/8_filter_snaps/run.sh
@@ -0,0 +1,142 @@
+#!/bin/bash
+
+# test filtering snapshot names using --include-snaps and --exclude-snaps
+
+set -x
+set -e
+
+. ../../common/lib.sh
+
+POOL_IMAGE="/tmp/syncoid-test-8.zpool"
+MOUNT_TARGET="/tmp/syncoid-test-8.mount"
+POOL_SIZE="100M"
+POOL_NAME="syncoid-test-8"
+
+truncate -s "${POOL_SIZE}" "${POOL_IMAGE}"
+
+zpool create -m none -f "${POOL_NAME}" "${POOL_IMAGE}"
+
+#####
+# Create the source snapshots that every test case starts from.
+#####
+function setup_snaps {
+    # create intermediate snapshots
+    # sleep is needed so creation time can be used for proper sorting
+    sleep 1
+    zfs snapshot "${POOL_NAME}"/src@monthly1
+    sleep 1
+    zfs snapshot "${POOL_NAME}"/src@daily1
+    sleep 1
+    zfs snapshot "${POOL_NAME}"/src@daily2
+    sleep 1
+    zfs snapshot "${POOL_NAME}"/src@hourly1
+    sleep 1
+    zfs snapshot "${POOL_NAME}"/src@hourly2
+    sleep 1
+    zfs snapshot "${POOL_NAME}"/src@daily3
+    sleep 1
+    zfs snapshot "${POOL_NAME}"/src@hourly3
+    sleep 1
+    zfs snapshot "${POOL_NAME}"/src@hourly4
+}
+
+#####
+# Remove the destination snapshots and dataset so that each test starts with a
+# blank slate.
+#####
+function clean_snaps {
+    zfs destroy "${POOL_NAME}"/dst@%
+    zfs destroy "${POOL_NAME}"/dst
+}
+
+#####
+# Verify that the correct set of snapshots is present on the destination.
+#####
+function verify_checksum {
+    zfs list -r -t snap "${POOL_NAME}"
+
+    checksum=$(zfs list -t snap -r -H -o name "${POOL_NAME}" | sed 's/@syncoid_.*/@syncoid_/' | shasum -a 256)
+
+    echo "Expected checksum: $1"
+    echo "Actual checksum: $checksum"
+    # the comparison's status is the function's return value
+    [[ "$checksum" == "$1" ]]
+}
+
+function cleanUp {
+    zpool export "${POOL_NAME}"
+}
+
+# export pool in any case
+trap cleanUp EXIT
+
+zfs create "${POOL_NAME}"/src
+setup_snaps
+
+#####
+# TEST 1
+#
+# --exclude-snaps is provided and --no-stream is omitted. Hourly snaps should
+# be missing from the destination, and all other intermediate snaps should be
+# present.
+#####
+
+../../../syncoid --debug --compress=none --no-sync-snap --exclude-snaps='hourly' "${POOL_NAME}"/src "${POOL_NAME}"/dst
+verify_checksum 'fb408c21b8540b3c1bd04781b6091d77ff9432defef3303c1a34321b45e8b6a9 -'
+clean_snaps
+
+#####
+# TEST 2
+#
+# --exclude-snaps and --no-stream are provided. Only the daily3 snap should be
+# present on the destination.
+#####
+
+../../../syncoid --debug --compress=none --no-sync-snap --exclude-snaps='hourly' --no-stream "${POOL_NAME}"/src "${POOL_NAME}"/dst
+verify_checksum 'c9ad1d3e07156847f957509fcd4805edc7d4c91fe955c605ac4335076367d19a -'
+clean_snaps
+
+#####
+# TEST 3
+#
+# --include-snaps is provided and --no-stream is omitted. Hourly snaps should
+# be present on the destination, and all other snaps should be missing
+#####
+
+../../../syncoid --debug --compress=none --no-sync-snap --include-snaps='hourly' "${POOL_NAME}"/src "${POOL_NAME}"/dst
+verify_checksum 'f2fb62a2b475bec85796dbf4f6c02af5b4ccaca01f9995ef3d0909787213cbde -'
+clean_snaps
+
+#####
+# TEST 4
+#
+# --include-snaps and --no-stream are provided. Only the hourly4 snap should
+# be present on the destination.
+#####
+
+../../../syncoid --debug --compress=none --no-sync-snap --include-snaps='hourly' --no-stream "${POOL_NAME}"/src "${POOL_NAME}"/dst
+verify_checksum '194e60e9d635783f7c7d64e2b0d9f0897c926e69a86ffa2858cf0ca874ffeeb4 -'
+clean_snaps
+
+#####
+# TEST 5
+#
+# --include-snaps='hourly' and --exclude-snaps='3' are both provided. The
+# hourly snaps should be present on the destination except for hourly3; daily
+# and monthly snaps should be missing.
+#####
+
+../../../syncoid --debug --compress=none --no-sync-snap --include-snaps='hourly' --exclude-snaps='3' "${POOL_NAME}"/src "${POOL_NAME}"/dst
+verify_checksum '55267405e346e64d6f7eed29d62bc9bb9ea0e15c9515103a92ee47a7439a99a2 -'
+clean_snaps
+
+#####
+# TEST 6
+#
+# --exclude-snaps='syncoid' and --no-stream are provided, and --no-sync-snap is
+# omitted. The sync snap should be created on the source but not sent to the
+# destination; only hourly4 should be sent.
+##### + +../../../syncoid --debug --compress=none --no-stream --exclude-snaps='syncoid' "${POOL_NAME}"/src "${POOL_NAME}"/dst +verify_checksum '47380e1711d08c46fb1691fa4bd65e5551084fd5b961baa2de7f91feff2cb4b8 -' +clean_snaps From 14ed85163a82c743d67c2b25fac1882f302ab0ea Mon Sep 17 00:00:00 2001 From: Vinnie Okada Date: Wed, 1 Dec 2021 20:40:03 -0700 Subject: [PATCH 043/130] Filter snapshots in getsnapsfallback() --- syncoid | 2 ++ 1 file changed, 2 insertions(+) diff --git a/syncoid b/syncoid index 8f8db31..33e4694 100755 --- a/syncoid +++ b/syncoid @@ -1674,6 +1674,7 @@ sub getsnapsfallback() { $guid =~ s/^.*\tguid\t*(\d*).*/$1/; my $snap = $line; $snap =~ s/^.*\@(.*)\tguid.*$/$1/; + if (!snapisincluded($snap)) { next; } $snaps{$type}{$snap}{'guid'}=$guid; } elsif ($state eq 2) { if ($line !~ /\Q$fs\E\@.*creation/) { @@ -1685,6 +1686,7 @@ sub getsnapsfallback() { $creation =~ s/^.*\tcreation\t*(\d*).*/$1/; my $snap = $line; $snap =~ s/^.*\@(.*)\tcreation.*$/$1/; + if (!snapisincluded($snap)) { next; } # the accuracy of the creation timestamp is only for a second, but # snapshots in the same second are highly likely. The list command From 0c577fc73541e4ca9d971aa18b6ee076830bb34f Mon Sep 17 00:00:00 2001 From: Vinnie Okada Date: Thu, 2 Dec 2021 21:36:52 -0700 Subject: [PATCH 044/130] Deprecate the --exclude option Add a new option, --exclude-datasets, to replace --exclude. This makes the naming more consistent now that there are options to filter both snapshots and datasets. Also add more information to the README about the distinction between --exclude-datasets and --(in|ex)clude-snaps. --- README.md | 12 +++++++++--- syncoid | 19 +++++++++++++++---- 2 files changed, 24 insertions(+), 7 deletions(-) diff --git a/README.md b/README.md index 385f66f..81d70d5 100644 --- a/README.md +++ b/README.md @@ -330,15 +330,21 @@ As of 1.4.18, syncoid also automatically supports and enables resume of interrup + --exclude=REGEX - The given regular expression will be matched against all datasets which would be synced by this run and excludes them. This argument can be specified multiple times. + __DEPRECATION NOTICE:__ `--exclude` has been deprecated and will be removed in a future release. Please use `--exclude-datasets` instead. + + The given regular expression will be matched against all datasets which would be synced by this run and excludes them. This argument can be specified multiple times. The provided regex pattern is matched against the dataset name only; this option does not affect which snapshots are synchronized. If both `--exclude` and `--exclude-datasets` are provided, then `--exclude` is ignored. + ++ --exclude-datasets=REGEX + + The given regular expression will be matched against all datasets which would be synced by this run and excludes them. This argument can be specified multiple times. The provided regex pattern is matched against the dataset name only; this option does not affect which snapshots are synchronized. + --exclude-snaps=REGEX - Exclude specific snapshots that match the given regular expression. Can be specified multiple times. If a snapshot matches both the exclude-snaps and include-snaps patterns, then it will be excluded. + Exclude specific snapshots that match the given regular expression. The provided regex pattern is matched against the snapshot name only. Can be specified multiple times. If a snapshot matches both the exclude-snaps and include-snaps patterns, then it will be excluded. 
+ --include-snaps=REGEX - Only include snapshots that match the given regular expression. Can be specified multiple times. If a snapshot matches both the exclude-snaps and include-snaps patterns, then it will be excluded. + Only include snapshots that match the given regular expression. The provided regex pattern is matched against the snapshot name only. Can be specified multiple times. If a snapshot matches both the exclude-snaps and include-snaps patterns, then it will be excluded. + --no-resume diff --git a/syncoid b/syncoid index 33e4694..6e37af2 100755 --- a/syncoid +++ b/syncoid @@ -26,10 +26,20 @@ GetOptions(\%args, "no-command-checks", "monitor-version", "compress=s", "dumpsn "debug", "quiet", "no-stream", "no-sync-snap", "no-resume", "exclude=s@", "skip-parent", "identifier=s", "no-clone-handling", "no-privilege-elevation", "force-delete", "create-bookmark", "pv-options=s" => \$pvoptions, "keep-sync-snap", "preserve-recordsize", "mbuffer-size=s" => \$mbuffer_size, - "include-snaps=s@", "exclude-snaps=s@") or pod2usage(2); + "include-snaps=s@", "exclude-snaps=s@", "exclude-datasets=s@") or pod2usage(2); my %compressargs = %{compressargset($args{'compress'} || 'default')}; # Can't be done with GetOptions arg, as default still needs to be set +if (defined($args{'exclude'})) { + writelog('WARN', 'The --exclude option is deprecated, please use --exclude-datasets instead'); + + # If both --exclude and --exclude-datasets are provided, then ignore + # --exclude + if (!defined($args{'exclude-datasets'})) { + $args{'exclude-datasets'} = $args{'exclude'}; + } +} + my @sendoptions = (); if (length $args{'sendoptions'}) { @sendoptions = parsespecialoptions($args{'sendoptions'}); @@ -256,8 +266,8 @@ sub getchilddatasets { my ($dataset, $origin) = /^([^\t]+)\t([^\t]+)/; - if (defined $args{'exclude'}) { - my $excludes = $args{'exclude'}; + if (defined $args{'exclude-datasets'}) { + my $excludes = $args{'exclude-datasets'}; foreach (@$excludes) { if ($dataset =~ /$_/) { writelog('DEBUG', "excluded $dataset because of $_"); @@ -2065,7 +2075,8 @@ Options: --keep-sync-snap Don't destroy created sync snapshots --create-bookmark Creates a zfs bookmark for the newest snapshot on the source after replication succeeds (only works with --no-sync-snap) --preserve-recordsize Preserves the recordsize on initial sends to the target - --exclude=REGEX Exclude specific datasets which match the given regular expression. Can be specified multiple times + --exclude=REGEX DEPRECATED. Equivalent to --exclude-datasets, but will be removed in a future release. Ignored if --exclude-datasets is also provided. + --exclude-datasets=REGEX Exclude specific datasets which match the given regular expression. Can be specified multiple times --exclude-snaps=REGEX Exclude specific snapshots that match the given regular expression. Can be specified multiple times. If a snapshot matches both the exclude-snaps and include-snaps patterns, then it will be excluded. --include-snaps=REGEX Only include snapshots that match the given regular expression. Can be specified multiple times. If a snapshot matches both the exclude-snaps and include-snaps patterns, then it will be excluded. --sendoptions=OPTIONS Use advanced options for zfs send (the arguments are filtered as needed), e.g. syncoid --sendoptions="Lc e" sets zfs send -L -c -e ... 
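
A migration sketch for the deprecation above (the dataset names and pattern
are hypothetical): replace the old flag with the new spelling; behavior is
identical, and if both are given, --exclude-datasets wins and --exclude is
ignored.

    # deprecated, still honored for now
    syncoid --recursive --exclude='scratch' pool/src backup/src
    # preferred replacement
    syncoid --recursive --exclude-datasets='scratch' pool/src backup/src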
From 9f898439e0150d8abe7f8390ee86ac2b8cfca889 Mon Sep 17 00:00:00 2001 From: Josh Soref <2119212+jsoref@users.noreply.github.com> Date: Sun, 1 Jan 2023 14:30:47 -0500 Subject: [PATCH 045/130] spelling: a lot Signed-off-by: Josh Soref <2119212+jsoref@users.noreply.github.com> --- README.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/README.md b/README.md index 21432ac..629a1c8 100644 --- a/README.md +++ b/README.md @@ -103,7 +103,7 @@ For more full details on sanoid.conf settings see [Wiki page](https://github.com + --debug - This prints out quite alot of additional information during a sanoid run, and is normally not needed. + This prints out quite a lot of additional information during a sanoid run, and is normally not needed. + --readonly @@ -374,7 +374,7 @@ As of 1.4.18, syncoid also automatically supports and enables resume of interrup + --debug - This prints out quite alot of additional information during a sanoid run, and is normally not needed. + This prints out quite a lot of additional information during a sanoid run, and is normally not needed. + --help From 7d24d97045a91cc477c758b4498767eb6b021ee0 Mon Sep 17 00:00:00 2001 From: Josh Soref <2119212+jsoref@users.noreply.github.com> Date: Sun, 1 Jan 2023 14:30:07 -0500 Subject: [PATCH 046/130] spelling: available Signed-off-by: Josh Soref <2119212+jsoref@users.noreply.github.com> --- CHANGELIST | 2 +- packages/debian/changelog | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/CHANGELIST b/CHANGELIST index 49fcd50..78fcd0f 100644 --- a/CHANGELIST +++ b/CHANGELIST @@ -2,7 +2,7 @@ [syncoid] do not require user to be specified for syncoid (@aerusso) [syncoid] implemented option for keeping sync snaps (@phreaker0) [syncoid] use sudo if neccessary for checking pool capabilities regarding resumeable send (@phreaker0) - [syncoid] catch another case were the resume state isn't availabe anymore (@phreaker0) + [syncoid] catch another case were the resume state isn't available anymore (@phreaker0) [syncoid] check for an invalid argument combination (@phreaker0) [syncoid] fix iszfsbusy check for similar dataset names (@phreaker0) [syncoid] append timezone offset to the syncoid snapshot name to fix DST collisions (@phreaker0) diff --git a/packages/debian/changelog b/packages/debian/changelog index b394acb..b4e3f6c 100644 --- a/packages/debian/changelog +++ b/packages/debian/changelog @@ -4,7 +4,7 @@ sanoid (2.1.0) unstable; urgency=medium [syncoid] do not require user to be specified for syncoid (@aerusso) [syncoid] implemented option for keeping sync snaps (@phreaker0) [syncoid] use sudo if neccessary for checking pool capabilities regarding resumeable send (@phreaker0) - [syncoid] catch another case were the resume state isn't availabe anymore (@phreaker0) + [syncoid] catch another case were the resume state isn't available anymore (@phreaker0) [syncoid] check for an invalid argument combination (@phreaker0) [syncoid] fix iszfsbusy check for similar dataset names (@phreaker0) [syncoid] append timezone offset to the syncoid snapshot name to fix DST collisions (@phreaker0) From cad12155018216d42fc06b33b00275dd8f73162a Mon Sep 17 00:00:00 2001 From: Josh Soref <2119212+jsoref@users.noreply.github.com> Date: Sun, 1 Jan 2023 14:30:07 -0500 Subject: [PATCH 047/130] spelling: debugging Signed-off-by: Josh Soref <2119212+jsoref@users.noreply.github.com> --- sanoid | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/sanoid b/sanoid index 13ea085..06712a4 100755 --- a/sanoid +++ b/sanoid @@ 
-1137,7 +1137,7 @@ sub check_zpool() { } } - # Tony: Debuging + # Tony: Debugging # print "Size: $size \t Used: $used \t Avai: $avail \t Cap: $cap \t Health: $health\n"; close(STAT); From c37f412797424993a2cce5b54b86cfb5da8d99ea Mon Sep 17 00:00:00 2001 From: Josh Soref <2119212+jsoref@users.noreply.github.com> Date: Sun, 1 Jan 2023 14:35:48 -0500 Subject: [PATCH 048/130] spelling: errlevel Signed-off-by: Josh Soref <2119212+jsoref@users.noreply.github.com> --- sanoid | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/sanoid b/sanoid index 06712a4..81a7eac 100755 --- a/sanoid +++ b/sanoid @@ -130,7 +130,7 @@ sub monitor_snapshots { my ($config, $snaps, $snapsbytype, $snapsbypath) = @_; my %datestamp = get_date(); - my $errorlevel = 0; + my $errlevel = 0; my $msg; my @msgs; my @paths; @@ -169,7 +169,7 @@ sub monitor_snapshots { my $dispcrit = displaytime($crit); if ( $elapsed > $crit || $elapsed == -1) { if ($crit > 0) { - if (! $config{$section}{'monitor_dont_crit'}) { $errorlevel = 2; } + if (! $config{$section}{'monitor_dont_crit'}) { $errlevel = 2; } if ($elapsed == -1) { push @msgs, "CRIT: $path has no $type snapshots at all!"; } else { @@ -178,7 +178,7 @@ sub monitor_snapshots { } } elsif ($elapsed > $warn) { if ($warn > 0) { - if (! $config{$section}{'monitor_dont_warn'} && ($errorlevel < 2) ) { $errorlevel = 1; } + if (! $config{$section}{'monitor_dont_warn'} && ($errlevel < 2) ) { $errlevel = 1; } push @msgs, "WARN: $path newest $type snapshot is $dispelapsed old (should be < $dispwarn)"; } } else { @@ -196,7 +196,7 @@ sub monitor_snapshots { if ($msg eq '') { $msg = "OK: all monitored datasets \($paths\) have fresh snapshots"; } print "$msg\n"; - exit $errorlevel; + exit $errlevel; } From 12e962a133996f1b30d200b11a507bb820c10a3c Mon Sep 17 00:00:00 2001 From: Josh Soref <2119212+jsoref@users.noreply.github.com> Date: Sun, 1 Jan 2023 14:36:04 -0500 Subject: [PATCH 049/130] spelling: errors Signed-off-by: Josh Soref <2119212+jsoref@users.noreply.github.com> --- CHANGELIST | 2 +- packages/debian/changelog | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/CHANGELIST b/CHANGELIST index 78fcd0f..259ee3c 100644 --- a/CHANGELIST +++ b/CHANGELIST @@ -29,7 +29,7 @@ 2.0.2 [overall] documentation updates, new dependencies, small fixes, more warnings (@benyanke, @matveevandrey, @RulerOf, @klemens-u, @johnramsden, @danielewood, @g-a-c, @hartzell, @fryfrog, @phreaker0) [sanoid] changed and simplified DST handling (@shodanshok) [syncoid] reset partially resume state automatically (@phreaker0) - [syncoid] handle some zfs erros automatically by parsing the stderr outputs (@phreaker0) + [syncoid] handle some zfs errors automatically by parsing the stderr outputs (@phreaker0) [syncoid] fixed ordering of snapshots with the same creation timestamp (@phreaker0) [syncoid] don't use hardcoded paths (@phreaker0) [syncoid] fix for special setup with listsnapshots=on (@phreaker0) diff --git a/packages/debian/changelog b/packages/debian/changelog index b4e3f6c..096f2ad 100644 --- a/packages/debian/changelog +++ b/packages/debian/changelog @@ -39,7 +39,7 @@ sanoid (2.0.2) unstable; urgency=medium [overall] documentation updates, new dependencies, small fixes, more warnings (@benyanke, @matveevandrey, @RulerOf, @klemens-u, @johnramsden, @danielewood, @g-a-c, @hartzell, @fryfrog, @phreaker0) [syncoid] changed and simplified DST handling (@shodanshok) [syncoid] reset partially resume state automatically (@phreaker0) - [syncoid] handle some zfs erros automatically by 
parsing the stderr outputs (@phreaker0) + [syncoid] handle some zfs errors automatically by parsing the stderr outputs (@phreaker0) [syncoid] fixed ordering of snapshots with the same creation timestamp (@phreaker0) [syncoid] don't use hardcoded paths (@phreaker0) [syncoid] fix for special setup with listsnapshots=on (@phreaker0) From c43048598a09d9fd6522f5a0bdaf2a7da925fe1c Mon Sep 17 00:00:00 2001 From: Josh Soref <2119212+jsoref@users.noreply.github.com> Date: Sun, 1 Jan 2023 14:18:24 -0500 Subject: [PATCH 050/130] spelling: github Signed-off-by: Josh Soref <2119212+jsoref@users.noreply.github.com> --- CHANGELIST | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/CHANGELIST b/CHANGELIST index 259ee3c..fbee5c1 100644 --- a/CHANGELIST +++ b/CHANGELIST @@ -118,12 +118,12 @@ replicating to target/parent/child2. This could still use some cleanup TBH; syncoid SHOULD exit 3 if any of these errors happen (to assist detection of errors in scripting) but now would exit 0. -1.4.12 Sanoid now strips trailing whitespace in template definitions in sanoid.conf, per Github #61 +1.4.12 Sanoid now strips trailing whitespace in template definitions in sanoid.conf, per GitHub #61 1.4.11 enhanced Syncoid to use zfs `guid` property rather than `creation` property to ensure snapshots on source and target actually match. This immediately prevents conflicts due to timezone differences on source and target, and also paves the way in the future for Syncoid to find matching snapshots even after `zfs rename` on source - or target. Thank you Github user @mailinglists35 for the idea! + or target. Thank you GitHub user @mailinglists35 for the idea! 1.4.10 added --compress=pigz-fast and --compress=pigz-slow. On a Xeon E3-1231v3, pigz-fast is equivalent compression to --compress=gzip but with compressed throughput of 75.2 MiB/s instead of 18.1 MiB/s. pigz-slow is around 5% @@ -241,4 +241,4 @@ 1.0.1 ported slightly modified iszfsbusy sub from syncoid to sanoid (to keep from thinning snapshots during replications) -1.0.0 initial commit to Github +1.0.0 initial commit to GitHub From a6664247423351b6d96a899b9376648769b0e6b5 Mon Sep 17 00:00:00 2001 From: Josh Soref <2119212+jsoref@users.noreply.github.com> Date: Sun, 1 Jan 2023 14:38:09 -0500 Subject: [PATCH 051/130] spelling: mbytes Signed-off-by: Josh Soref <2119212+jsoref@users.noreply.github.com> --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index 629a1c8..1674ed9 100644 --- a/README.md +++ b/README.md @@ -294,7 +294,7 @@ As of 1.4.18, syncoid also automatically supports and enables resume of interrup + --target-bwlimit - This is the bandwidth limit in bytes (kbytes, mbytesm etc) per second imposed upon the target. This is mainly used if the source does not have mbuffer installed, but bandwidth limits are desired. + This is the bandwidth limit in bytes (kbytes, mbytes, etc) per second imposed upon the target. This is mainly used if the source does not have mbuffer installed, but bandwidth limits are desired. 
+ --no-command-checks From 9421892402b45aacefc6284b12c2aa88bfd12ac7 Mon Sep 17 00:00:00 2001 From: Josh Soref <2119212+jsoref@users.noreply.github.com> Date: Sun, 1 Jan 2023 14:30:08 -0500 Subject: [PATCH 052/130] spelling: naming Signed-off-by: Josh Soref <2119212+jsoref@users.noreply.github.com> --- syncoid | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/syncoid b/syncoid index ec6ae9d..e7a9194 100755 --- a/syncoid +++ b/syncoid @@ -821,7 +821,7 @@ sub syncdataset { } if ($debug) { print "DEBUG: $bookmarkcmd\n"; } system($bookmarkcmd) == 0 or do { - # fallback: assume nameing conflict and try again with guid based suffix + # fallback: assume naming conflict and try again with guid based suffix my $guid = $snaps{'source'}{$newsyncsnap}{'guid'}; $guid = substr($guid, 0, 6); From 93f2b8863e037bb945d6b5cf30d278276eeba2d6 Mon Sep 17 00:00:00 2001 From: Josh Soref <2119212+jsoref@users.noreply.github.com> Date: Sun, 1 Jan 2023 14:30:08 -0500 Subject: [PATCH 053/130] spelling: necessary Signed-off-by: Josh Soref <2119212+jsoref@users.noreply.github.com> --- CHANGELIST | 2 +- packages/debian/changelog | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/CHANGELIST b/CHANGELIST index fbee5c1..8c3d76e 100644 --- a/CHANGELIST +++ b/CHANGELIST @@ -1,7 +1,7 @@ 2.1.0 [overall] documentation updates, small fixes (@HavardLine, @croadfeldt, @jimsalterjrs, @jim-perkins, @kr4z33, @phreaker0) [syncoid] do not require user to be specified for syncoid (@aerusso) [syncoid] implemented option for keeping sync snaps (@phreaker0) - [syncoid] use sudo if neccessary for checking pool capabilities regarding resumeable send (@phreaker0) + [syncoid] use sudo if necessary for checking pool capabilities regarding resumeable send (@phreaker0) [syncoid] catch another case were the resume state isn't available anymore (@phreaker0) [syncoid] check for an invalid argument combination (@phreaker0) [syncoid] fix iszfsbusy check for similar dataset names (@phreaker0) diff --git a/packages/debian/changelog b/packages/debian/changelog index 096f2ad..3796a37 100644 --- a/packages/debian/changelog +++ b/packages/debian/changelog @@ -3,7 +3,7 @@ sanoid (2.1.0) unstable; urgency=medium [overall] documentation updates, small fixes (@HavardLine, @croadfeldt, @jimsalterjrs, @jim-perkins, @kr4z33, @phreaker0) [syncoid] do not require user to be specified for syncoid (@aerusso) [syncoid] implemented option for keeping sync snaps (@phreaker0) - [syncoid] use sudo if neccessary for checking pool capabilities regarding resumeable send (@phreaker0) + [syncoid] use sudo if necessary for checking pool capabilities regarding resumeable send (@phreaker0) [syncoid] catch another case were the resume state isn't available anymore (@phreaker0) [syncoid] check for an invalid argument combination (@phreaker0) [syncoid] fix iszfsbusy check for similar dataset names (@phreaker0) From 7a8b0adb6d91c6e9d114dc41c138959bf1b08aad Mon Sep 17 00:00:00 2001 From: Josh Soref <2119212+jsoref@users.noreply.github.com> Date: Sun, 1 Jan 2023 14:30:08 -0500 Subject: [PATCH 054/130] spelling: overridden Signed-off-by: Josh Soref <2119212+jsoref@users.noreply.github.com> --- sanoid | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/sanoid b/sanoid index 81a7eac..4dd1937 100755 --- a/sanoid +++ b/sanoid @@ -889,7 +889,7 @@ sub init { # for sections directly when they've already been defined recursively, without starting them over from scratch. if (! 
defined ($config{$section}{'initialized'})) { if ($args{'debug'}) { print "DEBUG: initializing \$config\{$section\} with default values from $default_conf_file.\n"; } - # set default values from %defaults, which can then be overriden by template + # set default values from %defaults, which can then be overridden by template # and/or local settings within the module. foreach my $key (keys %{$defaults{'template_default'}}) { if (! ($key =~ /template|recursive|children_only/)) { From 3ffa57c72fec37e1b3bd8ed43489bd5b19174779 Mon Sep 17 00:00:00 2001 From: Josh Soref <2119212+jsoref@users.noreply.github.com> Date: Sun, 1 Jan 2023 14:30:09 -0500 Subject: [PATCH 055/130] spelling: resumable Signed-off-by: Josh Soref <2119212+jsoref@users.noreply.github.com> --- CHANGELIST | 4 ++-- README.md | 4 ++-- packages/debian/changelog | 4 ++-- 3 files changed, 6 insertions(+), 6 deletions(-) diff --git a/CHANGELIST b/CHANGELIST index 8c3d76e..6ddc565 100644 --- a/CHANGELIST +++ b/CHANGELIST @@ -1,7 +1,7 @@ 2.1.0 [overall] documentation updates, small fixes (@HavardLine, @croadfeldt, @jimsalterjrs, @jim-perkins, @kr4z33, @phreaker0) [syncoid] do not require user to be specified for syncoid (@aerusso) [syncoid] implemented option for keeping sync snaps (@phreaker0) - [syncoid] use sudo if necessary for checking pool capabilities regarding resumeable send (@phreaker0) + [syncoid] use sudo if necessary for checking pool capabilities regarding resumable send (@phreaker0) [syncoid] catch another case were the resume state isn't available anymore (@phreaker0) [syncoid] check for an invalid argument combination (@phreaker0) [syncoid] fix iszfsbusy check for similar dataset names (@phreaker0) @@ -84,7 +84,7 @@ [sanoid] implemented monitor-capacity flag for checking zpool capacity limits (@phreaker0) [syncoid] Added support for ZStandard compression.(@danielewood) [syncoid] implemented support for excluding datasets from replication with regular expressions (@phreaker0) - [syncoid] correctly parse zfs column output, fixes resumeable send with datasets containing spaces (@phreaker0) + [syncoid] correctly parse zfs column output, fixes resumable send with datasets containing spaces (@phreaker0) [syncoid] added option for using extra identification in the snapshot name for replication to multiple targets (@phreaker0) [syncoid] added option for skipping the parent dataset in recursive replication (@phreaker0) [syncoid] typos (@UnlawfulMonad, @jsavikko, @phreaker0) diff --git a/README.md b/README.md index 1674ed9..4e890ff 100644 --- a/README.md +++ b/README.md @@ -232,7 +232,7 @@ syncoid root@remotehost:data/images/vm backup/images/vm Which would pull-replicate the filesystem from the remote host to the local system over an SSH tunnel. Syncoid supports recursive replication (replication of a dataset and all its child datasets) and uses mbuffer buffering, lzop compression, and pv progress bars if the utilities are available on the systems used. -If ZFS supports resumeable send/receive streams on both the source and target those will be enabled as default. +If ZFS supports resumable send/receive streams on both the source and target those will be enabled as default. As of 1.4.18, syncoid also automatically supports and enables resume of interrupted replication when both source and target support this feature. @@ -334,7 +334,7 @@ As of 1.4.18, syncoid also automatically supports and enables resume of interrup + --no-resume - This argument tells syncoid to not use resumeable zfs send/receive streams. 
+ This argument tells syncoid to not use resumable zfs send/receive streams. + --force-delete diff --git a/packages/debian/changelog b/packages/debian/changelog index 3796a37..4cab69b 100644 --- a/packages/debian/changelog +++ b/packages/debian/changelog @@ -3,7 +3,7 @@ sanoid (2.1.0) unstable; urgency=medium [overall] documentation updates, small fixes (@HavardLine, @croadfeldt, @jimsalterjrs, @jim-perkins, @kr4z33, @phreaker0) [syncoid] do not require user to be specified for syncoid (@aerusso) [syncoid] implemented option for keeping sync snaps (@phreaker0) - [syncoid] use sudo if necessary for checking pool capabilities regarding resumeable send (@phreaker0) + [syncoid] use sudo if necessary for checking pool capabilities regarding resumable send (@phreaker0) [syncoid] catch another case were the resume state isn't available anymore (@phreaker0) [syncoid] check for an invalid argument combination (@phreaker0) [syncoid] fix iszfsbusy check for similar dataset names (@phreaker0) @@ -102,7 +102,7 @@ sanoid (2.0.0) unstable; urgency=medium [sanoid] implemented monitor-capacity flag for checking zpool capacity limits (@phreaker0) [syncoid] Added support for ZStandard compression.(@danielewood) [syncoid] implemented support for excluding datasets from replication with regular expressions (@phreaker0) - [syncoid] correctly parse zfs column output, fixes resumeable send with datasets containing spaces (@phreaker0) + [syncoid] correctly parse zfs column output, fixes resumable send with datasets containing spaces (@phreaker0) [syncoid] added option for using extra identification in the snapshot name for replication to multiple targets (@phreaker0) [syncoid] added option for skipping the parent dataset in recursive replication (@phreaker0) [syncoid] typos (@UnlawfulMonad, @jsavikko, @phreaker0) From 0fcaab54f603e70bc1da14fa0c62376784e5d285 Mon Sep 17 00:00:00 2001 From: Josh Soref <2119212+jsoref@users.noreply.github.com> Date: Sun, 1 Jan 2023 14:30:09 -0500 Subject: [PATCH 056/130] spelling: snapshotted Signed-off-by: Josh Soref <2119212+jsoref@users.noreply.github.com> --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index 4e890ff..5f7794c 100644 --- a/README.md +++ b/README.md @@ -125,7 +125,7 @@ Will be executed before the snapshot(s) of a single dataset are taken. The follo | ----------------- | ----------- | | `SANOID_SCRIPT` | The type of script being executed, one of `pre`, `post`, or `prune`. 
Allows for one script to be used for multiple tasks | | `SANOID_TARGET` | **DEPRECATED** The dataset about to be snapshot (only the first dataset will be provided) | -| `SANOID_TARGETS` | Comma separated list of all datasets to be snapshoted (currently only a single dataset, multiple datasets will be possible later with atomic groups) | +| `SANOID_TARGETS` | Comma separated list of all datasets to be snapshotted (currently only a single dataset, multiple datasets will be possible later with atomic groups) | | `SANOID_SNAPNAME` | **DEPRECATED** The name of the snapshot that will be taken (only the first name will be provided, does not include the dataset name) | | `SANOID_SNAPNAMES` | Comma separated list of all snapshot names that will be taken (does not include the dataset name) | | `SANOID_TYPES` | Comma separated list of all snapshot types to be taken (yearly, monthly, weekly, daily, hourly, frequently) | From 2333f1142d4bab0985f8a997a1d61a8f5d20c26c Mon Sep 17 00:00:00 2001 From: Josh Soref <2119212+jsoref@users.noreply.github.com> Date: Sun, 1 Jan 2023 14:30:10 -0500 Subject: [PATCH 057/130] spelling: suppress Signed-off-by: Josh Soref <2119212+jsoref@users.noreply.github.com> --- README.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/README.md b/README.md index 5f7794c..1f3df88 100644 --- a/README.md +++ b/README.md @@ -95,7 +95,7 @@ For more full details on sanoid.conf settings see [Wiki page](https://github.com + --quiet - Supress non-error output. + Suppress non-error output. + --verbose @@ -370,7 +370,7 @@ As of 1.4.18, syncoid also automatically supports and enables resume of interrup + --quiet - Supress non-error output. + Suppress non-error output. + --debug From ae28c10982212d80b3f7565df6e91723d03c89c3 Mon Sep 17 00:00:00 2001 From: Josh Soref <2119212+jsoref@users.noreply.github.com> Date: Sun, 1 Jan 2023 14:42:58 -0500 Subject: [PATCH 058/130] spelling: want Signed-off-by: Josh Soref <2119212+jsoref@users.noreply.github.com> --- tests/common/lib.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/common/lib.sh b/tests/common/lib.sh index 904c98f..9c88eff 100644 --- a/tests/common/lib.sh +++ b/tests/common/lib.sh @@ -34,7 +34,7 @@ function checkEnvironment { echo "!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!" echo "you should be running this test in a" echo "dedicated vm, as it will mess with your system!" - echo "Are you sure you wan't to continue? (y)" + echo "Are you sure you want to continue? (y)" echo "!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!" set -x From 38f2d625a881d1cfb37e830fe4f769d8c2f608d5 Mon Sep 17 00:00:00 2001 From: dodexahedron Date: Thu, 2 Feb 2023 19:32:08 -0700 Subject: [PATCH 059/130] Clarified that compression is on the wire --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index 21432ac..4b0c197 100644 --- a/README.md +++ b/README.md @@ -286,7 +286,7 @@ As of 1.4.18, syncoid also automatically supports and enables resume of interrup + --compress - Currently accepted options: gzip, pigz-fast, pigz-slow, zstd-fast, zstd-slow, lz4, xz, lzo (default) & none. If the selected compression method is unavailable on the source and destination, no compression will be used. + Compression method to use for network transfer. Currently accepted options: gzip, pigz-fast, pigz-slow, zstd-fast, zstd-slow, lz4, xz, lzo (default) & none. If the selected compression method is unavailable on the source and destination, no compression will be used. 
+ --source-bwlimit From a7f9f08f4789108429d016d484bb3d62b873eb54 Mon Sep 17 00:00:00 2001 From: Christoph Klaffl Date: Tue, 21 Mar 2023 16:44:38 +0100 Subject: [PATCH 060/130] fixes some tests which fail on fast storage --- tests/syncoid/5_reset_resume_state/run.sh | 3 +++ tests/syncoid/6_reset_resume_state2/run.sh | 3 +++ 2 files changed, 6 insertions(+) diff --git a/tests/syncoid/5_reset_resume_state/run.sh b/tests/syncoid/5_reset_resume_state/run.sh index 6e71002..43ec78f 100755 --- a/tests/syncoid/5_reset_resume_state/run.sh +++ b/tests/syncoid/5_reset_resume_state/run.sh @@ -45,6 +45,9 @@ wait sleep 1 ../../../syncoid --debug --compress=none --no-resume "${POOL_NAME}"/src "${POOL_NAME}"/dst | grep "reset partial receive state of syncoid" + +sleep 1 + ../../../syncoid --debug --compress=none "${POOL_NAME}"/src "${POOL_NAME}"/dst exit $? diff --git a/tests/syncoid/6_reset_resume_state2/run.sh b/tests/syncoid/6_reset_resume_state2/run.sh index 1afc921..d05696b 100755 --- a/tests/syncoid/6_reset_resume_state2/run.sh +++ b/tests/syncoid/6_reset_resume_state2/run.sh @@ -47,6 +47,9 @@ sleep 1 zfs destroy "${POOL_NAME}"/src@big ../../../syncoid --debug --compress=none "${POOL_NAME}"/src "${POOL_NAME}"/dst # | grep "reset partial receive state of syncoid" + +sleep 1 + ../../../syncoid --debug --compress=none "${POOL_NAME}"/src "${POOL_NAME}"/dst exit $? From 91d96a4c8739d4955983153b571cccb0bacbff81 Mon Sep 17 00:00:00 2001 From: Christoph Klaffl Date: Fri, 24 Mar 2023 09:00:07 +0100 Subject: [PATCH 061/130] support bookmarks which are taken in the same second --- syncoid | 22 ++++++++++++++++++++-- 1 file changed, 20 insertions(+), 2 deletions(-) diff --git a/syncoid b/syncoid index e5046f3..af9518e 100755 --- a/syncoid +++ b/syncoid @@ -25,7 +25,7 @@ GetOptions(\%args, "no-command-checks", "monitor-version", "compress=s", "dumpsn "source-bwlimit=s", "target-bwlimit=s", "sshconfig=s", "sshkey=s", "sshport=i", "sshcipher|c=s", "sshoption|o=s@", "debug", "quiet", "no-stream", "no-sync-snap", "no-resume", "exclude=s@", "skip-parent", "identifier=s", "no-clone-handling", "no-privilege-elevation", "force-delete", "no-rollback", "create-bookmark", - "pv-options=s" => \$pvoptions, "keep-sync-snap", "preserve-recordsize", "mbuffer-size=s" => \$mbuffer_size) + "pv-options=s" => \$pvoptions, "keep-sync-snap", "preserve-recordsize", "mbuffer-size=s" => \$mbuffer_size) or pod2usage(2); my %compressargs = %{compressargset($args{'compress'} || 'default')}; # Can't be done with GetOptions arg, as default still needs to be set @@ -1729,6 +1729,7 @@ sub getbookmarks() { # as though each were an entirely separate get command. my $lastguid; + my %creationtimes=(); foreach my $line (@rawbookmarks) { # only import bookmark guids, creation from the specified filesystem @@ -1745,7 +1746,24 @@ sub getbookmarks() { $creation =~ s/^.*\tcreation\t*(\d*).*/$1/; my $bookmark = $line; $bookmark =~ s/^.*\#(.*)\tcreation.*$/$1/; - $bookmarks{$lastguid}{'creation'}=$creation . "000"; + + # the accuracy of the creation timestamp is only for a second, but + # bookmarks in the same second are possible. 
The list command + # has an ordered output so we append another three digit running number + # to the creation timestamp and make sure those are ordered correctly + # for bookmarks with the same creation timestamp + my $counter = 0; + my $creationsuffix; + while ($counter < 999) { + $creationsuffix = sprintf("%s%03d", $creation, $counter); + if (!defined $creationtimes{$creationsuffix}) { + $creationtimes{$creationsuffix} = 1; + last; + } + $counter += 1; + } + + $bookmarks{$lastguid}{'creation'}=$creationsuffix; } } From 2d89434ac3181cd7e8d1b2f9d5fe676d85eadd17 Mon Sep 17 00:00:00 2001 From: Mathieu Arnold Date: Fri, 13 Mar 2020 14:20:25 +0100 Subject: [PATCH 062/130] Add target snapshot deletion. --- README.md | 3 +++ syncoid | 18 +++++++++++++++++- 2 files changed, 20 insertions(+), 1 deletion(-) diff --git a/README.md b/README.md index 1cb2f17..51ac568 100644 --- a/README.md +++ b/README.md @@ -319,6 +319,9 @@ As of 1.4.18, syncoid also automatically supports and enables resume of interrup + --preserve-recordsize This argument tells syncoid to set the recordsize on the target before writing any data to it matching the one set on the replication src. This only applies to initial sends. ++ --push-snap-removal + + With this argument snapshots that are removed in the source will also be removed. Use this if you only want to handle snapshots on the source. + --no-clone-rollback diff --git a/syncoid b/syncoid index e5046f3..3796ce1 100755 --- a/syncoid +++ b/syncoid @@ -25,7 +25,8 @@ GetOptions(\%args, "no-command-checks", "monitor-version", "compress=s", "dumpsn "source-bwlimit=s", "target-bwlimit=s", "sshconfig=s", "sshkey=s", "sshport=i", "sshcipher|c=s", "sshoption|o=s@", "debug", "quiet", "no-stream", "no-sync-snap", "no-resume", "exclude=s@", "skip-parent", "identifier=s", "no-clone-handling", "no-privilege-elevation", "force-delete", "no-rollback", "create-bookmark", - "pv-options=s" => \$pvoptions, "keep-sync-snap", "preserve-recordsize", "mbuffer-size=s" => \$mbuffer_size) + "pv-options=s" => \$pvoptions, "keep-sync-snap", "preserve-recordsize", "mbuffer-size=s" => \$mbuffer_size, + "push-snap-removal",) or pod2usage(2); my %compressargs = %{compressargset($args{'compress'} || 'default')}; # Can't be done with GetOptions arg, as default still needs to be set @@ -888,6 +889,20 @@ sub syncdataset { } } + if (defined $args{'push-snap-removal'}) { + foreach my $snap ( sort { $snaps{'target'}{$a}{'creation'}<=>$snaps{'target'}{$b}{'creation'} } keys %{ $snaps{'target'} }) { + if (!exists $snaps{'source'}{$snap}) { + if ($targethost ne '') { + if ($debug) { print "$sshcmd $targethost $targetsudocmd $zfscmd destroy $targetfsescaped\@$snap\n"; } + system ("$sshcmd $targethost " . escapeshellparam("$targetsudocmd $zfscmd destroy $targetfsescaped\@$snap")); + } else { + if ($debug) { print "$targetsudocmd $zfscmd destroy $targetfsescaped\@$snap\n"; } + system ("$targetsudocmd $zfscmd destroy $targetfsescaped\@$snap"); + } + } + } + } + } # end syncdataset() sub compressargset { @@ -1999,6 +2014,7 @@ Options: --create-bookmark Creates a zfs bookmark for the newest snapshot on the source after replication succeeds (only works with --no-sync-snap) --preserve-recordsize Preserves the recordsize on initial sends to the target --no-rollback Does not rollback snapshots on target (it probably requires a readonly target) + --push-snap-removal Remove snapshots on the target that do not exist on the source any more --exclude=REGEX Exclude specific datasets which match the given regular expression. 
Can be specified multiple times --sendoptions=OPTIONS Use advanced options for zfs send (the arguments are filtered as needed), e.g. syncoid --sendoptions="Lc e" sets zfs send -L -c -e ... --recvoptions=OPTIONS Use advanced options for zfs receive (the arguments are filtered as needed), e.g. syncoid --recvoptions="ux recordsize o compression=lz4" sets zfs receive -u -x recordsize -o compression=lz4 ... From 63dd819ec55d181283e184e898a88bc3751e7e89 Mon Sep 17 00:00:00 2001 From: Mathieu Arnold Date: Mon, 11 May 2020 16:37:40 +0200 Subject: [PATCH 063/130] Rename option. --- README.md | 5 +++-- syncoid | 6 +++--- 2 files changed, 6 insertions(+), 5 deletions(-) diff --git a/README.md b/README.md index 51ac568..f6028b0 100644 --- a/README.md +++ b/README.md @@ -319,9 +319,10 @@ As of 1.4.18, syncoid also automatically supports and enables resume of interrup + --preserve-recordsize This argument tells syncoid to set the recordsize on the target before writing any data to it matching the one set on the replication src. This only applies to initial sends. -+ --push-snap-removal - With this argument snapshots that are removed in the source will also be removed. Use this if you only want to handle snapshots on the source. ++ --delete-target-snapshots + + With this argument snapshots which are missing on the source will be destroyed on the target. Use this if you only want to handle snapshots on the source. + --no-clone-rollback diff --git a/syncoid b/syncoid index 3796ce1..19c8656 100755 --- a/syncoid +++ b/syncoid @@ -26,7 +26,7 @@ GetOptions(\%args, "no-command-checks", "monitor-version", "compress=s", "dumpsn "debug", "quiet", "no-stream", "no-sync-snap", "no-resume", "exclude=s@", "skip-parent", "identifier=s", "no-clone-handling", "no-privilege-elevation", "force-delete", "no-rollback", "create-bookmark", "pv-options=s" => \$pvoptions, "keep-sync-snap", "preserve-recordsize", "mbuffer-size=s" => \$mbuffer_size, - "push-snap-removal",) + "delete-target-snapshots") or pod2usage(2); my %compressargs = %{compressargset($args{'compress'} || 'default')}; # Can't be done with GetOptions arg, as default still needs to be set @@ -889,7 +889,7 @@ sub syncdataset { } } - if (defined $args{'push-snap-removal'}) { + if (defined $args{'delete-target-snapshots'}) { foreach my $snap ( sort { $snaps{'target'}{$a}{'creation'}<=>$snaps{'target'}{$b}{'creation'} } keys %{ $snaps{'target'} }) { if (!exists $snaps{'source'}{$snap}) { if ($targethost ne '') { @@ -2014,7 +2014,7 @@ Options: --create-bookmark Creates a zfs bookmark for the newest snapshot on the source after replication succeeds (only works with --no-sync-snap) --preserve-recordsize Preserves the recordsize on initial sends to the target --no-rollback Does not rollback snapshots on target (it probably requires a readonly target) - --push-snap-removal Remove snapshots on the target that do not exist on the source any more + --delete-target-snapshots With this argument snapshots which are missing on the source will be destroyed on the target. Use this if you only want to handle snapshots on the source. --exclude=REGEX Exclude specific datasets which match the given regular expression. Can be specified multiple times --sendoptions=OPTIONS Use advanced options for zfs send (the arguments are filtered as needed), e.g. syncoid --sendoptions="Lc e" sets zfs send -L -c -e ... --recvoptions=OPTIONS Use advanced options for zfs receive (the arguments are filtered as needed), e.g. 
syncoid --recvoptions="ux recordsize o compression=lz4" sets zfs receive -u -x recordsize -o compression=lz4 ...

From ecd14005395059e5dbbbdd8f7530d479d1e5843b Mon Sep 17 00:00:00 2001
From: Mathieu Arnold
Date: Mon, 17 Aug 2020 16:36:14 +0200
Subject: [PATCH 064/130] Handle output/errors of those zfs destroy commands.

If an obsolete remote syncoid_hostname_* snapshot did not get removed at
the correct time for some reason (network problems, for example), it
would have been cleaned up in pruneoldsyncsnaps just before this code,
and we would then get a strange error message saying: could not find any
snapshots to destroy; check snapshot names.

Also, when using --quiet, do not output anything, as failing to remove
an obsolete snapshot is not really a big problem.
---
 syncoid | 21 ++++++++++++---------
 1 file changed, 12 insertions(+), 9 deletions(-)

diff --git a/syncoid b/syncoid
index 19c8656..4393e4f 100755
--- a/syncoid
+++ b/syncoid
@@ -890,15 +890,18 @@ sub syncdataset {
 }

 if (defined $args{'delete-target-snapshots'}) {
-    foreach my $snap ( sort { $snaps{'target'}{$a}{'creation'}<=>$snaps{'target'}{$b}{'creation'} } keys %{ $snaps{'target'} }) {
-        if (!exists $snaps{'source'}{$snap}) {
-            if ($targethost ne '') {
-                if ($debug) { print "$sshcmd $targethost $targetsudocmd $zfscmd destroy $targetfsescaped\@$snap\n"; }
-                system ("$sshcmd $targethost " . escapeshellparam("$targetsudocmd $zfscmd destroy $targetfsescaped\@$snap"));
-            } else {
-                if ($debug) { print "$targetsudocmd $zfscmd destroy $targetfsescaped\@$snap\n"; }
-                system ("$targetsudocmd $zfscmd destroy $targetfsescaped\@$snap");
-            }
+    my $snaps = join ',', grep {!exists $snaps{'source'}{$_}} keys %{ $snaps{'target'} };
+    if ($snaps ne '') {
+        my $command;
+        if ($targethost ne '') {
+            $command = "$sshcmd $targethost " . escapeshellparam("$targetsudocmd $zfscmd destroy $targetfsescaped\@$snaps");
+        } else {
+            $command = "$targetsudocmd $zfscmd destroy $targetfsescaped\@$snaps";
+        }
+        if ($debug) { print "$command\n"; }
+        my ($stdout, $stderr, $result) = capture { system $command; };
+        if ($result != 0 && !$quiet) {
+            warn "$command failed: $stderr";
         }
     }
 }

From f711e6bf28e8b10f9e0f91f81f93ff6149c8492d Mon Sep 17 00:00:00 2001
From: Mathieu Arnold
Date: Mon, 17 May 2021 13:32:22 +0200
Subject: [PATCH 065/130] Add a note about when snapshot deletion is done.

---
 README.md | 1 +
 1 file changed, 1 insertion(+)

diff --git a/README.md b/README.md
index f6028b0..5ad2e0c 100644
--- a/README.md
+++ b/README.md
@@ -323,6 +323,7 @@ As of 1.4.18, syncoid also automatically supports and enables resume of interrup
 + --delete-target-snapshots

   With this argument snapshots which are missing on the source will be destroyed on the target. Use this if you only want to handle snapshots on the source.
+  Note that snapshot deletion is only done after a successful synchronization. If no new snapshots are found, no synchronization is done and no deletion either.

 + --no-clone-rollback

From 2f706a4ae1cab80417e76cc071aa1c561706791d Mon Sep 17 00:00:00 2001
From: Mathieu Arnold
Date: Sat, 8 Apr 2023 09:58:40 +0200
Subject: [PATCH 066/130] Batch snapshot deletion.

This prevents a problem where a large number of snapshots exceeds the
allowed shell command length.
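A minimal standalone sketch of the batching idea (hypothetical dataset
and snapshot names; the real logic is in the diff below). zfs destroy
accepts a comma-separated list of snapshots after a single dataset@
prefix, so destroying in fixed-size groups keeps each command line short:

```perl
use strict;
use warnings;

# hypothetical leftover target snapshots; syncoid builds this list from zfs get
my @to_delete = map { "autosnap_$_" } (1 .. 120);

while (@to_delete) {
    # at most 50 snapshot names per zfs invocation, joined with commas,
    # so the resulting command stays well below the shell length limit
    my $batch = join ',', splice(@to_delete, 0, 50);
    print "zfs destroy tank/data\@$batch\n";    # stands in for system(...)
}
```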
--- syncoid | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/syncoid b/syncoid index 4393e4f..1736afe 100755 --- a/syncoid +++ b/syncoid @@ -890,8 +890,14 @@ sub syncdataset { } if (defined $args{'delete-target-snapshots'}) { - my $snaps = join ',', grep {!exists $snaps{'source'}{$_}} keys %{ $snaps{'target'} }; - if ($snaps ne '') { + # Find the snapshots that exist on the target, filter with + # those that exist on the source. Remaining are the snapshots + # that are only on the target. Then sort by creation date, as + # to remove the oldest snapshots first. + my @to_delete = sort { $snaps{'target'}{$a}{'creation'}<=>$snaps{'target'}{$b}{'creation'} } grep {!exists $snaps{'source'}{$_}} keys %{ $snaps{'target'} }; + while (@to_delete) { + # Create batch of snapshots to remove + my $snaps = join ',', splice(@to_delete, 0, 50); my $command; if ($targethost ne '') { $command = "$sshcmd $targethost " . escapeshellparam("$targetsudocmd $zfscmd destroy $targetfsescaped\@$snaps"); From e301b5b153a33d37f0ab062a90531226a718e1a4 Mon Sep 17 00:00:00 2001 From: Nick Liu Date: Tue, 25 Apr 2023 13:58:40 -0500 Subject: [PATCH 067/130] refactor(syncoid): Simplify getsnaps to parse a hash rather than lines * The part that was "a little obnoxious" has been rewritten to extract the desired properties in a single loop after importing each line into a hash rather than processing line by line with a state tracking flag. * The `getsnapsfallback` subroutine had duplicated logic that has been absorbed into `getsnaps` with a recursion argument to enable the fallback mode. --- syncoid | 158 ++++++++++++-------------------------------------------- 1 file changed, 33 insertions(+), 125 deletions(-) diff --git a/syncoid b/syncoid index 577df56..3890dee 100755 --- a/syncoid +++ b/syncoid @@ -415,10 +415,10 @@ sub syncdataset { if (!defined($receivetoken)) { # build hashes of the snaps on the source and target filesystems. - %snaps = getsnaps('source',$sourcehost,$sourcefs,$sourceisroot); + %snaps = getsnaps('source',$sourcehost,$sourcefs,$sourceisroot,0); if ($targetexists) { - my %targetsnaps = getsnaps('target',$targethost,$targetfs,$targetisroot); + my %targetsnaps = getsnaps('target',$targethost,$targetfs,$targetisroot,0); my %sourcesnaps = %snaps; %snaps = (%sourcesnaps, %targetsnaps); } @@ -1803,21 +1803,22 @@ sub dumphash() { writelog('INFO', Dumper($hash)); } -sub getsnaps() { - my ($type,$rhost,$fs,$isroot,%snaps) = @_; +sub getsnaps { + my ($type,$rhost,$fs,$isroot,$use_fallback,%snaps) = @_; my $mysudocmd; my $fsescaped = escapeshellparam($fs); if ($isroot) { $mysudocmd = ''; } else { $mysudocmd = $sudocmd; } - my $rhostOriginal = $rhost; - if ($rhost ne '') { $rhost = "$sshcmd $rhost"; # double escaping needed $fsescaped = escapeshellparam($fsescaped); } - my $getsnapcmd = "$rhost $mysudocmd $zfscmd get -Hpd 1 -t snapshot guid,creation $fsescaped"; + my $getsnapcmd = $use_fallback + ? 
"$rhost $mysudocmd $zfscmd get -Hpd 1 type,guid,creation $fsescaped" + : "$rhost $mysudocmd $zfscmd get -Hpd 1 -t snapshot guid,creation $fsescaped"; + if ($debug) { $getsnapcmd = "$getsnapcmd |"; writelog('DEBUG', "getting list of snapshots on $fs using $getsnapcmd..."); @@ -1827,141 +1828,48 @@ sub getsnaps() { open FH, $getsnapcmd; my @rawsnaps = ; close FH or do { - # fallback (solaris for example doesn't support the -t option) - return getsnapsfallback($type,$rhostOriginal,$fs,$isroot,%snaps); + if (!$use_fallback) { + writelog('WARN', "snapshot listing failed, trying fallback command"); + return getsnaps($type, $rhost, $fs, $isroot, 1, %snaps); + } + die "CRITICAL ERROR: snapshots couldn't be listed for $fs (exit code $?)"; }; - # this is a little obnoxious. get guid,creation returns guid,creation on two separate lines - # as though each were an entirely separate get command. + my %snap_data; + my %creationtimes; - my %creationtimes=(); + for my $line (@rawsnaps) { + chomp $line; + my ($dataset, $property, $value) = split /\t/, $line; + my ($fs, $snap) = split /@/, $dataset; + if (!snapisincluded($snap)) { next; } + $snap_data{$snap}{$property} = $value; - foreach my $line (@rawsnaps) { - $line =~ /\Q$fs\E\@(\S*)/; - my $snapname = $1; - - if (!snapisincluded($snapname)) { next; } - - # only import snap guids from the specified filesystem - if ($line =~ /\Q$fs\E\@.*\tguid/) { - chomp $line; - my $guid = $line; - $guid =~ s/^.*\tguid\t*(\d*).*/$1/; - my $snap = $line; - $snap =~ s/^.*\@(.*)\tguid.*$/$1/; - $snaps{$type}{$snap}{'guid'}=$guid; - } - # only import snap creations from the specified filesystem - elsif ($line =~ /\Q$fs\E\@.*\tcreation/) { - chomp $line; - my $creation = $line; - $creation =~ s/^.*\tcreation\t*(\d*).*/$1/; - my $snap = $line; - $snap =~ s/^.*\@(.*)\tcreation.*$/$1/; - - # the accuracy of the creation timestamp is only for a second, but - # snapshots in the same second are highly likely. The list command - # has an ordered output so we append another three digit running number - # to the creation timestamp and make sure those are ordered correctly - # for snapshot with the same creation timestamp + # the accuracy of the creation timestamp is only for a second, but + # snapshots in the same second are highly likely. 
The list command + # has an ordered output so we append another three digit running number + # to the creation timestamp and make sure those are ordered correctly + # for snapshot with the same creation timestamp + if ($property eq 'creation') { my $counter = 0; my $creationsuffix; while ($counter < 999) { - $creationsuffix = sprintf("%s%03d", $creation, $counter); + $creationsuffix = sprintf("%s%03d", $value, $counter); if (!defined $creationtimes{$creationsuffix}) { $creationtimes{$creationsuffix} = 1; last; } $counter += 1; } - - $snaps{$type}{$snap}{'creation'}=$creationsuffix; + $snap_data{$snap}{'creation'} = $creationsuffix; } } - return %snaps; -} - -sub getsnapsfallback() { - # fallback (solaris for example doesn't support the -t option) - my ($type,$rhost,$fs,$isroot,%snaps) = @_; - my $mysudocmd; - my $fsescaped = escapeshellparam($fs); - if ($isroot) { $mysudocmd = ''; } else { $mysudocmd = $sudocmd; } - - if ($rhost ne '') { - $rhost = "$sshcmd $rhost"; - # double escaping needed - $fsescaped = escapeshellparam($fsescaped); - } - - my $getsnapcmd = "$rhost $mysudocmd $zfscmd get -Hpd 1 type,guid,creation $fsescaped |"; - writelog('WARN', "snapshot listing failed, trying fallback command"); - writelog('DEBUG', "FALLBACK, getting list of snapshots on $fs using $getsnapcmd..."); - open FH, $getsnapcmd; - my @rawsnaps = ; - close FH or die "CRITICAL ERROR: snapshots couldn't be listed for $fs (exit code $?)"; - - my %creationtimes=(); - - my $state = 0; - foreach my $line (@rawsnaps) { - if ($state < 0) { - $state++; - next; + for my $snap (keys %snap_data) { + if (!$use_fallback || $snap_data{$snap}{'type'} eq 'snapshot') { + $snaps{$type}{$snap}{'guid'} = $snap_data{$snap}{'guid'}; + $snaps{$type}{$snap}{'creation'} = $snap_data{$snap}{'creation'}; } - - if ($state eq 0) { - if ($line !~ /\Q$fs\E\@.*\ttype\s*snapshot/) { - # skip non snapshot type object - $state = -2; - next; - } - } elsif ($state eq 1) { - if ($line !~ /\Q$fs\E\@.*\tguid/) { - die "CRITICAL ERROR: snapshots couldn't be listed for $fs (guid parser error)"; - } - - chomp $line; - my $guid = $line; - $guid =~ s/^.*\tguid\t*(\d*).*/$1/; - my $snap = $line; - $snap =~ s/^.*\@(.*)\tguid.*$/$1/; - if (!snapisincluded($snap)) { next; } - $snaps{$type}{$snap}{'guid'}=$guid; - } elsif ($state eq 2) { - if ($line !~ /\Q$fs\E\@.*\tcreation/) { - die "CRITICAL ERROR: snapshots couldn't be listed for $fs (creation parser error)"; - } - - chomp $line; - my $creation = $line; - $creation =~ s/^.*\tcreation\t*(\d*).*/$1/; - my $snap = $line; - $snap =~ s/^.*\@(.*)\tcreation.*$/$1/; - if (!snapisincluded($snap)) { next; } - - # the accuracy of the creation timestamp is only for a second, but - # snapshots in the same second are highly likely. 
The list command - # has an ordered output so we append another three digit running number - # to the creation timestamp and make sure those are ordered correctly - # for snapshot with the same creation timestamp - my $counter = 0; - my $creationsuffix; - while ($counter < 999) { - $creationsuffix = sprintf("%s%03d", $creation, $counter); - if (!defined $creationtimes{$creationsuffix}) { - $creationtimes{$creationsuffix} = 1; - last; - } - $counter += 1; - } - - $snaps{$type}{$snap}{'creation'}=$creationsuffix; - $state = -1; - } - - $state++; } return %snaps; From 8fabaae5b8efd04c2df9e4f6fdcdb82720dd963e Mon Sep 17 00:00:00 2001 From: Nick Liu Date: Tue, 25 Apr 2023 14:01:54 -0500 Subject: [PATCH 068/130] feat(syncoid): Add "createtxg" property to `getsnaps` The `getsnaps` subroutine now retrieves the "createtxg" property of the snapshot. This is necessary to support the fix for https://github.com/jimsalterjrs/sanoid/issues/815 (Syncoid: Data loss because getoldestsnapshot() might not choose the first snapshot). --- syncoid | 14 ++++++++++---- 1 file changed, 10 insertions(+), 4 deletions(-) diff --git a/syncoid b/syncoid index 3890dee..3a5a9e4 100755 --- a/syncoid +++ b/syncoid @@ -1816,8 +1816,8 @@ sub getsnaps { } my $getsnapcmd = $use_fallback - ? "$rhost $mysudocmd $zfscmd get -Hpd 1 type,guid,creation $fsescaped" - : "$rhost $mysudocmd $zfscmd get -Hpd 1 -t snapshot guid,creation $fsescaped"; + ? "$rhost $mysudocmd $zfscmd get -Hpd 1 all $fsescaped" + : "$rhost $mysudocmd $zfscmd get -Hpd 1 -t snapshot all $fsescaped"; if ($debug) { $getsnapcmd = "$getsnapcmd |"; @@ -1841,8 +1841,13 @@ sub getsnaps { for my $line (@rawsnaps) { chomp $line; my ($dataset, $property, $value) = split /\t/, $line; - my ($fs, $snap) = split /@/, $dataset; - if (!snapisincluded($snap)) { next; } + die "CRITICAL ERROR: Unexpected line format in $line" unless defined $value; + + my (undef, $snap) = split /@/, $dataset; + die "CRITICAL ERROR: Unexpected dataset format in $line" unless $snap; + + if (!snapisincluded($snap)) { next; } + $snap_data{$snap}{$property} = $value; # the accuracy of the creation timestamp is only for a second, but @@ -1868,6 +1873,7 @@ sub getsnaps { for my $snap (keys %snap_data) { if (!$use_fallback || $snap_data{$snap}{'type'} eq 'snapshot') { $snaps{$type}{$snap}{'guid'} = $snap_data{$snap}{'guid'}; + $snaps{$type}{$snap}{'createtxg'} = $snap_data{$snap}{'createtxg'}; $snaps{$type}{$snap}{'creation'} = $snap_data{$snap}{'creation'}; } } From 8907e0cb2f2be688743e9e004ccdbad3a613498c Mon Sep 17 00:00:00 2001 From: Nick Liu Date: Fri, 28 Apr 2023 00:43:47 -0500 Subject: [PATCH 069/130] feat(syncoid): Sort snapshots by `createtxg` if possible It is possible for `creation` of a subsequent snapshot to be in the past compared to the current snapshot due to system clock discrepancies, which leads to earlier snapshots not being replicated in the initial syncoid sync. Also, `syncoid --no-sync-snap` might not pick up the most recently taken snapshot if the clock moved backwards before taking that snapshot. Sorting snapshots by the `createtxg` value is reliable and documented in `man 8 zfsprops` as the proper way to order snapshots, but it was not available in ZFS versions before 0.7. To maintain backwards compatibility, the sorting falls back to sorting by the `creation` property, which was the old behavior. 
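A minimal sketch of the failure mode (hypothetical values, not part of
this patch): if the system clock jumps backwards between two snapshots,
`creation` orders them wrongly while `createtxg` preserves the true order:

```perl
use strict;
use warnings;

# 'second' was taken after 'first' (higher createtxg), but the clock
# had moved backwards, so its creation timestamp is older
my %snaps = (
    'first'  => { 'createtxg' => 100, 'creation' => 1155533800 },
    'second' => { 'createtxg' => 101, 'creation' => 1155533696 },
);

my @by_creation  = sort { $snaps{$a}{'creation'}  <=> $snaps{$b}{'creation'}  } keys %snaps;
my @by_createtxg = sort { $snaps{$a}{'createtxg'} <=> $snaps{$b}{'createtxg'} } keys %snaps;

print "by creation:  @by_creation\n";   # prints: second first (wrong order)
print "by createtxg: @by_createtxg\n";  # prints: first second (true order)
```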
Fixes: https://github.com/jimsalterjrs/sanoid/issues/815 --- syncoid | 20 ++++++++++++++------ 1 file changed, 14 insertions(+), 6 deletions(-) diff --git a/syncoid b/syncoid index 3a5a9e4..44e8486 100755 --- a/syncoid +++ b/syncoid @@ -862,9 +862,9 @@ sub syncdataset { if (defined $args{'delete-target-snapshots'}) { # Find the snapshots that exist on the target, filter with # those that exist on the source. Remaining are the snapshots - # that are only on the target. Then sort by creation date, as - # to remove the oldest snapshots first. - my @to_delete = sort { $snaps{'target'}{$a}{'creation'}<=>$snaps{'target'}{$b}{'creation'} } grep {!exists $snaps{'source'}{$_}} keys %{ $snaps{'target'} }; + # that are only on the target. Then sort to remove the oldest + # snapshots first. + my @to_delete = sort { sortsnapshots(\%snaps, $a, $b) } grep {!exists $snaps{'source'}{$_}} keys %{ $snaps{'target'} }; while (@to_delete) { # Create batch of snapshots to remove my $snaps = join ',', splice(@to_delete, 0, 50); @@ -1480,9 +1480,17 @@ sub readablebytes { return $disp; } +sub sortsnapshots { + my ($snaps, $left, $right) = @_; + if (defined $snaps->{'source'}{$left}{'createtxg'} && defined $snaps->{'source'}{$right}{'createtxg'}) { + return $snaps->{'source'}{$left}{'createtxg'} <=> $snaps->{'source'}{$right}{'createtxg'}; + } + return $snaps->{'source'}{$left}{'creation'} <=> $snaps->{'source'}{$right}{'creation'}; +} + sub getoldestsnapshot { my $snaps = shift; - foreach my $snap ( sort { $snaps{'source'}{$a}{'creation'}<=>$snaps{'source'}{$b}{'creation'} } keys %{ $snaps{'source'} }) { + foreach my $snap (sort { sortsnapshots($snaps, $a, $b) } keys %{ $snaps{'source'} }) { # return on first snap found - it's the oldest return $snap; } @@ -1496,7 +1504,7 @@ sub getoldestsnapshot { sub getnewestsnapshot { my $snaps = shift; - foreach my $snap ( sort { $snaps{'source'}{$b}{'creation'}<=>$snaps{'source'}{$a}{'creation'} } keys %{ $snaps{'source'} }) { + foreach my $snap (sort { sortsnapshots($snaps, $b, $a) } keys %{ $snaps{'source'} }) { # return on first snap found - it's the newest writelog('INFO', "NEWEST SNAPSHOT: $snap"); return $snap; @@ -1675,7 +1683,7 @@ sub pruneoldsyncsnaps { sub getmatchingsnapshot { my ($sourcefs, $targetfs, $snaps) = @_; - foreach my $snap ( sort { $snaps{'source'}{$b}{'creation'}<=>$snaps{'source'}{$a}{'creation'} } keys %{ $snaps{'source'} }) { + foreach my $snap ( sort { sortsnapshots($snaps, $b, $a) } keys %{ $snaps{'source'} }) { if (defined $snaps{'target'}{$snap}) { if ($snaps{'source'}{$snap}{'guid'} == $snaps{'target'}{$snap}{'guid'}) { return $snap; From ab361017e7d3eb5e140d5ce14eafbc41c91d44d7 Mon Sep 17 00:00:00 2001 From: Nick Liu Date: Tue, 25 Apr 2023 17:07:32 -0500 Subject: [PATCH 070/130] feat(syncoid): Match snapshots to bookmarks by `createtxg` if possible This is a continuation of a previous commit to sort snapshots by `createtxg` if possible. Now, we have to match the behavior when selecting an appropriate snapshot based on the transaction group of the relevant bookmark in `syncdataset()`. 
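A minimal sketch of the selection rule with hypothetical numbers (the
actual change is in the diff below): compare by `createtxg` only when
both the snapshot and the bookmark carry it, and fall back to the
clock-dependent `creation` value otherwise:

```perl
use strict;
use warnings;

# hypothetical property values for a snapshot and a bookmark
my %snap     = ( 'createtxg' => 2048, 'creation' => 1700000123000 );
my %bookmark = ( 'createtxg' => 2040, 'creation' => 1700000100000 );

# prefer the reliable transaction group when available on both sides
my $comparisonkey = (defined $snap{'createtxg'} && defined $bookmark{'createtxg'})
    ? 'createtxg'
    : 'creation';

if ($snap{$comparisonkey} >= $bookmark{$comparisonkey}) {
    print "snapshot is at or after the bookmark (compared by $comparisonkey)\n";
}
```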
Supersedes: https://github.com/jimsalterjrs/sanoid/pull/667 --- syncoid | 89 ++++++++++++++++++++++++++++----------------------------- 1 file changed, 44 insertions(+), 45 deletions(-) diff --git a/syncoid b/syncoid index 44e8486..61814d2 100755 --- a/syncoid +++ b/syncoid @@ -438,7 +438,7 @@ sub syncdataset { # Don't send the sync snap if it's filtered out by --exclude-snaps or # --include-snaps if (!snapisincluded($newsyncsnap)) { - $newsyncsnap = getnewestsnapshot($sourcehost,$sourcefs,$sourceisroot); + $newsyncsnap = getnewestsnapshot(\%snaps); if ($newsyncsnap eq 0) { writelog('WARN', "CRITICAL: no snapshots exist on source $sourcefs, and you asked for --no-sync-snap."); if ($exitcode < 1) { $exitcode = 1; } @@ -447,7 +447,7 @@ sub syncdataset { } } else { # we don't want sync snapshots created, so use the newest snapshot we can find. - $newsyncsnap = getnewestsnapshot($sourcehost,$sourcefs,$sourceisroot); + $newsyncsnap = getnewestsnapshot(\%snaps); if ($newsyncsnap eq 0) { writelog('WARN', "CRITICAL: no snapshots exist on source $sourcefs, and you asked for --no-sync-snap."); if ($exitcode < 1) { $exitcode = 1; } @@ -584,8 +584,7 @@ sub syncdataset { my $targetsize = getzfsvalue($targethost,$targetfs,$targetisroot,'-p used'); - my $bookmark = 0; - my $bookmarkcreation = 0; + my %bookmark = (); $matchingsnap = getmatchingsnapshot($sourcefs, $targetfs, \%snaps); if (! $matchingsnap) { @@ -593,19 +592,18 @@ sub syncdataset { my %bookmarks = getbookmarks($sourcehost,$sourcefs,$sourceisroot); # check for matching guid of source bookmark and target snapshot (oldest first) - foreach my $snap ( sort { $snaps{'target'}{$b}{'creation'}<=>$snaps{'target'}{$a}{'creation'} } keys %{ $snaps{'target'} }) { + foreach my $snap ( sort { sortsnapshots(\%snaps, $b, $a) } keys %{ $snaps{'target'} }) { my $guid = $snaps{'target'}{$snap}{'guid'}; if (defined $bookmarks{$guid}) { # found a match - $bookmark = $bookmarks{$guid}{'name'}; - $bookmarkcreation = $bookmarks{$guid}{'creation'}; + %bookmark = %{ $bookmarks{$guid} }; $matchingsnap = $snap; last; } } - if (! $bookmark) { + if (! 
%bookmark) { # force delete is not possible for the root dataset if ($args{'force-delete'} && index($targetfs, '/') != -1) { writelog('INFO', "Removing $targetfs because no matching snapshots were found"); @@ -678,15 +676,18 @@ sub syncdataset { my $nextsnapshot = 0; - if ($bookmark) { - my $bookmarkescaped = escapeshellparam($bookmark); + if (%bookmark) { if (!defined $args{'no-stream'}) { # if intermediate snapshots are needed we need to find the next oldest snapshot, # do an replication to it and replicate as always from oldest to newest # because bookmark sends doesn't support intermediates directly - foreach my $snap ( sort { $snaps{'source'}{$a}{'creation'}<=>$snaps{'source'}{$b}{'creation'} } keys %{ $snaps{'source'} }) { - if ($snaps{'source'}{$snap}{'creation'} >= $bookmarkcreation) { + foreach my $snap ( sort { sortsnapshots(\%snaps, $a, $b) } keys %{ $snaps{'source'} }) { + my $comparisonkey = 'creation'; + if (defined $snaps{'source'}{$snap}{'createtxg'} && defined $bookmark{'createtxg'}) { + $comparisonkey = 'createtxg'; + } + if ($snaps{'source'}{$snap}{$comparisonkey} >= $bookmark{$comparisonkey}) { $nextsnapshot = $snap; last; } @@ -694,13 +695,13 @@ sub syncdataset { } if ($nextsnapshot) { - ($exit, $stdout) = syncbookmark($sourcehost, $sourcefs, $targethost, $targetfs, $bookmark, $nextsnapshot); + ($exit, $stdout) = syncbookmark($sourcehost, $sourcefs, $targethost, $targetfs, $bookmark{'name'}, $nextsnapshot); $exit == 0 or do { if (!$resume && $stdout =~ /\Qcontains partially-complete state\E/) { writelog('WARN', "resetting partially receive state"); resetreceivestate($targethost,$targetfs,$targetisroot); - (my $ret) = syncbookmark($sourcehost, $sourcefs, $targethost, $targetfs, $bookmark, $nextsnapshot); + (my $ret) = syncbookmark($sourcehost, $sourcefs, $targethost, $targetfs, $bookmark{'name'}, $nextsnapshot); $ret == 0 or do { if ($exitcode < 2) { $exitcode = 2; } return 0; @@ -714,13 +715,13 @@ sub syncdataset { $matchingsnap = $nextsnapshot; $matchingsnapescaped = escapeshellparam($matchingsnap); } else { - ($exit, $stdout) = syncbookmark($sourcehost, $sourcefs, $targethost, $targetfs, $bookmark, $newsyncsnap); + ($exit, $stdout) = syncbookmark($sourcehost, $sourcefs, $targethost, $targetfs, $bookmark{'name'}, $newsyncsnap); $exit == 0 or do { if (!$resume && $stdout =~ /\Qcontains partially-complete state\E/) { writelog('WARN', "resetting partially receive state"); resetreceivestate($targethost,$targetfs,$targetisroot); - (my $ret) = syncbookmark($sourcehost, $sourcefs, $targethost, $targetfs, $bookmark, $newsyncsnap); + (my $ret) = syncbookmark($sourcehost, $sourcefs, $targethost, $targetfs, $bookmark{'name'}, $newsyncsnap); $ret == 0 or do { if ($exitcode < 2) { $exitcode = 2; } return 0; @@ -735,7 +736,7 @@ sub syncdataset { # do a normal replication if bookmarks aren't used or if previous # bookmark replication was only done to the next oldest snapshot - if (!$bookmark || $nextsnapshot) { + if (!%bookmark || $nextsnapshot) { if ($matchingsnap eq $newsyncsnap) { # edge case: bookmark replication used the latest snapshot return 0; @@ -1902,7 +1903,7 @@ sub getbookmarks() { } my $error = 0; - my $getbookmarkcmd = "$rhost $mysudocmd $zfscmd get -Hpd 1 -t bookmark guid,creation $fsescaped 2>&1 |"; + my $getbookmarkcmd = "$rhost $mysudocmd $zfscmd get -Hpd 1 -t bookmark all $fsescaped 2>&1 |"; writelog('DEBUG', "getting list of bookmarks on $fs using $getbookmarkcmd..."); open FH, $getbookmarkcmd; my @rawbookmarks = ; @@ -1917,48 +1918,46 @@ sub getbookmarks() { die 
"CRITICAL ERROR: bookmarks couldn't be listed for $fs (exit code $?)"; } - # this is a little obnoxious. get guid,creation returns guid,creation on two separate lines - # as though each were an entirely separate get command. + my %bookmark_data; + my %creationtimes; - my $lastguid; - my %creationtimes=(); + for my $line (@rawbookmarks) { + chomp $line; + my ($dataset, $property, $value) = split /\t/, $line; + die "CRITICAL ERROR: Unexpected line format in $line" unless defined $value; - foreach my $line (@rawbookmarks) { - # only import bookmark guids, creation from the specified filesystem - if ($line =~ /\Q$fs\E\#.*\tguid/) { - chomp $line; - $lastguid = $line; - $lastguid =~ s/^.*\tguid\t*(\d*).*/$1/; - my $bookmark = $line; - $bookmark =~ s/^.*\#(.*)\tguid.*$/$1/; - $bookmarks{$lastguid}{'name'}=$bookmark; - } elsif ($line =~ /\Q$fs\E\#.*\tcreation/) { - chomp $line; - my $creation = $line; - $creation =~ s/^.*\tcreation\t*(\d*).*/$1/; - my $bookmark = $line; - $bookmark =~ s/^.*\#(.*)\tcreation.*$/$1/; + my (undef, $bookmark) = split /#/, $dataset; + die "CRITICAL ERROR: Unexpected dataset format in $line" unless $bookmark; - # the accuracy of the creation timestamp is only for a second, but - # bookmarks in the same second are possible. The list command - # has an ordered output so we append another three digit running number - # to the creation timestamp and make sure those are ordered correctly - # for bookmarks with the same creation timestamp + $bookmark_data{$bookmark}{$property} = $value; + + # the accuracy of the creation timestamp is only for a second, but + # bookmarks in the same second are possible. The list command + # has an ordered output so we append another three digit running number + # to the creation timestamp and make sure those are ordered correctly + # for bookmarks with the same creation timestamp + if ($property eq 'creation') { my $counter = 0; my $creationsuffix; while ($counter < 999) { - $creationsuffix = sprintf("%s%03d", $creation, $counter); + $creationsuffix = sprintf("%s%03d", $value, $counter); if (!defined $creationtimes{$creationsuffix}) { $creationtimes{$creationsuffix} = 1; last; } $counter += 1; } - - $bookmarks{$lastguid}{'creation'}=$creationsuffix; + $bookmark_data{$bookmark}{'creation'} = $creationsuffix; } } + for my $bookmark (keys %bookmark_data) { + my $guid = $bookmark_data{$bookmark}{'guid'}; + $bookmarks{$guid}{'name'} = $bookmark; + $bookmarks{$guid}{'creation'} = $bookmark_data{$bookmark}{'creation'}; + $bookmarks{$guid}{'createtxg'} = $bookmark_data{$bookmark}{'createtxg'}; + } + return %bookmarks; } From b37092f37681467ae000b59be7ca61ef6971c586 Mon Sep 17 00:00:00 2001 From: Nick Liu Date: Tue, 25 Apr 2023 17:35:45 -0500 Subject: [PATCH 071/130] test(syncoid): Add test to verify out-of-order snapshot sync See https://github.com/jimsalterjrs/sanoid/issues/815 for the original test. --- .../815_sync_out-of-order_snapshots/run.sh | 46 +++++++++++++++++++ 1 file changed, 46 insertions(+) create mode 100755 tests/syncoid/815_sync_out-of-order_snapshots/run.sh diff --git a/tests/syncoid/815_sync_out-of-order_snapshots/run.sh b/tests/syncoid/815_sync_out-of-order_snapshots/run.sh new file mode 100755 index 0000000..af67b36 --- /dev/null +++ b/tests/syncoid/815_sync_out-of-order_snapshots/run.sh @@ -0,0 +1,46 @@ +#!/bin/bash + +# test verifying snapshots with out-of-order snapshot creation datetimes + +set -x +set -e + +. 
../../common/lib.sh + +POOL_IMAGE="/tmp/jimsalterjrs_sanoid_815.img" +POOL_SIZE="64M" +POOL_NAME="jimsalterjrs_sanoid_815" + +truncate -s "${POOL_SIZE}" "${POOL_IMAGE}" + +zpool create -m none -f "${POOL_NAME}" "${POOL_IMAGE}" + +function cleanUp { + zpool export "${POOL_NAME}" + rm -f "${POOL_IMAGE}" +} + +# export pool and remove the image in any case +trap cleanUp EXIT + +zfs create "${POOL_NAME}"/before +zfs snapshot "${POOL_NAME}"/before@this-snapshot-should-make-it-into-the-after-dataset + +disableTimeSync +setdate 1155533696 +zfs snapshot "${POOL_NAME}"/before@oldest-snapshot + +zfs snapshot "${POOL_NAME}"/before@another-snapshot-does-not-matter +../../../syncoid --sendoptions="Lec" "${POOL_NAME}"/before "${POOL_NAME}"/after + +# verify +saveSnapshotList "${POOL_NAME}" "snapshot-list.txt" + +grep "${POOL_NAME}/before@this-snapshot-should-make-it-into-the-after-dataset" "snapshot-list.txt" || exit $? +grep "${POOL_NAME}/after@this-snapshot-should-make-it-into-the-after-dataset" "snapshot-list.txt" || exit $? +grep "${POOL_NAME}/before@oldest-snapshot" "snapshot-list.txt" || exit $? +grep "${POOL_NAME}/after@oldest-snapshot" "snapshot-list.txt" || exit $? +grep "${POOL_NAME}/before@another-snapshot-does-not-matter" "snapshot-list.txt" || exit $? +grep "${POOL_NAME}/after@another-snapshot-does-not-matter" "snapshot-list.txt" || exit $? + +exit 0 From cc495183afb80d4803aa0d7717dbae0b49688274 Mon Sep 17 00:00:00 2001 From: Christoph Klaffl Date: Wed, 26 Apr 2023 19:47:49 +0200 Subject: [PATCH 072/130] fix default behaviour if run without providing arguments --- sanoid | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/sanoid b/sanoid index 6de6c30..12a184a 100755 --- a/sanoid +++ b/sanoid @@ -30,7 +30,7 @@ GetOptions(\%args, "verbose", "debug", "cron", "readonly", "quiet", ) or pod2usage(2); # If only config directory (or nothing) has been specified, default to --cron --verbose -if (keys %args < 2) { +if (keys %args < 4) { $args{'cron'} = 1; $args{'verbose'} = 1; } From a904ba02f3158b971786f2e864fabe5227075cc8 Mon Sep 17 00:00:00 2001 From: Nick Liu Date: Fri, 28 Apr 2023 01:00:03 -0500 Subject: [PATCH 073/130] enh(run-tests.sh): Sort tests with "general numeric sort" The sort before tended to be alphabetical, which put test `8_force_delete_snapshot` after `815_sync_out-of-order_snapshots`, but `8` should come before `815`. Before: ``` root@demo:~/sanoid/tests/syncoid# ./run-tests.sh Running test 1_bookmark_replication_intermediate ... [PASS] Running test 2_bookmark_replication_no_intermediate ... [PASS] Running test 3_force_delete ... [PASS] Running test 4_bookmark_replication_edge_case ... [PASS] Running test 5_reset_resume_state ... mbuffer: error: outputThread: error writing to at offset 0x90000: Broken pipe mbuffer: warning: error during output to : Broken pipe [PASS] Running test 6_reset_resume_state2 ... [PASS] Running test 7_preserve_recordsize ... [PASS] Running test 815_sync_out-of-order_snapshots ... [PASS] Running test 8_force_delete_snapshot ... [PASS] ``` After: ``` root@demo:~/sanoid/tests/syncoid# ./run-tests.sh Running test 1_bookmark_replication_intermediate ... [PASS] Running test 2_bookmark_replication_no_intermediate ... [PASS] Running test 3_force_delete ... [PASS] Running test 4_bookmark_replication_edge_case ... [PASS] Running test 5_reset_resume_state ... mbuffer: error: outputThread: error writing to at offset 0xf0000: Broken pipe mbuffer: warning: error during output to : Broken pipe [PASS] Running test 6_reset_resume_state2 ... 
[PASS] Running test 7_preserve_recordsize ... [PASS] Running test 8_force_delete_snapshot ... [PASS] Running test 815_sync_out-of-order_snapshots ... [PASS] ``` --- tests/run-tests.sh | 2 +- tests/syncoid/run-tests.sh | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/tests/run-tests.sh b/tests/run-tests.sh index 38054b0..418657c 100755 --- a/tests/run-tests.sh +++ b/tests/run-tests.sh @@ -2,7 +2,7 @@ # run's all the available tests -for test in */; do +for test in $(find . -mindepth 1 -maxdepth 1 -type d -printf "%P\n" | sort -g); do if [ ! -x "${test}/run.sh" ]; then continue fi diff --git a/tests/syncoid/run-tests.sh b/tests/syncoid/run-tests.sh index a9843a5..5564667 100755 --- a/tests/syncoid/run-tests.sh +++ b/tests/syncoid/run-tests.sh @@ -2,7 +2,7 @@ # run's all the available tests -for test in */; do +for test in $(find . -mindepth 1 -maxdepth 1 -type d -printf "%P\n" | sort -g); do if [ ! -x "${test}/run.sh" ]; then continue fi From f3d4d309b5a6ccba11512b698041e80a0c8518fa Mon Sep 17 00:00:00 2001 From: Christoph Klaffl Date: Tue, 18 Jul 2023 08:38:40 +0200 Subject: [PATCH 074/130] implemented flag for preserving properties without the zfs -p flag --- README.md | 4 ++ syncoid | 64 ++++++++++++++++++++- tests/syncoid/9_preserve_properties/run.sh | 66 ++++++++++++++++++++++ 3 files changed, 131 insertions(+), 3 deletions(-) create mode 100755 tests/syncoid/9_preserve_properties/run.sh diff --git a/README.md b/README.md index 7a10dac..6549617 100644 --- a/README.md +++ b/README.md @@ -323,6 +323,10 @@ As of 1.4.18, syncoid also automatically supports and enables resume of interrup This argument tells syncoid to set the recordsize on the target before writing any data to it matching the one set on the replication src. This only applies to initial sends. ++ --preserve-properties + + This argument tells syncoid to get all locally set dataset properties from the source and apply all supported ones on the target before writing any data. It's similar to the '-p' flag for zfs send but also works for encrypted datasets in non raw sends. This only applies to initial sends. + + --delete-target-snapshots With this argument snapshots which are missing on the source will be destroyed on the target. Use this if you only want to handle snapshots on the source. 
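In essence, --preserve-properties gathers the locally set properties on
the source (via `zfs get all -s local`) and replays them as
`-o property=value` arguments to zfs receive. A minimal sketch with
made-up property values (the full implementation, including its property
blacklist, follows in the syncoid diff):

```perl
use strict;
use warnings;

# hypothetical result of `zfs get all -s local` on the source dataset
my %properties = (
    'compression' => 'lz4',
    'recordsize'  => '16K',
    'atime'       => 'off',
);

my $recvoptions = '';
foreach my $key (sort keys %properties) {
    $recvoptions .= " -o $key=$properties{$key}";
}

print "zfs receive$recvoptions tank/dst\n";
```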
diff --git a/syncoid b/syncoid index eb38539..6cde9f9 100755 --- a/syncoid +++ b/syncoid @@ -26,7 +26,7 @@ GetOptions(\%args, "no-command-checks", "monitor-version", "compress=s", "dumpsn "debug", "quiet", "no-stream", "no-sync-snap", "no-resume", "exclude=s@", "skip-parent", "identifier=s", "no-clone-handling", "no-privilege-elevation", "force-delete", "no-rollback", "create-bookmark", "use-hold", "pv-options=s" => \$pvoptions, "keep-sync-snap", "preserve-recordsize", "mbuffer-size=s" => \$mbuffer_size, - "delete-target-snapshots", "insecure-direct-connection=s") + "delete-target-snapshots", "insecure-direct-connection=s", "preserve-properties") or pod2usage(2); my %compressargs = %{compressargset($args{'compress'} || 'default')}; # Can't be done with GetOptions arg, as default still needs to be set @@ -487,11 +487,19 @@ sub syncdataset { } my $oldestsnapescaped = escapeshellparam($oldestsnap); - if (defined $args{'preserve-recordsize'}) { + if (defined $args{'preserve-properties'}) { + my %properties = getlocalzfsvalues($sourcehost,$sourcefs,$sourceisroot); + + foreach my $key (keys %properties) { + my $value = $properties{$key}; + if ($debug) { print "DEBUG: will set $key to $value ...\n"; } + $recvoptions .= " -o $key=$value"; + } + } elsif (defined $args{'preserve-recordsize'}) { my $type = getzfsvalue($sourcehost,$sourcefs,$sourceisroot,'type'); if ($type eq "filesystem") { my $recordsize = getzfsvalue($sourcehost,$sourcefs,$sourceisroot,'recordsize'); - $recvoptions .= "-o recordsize=$recordsize" + $recvoptions .= "-o recordsize=$recordsize"; } } @@ -1335,6 +1343,55 @@ sub getzfsvalue { return $wantarray ? ($value, $error) : $value; } +sub getlocalzfsvalues { + my ($rhost,$fs,$isroot) = @_; + + my $fsescaped = escapeshellparam($fs); + + if ($rhost ne '') { + $rhost = "$sshcmd $rhost"; + # double escaping needed + $fsescaped = escapeshellparam($fsescaped); + } + + if ($debug) { print "DEBUG: getting locally set values of properties on $fs...\n"; } + my $mysudocmd; + if ($isroot) { $mysudocmd = ''; } else { $mysudocmd = $sudocmd; } + if ($debug) { print "$rhost $mysudocmd $zfscmd get all -s local -H $fsescaped\n"; } + my ($values, $error, $exit) = capture { + system("$rhost $mysudocmd $zfscmd get all -s local -H $fsescaped"); + }; + + my %properties=(); + + if ($exit != 0) { + warn "WARNING: getlocalzfsvalues failed for $fs: $error"; + if ($exitcode < 1) { $exitcode = 1; } + return %properties; + } + + my @blacklist = ( + "available", "compressratio", "createtxg", "creation", "clones", + "defer_destroy", "encryptionroot", "filesystem_count", "keystatus", "guid", + "logicalreferenced", "logicalused", "mounted", "objsetid", "origin", + "receive_resume_token", "redact_snaps", "referenced", "refcompressratio", "snapshot_count", + "type", "used", "usedbychildren", "usedbydataset", "usedbyrefreservation", + "usedbysnapshots", "userrefs", "snapshots_changed", "volblocksize", "written", + "version", "volsize", "casesensitivity", "normalization", "utf8only" + ); + my %blacklisthash = map {$_ => 1} @blacklist; + + foreach (split(/\n/,$values)) { + my @parts = split(/\t/, $_); + if (exists $blacklisthash{$parts[1]}) { + next; + } + $properties{$parts[1]} = $parts[2]; + } + + return %properties; +} + sub readablebytes { my $bytes = shift; my $disp; @@ -2153,6 +2210,7 @@ Options: --create-bookmark Creates a zfs bookmark for the newest snapshot on the source after replication succeeds (only works with --no-sync-snap) --use-hold Adds a hold to the newest snapshot on the source and target after 
replication succeeds and removes the hold after the next succesful replication. The hold name incldues the identifier if set. This allows for separate holds in case of multiple targets --preserve-recordsize Preserves the recordsize on initial sends to the target + --preserve-properties Preserves locally set dataset properties similiar to the zfs send -p flag but this one will also work for encrypted datasets in non raw sends --no-rollback Does not rollback snapshots on target (it probably requires a readonly target) --delete-target-snapshots With this argument snapshots which are missing on the source will be destroyed on the target. Use this if you only want to handle snapshots on the source. --exclude=REGEX Exclude specific datasets which match the given regular expression. Can be specified multiple times diff --git a/tests/syncoid/9_preserve_properties/run.sh b/tests/syncoid/9_preserve_properties/run.sh new file mode 100755 index 0000000..497ce9a --- /dev/null +++ b/tests/syncoid/9_preserve_properties/run.sh @@ -0,0 +1,66 @@ +#!/bin/bash + +# test preserving locally set properties from the src dataset to the target one + +set -x +set -e + +. ../../common/lib.sh + +POOL_IMAGE="/tmp/syncoid-test-9.zpool" +MOUNT_TARGET="/tmp/syncoid-test-9.mount" +POOL_SIZE="1000M" +POOL_NAME="syncoid-test-9" + +truncate -s "${POOL_SIZE}" "${POOL_IMAGE}" + +zpool create -m none -f "${POOL_NAME}" "${POOL_IMAGE}" + +function cleanUp { + zpool export "${POOL_NAME}" +} + +# export pool in any case +trap cleanUp EXIT + +zfs create -o recordsize=16k -o xattr=on -o mountpoint=none -o primarycache=none "${POOL_NAME}"/src +zfs create -V 100M -o volblocksize=8k "${POOL_NAME}"/src/zvol8 +zfs create -V 100M -o volblocksize=16k -o primarycache=all "${POOL_NAME}"/src/zvol16 +zfs create -V 100M -o volblocksize=64k "${POOL_NAME}"/src/zvol64 +zfs create -o recordsize=16k -o primarycache=none "${POOL_NAME}"/src/16 +zfs create -o recordsize=32k -o acltype=posixacl "${POOL_NAME}"/src/32 + +../../../syncoid --preserve-properties --recursive --debug --compress=none "${POOL_NAME}"/src "${POOL_NAME}"/dst + + +if [ "$(zfs get recordsize -H -o value -t filesystem "${POOL_NAME}"/dst)" != "16K" ]; then + exit 1 +fi + +if [ "$(zfs get mountpoint -H -o value -t filesystem "${POOL_NAME}"/dst)" != "none" ]; then + exit 1 +fi + +if [ "$(zfs get xattr -H -o value -t filesystem "${POOL_NAME}"/dst)" != "on" ]; then + exit 1 +fi + +if [ "$(zfs get primarycache -H -o value -t filesystem "${POOL_NAME}"/dst)" != "none" ]; then + exit 1 +fi + +if [ "$(zfs get recordsize -H -o value -t filesystem "${POOL_NAME}"/dst/16)" != "16K" ]; then + exit 1 +fi + +if [ "$(zfs get primarycache -H -o value -t filesystem "${POOL_NAME}"/dst/16)" != "none" ]; then + exit 1 +fi + +if [ "$(zfs get recordsize -H -o value -t filesystem "${POOL_NAME}"/dst/32)" != "32K" ]; then + exit 1 +fi + +if [ "$(zfs get acltype -H -o value -t filesystem "${POOL_NAME}"/dst/32)" != "posix" ]; then + exit 1 +fi From 538416879d45baa981307aeadd3abe34adae3874 Mon Sep 17 00:00:00 2001 From: Christoph Klaffl Date: Tue, 18 Jul 2023 18:09:51 +0200 Subject: [PATCH 075/130] prepare 2.2.0 --- CHANGELIST | 14 ++++++++++++++ VERSION | 2 +- findoid | 2 +- packages/debian/changelog | 18 ++++++++++++++++++ packages/rhel/sanoid.spec | 4 +++- sanoid | 2 +- syncoid | 2 +- 7 files changed, 39 insertions(+), 5 deletions(-) diff --git a/CHANGELIST b/CHANGELIST index 6ddc565..314e431 100644 --- a/CHANGELIST +++ b/CHANGELIST @@ -1,3 +1,17 @@ +2.2.0 [overall] documentation updates, small fixes (@azmodude, 
@deviantintegral, @jimsalterjrs, @alexhaydock, @cbreak-black, @kd8bny, @JavaScriptDude, @veeableful, @rsheasby, @Topslakr, @mavhc, @adam-stamand, @joelishness, @jsoref, @dodexahedron, @phreaker0) + [syncoid] implemented flag for preserving properties without the zfs -p flag (@phreaker0) + [syncoid] implemented target snapshot deletion (@mat813) + [syncoid] support bookmarks which are taken in the same second (@delxg, @phreaker0) + [syncoid] exit with an error if the specified src dataset doesn't exist (@phreaker0) + [syncoid] rollback is now done implicitly instead of explicit (@jimsalterjrs, @phreaker0) + [syncoid] append a rand int to the socket name to prevent collisions with parallel invocations (@Gryd3) + [syncoid] implemented support for ssh_config(5) files (@endreszabo) + [syncoid] snapshot hold/unhold support (@rbike) + [sanoid] handle duplicate key definitions gracefully (@phreaker0) + [syncoid] implemented removal of conflicting snapshots with force-delete option (@phreaker0) + [sanoid] implemented pre pruning script hook (@phreaker0) + [syncoid] implemented direct connection support (bypass ssh) for the actual data transfer (@phreaker0) + 2.1.0 [overall] documentation updates, small fixes (@HavardLine, @croadfeldt, @jimsalterjrs, @jim-perkins, @kr4z33, @phreaker0) [syncoid] do not require user to be specified for syncoid (@aerusso) [syncoid] implemented option for keeping sync snaps (@phreaker0) diff --git a/VERSION b/VERSION index 7ec1d6d..ccbccc3 100644 --- a/VERSION +++ b/VERSION @@ -1 +1 @@ -2.1.0 +2.2.0 diff --git a/findoid b/findoid index 98ad581..0bb5e5f 100755 --- a/findoid +++ b/findoid @@ -4,7 +4,7 @@ # from http://www.gnu.org/licenses/gpl-3.0.html on 2014-11-17. A copy should also be available in this # project's Git repository at https://github.com/jimsalterjrs/sanoid/blob/master/LICENSE. 
-$::VERSION = '2.1.0'; +$::VERSION = '2.2.0'; use strict; use warnings; diff --git a/packages/debian/changelog b/packages/debian/changelog index 4cab69b..00c0c07 100644 --- a/packages/debian/changelog +++ b/packages/debian/changelog @@ -1,3 +1,21 @@ +sanoid (2.2.0) unstable; urgency=medium + + [overall] documentation updates, small fixes (@azmodude, @deviantintegral, @jimsalterjrs, @alexhaydock, @cbreak-black, @kd8bny, @JavaScriptDude, @veeableful, @rsheasby, @Topslakr, @mavhc, @adam-stamand, @joelishness, @jsoref, @dodexahedron, @phreaker0) + [syncoid] implemented flag for preserving properties without the zfs -p flag (@phreaker0) + [syncoid] implemented target snapshot deletion (@mat813) + [syncoid] support bookmarks which are taken in the same second (@delxg, @phreaker0) + [syncoid] exit with an error if the specified src dataset doesn't exist (@phreaker0) + [syncoid] rollback is now done implicitly instead of explicit (@jimsalterjrs, @phreaker0) + [syncoid] append a rand int to the socket name to prevent collisions with parallel invocations (@Gryd3) + [syncoid] implemented support for ssh_config(5) files (@endreszabo) + [syncoid] snapshot hold/unhold support (@rbike) + [sanoid] handle duplicate key definitions gracefully (@phreaker0) + [syncoid] implemented removal of conflicting snapshots with force-delete option (@phreaker0) + [sanoid] implemented pre pruning script hook (@phreaker0) + [syncoid] implemented direct connection support (bypass ssh) for the actual data transfer (@phreaker0) + + -- Jim Salter Tue, 18 Jul 2023 10:04:00 +0200 + sanoid (2.1.0) unstable; urgency=medium [overall] documentation updates, small fixes (@HavardLine, @croadfeldt, @jimsalterjrs, @jim-perkins, @kr4z33, @phreaker0) diff --git a/packages/rhel/sanoid.spec b/packages/rhel/sanoid.spec index 376f58a..218f52d 100644 --- a/packages/rhel/sanoid.spec +++ b/packages/rhel/sanoid.spec @@ -1,4 +1,4 @@ -%global version 2.1.0 +%global version 2.2.0 %global git_tag v%{version} # Enable with systemctl "enable sanoid.timer" @@ -111,6 +111,8 @@ echo "* * * * * root %{_sbindir}/sanoid --cron" > %{buildroot}%{_docdir}/%{name} %endif %changelog +* Tue Jul 18 2023 Christoph Klaffl - 2.2.0 +- Bump to 2.2.0 * Tue Nov 24 2020 Christoph Klaffl - 2.1.0 - Bump to 2.1.0 * Wed Oct 02 2019 Christoph Klaffl - 2.0.3 diff --git a/sanoid b/sanoid index 12a184a..5150f3b 100755 --- a/sanoid +++ b/sanoid @@ -4,7 +4,7 @@ # from http://www.gnu.org/licenses/gpl-3.0.html on 2014-11-17. A copy should also be available in this # project's Git repository at https://github.com/jimsalterjrs/sanoid/blob/master/LICENSE. -$::VERSION = '2.1.0'; +$::VERSION = '2.2.0'; my $MINIMUM_DEFAULTS_VERSION = 2; use strict; diff --git a/syncoid b/syncoid index 6cde9f9..5ed00f0 100755 --- a/syncoid +++ b/syncoid @@ -4,7 +4,7 @@ # from http://www.gnu.org/licenses/gpl-3.0.html on 2014-11-17. A copy should also be available in this # project's Git repository at https://github.com/jimsalterjrs/sanoid/blob/master/LICENSE. 
-$::VERSION = '2.1.0'; +$::VERSION = '2.2.0'; use strict; use warnings; From 7ab0ac0cf6bdc112b2b0a5a93f464892af66ca8a Mon Sep 17 00:00:00 2001 From: Christoph Klaffl Date: Tue, 25 Jul 2023 22:56:25 +0200 Subject: [PATCH 076/130] trim config values --- sanoid | 2 ++ 1 file changed, 2 insertions(+) diff --git a/sanoid b/sanoid index 5150f3b..8848e3b 100755 --- a/sanoid +++ b/sanoid @@ -905,6 +905,8 @@ sub init { warn "duplicate key '$key' in section '$section', using the value from the first occurence and ignoring the others.\n"; $ini{$section}{$key} = $value->[0]; } + # trim + $ini{$section}{$key} =~ s/^\s+|\s+$//g; } if ($section =~ /^template_/) { next; } # don't process templates directly From a881d22c85613328ef5dd08529c559b32ebc25e0 Mon Sep 17 00:00:00 2001 From: Vitalii Zurian Date: Tue, 1 Aug 2023 10:05:46 +0200 Subject: [PATCH 077/130] Update INSTALL.md --- INSTALL.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/INSTALL.md b/INSTALL.md index 4ea39aa..9c6acfa 100644 --- a/INSTALL.md +++ b/INSTALL.md @@ -160,7 +160,7 @@ Now, proceed to configure [**Sanoid**](#configuration) Install prerequisite software: ```bash -pkg install p5-Config-Inifiles p5-Capture-Tiny pv mbuffer lzop +pkg install p5-Config-Inifiles p5-Capture-Tiny pv mbuffer lzop sanoid ``` **Additional notes:** From 6b874a7e3c36c1429867aba50bca1a6b70c75fa9 Mon Sep 17 00:00:00 2001 From: Michael Jeanson Date: Thu, 3 Aug 2023 16:17:41 -0400 Subject: [PATCH 078/130] Fix typos in syncoid documentation Signed-off-by: Michael Jeanson --- README.md | 2 +- syncoid | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/README.md b/README.md index 6549617..96301ad 100644 --- a/README.md +++ b/README.md @@ -317,7 +317,7 @@ As of 1.4.18, syncoid also automatically supports and enables resume of interrup This argument tells syncoid to create a zfs bookmark for the newest snapshot after it got replicated successfully. The bookmark name will be equal to the snapshot name. Only works in combination with the --no-sync-snap option. This can be very useful for irregular replication where the last matching snapshot on the source was already deleted but the bookmark remains so a replication is still possible. + --use-hold - This argument tells syncoid to add a hold to the newest snapshot on the source and target after replication succeeds and to remove the hold after the next succesful replication. Setting a hold prevents the snapshots from being destroyed. The hold name incldues the identifier if set. This allows for separate holds in case of replication to multiple targets. + This argument tells syncoid to add a hold to the newest snapshot on the source and target after replication succeeds and to remove the hold after the next successful replication. Setting a hold prevents the snapshots from being destroyed. The hold name includes the identifier if set. This allows for separate holds in case of replication to multiple targets. + --preserve-recordsize diff --git a/syncoid b/syncoid index 5ed00f0..976baba 100755 --- a/syncoid +++ b/syncoid @@ -2208,9 +2208,9 @@ Options: --no-sync-snap Does not create new snapshot, only transfers existing --keep-sync-snap Don't destroy created sync snapshots --create-bookmark Creates a zfs bookmark for the newest snapshot on the source after replication succeeds (only works with --no-sync-snap) - --use-hold Adds a hold to the newest snapshot on the source and target after replication succeeds and removes the hold after the next succesful replication. 
The hold name incldues the identifier if set. This allows for separate holds in case of multiple targets + --use-hold Adds a hold to the newest snapshot on the source and target after replication succeeds and removes the hold after the next successful replication. The hold name includes the identifier if set. This allows for separate holds in case of multiple targets --preserve-recordsize Preserves the recordsize on initial sends to the target - --preserve-properties Preserves locally set dataset properties similiar to the zfs send -p flag but this one will also work for encrypted datasets in non raw sends + --preserve-properties Preserves locally set dataset properties similar to the zfs send -p flag but this one will also work for encrypted datasets in non raw sends --no-rollback Does not rollback snapshots on target (it probably requires a readonly target) --delete-target-snapshots With this argument snapshots which are missing on the source will be destroyed on the target. Use this if you only want to handle snapshots on the source. --exclude=REGEX Exclude specific datasets which match the given regular expression. Can be specified multiple times From 18ccb7df350d22bd02a4efd01e4fefd1db4bdc3f Mon Sep 17 00:00:00 2001 From: "Jiawen (Kevin) Chen" Date: Mon, 14 Aug 2023 22:52:16 -0700 Subject: [PATCH 079/130] Fix tiny typo in README.md --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index 6549617..152382a 100644 --- a/README.md +++ b/README.md @@ -391,7 +391,7 @@ As of 1.4.18, syncoid also automatically supports and enables resume of interrup + --debug - This prints out quite a lot of additional information during a sanoid run, and is normally not needed. + This prints out quite a lot of additional information during a syncoid run, and is normally not needed. + --help From 07b6d6344c2ba6d230f34a3e4216d4f8e4b73e0e Mon Sep 17 00:00:00 2001 From: Axel Gembe Date: Sun, 15 Oct 2023 14:07:09 +0700 Subject: [PATCH 080/130] debian: add openzfs-zfsutils as an alternative to zfsutils-linux The package produced by ZFS 2.2.0 `make native-deb-utils` is called `openzfs-zfsutils`. --- packages/debian/control | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/packages/debian/control b/packages/debian/control index d154147..1dfe087 100644 --- a/packages/debian/control +++ b/packages/debian/control @@ -12,7 +12,7 @@ Package: sanoid Architecture: all Depends: libcapture-tiny-perl, libconfig-inifiles-perl, - zfsutils-linux | zfs, + zfsutils-linux | zfs | openzfs-zfsutils, ${misc:Depends}, ${perl:Depends} Recommends: gzip, From a5a6fc0f58f2ee491f426391dfbf9ae5c910d161 Mon Sep 17 00:00:00 2001 From: pajkastare Date: Mon, 23 Oct 2023 21:43:46 +0200 Subject: [PATCH 081/130] Fixes jimsalterjrs/sanoid#851 --- sanoid | 59 ++++++++++++++++++++++++++++++++++++++-------------------- 1 file changed, 39 insertions(+), 20 deletions(-) diff --git a/sanoid b/sanoid index 8848e3b..6373c35 100755 --- a/sanoid +++ b/sanoid @@ -4,7 +4,7 @@ # from http://www.gnu.org/licenses/gpl-3.0.html on 2014-11-17. A copy should also be available in this # project's Git repository at https://github.com/jimsalterjrs/sanoid/blob/master/LICENSE. 
-$::VERSION = '2.2.0'; +$::VERSION = '2.2.1'; my $MINIMUM_DEFAULTS_VERSION = 2; use strict; @@ -34,6 +34,14 @@ if (keys %args < 4) { $args{'cron'} = 1; $args{'verbose'} = 1; } +my $no_need_for_cache_update = 0; +# Do not update the snapshot cache file if _only_ "--monitor-*" action commands are given (ignore "--verbose", "--configdir" etc) +if (($args{'monitor-snapshots'} || $args{'monitor-health'} || $args{'monitor-capacity'}) && ! ($args{'cron'} || $args{'force-update'} || $args{'take-snapshots'} || $args{'prune-snapshots'} || $args{'force-prune'})) { + # The command combination above must not assert true for any command that takes or prunes snapshots + # As long as no snapshots are taken, no conflict with the $forcecacheupdate variable below should occur + $no_need_for_cache_update = 1; + if ($args{'debug'}) { print "DEBUG: command combo means that the cache file (provided it exists) will not be updated regardless of age.\n"; } +} # for compatibility reasons, older versions used hardcoded command paths $ENV{'PATH'} = $ENV{'PATH'} . ":/bin:/sbin"; @@ -66,7 +70,6 @@ make_path($run_dir); # if we call getsnaps(%config,1) it will forcibly update the cache, TTL or no TTL my $forcecacheupdate = 0; my $cache = "$cache_dir/snapshots.txt"; -my $cacheTTL = 900; # 15 minutes my %snaps = getsnaps( \%config, $cacheTTL, $forcecacheupdate ); my %pruned; my %capacitycache; @@ -813,31 +821,42 @@ sub getsnaps { my ($dev, $ino, $mode, $nlink, $uid, $gid, $rdev, $size, $atime, $mtime, $ctime, $blksize, $blocks) = stat($cache); if ( $forcecacheupdate || ! -f $cache || (time() - $mtime) > $cacheTTL ) { - if (checklock('sanoid_cacheupdate')) { - writelock('sanoid_cacheupdate'); - if ($args{'verbose'}) { - if ($args{'force-update'}) { - print "INFO: cache forcibly expired - updating from zfs list.\n"; - } else { - print "INFO: cache expired - updating from zfs list.\n"; - } - } - open FH, "$zfs get -Hrpt snapshot creation |"; - @rawsnaps = <FH>; - close FH; - - open FH, "> $cache" or die 'Could not write to $cache!\n'; - print FH @rawsnaps; - close FH; - removelock('sanoid_cacheupdate'); - } else { - if ($args{'verbose'}) { print "INFO: deferring cache update - valid cache update lock held by another sanoid process.\n"; } + if ( -f $cache && ! $forcecacheupdate && $no_need_for_cache_update ) { + # Even though $forcecacheupdate and $no_need_for_cache_update should never be true at the same time, let $forcecacheupdate take precedence + if ($args{'debug'}) { print "DEBUG: no need to update cache even though it's expired, so don't.\n"; } open FH, "< $cache"; @rawsnaps = <FH>; close FH; + } else { + if (checklock('sanoid_cacheupdate')) { + writelock('sanoid_cacheupdate'); + if ($args{'verbose'}) { + if ($args{'force-update'}) { + print "INFO: cache forcibly expired - updating from zfs list.\n"; + } else { + print "INFO: cache expired - updating from zfs list.\n"; + } + } + open FH, "$zfs get -Hrpt snapshot creation |"; + @rawsnaps = <FH>; + close FH; + + open FH, "> $cache" or die 'Could not write to $cache!\n'; + print FH @rawsnaps; + close FH; + removelock('sanoid_cacheupdate'); + } else { + if ($args{'verbose'}) { print "INFO: deferring cache update - valid cache update lock held by another sanoid process.\n"; } + open FH, "< $cache"; + @rawsnaps = <FH>; + close FH; + } } } else { # if ($args{'debug'}) { print "DEBUG: cache not expired (" . (time() - $mtime) .
" seconds old with TTL of $cacheTTL): pulling snapshot list from cache.\n"; } + if ( $no_need_for_cache_update ) { + if ($args{'debug'}) { print "DEBUG: cache has not expired, so will not update it, but wouldn't have even if it had.\n"; } + } open FH, "< $cache"; @rawsnaps = ; close FH; From 605b7bac1cde092fdb92639f70a7bc903f16a34e Mon Sep 17 00:00:00 2001 From: Jan Krieg Date: Sun, 29 Oct 2023 17:46:28 +0100 Subject: [PATCH 082/130] fix "creation"/"guid" regex detection --- syncoid | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/syncoid b/syncoid index 5ed00f0..4bad0d7 100755 --- a/syncoid +++ b/syncoid @@ -1765,7 +1765,7 @@ sub getsnaps() { foreach my $line (@rawsnaps) { # only import snap guids from the specified filesystem - if ($line =~ /\Q$fs\E\@.*guid/) { + if ($line =~ /\Q$fs\E\@.*\tguid/) { chomp $line; my $guid = $line; $guid =~ s/^.*\tguid\t*(\d*).*/$1/; @@ -1777,7 +1777,7 @@ sub getsnaps() { foreach my $line (@rawsnaps) { # only import snap creations from the specified filesystem - if ($line =~ /\Q$fs\E\@.*creation/) { + if ($line =~ /\Q$fs\E\@.*\tcreation/) { chomp $line; my $creation = $line; $creation =~ s/^.*\tcreation\t*(\d*).*/$1/; @@ -1837,13 +1837,13 @@ sub getsnapsfallback() { } if ($state eq 0) { - if ($line !~ /\Q$fs\E\@.*type\s*snapshot/) { + if ($line !~ /\Q$fs\E\@.*\ttype\s*snapshot/) { # skip non snapshot type object $state = -2; next; } } elsif ($state eq 1) { - if ($line !~ /\Q$fs\E\@.*guid/) { + if ($line !~ /\Q$fs\E\@.*\tguid/) { die "CRITICAL ERROR: snapshots couldn't be listed for $fs (guid parser error)"; } @@ -1854,7 +1854,7 @@ sub getsnapsfallback() { $snap =~ s/^.*\@(.*)\tguid.*$/$1/; $snaps{$type}{$snap}{'guid'}=$guid; } elsif ($state eq 2) { - if ($line !~ /\Q$fs\E\@.*creation/) { + if ($line !~ /\Q$fs\E\@.*\tcreation/) { die "CRITICAL ERROR: snapshots couldn't be listed for $fs (creation parser error)"; } @@ -1926,14 +1926,14 @@ sub getbookmarks() { foreach my $line (@rawbookmarks) { # only import bookmark guids, creation from the specified filesystem - if ($line =~ /\Q$fs\E\#.*guid/) { + if ($line =~ /\Q$fs\E\#.*\tguid/) { chomp $line; $lastguid = $line; $lastguid =~ s/^.*\tguid\t*(\d*).*/$1/; my $bookmark = $line; $bookmark =~ s/^.*\#(.*)\tguid.*$/$1/; $bookmarks{$lastguid}{'name'}=$bookmark; - } elsif ($line =~ /\Q$fs\E\#.*creation/) { + } elsif ($line =~ /\Q$fs\E\#.*\tcreation/) { chomp $line; my $creation = $line; $creation =~ s/^.*\tcreation\t*(\d*).*/$1/; From dbbaac8ac360c5f4b6f3dc4bf731248e2666b059 Mon Sep 17 00:00:00 2001 From: Justin Wolf Date: Sun, 10 Dec 2023 21:16:42 -0600 Subject: [PATCH 083/130] modify zfs-get argument order for portability --- syncoid | 4 ++-- tests/syncoid/7_preserve_recordsize/run.sh | 10 +++++----- tests/syncoid/9_preserve_properties/run.sh | 16 ++++++++-------- 3 files changed, 15 insertions(+), 15 deletions(-) diff --git a/syncoid b/syncoid index 5ed00f0..02799d7 100755 --- a/syncoid +++ b/syncoid @@ -1357,9 +1357,9 @@ sub getlocalzfsvalues { if ($debug) { print "DEBUG: getting locally set values of properties on $fs...\n"; } my $mysudocmd; if ($isroot) { $mysudocmd = ''; } else { $mysudocmd = $sudocmd; } - if ($debug) { print "$rhost $mysudocmd $zfscmd get all -s local -H $fsescaped\n"; } + if ($debug) { print "$rhost $mysudocmd $zfscmd get -s local -H all $fsescaped\n"; } my ($values, $error, $exit) = capture { - system("$rhost $mysudocmd $zfscmd get all -s local -H $fsescaped"); + system("$rhost $mysudocmd $zfscmd get -s local -H all $fsescaped"); }; my %properties=(); diff 
--git a/tests/syncoid/7_preserve_recordsize/run.sh b/tests/syncoid/7_preserve_recordsize/run.sh index 3085b9b..9f7d7f4 100755 --- a/tests/syncoid/7_preserve_recordsize/run.sh +++ b/tests/syncoid/7_preserve_recordsize/run.sh @@ -32,17 +32,17 @@ zfs create -o recordsize=32k "${POOL_NAME}"/src/32 zfs create -o recordsize=128k "${POOL_NAME}"/src/128 ../../../syncoid --preserve-recordsize --recursive --debug --compress=none "${POOL_NAME}"/src "${POOL_NAME}"/dst -zfs get recordsize -t filesystem -r "${POOL_NAME}"/dst -zfs get volblocksize -t volume -r "${POOL_NAME}"/dst +zfs get -t filesystem -r recordsize "${POOL_NAME}"/dst +zfs get -t volume -r volblocksize "${POOL_NAME}"/dst -if [ "$(zfs get recordsize -H -o value -t filesystem "${POOL_NAME}"/dst/16)" != "16K" ]; then +if [ "$(zfs get -H -o value -t filesystem recordsize "${POOL_NAME}"/dst/16)" != "16K" ]; then exit 1 fi -if [ "$(zfs get recordsize -H -o value -t filesystem "${POOL_NAME}"/dst/32)" != "32K" ]; then +if [ "$(zfs get -H -o value -t filesystem recordsize "${POOL_NAME}"/dst/32)" != "32K" ]; then exit 1 fi -if [ "$(zfs get recordsize -H -o value -t filesystem "${POOL_NAME}"/dst/128)" != "128K" ]; then +if [ "$(zfs get -H -o value -t filesystem recordsize "${POOL_NAME}"/dst/128)" != "128K" ]; then exit 1 fi diff --git a/tests/syncoid/9_preserve_properties/run.sh b/tests/syncoid/9_preserve_properties/run.sh index 497ce9a..35f1bfa 100755 --- a/tests/syncoid/9_preserve_properties/run.sh +++ b/tests/syncoid/9_preserve_properties/run.sh @@ -33,34 +33,34 @@ zfs create -o recordsize=32k -o acltype=posixacl "${POOL_NAME}"/src/32 ../../../syncoid --preserve-properties --recursive --debug --compress=none "${POOL_NAME}"/src "${POOL_NAME}"/dst -if [ "$(zfs get recordsize -H -o value -t filesystem "${POOL_NAME}"/dst)" != "16K" ]; then +if [ "$(zfs get -H -o value -t filesystem recordsize "${POOL_NAME}"/dst)" != "16K" ]; then exit 1 fi -if [ "$(zfs get mountpoint -H -o value -t filesystem "${POOL_NAME}"/dst)" != "none" ]; then +if [ "$(zfs get -H -o value -t filesystem mountpoint "${POOL_NAME}"/dst)" != "none" ]; then exit 1 fi -if [ "$(zfs get xattr -H -o value -t filesystem "${POOL_NAME}"/dst)" != "on" ]; then +if [ "$(zfs get -H -o value -t filesystem xattr "${POOL_NAME}"/dst)" != "on" ]; then exit 1 fi -if [ "$(zfs get primarycache -H -o value -t filesystem "${POOL_NAME}"/dst)" != "none" ]; then +if [ "$(zfs get -H -o value -t filesystem primarycache "${POOL_NAME}"/dst)" != "none" ]; then exit 1 fi -if [ "$(zfs get recordsize -H -o value -t filesystem "${POOL_NAME}"/dst/16)" != "16K" ]; then +if [ "$(zfs get -H -o value -t filesystem recordsize "${POOL_NAME}"/dst/16)" != "16K" ]; then exit 1 fi -if [ "$(zfs get primarycache -H -o value -t filesystem "${POOL_NAME}"/dst/16)" != "none" ]; then +if [ "$(zfs get -H -o value -t filesystem primarycache "${POOL_NAME}"/dst/16)" != "none" ]; then exit 1 fi -if [ "$(zfs get recordsize -H -o value -t filesystem "${POOL_NAME}"/dst/32)" != "32K" ]; then +if [ "$(zfs get -H -o value -t filesystem recordsize "${POOL_NAME}"/dst/32)" != "32K" ]; then exit 1 fi -if [ "$(zfs get acltype -H -o value -t filesystem "${POOL_NAME}"/dst/32)" != "posix" ]; then +if [ "$(zfs get -H -o value -t filesystem acltype "${POOL_NAME}"/dst/32)" != "posix" ]; then exit 1 fi From 48d89c785ee34b9f87aca13c6155846eb1acd2db Mon Sep 17 00:00:00 2001 From: Christoph Klaffl Date: Tue, 9 Jan 2024 19:53:03 +0100 Subject: [PATCH 084/130] prevent destroying of root dataset which leads to infinite loop because it can't be destroyed --- 
syncoid | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/syncoid b/syncoid index 5ed00f0..98dd2a3 100755 --- a/syncoid +++ b/syncoid @@ -678,7 +678,8 @@ sub syncdataset { } if (! $bookmark) { - if ($args{'force-delete'}) { + # force delete is not possible for the root dataset + if ($args{'force-delete'} && index($targetfs, '/') != -1) { if (!$quiet) { print "Removing $targetfs because no matching snapshots were found\n"; } my $rcommand = ''; From 4369576ac4206071c3d5b32c00c1fe101379868d Mon Sep 17 00:00:00 2001 From: Christoph Klaffl Date: Tue, 9 Jan 2024 20:40:33 +0100 Subject: [PATCH 085/130] escape property key and value pair in case of property preservation --- syncoid | 3 ++- tests/syncoid/9_preserve_properties/run.sh | 5 +++++ 2 files changed, 7 insertions(+), 1 deletion(-) diff --git a/syncoid b/syncoid index 5ed00f0..e1e03ec 100755 --- a/syncoid +++ b/syncoid @@ -493,7 +493,8 @@ sub syncdataset { foreach my $key (keys %properties) { my $value = $properties{$key}; if ($debug) { print "DEBUG: will set $key to $value ...\n"; } - $recvoptions .= " -o $key=$value"; + my $pair = escapeshellparam("$key=$value"); + $recvoptions .= " -o $pair"; } } elsif (defined $args{'preserve-recordsize'}) { my $type = getzfsvalue($sourcehost,$sourcefs,$sourceisroot,'type'); diff --git a/tests/syncoid/9_preserve_properties/run.sh b/tests/syncoid/9_preserve_properties/run.sh index 497ce9a..76d7a82 100755 --- a/tests/syncoid/9_preserve_properties/run.sh +++ b/tests/syncoid/9_preserve_properties/run.sh @@ -29,6 +29,7 @@ zfs create -V 100M -o volblocksize=16k -o primarycache=all "${POOL_NAME}"/src/zv zfs create -V 100M -o volblocksize=64k "${POOL_NAME}"/src/zvol64 zfs create -o recordsize=16k -o primarycache=none "${POOL_NAME}"/src/16 zfs create -o recordsize=32k -o acltype=posixacl "${POOL_NAME}"/src/32 +zfs set 'net.openoid:var-name'='with whitespace and !"§$%&/()= symbols' "${POOL_NAME}"/src/32 ../../../syncoid --preserve-properties --recursive --debug --compress=none "${POOL_NAME}"/src "${POOL_NAME}"/dst @@ -64,3 +65,7 @@ fi if [ "$(zfs get acltype -H -o value -t filesystem "${POOL_NAME}"/dst/32)" != "posix" ]; then exit 1 fi + +if [ "$(zfs get 'net.openoid:var-name' -H -o value -t filesystem "${POOL_NAME}"/dst/32)" != "with whitespace and !\"§$%&/()= symbols" ]; then + exit 1 +fi From 676100493939d4744a677856c6c8c0d999abc554 Mon Sep 17 00:00:00 2001 From: Christoph Klaffl Date: Thu, 11 Jan 2024 21:02:04 +0100 Subject: [PATCH 086/130] update possible zfs send options --- syncoid | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/syncoid b/syncoid index 5ed00f0..f6a8721 100755 --- a/syncoid +++ b/syncoid @@ -451,7 +451,7 @@ sub syncdataset { # with ZFS on Linux (possibly OpenZFS in general) when setting/unsetting readonly. #my $originaltargetreadonly; - my $sendoptions = getoptionsline(\@sendoptions, ('D','L','P','R','c','e','h','p','v','w')); + my $sendoptions = getoptionsline(\@sendoptions, ('L','P','V','R','X','b','c','e','h','p','s','v','w')); my $recvoptions = getoptionsline(\@recvoptions, ('h','o','x','u','v')); # sync 'em up. 
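The getoptionsline() whitelists touched in the hunk above are what keep user-supplied --sendoptions/--recvoptions flags out of contexts where they are invalid; this commit drops the removed dedup letter 'D' from the zfs send lists and admits newer letters such as 'V', 'X', 'b' and 's'. A minimal sketch of that whitelist idea, assuming each parsed option is kept as a hashref with 'option' and 'line' keys (the real getoptionsline() internals may differ):

```perl
# Sketch only: whitelist filtering of pre-parsed command line options.
# The { option => ..., line => ... } shape is an assumption for illustration;
# syncoid's real getoptionsline() may store its parsed options differently.
sub filteroptions {
    my ($options, @allowed) = @_;
    my %allowed = map { ($_ => 1) } @allowed;
    # keep only the options whose letter is in the allowed set
    my @kept = grep { $allowed{ $_->{'option'} } } @{$options};
    return join(' ', map { $_->{'line'} } @kept);
}

# '-D' is dropped because 'D' is no longer in the allowed set:
my @opts = ({ option => 'w', line => '-w' }, { option => 'D', line => '-D' });
print filteroptions(\@opts, ('L','P','V','R','X','b','c','e','h','p','s','v','w')), "\n"; # prints "-w"
```

Filtering by single letters like this is also why the allowed sets differ per call site in the hunks that follow: a resume send from a receive token accepts far fewer options than a fresh full send.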
@@ -600,7 +600,7 @@ sub syncdataset { # and because this will ony resume the receive to the next # snapshot, do a normal sync after that if (defined($receivetoken)) { - $sendoptions = getoptionsline(\@sendoptions, ('P','e','v','w')); + $sendoptions = getoptionsline(\@sendoptions, ('P','e','v')); my $sendcmd = "$sourcesudocmd $zfscmd send $sendoptions -t $receivetoken"; my $recvcmd = "$targetsudocmd $zfscmd receive $recvoptions $receiveextraargs $forcedrecv $targetfsescaped 2>&1"; my $pvsize = getsendsize($sourcehost,"","",$sourceisroot,$receivetoken); @@ -760,7 +760,7 @@ sub syncdataset { my $pvsize = 0; my $disp_pvsize = "UNKNOWN"; - $sendoptions = getoptionsline(\@sendoptions, ('L','c','e','w')); + $sendoptions = getoptionsline(\@sendoptions, ('L','V','c','e','w')); if ($nextsnapshot) { my $nextsnapshotescaped = escapeshellparam($nextsnapshot); my $sendcmd = "$sourcesudocmd $zfscmd send $sendoptions -i $sourcefsescaped#$bookmarkescaped $sourcefsescaped\@$nextsnapshotescaped"; @@ -830,7 +830,7 @@ sub syncdataset { return 0; } - $sendoptions = getoptionsline(\@sendoptions, ('D','L','P','R','c','e','h','p','v','w')); + $sendoptions = getoptionsline(\@sendoptions, ('L','P','V','R','X','c','e','h','p','s','v','w')); my $sendcmd = "$sourcesudocmd $zfscmd send $sendoptions $args{'streamarg'} $sourcefsescaped\@$matchingsnapescaped $sourcefsescaped\@$newsyncsnapescaped"; my $recvcmd = "$targetsudocmd $zfscmd receive $recvoptions $receiveextraargs $forcedrecv $targetfsescaped 2>&1"; my $pvsize = getsendsize($sourcehost,"$sourcefs\@$matchingsnap","$sourcefs\@$newsyncsnap",$sourceisroot); @@ -1998,9 +1998,9 @@ sub getsendsize { my $sendoptions; if (defined($receivetoken)) { - $sendoptions = getoptionsline(\@sendoptions, ('e')); + $sendoptions = getoptionsline(\@sendoptions, ('V','e')); } else { - $sendoptions = getoptionsline(\@sendoptions, ('D','L','R','c','e','h','p','w')); + $sendoptions = getoptionsline(\@sendoptions, ('L','V','R','X','b','c','e','h','p','s','w')); } my $getsendsizecmd = "$sourcessh $mysudocmd $zfscmd send $sendoptions -nvP $snaps"; if ($debug) { print "DEBUG: getting estimated transfer size from source $sourcehost using \"$getsendsizecmd 2>&1 |\"...\n"; } From 8ce1ea4dc8b6ec0ec6c0bed8ae61656d67bfed7f Mon Sep 17 00:00:00 2001 From: Christoph Klaffl Date: Sat, 13 Jan 2024 19:49:20 +0100 Subject: [PATCH 087/130] fixed refactoring regression --- syncoid | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/syncoid b/syncoid index 4bf7072..b30abce 100755 --- a/syncoid +++ b/syncoid @@ -928,7 +928,7 @@ sub runsynccmd { } my $sendcmd = "$sourcesudocmd $zfscmd send $sendoptions $sendsource"; - my $recvcmd = "$targetsudocmd $zfscmd receive $recvoptions $targetfsescaped"; + my $recvcmd = "$targetsudocmd $zfscmd receive $recvoptions $targetfsescaped 2>&1"; my $synccmd = buildsynccmd($sendcmd,$recvcmd,$pvsize,$sourceisroot,$targetisroot); writelog('INFO', "Sync size: ~$disp_pvsize"); From 9ed32d177d692330f0f15895468d4442c67fed0b Mon Sep 17 00:00:00 2001 From: Dan Langille Date: Mon, 15 Jan 2024 09:56:47 -0500 Subject: [PATCH 088/130] sanoid.conf: document two options for recursive zfs and yes are the options, one uses zfs, the other sanoid code --- sanoid.conf | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/sanoid.conf b/sanoid.conf index 6bd5c62..db380d1 100644 --- a/sanoid.conf +++ b/sanoid.conf @@ -17,7 +17,11 @@ # you can also handle datasets recursively. 
[zpoolname/parent] use_template = production - recursive = yes + # there are two options for recursive: zfs or yes + # * zfs - takes a zfs snapshot with the '-r' flag; zfs will recursively take a snapshot of the whole + # dataset tree which is consistent. + # * yes - the snapshots will be taken one-at-a-time through the sanoid code; not necessarily consistent. + recursive = zfs # if you want sanoid to manage the child datasets but leave this one alone, set process_children_only. process_children_only = yes From a8c15c977a96476d74b34b7c3e1766aad3988b3f Mon Sep 17 00:00:00 2001 From: pajkastare Date: Wed, 24 Jan 2024 13:32:22 +0100 Subject: [PATCH 089/130] Fixes jimsalterjrs/sanoid#851, updated based on review in discussion thread --- sanoid | 64 +++++++++++++++++++++++++--------------------------------- 1 file changed, 28 insertions(+), 36 deletions(-) diff --git a/sanoid b/sanoid index 6373c35..a22bc87 100755 --- a/sanoid +++ b/sanoid @@ -4,7 +4,7 @@ # from http://www.gnu.org/licenses/gpl-3.0.html on 2014-11-17. A copy should also be available in this # project's Git repository at https://github.com/jimsalterjrs/sanoid/blob/master/LICENSE. -$::VERSION = '2.2.1'; +$::VERSION = '2.2.0'; my $MINIMUM_DEFAULTS_VERSION = 2; use strict; @@ -34,13 +34,17 @@ if (keys %args < 4) { $args{'cron'} = 1; $args{'verbose'} = 1; } -my $no_need_for_cache_update = 0; -# Do not update the snapshot cache file if _only_ "--monitor-*" action commands are given (ignore "--verbose", "--configdir" etc) + + +my $cacheTTL = 900; # 15 minutes + +# Allow a much older snapshot cache file than default if _only_ "--monitor-*" action commands are given +# (ignore "--verbose", "--configdir" etc) if (($args{'monitor-snapshots'} || $args{'monitor-health'} || $args{'monitor-capacity'}) && ! ($args{'cron'} || $args{'force-update'} || $args{'take-snapshots'} || $args{'prune-snapshots'} || $args{'force-prune'})) { # The command combination above must not assert true for any command that takes or prunes snapshots - # As long as no snapshots are taken, no conflict with the $forcecacheupdate variable below should occur - $no_need_for_cache_update = 1; - if ($args{'debug'}) { print "DEBUG: command combo means that the cache file (provided it exists) will not be updated regardless of age.\n"; } + $cacheTTL = 18000; # 5 hours + if ($args{'debug'}) { print "DEBUG: command combo means that the cache file (provided it exists) will be allowed to be older than default.\n"; } } # for compatibility reasons, older versions used hardcoded command paths @@ -66,7 +70,6 @@ make_path($run_dir); # if we call getsnaps(%config,1) it will forcibly update the cache, TTL or no TTL my $forcecacheupdate = 0; my $cache = "$cache_dir/snapshots.txt"; -my $cacheTTL = 900; # 15 minutes my %snaps = getsnaps( \%config, $cacheTTL, $forcecacheupdate ); my %pruned; my %capacitycache; @@ -821,42 +824,31 @@ sub getsnaps { my ($dev, $ino, $mode, $nlink, $uid, $gid, $rdev, $size, $atime, $mtime, $ctime, $blksize, $blocks) = stat($cache); if ( $forcecacheupdate || ! -f $cache || (time() - $mtime) > $cacheTTL ) { - if ( -f $cache && !
$forcecacheupdate && $no_need_for_cache_update ) { - # Even though $forcecacheupdate and $no_need_for_cache_update should never be true at the same time, let $forcecacheupdate take precedence - if ($args{'debug'}) { print "DEBUG: no need to update cache even though it's expired, so don't.\n"; } + if (checklock('sanoid_cacheupdate')) { + writelock('sanoid_cacheupdate'); + if ($args{'verbose'}) { + if ($args{'force-update'}) { + print "INFO: cache forcibly expired - updating from zfs list.\n"; + } else { + print "INFO: cache expired - updating from zfs list.\n"; + } + } + open FH, "$zfs get -Hrpt snapshot creation |"; + @rawsnaps = <FH>; + close FH; + + open FH, "> $cache" or die 'Could not write to $cache!\n'; + print FH @rawsnaps; + close FH; + removelock('sanoid_cacheupdate'); + } else { + if ($args{'verbose'}) { print "INFO: deferring cache update - valid cache update lock held by another sanoid process.\n"; } open FH, "< $cache"; @rawsnaps = <FH>; close FH; - } else { - if (checklock('sanoid_cacheupdate')) { - writelock('sanoid_cacheupdate'); - if ($args{'verbose'}) { - if ($args{'force-update'}) { - print "INFO: cache forcibly expired - updating from zfs list.\n"; - } else { - print "INFO: cache expired - updating from zfs list.\n"; - } - } - open FH, "$zfs get -Hrpt snapshot creation |"; - @rawsnaps = <FH>; - close FH; - - open FH, "> $cache" or die 'Could not write to $cache!\n'; - print FH @rawsnaps; - close FH; - removelock('sanoid_cacheupdate'); - } else { - if ($args{'verbose'}) { print "INFO: deferring cache update - valid cache update lock held by another sanoid process.\n"; } - open FH, "< $cache"; - @rawsnaps = <FH>; - close FH; - } } } else { # if ($args{'debug'}) { print "DEBUG: cache not expired (" . (time() - $mtime) . " seconds old with TTL of $cacheTTL): pulling snapshot list from cache.\n"; } - if ( $no_need_for_cache_update ) { - if ($args{'debug'}) { print "DEBUG: cache has not expired, so will not update it, but wouldn't have even if it had.\n"; } - } open FH, "< $cache"; @rawsnaps = <FH>; close FH; From 01053e6cceccf2d5194b5bc6e61277685146a079 Mon Sep 17 00:00:00 2001 From: pajkastare Date: Wed, 24 Jan 2024 13:51:24 +0100 Subject: [PATCH 090/130] Removed unnecessary comment, no code change --- sanoid | 1 - 1 file changed, 1 deletion(-) diff --git a/sanoid b/sanoid index a22bc87..295957b 100755 --- a/sanoid +++ b/sanoid @@ -42,7 +42,6 @@ my $cacheTTL = 900; # 15 minutes # (ignore "--verbose", "--configdir" etc) if (($args{'monitor-snapshots'} || $args{'monitor-health'} || $args{'monitor-capacity'}) && ! ($args{'cron'} || $args{'force-update'} || $args{'take-snapshots'} || $args{'prune-snapshots'} || $args{'force-prune'})) { # The command combination above must not assert true for any command that takes or prunes snapshots - # As long as no snapshots are taken, no conflict with the $forcecacheupdate variable below should occur $cacheTTL = 18000; # 5 hours if ($args{'debug'}) { print "DEBUG: command combo means that the cache file (provided it exists) will be allowed to be older than default.\n"; } } From 6667f02d3576cb152f78efce99b6dd981df6126c Mon Sep 17 00:00:00 2001 From: Christoph Klaffl Date: Thu, 25 Jan 2024 21:13:00 +0100 Subject: [PATCH 091/130] Update sanoid.conf --- sanoid.conf | 2 -- 1 file changed, 2 deletions(-) diff --git a/sanoid.conf b/sanoid.conf index b5f57f3..0650c3d 100644 --- a/sanoid.conf +++ b/sanoid.conf @@ -36,8 +36,6 @@ # dataset tree which is consistent. # * yes - the snapshots will be taken one-at-a-time through the sanoid code; not necessarily consistent.
recursive = zfs - # if you want sanoid to manage the child datasets but leave this one alone, set process_children_only. - process_children_only = yes From 6919bc3324525f86d67981b0f11d1c2b215cdf45 Mon Sep 17 00:00:00 2001 From: Jim Salter Date: Thu, 1 Feb 2024 13:57:02 -0500 Subject: [PATCH 092/130] Update README.md --- README.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/README.md b/README.md index f1572da..59087de 100644 --- a/README.md +++ b/README.md @@ -1,6 +1,6 @@ -

sanoid logo

+

sanoid logo

Sanoid is provided to you free and libre via the GPL v3.0 license. If you would like to support the project, please consider becoming a member of the Practical ZFS patreon community. Becoming a member helps support development and maintenance of Sanoid, and helps keep the lights on at the Practical ZFS forum--which, by the way, is an excellent place to ask for help with Sanoid or with OpenZFS in general!

-Sanoid is a policy-driven snapshot management tool for ZFS filesystems. When combined with the Linux KVM hypervisor, you can use it to make your systems functionally immortal. +Sanoid is a policy-driven snapshot management tool for ZFS filesystems. When combined with the Linux KVM hypervisor, you can use it to make your systems functionally immortal via automated snapshot management and over-the-air replication.

sanoid rollback demo
(Real time demo: rolling back a full-scale cryptomalware infection in seconds!)

From 7940f659415072e06462ac7f4c411810ae20217d Mon Sep 17 00:00:00 2001 From: Jim Salter Date: Thu, 1 Feb 2024 13:58:33 -0500 Subject: [PATCH 093/130] Update README.md --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index 59087de..098a73f 100644 --- a/README.md +++ b/README.md @@ -1,6 +1,6 @@

sanoid logo

Sanoid is provided to you free and libre via the GPL v3.0 license. If you would like to support the project, please consider becoming a member of the Practical ZFS patreon community. Becoming a member helps support development and maintenance of Sanoid, and helps keep the lights on at the Practical ZFS forum--which, by the way, is an excellent place to ask for help with Sanoid or with OpenZFS in general!

-Sanoid is a policy-driven snapshot management tool for ZFS filesystems. When combined with the Linux KVM hypervisor, you can use it to make your systems functionally immortal via automated snapshot management and over-the-air replication. +Sanoid is a policy-driven snapshot management tool for ZFS filesystems. When combined with the Linux KVM hypervisor, you can use it to make your systems functionally immortal via automated snapshot management and over-the-air replication.

sanoid rollback demo
(Real time demo: rolling back a full-scale cryptomalware infection in seconds!)

From 5de562eb7f98126a35b6134fb50025cc9316c5dd Mon Sep 17 00:00:00 2001 From: Jim Salter Date: Thu, 1 Feb 2024 14:38:45 -0500 Subject: [PATCH 094/130] Update README.md --- README.md | 15 +++++++++++++-- 1 file changed, 13 insertions(+), 2 deletions(-) diff --git a/README.md b/README.md index 098a73f..f2ce10c 100644 --- a/README.md +++ b/README.md @@ -1,6 +1,17 @@ -

sanoid logo

Sanoid is provided to you free and libre via the GPL v3.0 license. If you would like to support the project, please consider becoming a member of the Practical ZFS patreon community. Becoming a member helps support development and maintenance of Sanoid, and helps keep the lights on at the Practical ZFS forum--which, by the way, is an excellent place to ask for help with Sanoid or with OpenZFS in general!

+ + + + +
+

+ sanoid logo +

+ +

Sanoid is provided to you completely free and libre, now and in perpetuity, via the GPL v3.0 license. If you find the project useful, please consider either a recurring or one-time donation at Patreon or PayPal—your contributions will support both this project and the Practical ZFS forum. +

+
-Sanoid is a policy-driven snapshot management tool for ZFS filesystems. When combined with the Linux KVM hypervisor, you can use it to make your systems functionally immortal via automated snapshot management and over-the-air replication. +Sanoid is a policy-driven snapshot management tool for ZFS filesystems. When combined with the Linux KVM hypervisor, you can use it to make your systems functionally immortal via automated snapshot management and over-the-air replication.

sanoid rollback demo
(Real time demo: rolling back a full-scale cryptomalware infection in seconds!)

From b420048d9599faa6e5fbdd4088e10f27ba12c027 Mon Sep 17 00:00:00 2001 From: Jim Salter Date: Thu, 1 Feb 2024 14:45:33 -0500 Subject: [PATCH 095/130] Create CODE_OF_CONDUCT.md --- CODE_OF_CONDUCT.md | 128 +++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 128 insertions(+) create mode 100644 CODE_OF_CONDUCT.md diff --git a/CODE_OF_CONDUCT.md b/CODE_OF_CONDUCT.md new file mode 100644 index 0000000..18c9147 --- /dev/null +++ b/CODE_OF_CONDUCT.md @@ -0,0 +1,128 @@ +# Contributor Covenant Code of Conduct + +## Our Pledge + +We as members, contributors, and leaders pledge to make participation in our +community a harassment-free experience for everyone, regardless of age, body +size, visible or invisible disability, ethnicity, sex characteristics, gender +identity and expression, level of experience, education, socio-economic status, +nationality, personal appearance, race, religion, or sexual identity +and orientation. + +We pledge to act and interact in ways that contribute to an open, welcoming, +diverse, inclusive, and healthy community. + +## Our Standards + +Examples of behavior that contributes to a positive environment for our +community include: + +* Demonstrating empathy and kindness toward other people +* Being respectful of differing opinions, viewpoints, and experiences +* Giving and gracefully accepting constructive feedback +* Accepting responsibility and apologizing to those affected by our mistakes, + and learning from the experience +* Focusing on what is best not just for us as individuals, but for the + overall community + +Examples of unacceptable behavior include: + +* The use of sexualized language or imagery, and sexual attention or + advances of any kind +* Trolling, insulting or derogatory comments, and personal or political attacks +* Public or private harassment +* Publishing others' private information, such as a physical or email + address, without their explicit permission +* Other conduct which could reasonably be considered inappropriate in a + professional setting + +## Enforcement Responsibilities + +Community leaders are responsible for clarifying and enforcing our standards of +acceptable behavior and will take appropriate and fair corrective action in +response to any behavior that they deem inappropriate, threatening, offensive, +or harmful. + +Community leaders have the right and responsibility to remove, edit, or reject +comments, commits, code, wiki edits, issues, and other contributions that are +not aligned to this Code of Conduct, and will communicate reasons for moderation +decisions when appropriate. + +## Scope + +This Code of Conduct applies within all community spaces, and also applies when +an individual is officially representing the community in public spaces. +Examples of representing our community include using an official e-mail address, +posting via an official social media account, or acting as an appointed +representative at an online or offline event. + +## Enforcement + +Instances of abusive, harassing, or otherwise unacceptable behavior may be +reported to the community leaders responsible for enforcement at +. +All complaints will be reviewed and investigated promptly and fairly. + +All community leaders are obligated to respect the privacy and security of the +reporter of any incident. + +## Enforcement Guidelines + +Community leaders will follow these Community Impact Guidelines in determining +the consequences for any action they deem in violation of this Code of Conduct: + +### 1. 
Correction + +**Community Impact**: Use of inappropriate language or other behavior deemed +unprofessional or unwelcome in the community. + +**Consequence**: A private, written warning from community leaders, providing +clarity around the nature of the violation and an explanation of why the +behavior was inappropriate. A public apology may be requested. + +### 2. Warning + +**Community Impact**: A violation through a single incident or series +of actions. + +**Consequence**: A warning with consequences for continued behavior. No +interaction with the people involved, including unsolicited interaction with +those enforcing the Code of Conduct, for a specified period of time. This +includes avoiding interactions in community spaces as well as external channels +like social media. Violating these terms may lead to a temporary or +permanent ban. + +### 3. Temporary Ban + +**Community Impact**: A serious violation of community standards, including +sustained inappropriate behavior. + +**Consequence**: A temporary ban from any sort of interaction or public +communication with the community for a specified period of time. No public or +private interaction with the people involved, including unsolicited interaction +with those enforcing the Code of Conduct, is allowed during this period. +Violating these terms may lead to a permanent ban. + +### 4. Permanent Ban + +**Community Impact**: Demonstrating a pattern of violation of community +standards, including sustained inappropriate behavior, harassment of an +individual, or aggression toward or disparagement of classes of individuals. + +**Consequence**: A permanent ban from any sort of public interaction within +the community. + +## Attribution + +This Code of Conduct is adapted from the [Contributor Covenant][homepage], +version 2.0, available at +https://www.contributor-covenant.org/version/2/0/code_of_conduct.html. + +Community Impact Guidelines were inspired by [Mozilla's code of conduct +enforcement ladder](https://github.com/mozilla/diversity). + +[homepage]: https://www.contributor-covenant.org + +For answers to common questions about this code of conduct, see the FAQ at +https://www.contributor-covenant.org/faq. Translations are available at +https://www.contributor-covenant.org/translations. From 4e101bbc16aa34ce0b26ae963deaab0258ad6701 Mon Sep 17 00:00:00 2001 From: Jim Salter Date: Thu, 1 Feb 2024 14:52:25 -0500 Subject: [PATCH 096/130] Create CONTRIBUTING.md --- CONTRIBUTING.md | 1 + 1 file changed, 1 insertion(+) create mode 100644 CONTRIBUTING.md diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md new file mode 100644 index 0000000..15b33b7 --- /dev/null +++ b/CONTRIBUTING.md @@ -0,0 +1 @@ +Any and all contributions made to this project must be compatible with the project's own GPLv3 license. From d5ce1889d6ff8b88462c260aabba5516c3b4f412 Mon Sep 17 00:00:00 2001 From: Jim Salter Date: Thu, 1 Feb 2024 14:59:43 -0500 Subject: [PATCH 097/130] Create SECURITY.md --- SECURITY.md | 13 +++++++++++++ 1 file changed, 13 insertions(+) create mode 100644 SECURITY.md diff --git a/SECURITY.md b/SECURITY.md new file mode 100644 index 0000000..d1bd71c --- /dev/null +++ b/SECURITY.md @@ -0,0 +1,13 @@ +# Security Policy + +## Supported Versions + +The Sanoid project directly supports both the code in the main branch, and the last two releases found here on GitHub. + +Community support is available for all versions, with the understanding that in some cases "upgrade to a newer version" may be the support offered. 
+If you've installed Sanoid from your distribution's repositories, we're happy to offer community support with the same caveat! + +## Reporting a Vulnerability + +If you believe you've found a serious security vulnerability in Sanoid, please create an Issue here on GitHub. If you prefer a private contact channel to disclose +particularly sensitive or private details, you may request one in the GitHub Issue you create. From 19fc237476452bfa7499e6dfda77a8a6eee20b4f Mon Sep 17 00:00:00 2001 From: Jim Salter Date: Thu, 1 Feb 2024 15:05:08 -0500 Subject: [PATCH 098/130] Update INSTALL.md --- INSTALL.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/INSTALL.md b/INSTALL.md index 9c6acfa..e0048b6 100644 --- a/INSTALL.md +++ b/INSTALL.md @@ -169,7 +169,7 @@ pkg install p5-Config-Inifiles p5-Capture-Tiny pv mbuffer lzop sanoid * Simplest path workaround is symlinks, eg `ln -s /usr/local/bin/lzop /usr/bin/lzop` or similar, as appropriate to create links in **/usr/bin** to wherever the utilities actually are on your system. -* See note about mbuffer and other things in FREEBSD.readme +* See note about tcsh unpleasantness and other things in FREEBSD.readme ## Alpine Linux / busybox based distributions From f89372967f8975e6cf0e5bfbf8052186ad4133e0 Mon Sep 17 00:00:00 2001 From: Adam Fulton Date: Mon, 1 Apr 2024 11:53:45 -0500 Subject: [PATCH 099/130] fix(syncoid): regather $snaps on --delete-target-snapshots flag --- syncoid | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/syncoid b/syncoid index 61814d2..ea067b7 100755 --- a/syncoid +++ b/syncoid @@ -865,6 +865,16 @@ sub syncdataset { # those that exist on the source. Remaining are the snapshots # that are only on the target. Then sort to remove the oldest # snapshots first. + + # regather snapshots on source and target + %snaps = getsnaps('source',$sourcehost,$sourcefs,$sourceisroot); + + if ($targetexists) { + my %targetsnaps = getsnaps('target',$targethost,$targetfs,$targetisroot); + my %sourcesnaps = %snaps; + %snaps = (%sourcesnaps, %targetsnaps); + } + my @to_delete = sort { sortsnapshots(\%snaps, $a, $b) } grep {!exists $snaps{'source'}{$_}} keys %{ $snaps{'target'} }; while (@to_delete) { # Create batch of snapshots to remove From d08b2882b7255ba630f5d338936d411a3d56e44c Mon Sep 17 00:00:00 2001 From: Adam Fulton Date: Mon, 1 Apr 2024 13:16:16 -0500 Subject: [PATCH 100/130] finish rebase to master --- syncoid | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/syncoid b/syncoid index ea067b7..cb18897 100755 --- a/syncoid +++ b/syncoid @@ -867,13 +867,13 @@ sub syncdataset { # snapshots first. 
# regather snapshots on source and target - %snaps = getsnaps('source',$sourcehost,$sourcefs,$sourceisroot); + %snaps = getsnaps('source',$sourcehost,$sourcefs,$sourceisroot,0); - if ($targetexists) { - my %targetsnaps = getsnaps('target',$targethost,$targetfs,$targetisroot); - my %sourcesnaps = %snaps; - %snaps = (%sourcesnaps, %targetsnaps); - } + if ($targetexists) { + my %targetsnaps = getsnaps('target',$targethost,$targetfs,$targetisroot,0); + my %sourcesnaps = %snaps; + %snaps = (%sourcesnaps, %targetsnaps); + } my @to_delete = sort { sortsnapshots(\%snaps, $a, $b) } grep {!exists $snaps{'source'}{$_}} keys %{ $snaps{'target'} }; while (@to_delete) { From 7c8a34eceb40043bc5a09990b668e67235a5e81b Mon Sep 17 00:00:00 2001 From: Christoph Klaffl Date: Fri, 5 Apr 2024 15:20:28 +0200 Subject: [PATCH 101/130] * proper order of tests * timing fixes for fast NVME pools * skip invasive tests by default --- tests/run-tests.sh | 5 ++++- .../run.sh | 0 .../run.sh | 0 tests/syncoid/{3_force_delete => 003_force_delete}/run.sh | 0 .../run.sh | 0 .../run.sh | 2 ++ .../run.sh | 2 ++ .../run.sh | 0 .../run.sh | 0 .../run.sh | 0 .../syncoid/{10_filter_snaps => 010_filter_snaps}/run.sh | 0 .../run.sh | 8 ++++++-- tests/syncoid/run-tests.sh | 5 ++++- 13 files changed, 18 insertions(+), 4 deletions(-) rename tests/syncoid/{1_bookmark_replication_intermediate => 001_bookmark_replication_intermediate}/run.sh (100%) rename tests/syncoid/{2_bookmark_replication_no_intermediate => 002_bookmark_replication_no_intermediate}/run.sh (100%) rename tests/syncoid/{3_force_delete => 003_force_delete}/run.sh (100%) rename tests/syncoid/{4_bookmark_replication_edge_case => 004_bookmark_replication_edge_case}/run.sh (100%) rename tests/syncoid/{5_reset_resume_state => 005_reset_resume_state}/run.sh (99%) rename tests/syncoid/{6_reset_resume_state2 => 006_reset_resume_state2}/run.sh (99%) rename tests/syncoid/{7_preserve_recordsize => 007_preserve_recordsize}/run.sh (100%) rename tests/syncoid/{8_force_delete_snapshot => 008_force_delete_snapshot}/run.sh (100%) rename tests/syncoid/{9_preserve_properties => 009_preserve_properties}/run.sh (100%) rename tests/syncoid/{10_filter_snaps => 010_filter_snaps}/run.sh (100%) rename tests/syncoid/{815_sync_out-of-order_snapshots => 011_sync_out-of-order_snapshots}/run.sh (91%) diff --git a/tests/run-tests.sh b/tests/run-tests.sh index 418657c..ec14721 100755 --- a/tests/run-tests.sh +++ b/tests/run-tests.sh @@ -17,8 +17,11 @@ for test in $(find . -mindepth 1 -maxdepth 1 -type d -printf "%P\n" | sort -g); cd "${test}" echo -n y | bash run.sh > "${LOGFILE}" 2>&1 - if [ $? -eq 0 ]; then + ret=$? 
+ if [ $ret -eq 0 ]; then echo "[PASS]" + elif [ $ret -eq 130 ]; then + echo "[SKIPPED]" else echo "[FAILED] (see ${LOGFILE})" fi diff --git a/tests/syncoid/1_bookmark_replication_intermediate/run.sh b/tests/syncoid/001_bookmark_replication_intermediate/run.sh similarity index 100% rename from tests/syncoid/1_bookmark_replication_intermediate/run.sh rename to tests/syncoid/001_bookmark_replication_intermediate/run.sh diff --git a/tests/syncoid/2_bookmark_replication_no_intermediate/run.sh b/tests/syncoid/002_bookmark_replication_no_intermediate/run.sh similarity index 100% rename from tests/syncoid/2_bookmark_replication_no_intermediate/run.sh rename to tests/syncoid/002_bookmark_replication_no_intermediate/run.sh diff --git a/tests/syncoid/3_force_delete/run.sh b/tests/syncoid/003_force_delete/run.sh similarity index 100% rename from tests/syncoid/3_force_delete/run.sh rename to tests/syncoid/003_force_delete/run.sh diff --git a/tests/syncoid/4_bookmark_replication_edge_case/run.sh b/tests/syncoid/004_bookmark_replication_edge_case/run.sh similarity index 100% rename from tests/syncoid/4_bookmark_replication_edge_case/run.sh rename to tests/syncoid/004_bookmark_replication_edge_case/run.sh diff --git a/tests/syncoid/5_reset_resume_state/run.sh b/tests/syncoid/005_reset_resume_state/run.sh similarity index 99% rename from tests/syncoid/5_reset_resume_state/run.sh rename to tests/syncoid/005_reset_resume_state/run.sh index 43ec78f..4eb4af6 100755 --- a/tests/syncoid/5_reset_resume_state/run.sh +++ b/tests/syncoid/005_reset_resume_state/run.sh @@ -28,6 +28,8 @@ zfs create -o mountpoint="${MOUNT_TARGET}" "${POOL_NAME}"/src dd if=/dev/urandom of="${MOUNT_TARGET}"/big_file bs=1M count=200 +sleep 1 + ../../../syncoid --debug --compress=none --source-bwlimit=2m "${POOL_NAME}"/src "${POOL_NAME}"/dst & syncoid_pid=$! sleep 5 diff --git a/tests/syncoid/6_reset_resume_state2/run.sh b/tests/syncoid/006_reset_resume_state2/run.sh similarity index 99% rename from tests/syncoid/6_reset_resume_state2/run.sh rename to tests/syncoid/006_reset_resume_state2/run.sh index d05696b..c568fd4 100755 --- a/tests/syncoid/6_reset_resume_state2/run.sh +++ b/tests/syncoid/006_reset_resume_state2/run.sh @@ -28,6 +28,8 @@ zfs create -o mountpoint="${MOUNT_TARGET}" "${POOL_NAME}"/src dd if=/dev/urandom of="${MOUNT_TARGET}"/big_file bs=1M count=200 +sleep 1 + zfs snapshot "${POOL_NAME}"/src@big ../../../syncoid --debug --no-sync-snap --compress=none --source-bwlimit=2m "${POOL_NAME}"/src "${POOL_NAME}"/dst & syncoid_pid=$! 
diff --git a/tests/syncoid/7_preserve_recordsize/run.sh b/tests/syncoid/007_preserve_recordsize/run.sh similarity index 100% rename from tests/syncoid/7_preserve_recordsize/run.sh rename to tests/syncoid/007_preserve_recordsize/run.sh diff --git a/tests/syncoid/8_force_delete_snapshot/run.sh b/tests/syncoid/008_force_delete_snapshot/run.sh similarity index 100% rename from tests/syncoid/8_force_delete_snapshot/run.sh rename to tests/syncoid/008_force_delete_snapshot/run.sh diff --git a/tests/syncoid/9_preserve_properties/run.sh b/tests/syncoid/009_preserve_properties/run.sh similarity index 100% rename from tests/syncoid/9_preserve_properties/run.sh rename to tests/syncoid/009_preserve_properties/run.sh diff --git a/tests/syncoid/10_filter_snaps/run.sh b/tests/syncoid/010_filter_snaps/run.sh similarity index 100% rename from tests/syncoid/10_filter_snaps/run.sh rename to tests/syncoid/010_filter_snaps/run.sh diff --git a/tests/syncoid/815_sync_out-of-order_snapshots/run.sh b/tests/syncoid/011_sync_out-of-order_snapshots/run.sh similarity index 91% rename from tests/syncoid/815_sync_out-of-order_snapshots/run.sh rename to tests/syncoid/011_sync_out-of-order_snapshots/run.sh index af67b36..af87979 100755 --- a/tests/syncoid/815_sync_out-of-order_snapshots/run.sh +++ b/tests/syncoid/011_sync_out-of-order_snapshots/run.sh @@ -7,9 +7,13 @@ set -e . ../../common/lib.sh -POOL_IMAGE="/tmp/jimsalterjrs_sanoid_815.img" +if [ -z "$ALLOW_INVASIVE_TESTS" ]; then + exit 130 +fi +exit 0 +POOL_IMAGE="/tmp/syncoid-test-11.zpool" POOL_SIZE="64M" -POOL_NAME="jimsalterjrs_sanoid_815" +POOL_NAME="syncoid-test-11" truncate -s "${POOL_SIZE}" "${POOL_IMAGE}" diff --git a/tests/syncoid/run-tests.sh b/tests/syncoid/run-tests.sh index 5564667..8307413 100755 --- a/tests/syncoid/run-tests.sh +++ b/tests/syncoid/run-tests.sh @@ -17,8 +17,11 @@ for test in $(find . -mindepth 1 -maxdepth 1 -type d -printf "%P\n" | sort -g); cd "${test}" echo | bash run.sh > "${LOGFILE}" 2>&1 - if [ $? -eq 0 ]; then + ret=$? 
+ if [ $ret -eq 0 ]; then echo "[PASS]" + elif [ $ret -eq 130 ]; then + echo "[SKIPPED]" else echo "[FAILED] (see ${LOGFILE})" fi From 4e86733c1a618b2084046a0950a7bbf2bf6b88e9 Mon Sep 17 00:00:00 2001 From: Christoph Klaffl Date: Fri, 5 Apr 2024 15:22:13 +0200 Subject: [PATCH 102/130] missed debug statement --- tests/syncoid/011_sync_out-of-order_snapshots/run.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/syncoid/011_sync_out-of-order_snapshots/run.sh b/tests/syncoid/011_sync_out-of-order_snapshots/run.sh index af87979..bb96ad0 100755 --- a/tests/syncoid/011_sync_out-of-order_snapshots/run.sh +++ b/tests/syncoid/011_sync_out-of-order_snapshots/run.sh @@ -10,7 +10,7 @@ set -e if [ -z "$ALLOW_INVASIVE_TESTS" ]; then exit 130 fi -exit 0 + POOL_IMAGE="/tmp/syncoid-test-11.zpool" POOL_SIZE="64M" POOL_NAME="syncoid-test-11" From d7ed4bdf540de61995e7377103395a1534ee905c Mon Sep 17 00:00:00 2001 From: Christoph Klaffl Date: Fri, 5 Apr 2024 15:24:42 +0200 Subject: [PATCH 103/130] support relative paths --- findoid | 3 +++ 1 file changed, 3 insertions(+) diff --git a/findoid b/findoid index 0bb5e5f..2561246 100755 --- a/findoid +++ b/findoid @@ -25,6 +25,9 @@ if ($args{'path'} eq '') { } } +# resolve given path to a canonical one +$args{'path'} = Cwd::realpath($args{'path'}); + my $dataset = getdataset($args{'path'}); my %versions = getversions($args{'path'}, $dataset); From a059054ffb78ece128f79de8c9fc65342e3006d5 Mon Sep 17 00:00:00 2001 From: Christoph Klaffl Date: Wed, 17 Apr 2024 08:14:04 +0200 Subject: [PATCH 104/130] rename ssh control socket to avoid problem with length limits and conflicts --- syncoid | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/syncoid b/syncoid index 61814d2..a1cf416 100755 --- a/syncoid +++ b/syncoid @@ -1786,8 +1786,15 @@ sub getssh { if ($rhost ne "") { if ($remoteuser eq 'root' || $args{'no-privilege-elevation'}) { $isroot = 1; } else { $isroot = 0; } + + my $sanitizedrhost = $rhost; + $sanitizedrhost =~ s/[^a-zA-Z0-9-]//g; + # unix socket path have a length limit of about 104 characters so make sure it's not exceeded + $sanitizedrhost = substr($sanitizedrhost, 0, 50); + # now we need to establish a persistent master SSH connection - $socket = "/tmp/syncoid-$rhost-" . time() . "-" . int(rand(10000)); + $socket = "/tmp/syncoid-$sanitizedrhost-" . time() . "-" . $$ . "-" . 
int(rand(10000)); + open FH, "$sshcmd -M -S $socket -o ControlPersist=1m $args{'sshport'} $rhost exit |"; close FH; From eb4fe8a01cf1916d275bb809247cb0744dc3b33f Mon Sep 17 00:00:00 2001 From: Christoph Klaffl Date: Thu, 18 Apr 2024 07:42:47 +0200 Subject: [PATCH 105/130] added missing status information about what is done and provide more details --- syncoid | 71 +++++++++++++++++++++++++++++++++++++++++++++------------ 1 file changed, 57 insertions(+), 14 deletions(-) diff --git a/syncoid b/syncoid index a1cf416..2be2ab0 100755 --- a/syncoid +++ b/syncoid @@ -498,7 +498,6 @@ sub syncdataset { my $ret; if (defined $origin) { - writelog('INFO', "Clone is recreated on target $targetfs based on $origin"); ($ret, $stdout) = syncclone($sourcehost, $sourcefs, $origin, $targethost, $targetfs, $oldestsnap); if ($ret) { writelog('INFO', "clone creation failed, trying ordinary replication as fallback"); @@ -506,12 +505,6 @@ sub syncdataset { return 0; } } else { - if (!defined ($args{'no-stream'}) ) { - writelog('INFO', "Sending oldest full snapshot $sourcefs\@$oldestsnap to new target filesystem:"); - } else { - writelog('INFO', "--no-stream selected; sending newest full snapshot $sourcefs\@$oldestsnap to new target filesystem:"); - } - ($ret, $stdout) = syncfull($sourcehost, $sourcefs, $targethost, $targetfs, $oldestsnap); } @@ -532,8 +525,6 @@ sub syncdataset { # $originaltargetreadonly = getzfsvalue($targethost,$targetfs,$targetisroot,'readonly'); # setzfsvalue($targethost,$targetfs,$targetisroot,'readonly','on'); - writelog('INFO', "Updating new target filesystem with incremental $sourcefs\@$oldestsnap ... $newsyncsnap:"); - (my $ret, $stdout) = syncincremental($sourcehost, $sourcefs, $targethost, $targetfs, $oldestsnap, $newsyncsnap, 0); if ($ret != 0) { @@ -898,7 +889,6 @@ sub runsynccmd { my $disp_pvsize = $pvsize == 0 ? 'UNKNOWN' : readablebytes($pvsize); my $sendoptions; if ($sendsource =~ / -t /) { - writelog('INFO', "Resuming interrupted zfs send/receive from $sourcefs to $targetfs (~ $disp_pvsize remaining):"); $sendoptions = getoptionsline(\@sendoptions, ('P','V','e','v')); } elsif ($sendsource =~ /#/) { $sendoptions = getoptionsline(\@sendoptions, ('L','V','c','e','w')); @@ -934,12 +924,13 @@ sub runsynccmd { my $recvcmd = "$targetsudocmd $zfscmd receive $recvoptions $targetfsescaped 2>&1"; my $synccmd = buildsynccmd($sendcmd,$recvcmd,$pvsize,$sourceisroot,$targetisroot); - writelog('INFO', "Sync size: ~$disp_pvsize"); + writelog('DEBUG', "sync size: ~$disp_pvsize"); writelog('DEBUG', "$synccmd"); # make sure target is (still) not currently in receive. if (iszfsbusy($targethost,$targetfs,$targetisroot)) { - writelog('WARN', "Cannot sync now: $targetfs is already target of a zfs receive process."); + my $targetname = buildnicename($targethost, $targetfs); + writelog('WARN', "Cannot sync now: $targetname is already target of a zfs receive process."); return (1, ''); } @@ -971,6 +962,16 @@ sub syncfull { my $sendsource = "$sourcefsescaped\@$snapescaped"; my $pvsize = getsendsize($sourcehost,"$sourcefs\@$snapname",0,$sourceisroot); + my $srcname = buildnicename($sourcehost, $sourcefs, $snapname); + my $targetname = buildnicename($targethost, $targetfs); + my $disp_pvsize = $pvsize == 0 ? 
'UNKNOWN' : readablebytes($pvsize); + + if (!defined ($args{'no-stream'}) ) { + writelog('INFO', "Sending oldest full snapshot $srcname to new target filesystem $targetname (~ $disp_pvsize):"); + } else { + writelog('INFO', "--no-stream selected; sending newest full snapshot $srcname to new target filesystem $targetname: (~ $disp_pvsize)"); + } + return runsynccmd($sourcehost, $sourcefs, $sendsource, $targethost, $targetfs, $pvsize); } # end syncfull() @@ -1011,7 +1012,6 @@ sub syncincremental { foreach my $i (0..(scalar(@intsnaps) - 2)) { my $snapa = $intsnaps[$i]; my $snapb = $intsnaps[$i + 1]; - writelog('INFO', "Performing an incremental sync between '$snapa' and '$snapb'"); syncincremental($sourcehost, $sourcefs, $targethost, $targetfs, $snapa, $snapb, 1) == 0 or return $?; } @@ -1026,6 +1026,12 @@ sub syncincremental { my $sendsource = "$streamarg $sourcefsescaped\@$fromsnapescaped $sourcefsescaped\@$tosnapescaped"; my $pvsize = getsendsize($sourcehost,"$sourcefs\@$fromsnap","$sourcefs\@$tosnap",$sourceisroot); + my $srcname = buildnicename($sourcehost, $sourcefs, $fromsnap); + my $targetname = buildnicename($targethost, $targetfs); + my $disp_pvsize = $pvsize == 0 ? 'UNKNOWN' : readablebytes($pvsize); + + writelog('INFO', "Sending incremental $srcname ... $tosnap to $targetname (~ $disp_pvsize):"); + return runsynccmd($sourcehost, $sourcefs, $sendsource, $targethost, $targetfs, $pvsize); } # end syncincremental() @@ -1038,6 +1044,12 @@ sub syncclone { my $sendsource = "-i $originescaped $sourcefsescaped\@$tosnapescaped"; my $pvsize = getsendsize($sourcehost,$origin,"$sourcefs\@$tosnap",$sourceisroot); + my $srcname = buildnicename($sourcehost, $origin); + my $targetname = buildnicename($targethost, $targetfs); + my $disp_pvsize = $pvsize == 0 ? 'UNKNOWN' : readablebytes($pvsize); + + writelog('INFO', "Clone is recreated on target $targetname based on $srcname (~ $disp_pvsize):"); + return runsynccmd($sourcehost, $sourcefs, $sendsource, $targethost, $targetfs, $pvsize); } # end syncclone() @@ -1047,6 +1059,12 @@ sub syncresume { my $sendsource = "-t $receivetoken"; my $pvsize = getsendsize($sourcehost,"","",$sourceisroot,$receivetoken); + my $srcname = buildnicename($sourcehost, $sourcefs); + my $targetname = buildnicename($targethost, $targetfs); + my $disp_pvsize = $pvsize == 0 ? 'UNKNOWN' : readablebytes($pvsize); + + writelog('INFO', "Resuming interrupted zfs send/receive from $srcname to $targetname (~ $disp_pvsize remaining):"); + return runsynccmd($sourcehost, $sourcefs, $sendsource, $targethost, $targetfs, $pvsize); } # end syncresume() @@ -1058,6 +1076,11 @@ sub syncbookmark { my $tosnapescaped = escapeshellparam($tosnap); my $sendsource = "-i $sourcefsescaped#$bookmarkescaped $sourcefsescaped\@$tosnapescaped"; + my $srcname = buildnicename($sourcehost, $sourcefs, '', $bookmark); + my $targetname = buildnicename($targethost, $targetfs); + + writelog('INFO', "Sending incremental $srcname ... $tosnap to $targetname:"); + return runsynccmd($sourcehost, $sourcefs, $sendsource, $targethost, $targetfs, 0); } # end syncbookmark @@ -1507,7 +1530,7 @@ sub getnewestsnapshot { my $snaps = shift; foreach my $snap (sort { sortsnapshots($snaps, $b, $a) } keys %{ $snaps{'source'} }) { # return on first snap found - it's the newest - writelog('INFO', "NEWEST SNAPSHOT: $snap"); + writelog('DEBUG', "NEWEST SNAPSHOT: $snap"); return $snap; } # must not have had any snapshots on source - looks like we'd better create one! 
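Each of the new status messages in this patch derives its human-readable size the same way, via my $disp_pvsize = $pvsize == 0 ? 'UNKNOWN' : readablebytes($pvsize);. A rough sketch of what a readablebytes-style helper does is shown below; the 1024-based units and one-decimal formatting are assumptions for illustration, not necessarily what syncoid's actual readablebytes() produces.

```perl
# Sketch of a readablebytes-style helper; binary (1024-based) units and
# one decimal place are assumptions, not necessarily what syncoid's real
# readablebytes() outputs.
sub readablebytes_sketch {
    my ($bytes) = @_;
    my @units = ('B', 'KB', 'MB', 'GB', 'TB', 'PB');
    my $i = 0;
    # divide down until the value fits the largest sensible unit
    while ($bytes >= 1024 && $i < $#units) {
        $bytes /= 1024;
        $i++;
    }
    return $i == 0 ? sprintf('%d %s', $bytes, $units[$i])
                   : sprintf('%.1f %s', $bytes, $units[$i]);
}

print readablebytes_sketch(123456789), "\n"; # prints "117.7 MB"
```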
@@ -2233,6 +2256,26 @@ sub snapisincluded { return 1; } +sub buildnicename { + my ($host,$fs,$snapname,$bookmarkname) = @_; + + my $name; + if ($host) { + $host =~ s/-S \/tmp\/syncoid[a-zA-Z0-9-@]+ //g; + $name = "$host:$fs"; + } else { + $name = "$fs"; + } + + if ($snapname) { + $name = "$name\@$snapname"; + } elsif ($bookmarkname) { + $name = "$name#$bookmarkname"; + } + + return $name; +} + __END__ =head1 NAME From 6c1e31e551bc5ef6222becbbcfd52f0d617dda4b Mon Sep 17 00:00:00 2001 From: Christoph Klaffl Date: Thu, 18 Apr 2024 08:22:37 +0200 Subject: [PATCH 106/130] handle error output for filtered replications --- syncoid | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/syncoid b/syncoid index a1cf416..b57aa43 100755 --- a/syncoid +++ b/syncoid @@ -1012,7 +1012,11 @@ sub syncincremental { my $snapa = $intsnaps[$i]; my $snapb = $intsnaps[$i + 1]; writelog('INFO', "Performing an incremental sync between '$snapa' and '$snapb'"); - syncincremental($sourcehost, $sourcefs, $targethost, $targetfs, $snapa, $snapb, 1) == 0 or return $?; + (my $ret, my $stdout) = syncincremental($sourcehost, $sourcefs, $targethost, $targetfs, $snapa, $snapb, 1); + + if ($ret != 0) { + return ($ret, $stdout); + } } # Return after finishing the -i syncs so that we don't try to do another -I From 03c3db3d9aa30f5354ec433b7528f9f9385faec9 Mon Sep 17 00:00:00 2001 From: Steffen Dettmer Date: Sat, 20 Apr 2024 12:55:44 +0200 Subject: [PATCH 107/130] sanoid #912: sanoid --prune-snapshots performance boost by removing unneeded iszfsbusy() --- README.md | 4 ---- sanoid | 62 +++++++++++++++---------------------------------------- 2 files changed, 17 insertions(+), 49 deletions(-) diff --git a/README.md b/README.md index f2ce10c..85539d5 100644 --- a/README.md +++ b/README.md @@ -80,10 +80,6 @@ For more full details on sanoid.conf settings see [Wiki page](https://github.com This will process your sanoid.conf file, it will NOT create snapshots, but it will purge expired ones. -+ --force-prune - - Purges expired snapshots even if a send/recv is in progress - + --monitor-snapshots This option is designed to be run by a Nagios monitoring system. It reports on the health of your snapshots. diff --git a/sanoid b/sanoid index 295957b..22f630e 100755 --- a/sanoid +++ b/sanoid @@ -25,7 +25,7 @@ my %args = ( GetOptions(\%args, "verbose", "debug", "cron", "readonly", "quiet", "configdir=s", "cache-dir=s", "run-dir=s", "monitor-health", "force-update", - "monitor-snapshots", "take-snapshots", "prune-snapshots", "force-prune", + "monitor-snapshots", "take-snapshots", "prune-snapshots", "monitor-capacity" ) or pod2usage(2); @@ -40,7 +40,7 @@ my $cacheTTL = 900; # 15 minutes # Allow a much older snapshot cache file than default if _only_ "--monitor-*" action commands are given # (ignore "--verbose", "--configdir" etc) -if (($args{'monitor-snapshots'} || $args{'monitor-health'} || $args{'monitor-capacity'}) && ! ($args{'cron'} || $args{'force-update'} || $args{'take-snapshots'} || $args{'prune-snapshots'} || $args{'force-prune'})) { +if (($args{'monitor-snapshots'} || $args{'monitor-health'} || $args{'monitor-capacity'}) && ! 
($args{'cron'} || $args{'force-update'} || $args{'take-snapshots'} || $args{'prune-snapshots'})) { # The command combination above must not assert true for any command that takes or prunes snapshots $cacheTTL = 18000; # 5 hours if ($args{'debug'}) { print "DEBUG: command combo means that the cache file (provided it exists) will be allowed to be older than default.\n"; } @@ -349,26 +349,23 @@ sub prune_snapshots { } if ($args{'verbose'}) { print "INFO: pruning $snap ... \n"; } - if (!$args{'force-prune'} && iszfsbusy($path)) { - if ($args{'verbose'}) { print "INFO: deferring pruning of $snap - $path is currently in zfs send or receive.\n"; } - } else { - if (! $args{'readonly'}) { - if (system($zfs, "destroy", $snap) == 0) { - $pruned{$snap} = 1; - if ($config{$dataset}{'pruning_script'}) { - $ENV{'SANOID_TARGET'} = $dataset; - $ENV{'SANOID_SNAPNAME'} = $snapname; - $ENV{'SANOID_SCRIPT'} = 'prune'; - if ($args{'verbose'}) { print "executing pruning_script '".$config{$dataset}{'pruning_script'}."' on dataset '$dataset'\n"; } - my $ret = runscript('pruning_script',$dataset); - delete $ENV{'SANOID_TARGET'}; - delete $ENV{'SANOID_SNAPNAME'}; - delete $ENV{'SANOID_SCRIPT'}; - } - } else { - warn "could not remove $snap : $?"; + if (! $args{'readonly'}) { + if (system($zfs, "destroy", $snap) == 0) { + $pruned{$snap} = 1; + if ($config{$dataset}{'pruning_script'}) { + $ENV{'SANOID_TARGET'} = $dataset; + $ENV{'SANOID_SNAPNAME'} = $snapname; + $ENV{'SANOID_SCRIPT'} = 'prune'; + if ($args{'verbose'}) { print "executing pruning_script '".$config{$dataset}{'pruning_script'}."' on dataset '$dataset'\n"; } + my $ret = runscript('pruning_script',$dataset); + + delete $ENV{'SANOID_TARGET'}; + delete $ENV{'SANOID_SNAPNAME'}; + delete $ENV{'SANOID_SCRIPT'}; } + } else { + warn "could not remove $snap : $?"; } } } @@ -1557,30 +1554,6 @@ sub writelock { close FH; } -sub iszfsbusy { - # check to see if ZFS filesystem passed in as argument currently has a zfs send or zfs receive process referencing it. - # return true if busy (currently being sent or received), return false if not. 
- - my $fs = shift; - # if (args{'debug'}) { print "DEBUG: checking to see if $fs on is already in zfs receive using $pscmd -Ao args= ...\n"; } - - open PL, "$pscmd -Ao args= |"; - my @processes = <PL>; - close PL; - - foreach my $process (@processes) { - # if ($args{'debug'}) { print "DEBUG: checking process $process...\n"; } - if ($process =~ /zfs *(send|receive|recv).*$fs/) { - # there's already a zfs send/receive process for our target filesystem - return true - # if ($args{'debug'}) { print "DEBUG: process $process matches target $fs!\n"; } - return 1; - } - } - - # no zfs receive processes for our target filesystem found - return false - return 0; -} - #######################################################################################################################3 #######################################################################################################################3 #######################################################################################################################3 @@ -1752,7 +1725,6 @@ Options: --monitor-snapshots Reports on snapshot "health", in a Nagios compatible format --take-snapshots Creates snapshots as specified in sanoid.conf --prune-snapshots Purges expired snapshots as specified in sanoid.conf - --force-prune Purges expired snapshots even if a send/recv is in progress --help Prints this helptext --version Prints the version number From 8b7d29d5a030d8620ceefc7822b5226d9729c71a Mon Sep 17 00:00:00 2001 From: 0xFelix Date: Sat, 20 Apr 2024 18:41:43 +0200 Subject: [PATCH 108/130] syncoid: Add zstdmt compress options Add the zstdmt-fast and zstdmt-slow compress options to allow use of multithreading when using zstd compression. Signed-off-by: 0xFelix --- syncoid | 16 ++++++++++++++-- 1 file changed, 14 insertions(+), 2 deletions(-) diff --git a/syncoid b/syncoid index b57aa43..9f9eb89 100755 --- a/syncoid +++ b/syncoid @@ -1114,12 +1114,24 @@ sub compressargset { decomrawcmd => 'zstd', decomargs => '-dc', }, + 'zstdmt-fast' => { + rawcmd => 'zstdmt', + args => '-3', + decomrawcmd => 'zstdmt', + decomargs => '-dc', + }, 'zstd-slow' => { rawcmd => 'zstd', args => '-19', decomrawcmd => 'zstd', decomargs => '-dc', }, + 'zstdmt-slow' => { + rawcmd => 'zstdmt', + args => '-19', + decomrawcmd => 'zstdmt', + decomargs => '-dc', + }, 'xz' => { rawcmd => 'xz', args => '', @@ -1142,7 +1154,7 @@ sub compressargset { if ($value eq 'default') { $value = $DEFAULT_COMPRESSION; - } elsif (!(grep $value eq $_, ('gzip', 'pigz-fast', 'pigz-slow', 'zstd-fast', 'zstd-slow', 'lz4', 'xz', 'lzo', 'default', 'none'))) { + } elsif (!(grep $value eq $_, ('gzip', 'pigz-fast', 'pigz-slow', 'zstd-fast', 'zstdmt-fast', 'zstd-slow', 'zstdmt-slow', 'lz4', 'xz', 'lzo', 'default', 'none'))) { writelog('WARN', "Unrecognised compression value $value, defaulting to $DEFAULT_COMPRESSION"); $value = $DEFAULT_COMPRESSION; } @@ -2255,7 +2267,7 @@ syncoid - ZFS snapshot replication tool Options: - --compress=FORMAT Compresses data during transfer. Currently accepted options are gzip, pigz-fast, pigz-slow, zstd-fast, zstd-slow, lz4, xz, lzo (default) & none + --compress=FORMAT Compresses data during transfer. Currently accepted options are gzip, pigz-fast, pigz-slow, zstd-fast, zstdmt-fast, zstd-slow, zstdmt-slow, lz4, xz, lzo (default) & none --identifier=EXTRA Extra identifier which is included in the snapshot name. Can be used for replicating to multiple targets. --recursive|r Also transfers child datasets --skip-parent Skips syncing of the parent dataset.
Does nothing without '--recursive' option. From 6f74c7c4b39a7ab35d671e8df6f26919cc347e12 Mon Sep 17 00:00:00 2001 From: Christoph Klaffl Date: Tue, 23 Apr 2024 23:38:47 +0200 Subject: [PATCH 109/130] * improve performance (especially for monitor commands) by caching the dataset list * list snapshots only when needed --- sanoid | 145 +++++++++++++++++++++++++++++++++++++++++++++++---------- 1 file changed, 120 insertions(+), 25 deletions(-) diff --git a/sanoid b/sanoid index 295957b..533f9ea 100755 --- a/sanoid +++ b/sanoid @@ -35,17 +35,6 @@ if (keys %args < 4) { $args{'verbose'} = 1; } - -my $cacheTTL = 900; # 15 minutes - -# Allow a much older snapshot cache file than default if _only_ "--monitor-*" action commands are given -# (ignore "--verbose", "--configdir" etc) -if (($args{'monitor-snapshots'} || $args{'monitor-health'} || $args{'monitor-capacity'}) && ! ($args{'cron'} || $args{'force-update'} || $args{'take-snapshots'} || $args{'prune-snapshots'} || $args{'force-prune'})) { - # The command combination above must not assert true for any command that takes or prunes snapshots - $cacheTTL = 18000; # 5 hours - if ($args{'debug'}) { print "DEBUG: command combo means that the cache file (provided it exists) will be allowed to be older than default.\n"; } -} - # for compatibility reasons, older versions used hardcoded command paths $ENV{'PATH'} = $ENV{'PATH'} . ":/bin:/sbin"; @@ -57,25 +46,70 @@ my $zpool = 'zpool'; my $conf_file = "$args{'configdir'}/sanoid.conf"; my $default_conf_file = "$args{'configdir'}/sanoid.defaults.conf"; -# parse config file -my %config = init($conf_file,$default_conf_file); - my $cache_dir = $args{'cache-dir'}; my $run_dir = $args{'run-dir'}; make_path($cache_dir); make_path($run_dir); -# if we call getsnaps(%config,1) it will forcibly update the cache, TTL or no TTL -my $forcecacheupdate = 0; +my $cacheTTL = 1200; # 20 minutes + +# Allow a much older snapshot cache file than default if _only_ "--monitor-*" action commands are given +# (ignore "--verbose", "--configdir" etc) +if ( + ( + $args{'monitor-snapshots'} + || $args{'monitor-health'} + || $args{'monitor-capacity'} + ) && ! 
( + $args{'cron'} + || $args{'force-update'} + || $args{'take-snapshots'} + || $args{'prune-snapshots'} + || $args{'force-prune'} + ) +) { + # The command combination above must not assert true for any command that takes or prunes snapshots + $cacheTTL = 18000; # 5 hours + if ($args{'debug'}) { print "DEBUG: command combo means that the cache file (provided it exists) will be allowed to be older than default.\n"; } +} + +# snapshot cache my $cache = "$cache_dir/snapshots.txt"; -my %snaps = getsnaps( \%config, $cacheTTL, $forcecacheupdate ); + +# configured dataset cache +my $cachedatasetspath = "$cache_dir/datasets.txt"; +my @cachedatasets; + +# parse config file +my %config = init($conf_file,$default_conf_file); + my %pruned; my %capacitycache; -my %snapsbytype = getsnapsbytype( \%config, \%snaps ); +my %snaps; +my %snapsbytype; +my %snapsbypath; -my %snapsbypath = getsnapsbypath( \%config, \%snaps ); +# get snapshot list only if needed +if ($args{'monitor-snapshots'} + || $args{'monitor-health'} + || $args{'cron'} + || $args{'take-snapshots'} + || $args{'prune-snapshots'} + || $args{'force-update'} + || $args{'debug'} +) { + my $forcecacheupdate = 0; + if ($args{'force-update'}) { + $forcecacheupdate = 1; + } + + %snaps = getsnaps( \%config, $cacheTTL, $forcecacheupdate); + + %snapsbytype = getsnapsbytype( \%config, \%snaps ); + %snapsbypath = getsnapsbypath( \%config, \%snaps ); +} # let's make it a little easier to be consistent passing these hashes in the same order to each sub my @params = ( \%config, \%snaps, \%snapsbytype, \%snapsbypath ); @@ -84,7 +118,6 @@ if ($args{'debug'}) { $args{'verbose'}=1; blabber (@params); } if ($args{'monitor-snapshots'}) { monitor_snapshots(@params); } if ($args{'monitor-health'}) { monitor_health(@params); } if ($args{'monitor-capacity'}) { monitor_capacity(@params); } -if ($args{'force-update'}) { my $snaps = getsnaps( \%config, $cacheTTL, 1 ); } if ($args{'cron'}) { if ($args{'quiet'}) { $args{'verbose'} = 0; } @@ -275,7 +308,6 @@ sub prune_snapshots { my ($config, $snaps, $snapsbytype, $snapsbypath) = @_; my %datestamp = get_date(); - my $forcecacheupdate = 0; foreach my $section (keys %config) { if ($section =~ /^template/) { next; } @@ -826,7 +858,7 @@ sub getsnaps { if (checklock('sanoid_cacheupdate')) { writelock('sanoid_cacheupdate'); if ($args{'verbose'}) { - if ($args{'force-update'}) { + if ($forcecacheupdate) { print "INFO: cache forcibly expired - updating from zfs list.\n"; } else { print "INFO: cache expired - updating from zfs list.\n"; @@ -901,6 +933,20 @@ sub init { die "FATAL: you're using sanoid.defaults.conf v$defaults_version, this version of sanoid requires a minimum sanoid.defaults.conf v$MINIMUM_DEFAULTS_VERSION"; } + my @updatedatasets; + + # load dataset cache if valid + if (!$args{'force-update'} && -f $cachedatasetspath) { + my ($dev, $ino, $mode, $nlink, $uid, $gid, $rdev, $size, $atime, $mtime, $ctime, $blksize, $blocks) = stat($cachedatasetspath); + + if ((time() - $mtime) <= $cacheTTL) { + if ($args{'debug'}) { print "DEBUG: dataset cache not expired (" . (time() - $mtime) . " seconds old with TTL of $cacheTTL): pulling dataset list from cache.\n"; } + open FH, "< $cachedatasetspath"; + @cachedatasets = ; + close FH; + } + } + foreach my $section (keys %ini) { # first up - die with honor if unknown parameters are set in any modules or templates by the user. @@ -990,6 +1036,10 @@ sub init { $config{$section}{'path'} = $section; } + if (! 
@cachedatasets) { + push (@updatedatasets, "$config{$section}{'path'}\n"); + } + # how 'bout some recursion? =) if ($config{$section}{'zfs_recursion'} && $config{$section}{'zfs_recursion'} == 1 && $config{$section}{'autosnap'} == 1) { warn "ignored autosnap configuration for '$section' because it's part of a zfs recursion.\n"; @@ -1007,6 +1057,10 @@ sub init { @datasets = getchilddatasets($config{$section}{'path'}); DATASETS: foreach my $dataset(@datasets) { + if (! @cachedatasets) { + push (@updatedatasets, $dataset); + } + chomp $dataset; if ($zfsRecursive) { @@ -1038,9 +1092,26 @@ sub init { $config{$dataset}{'initialized'} = 1; } } + } - - + # update dataset cache if it was unused + if (! @cachedatasets) { + if (checklock('sanoid_cachedatasetupdate')) { + writelock('sanoid_cachedatasetupdate'); + if ($args{'verbose'}) { + if ($args{'force-update'}) { + print "INFO: dataset cache forcibly expired - updating from zfs list.\n"; + } else { + print "INFO: dataset cache expired - updating from zfs list.\n"; + } + } + open FH, "> $cachedatasetspath" or die 'Could not write to $cachedatasetspath!\n'; + print FH @updatedatasets; + close FH; + removelock('sanoid_cachedatasetupdate'); + } else { + if ($args{'verbose'}) { print "INFO: deferring dataset cache update - valid cache update lock held by another sanoid process.\n"; } + } } return %config; @@ -1590,6 +1661,30 @@ sub getchilddatasets { my $fs = shift; my $mysudocmd = ''; + # use dataset cache if available + if (@cachedatasets) { + my $foundparent = 0; + my @cachechildren = (); + foreach my $dataset (@cachedatasets) { + chomp $dataset; + my $ret = rindex $dataset, "${fs}/", 0; + if ($ret == 0) { + push (@cachechildren, $dataset); + } else { + if ($dataset eq $fs) { + $foundparent = 1; + } + } + } + + # sanity check + if ($foundparent) { + return @cachechildren; + } + + # fallback if cache misses items for whatever reason + } + my $getchildrencmd = "$mysudocmd $zfs list -o name -t filesystem,volume -Hr $fs |"; if ($args{'debug'}) { print "DEBUG: getting list of child datasets on $fs using $getchildrencmd...\n"; } open FH, $getchildrencmd; @@ -1645,7 +1740,7 @@ sub removecachedsnapshots { close FH; removelock('sanoid_cacheupdate'); - %snaps = getsnaps(\%config,$cacheTTL,$forcecacheupdate); + %snaps = getsnaps(\%config,$cacheTTL,0); # clear hash undef %pruned; From 9c0468ee45b1af1e5a0c809bbefbcd4e6855f364 Mon Sep 17 00:00:00 2001 From: Christoph Klaffl Date: Wed, 24 Apr 2024 00:09:40 +0200 Subject: [PATCH 110/130] write cache files in an atomic way to prevent race conditions --- sanoid | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/sanoid b/sanoid index 533f9ea..f74f731 100755 --- a/sanoid +++ b/sanoid @@ -868,9 +868,10 @@ sub getsnaps { @rawsnaps = ; close FH; - open FH, "> $cache" or die 'Could not write to $cache!\n'; + open FH, "> $cache.tmp" or die 'Could not write to $cache.tmp!\n'; print FH @rawsnaps; close FH; + rename("$cache.tmp", "$cache") or die 'Could not rename to $cache!\n'; removelock('sanoid_cacheupdate'); } else { if ($args{'verbose'}) { print "INFO: deferring cache update - valid cache update lock held by another sanoid process.\n"; } @@ -1105,9 +1106,10 @@ sub init { print "INFO: dataset cache expired - updating from zfs list.\n"; } } - open FH, "> $cachedatasetspath" or die 'Could not write to $cachedatasetspath!\n'; + open FH, "> $cachedatasetspath.tmp" or die 'Could not write to $cachedatasetspath.tmp!\n'; print FH @updatedatasets; close FH; + rename("$cachedatasetspath.tmp", 
"$cachedatasetspath") or die 'Could not rename to $cachedatasetspath!\n'; removelock('sanoid_cachedatasetupdate'); } else { if ($args{'verbose'}) { print "INFO: deferring dataset cache update - valid cache update lock held by another sanoid process.\n"; } @@ -1731,13 +1733,14 @@ sub removecachedsnapshots { my @rawsnaps = ; close FH; - open FH, "> $cache" or die 'Could not write to $cache!\n'; + open FH, "> $cache.tmp" or die 'Could not write to $cache.tmp!\n'; foreach my $snapline ( @rawsnaps ) { my @columns = split("\t", $snapline); my $snap = $columns[0]; print FH $snapline unless ( exists($pruned{$snap}) ); } close FH; + rename("$cache.tmp", "$cache") or die 'Could not rename to $cache!\n'; removelock('sanoid_cacheupdate'); %snaps = getsnaps(\%config,$cacheTTL,0); From fab4b4076cb10b4f4722e827e0fad3d940fc4a5b Mon Sep 17 00:00:00 2001 From: Nick Liu Date: Wed, 24 Apr 2024 13:08:21 -0500 Subject: [PATCH 111/130] fix(syncoid): `zfs send` arg allowlist when sendsource is receivetoken The `runsynccmd` subroutine was not matching the `$sendsource` when a receive resume token is passed in. All usages that pass in the receive resume token do not begin with a space; instead, they start with `-t `. Fixes: https://github.com/jimsalterjrs/sanoid/issues/918 --- syncoid | 2 +- tests/syncoid/012_receive_resume_token/run.sh | 55 +++++++++++++++++++ 2 files changed, 56 insertions(+), 1 deletion(-) create mode 100755 tests/syncoid/012_receive_resume_token/run.sh diff --git a/syncoid b/syncoid index 79ad45f..94bdb03 100755 --- a/syncoid +++ b/syncoid @@ -898,7 +898,7 @@ sub runsynccmd { my $disp_pvsize = $pvsize == 0 ? 'UNKNOWN' : readablebytes($pvsize); my $sendoptions; - if ($sendsource =~ / -t /) { + if ($sendsource =~ /^-t /) { $sendoptions = getoptionsline(\@sendoptions, ('P','V','e','v')); } elsif ($sendsource =~ /#/) { $sendoptions = getoptionsline(\@sendoptions, ('L','V','c','e','w')); diff --git a/tests/syncoid/012_receive_resume_token/run.sh b/tests/syncoid/012_receive_resume_token/run.sh new file mode 100755 index 0000000..a28becc --- /dev/null +++ b/tests/syncoid/012_receive_resume_token/run.sh @@ -0,0 +1,55 @@ +#!/bin/bash + +# test verifying syncoid behavior with partial transfers + +set -x + +. ../../common/lib.sh + +POOL_IMAGE="/tmp/syncoid-test-012.zpool" +POOL_SIZE="128M" +POOL_NAME="syncoid-test-012" +MOUNT_TARGET="/tmp/syncoid-test-012.mount" + +truncate -s "${POOL_SIZE}" "${POOL_IMAGE}" + +zpool create -O mountpoint="${MOUNT_TARGET}" -f "${POOL_NAME}" "${POOL_IMAGE}" + +function cleanUp { + zpool destroy "${POOL_NAME}" + rm -f "${POOL_IMAGE}" +} + +# Clean up the pool and image file on exit +trap cleanUp EXIT + +zfs create "${POOL_NAME}/source" +zfs snap "${POOL_NAME}/source@empty" +dd if=/dev/urandom of="${MOUNT_TARGET}/source/garbage.bin" bs=1M count=16 +zfs snap "${POOL_NAME}/source@something" + +# Simulate interrupted transfer +zfs send -pwR "${POOL_NAME}/source@something" | head --bytes=8M | zfs recv -s "${POOL_NAME}/destination" + +# Using syncoid to continue interrupted transfer +../../../syncoid --sendoptions="pw" "${POOL_NAME}/source" "${POOL_NAME}/destination" + +# Check if syncoid succeeded in handling the interrupted transfer +if [ $? -eq 0 ]; then + echo "Syncoid resumed transfer successfully." 
+ + # Verify data integrity with sha256sum comparison + original_sum=$(sha256sum "${MOUNT_TARGET}/source/garbage.bin" | cut -d ' ' -f 1) + received_sum=$(sha256sum "${MOUNT_TARGET}/destination/garbage.bin" | cut -d ' ' -f 1) + + if [ "${original_sum}" == "${received_sum}" ]; then + echo "Data integrity verified." + exit 0 + else + echo "Data integrity check failed." + exit 1 + fi +else + echo "Regression detected: syncoid did not handle the resuming correctly." + exit 1 +fi From 4d39e3921768fac5f5dc6cb7ce24b3cc804b5c8c Mon Sep 17 00:00:00 2001 From: Christoph Klaffl Date: Mon, 3 Jun 2024 23:46:54 +0200 Subject: [PATCH 112/130] fix debian packaging with debian 12 and ubuntu 24.04 --- packages/debian/postinst | 2 ++ packages/debian/rules | 6 ++---- .../{sanoid-prune.service => sanoid.sanoid-prune.service} | 0 3 files changed, 4 insertions(+), 4 deletions(-) rename packages/debian/{sanoid-prune.service => sanoid.sanoid-prune.service} (100%) diff --git a/packages/debian/postinst b/packages/debian/postinst index 0d6142f..646a461 100755 --- a/packages/debian/postinst +++ b/packages/debian/postinst @@ -2,3 +2,5 @@ # remove old cache file [ -f /var/cache/sanoidsnapshots.txt ] && rm /var/cache/sanoidsnapshots.txt || true +[ -f /var/cache/sanoid/snapshots.txt ] && rm /var/cache/sanoid/snapshots.txt || true +[ -f /var/cache/sanoid/datasets.txt ] && rm /var/cache/sanoid/datasets.txt || true diff --git a/packages/debian/rules b/packages/debian/rules index 51e52af..05a3754 100755 --- a/packages/debian/rules +++ b/packages/debian/rules @@ -12,10 +12,6 @@ override_dh_auto_install: install -d $(DESTDIR)/etc/sanoid install -m 664 sanoid.defaults.conf $(DESTDIR)/etc/sanoid - install -d $(DESTDIR)/lib/systemd/system - install -m 664 debian/sanoid-prune.service debian/sanoid.timer \ - $(DESTDIR)/lib/systemd/system - install -d $(DESTDIR)/usr/sbin install -m 775 \ findoid sanoid sleepymutex syncoid \ @@ -25,6 +21,8 @@ override_dh_auto_install: install -m 664 sanoid.conf \ $(DESTDIR)/usr/share/doc/sanoid/sanoid.conf.example + dh_installsystemd --name sanoid-prune + override_dh_installinit: dh_installinit --noscripts diff --git a/packages/debian/sanoid-prune.service b/packages/debian/sanoid.sanoid-prune.service similarity index 100% rename from packages/debian/sanoid-prune.service rename to packages/debian/sanoid.sanoid-prune.service From cf0ecb30ae2fa62ba57910efe73cb91ccbcd1510 Mon Sep 17 00:00:00 2001 From: Christoph Klaffl Date: Tue, 4 Jun 2024 08:40:41 +0200 Subject: [PATCH 113/130] added deprecation warning for removed force-prune --- sanoid | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/sanoid b/sanoid index 4bffdf6..3585846 100755 --- a/sanoid +++ b/sanoid @@ -25,7 +25,7 @@ my %args = ( GetOptions(\%args, "verbose", "debug", "cron", "readonly", "quiet", "configdir=s", "cache-dir=s", "run-dir=s", "monitor-health", "force-update", - "monitor-snapshots", "take-snapshots", "prune-snapshots", + "monitor-snapshots", "take-snapshots", "prune-snapshots", "force-prune", "monitor-capacity" ) or pod2usage(2); @@ -54,6 +54,10 @@ make_path($run_dir); my $cacheTTL = 1200; # 20 minutes +if ($args{'force-prune'}) { + warn "WARN: --force-prune argument is deprecated and its behavior is now standard"; +} + # Allow a much older snapshot cache file than default if _only_ "--monitor-*" action commands are given # (ignore "--verbose", "--configdir" etc) if ( From b27b120c1974e22f9204809334edb26b1ca2b0f3 Mon Sep 17 00:00:00 2001 From: Ossi A Date: Tue, 25 Jun 2024 11:11:29 +0300 Subject: [PATCH 114/130] 
syncoid: add -X send option in special options --- syncoid | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/syncoid b/syncoid index 79ad45f..3213195 100755 --- a/syncoid +++ b/syncoid @@ -2175,7 +2175,7 @@ sub parsespecialoptions { return undef; } - if ($char eq 'o' || $char eq 'x') { + if ($char eq 'o' || $char eq 'x' || $char eq 'X') { $lastOption = $char; $optionValue = 1; } else { From 81938c3952e75ccc298a32aa13ae5bdb33168b54 Mon Sep 17 00:00:00 2001 From: Aditya Bhargava Date: Fri, 9 Aug 2024 18:18:16 -0400 Subject: [PATCH 115/130] Fix single quotes that should be double quotes --- sanoid | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/sanoid b/sanoid index f74f731..c3242a1 100755 --- a/sanoid +++ b/sanoid @@ -868,10 +868,10 @@ sub getsnaps { @rawsnaps = ; close FH; - open FH, "> $cache.tmp" or die 'Could not write to $cache.tmp!\n'; + open FH, "> $cache.tmp" or die "Could not write to $cache.tmp!\n"; print FH @rawsnaps; close FH; - rename("$cache.tmp", "$cache") or die 'Could not rename to $cache!\n'; + rename("$cache.tmp", "$cache") or die "Could not rename to $cache!\n"; removelock('sanoid_cacheupdate'); } else { if ($args{'verbose'}) { print "INFO: deferring cache update - valid cache update lock held by another sanoid process.\n"; } @@ -1106,10 +1106,10 @@ sub init { print "INFO: dataset cache expired - updating from zfs list.\n"; } } - open FH, "> $cachedatasetspath.tmp" or die 'Could not write to $cachedatasetspath.tmp!\n'; + open FH, "> $cachedatasetspath.tmp" or die "Could not write to $cachedatasetspath.tmp!\n"; print FH @updatedatasets; close FH; - rename("$cachedatasetspath.tmp", "$cachedatasetspath") or die 'Could not rename to $cachedatasetspath!\n'; + rename("$cachedatasetspath.tmp", "$cachedatasetspath") or die "Could not rename to $cachedatasetspath!\n"; removelock('sanoid_cachedatasetupdate'); } else { if ($args{'verbose'}) { print "INFO: deferring dataset cache update - valid cache update lock held by another sanoid process.\n"; } @@ -1733,14 +1733,14 @@ sub removecachedsnapshots { my @rawsnaps = ; close FH; - open FH, "> $cache.tmp" or die 'Could not write to $cache.tmp!\n'; + open FH, "> $cache.tmp" or die "Could not write to $cache.tmp!\n"; foreach my $snapline ( @rawsnaps ) { my @columns = split("\t", $snapline); my $snap = $columns[0]; print FH $snapline unless ( exists($pruned{$snap}) ); } close FH; - rename("$cache.tmp", "$cache") or die 'Could not rename to $cache!\n'; + rename("$cache.tmp", "$cache") or die "Could not rename to $cache!\n"; removelock('sanoid_cacheupdate'); %snaps = getsnaps(\%config,$cacheTTL,0); From 8fa906b97194471c96a1d815496d7083c6ff6ee9 Mon Sep 17 00:00:00 2001 From: Jim Salter Date: Thu, 19 Sep 2024 23:43:39 -0400 Subject: [PATCH 116/130] Update sanoid.conf further clarify recursion=zfs vs recursion=yes --- sanoid.conf | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/sanoid.conf b/sanoid.conf index 0650c3d..04e281f 100644 --- a/sanoid.conf +++ b/sanoid.conf @@ -33,8 +33,11 @@ use_template = production # there are two options for recursive: zfs or yes # * zfs - taken a zfs snapshot with the '-r' flag; zfs will recursively take a snapshot of the whole - # dataset tree which is consistent. + # dataset tree which is consistent. Newly-added child datasets will not immediately get snapshots, + # and must instead slowly catch up to policy over time. Slightly lower storage load. 
+ # # * yes - the snapshots will be taken one-at-time through the sanoid code; not necessarily consistent. + # newly added child datasets will be immediately brought into policy. Slightly higher storage load. recursive = zfs From 3942254e30e915f57d1496def3d58a29b054314e Mon Sep 17 00:00:00 2001 From: Christoph Klaffl Date: Fri, 20 Sep 2024 07:38:12 +0200 Subject: [PATCH 117/130] ignore duplicate template keys --- sanoid | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/sanoid b/sanoid index f74f731..c2d2ebd 100755 --- a/sanoid +++ b/sanoid @@ -1008,6 +1008,12 @@ sub init { } if ($args{'debug'}) { print "DEBUG: overriding $key on $section with value from user-defined template $template.\n"; } $config{$section}{$key} = $ini{$template}{$key}; + + my $value = $config{$section}{$key}; + if (ref($value) eq 'ARRAY') { + # handle duplicates silently (warning was already printed above) + $config{$section}{$key} = $value->[0]; + } } } } From 4e545ada0c02be39c5b45038b4ff4589ae973977 Mon Sep 17 00:00:00 2001 From: Myna IT Labs Date: Mon, 23 Sep 2024 14:29:14 -0700 Subject: [PATCH 118/130] Update INSTALL.md Fixed bad link to CentOS section. And clarified that this area is applicable for RHEL family of popular OS distributions: RHEL/CentOS/AlmaLinux --- INSTALL.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/INSTALL.md b/INSTALL.md index e0048b6..12b6734 100644 --- a/INSTALL.md +++ b/INSTALL.md @@ -6,7 +6,7 @@ - [Installation](#installation) - [Debian/Ubuntu](#debianubuntu) - - [CentOS](#centos) + - [RHEL/CentOS/AlmaLinux](#RHEL/CentOS/AlmaLinux) - [FreeBSD](#freebsd) - [Alpine Linux / busybox](#alpine-Linux-busybox-based-distributions) - [Other OSes](#other-oses) @@ -44,7 +44,7 @@ Enable sanoid timer: sudo systemctl enable --now sanoid.timer ``` -## CentOS/RHEL +## RHEL/CentOS/AlmaLinux Install prerequisite software: From 19f8877dcb1d17637c86c1556e5f86ff7863713b Mon Sep 17 00:00:00 2001 From: Alex Garel Date: Tue, 5 Nov 2024 11:20:31 +0100 Subject: [PATCH 119/130] docs: clarify that scripts are run only if autosnap or autoprune are set --- README.md | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/README.md b/README.md index f2ce10c..b46e36f 100644 --- a/README.md +++ b/README.md @@ -126,7 +126,9 @@ For more full details on sanoid.conf settings see [Wiki page](https://github.com ### Sanoid script hooks -There are three script types which can optionally be executed at various stages in the lifecycle of a snapshot: +There are three script types which can optionally be executed at various stages in the lifecycle of a snapshot. + +**Note** that snapshots related script are triggered only if you have `autosnap = yes` and pruning scripts are triggered only if you have `autoprune = yes`. #### `pre_snapshot_script` From f4e425d6827ae01c271f4738d2fba570f24de072 Mon Sep 17 00:00:00 2001 From: Christopher Morrow Date: Wed, 27 Nov 2024 18:24:08 -0800 Subject: [PATCH 120/130] Add Install instructions for EL9 systems Added to INSTALL.md the command to add the `crb` repo for Rocky Linux 9 and AlmaLinux 9. Necessary for perl-Capture-Tiny package. 
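One way to confirm the module is visible to the system perl once `crb` is enabled (an illustrative check only, not part of this patch):

    perl -MCapture::Tiny -e 'print "Capture::Tiny $Capture::Tiny::VERSION\n"'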
--- INSTALL.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/INSTALL.md b/INSTALL.md index 12b6734..6aa4347 100644 --- a/INSTALL.md +++ b/INSTALL.md @@ -59,6 +59,8 @@ sudo yum config-manager --set-enabled powertools sudo dnf config-manager --set-enabled powertools # On RHEL, instead of PowerTools, we need to enable the CodeReady Builder repo: sudo subscription-manager repos --enable=codeready-builder-for-rhel-8-x86_64-rpms +# For Rocky Linux 9 or AlmaLinux 9 you need the CodeReady Builder repo, and it is labelled `crb` +sudo dnf config-manager --set-enabled crb # Install the packages that Sanoid depends on: sudo yum install -y perl-Config-IniFiles perl-Data-Dumper perl-Capture-Tiny perl-Getopt-Long lzop mbuffer mhash pv # The repositories above should contain all the relevant Perl modules, but if you From 4a9db9541d7a6df23aad363c6cf25cbe8341c962 Mon Sep 17 00:00:00 2001 From: Christoph Klaffl Date: Tue, 3 Dec 2024 19:32:47 +0100 Subject: [PATCH 121/130] fix warning in edge cases ("Use of uninitialized value in numeric comparison") --- syncoid | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/syncoid b/syncoid index 79ad45f..f3cac71 100755 --- a/syncoid +++ b/syncoid @@ -1535,7 +1535,12 @@ sub sortsnapshots { if (defined $snaps->{'source'}{$left}{'createtxg'} && defined $snaps->{'source'}{$right}{'createtxg'}) { return $snaps->{'source'}{$left}{'createtxg'} <=> $snaps->{'source'}{$right}{'createtxg'}; } - return $snaps->{'source'}{$left}{'creation'} <=> $snaps->{'source'}{$right}{'creation'}; + + if (defined $snaps->{'source'}{$left}{'creation'} && defined $snaps->{'source'}{$right}{'creation'}) { + return $snaps->{'source'}{$left}{'creation'} <=> $snaps->{'source'}{$right}{'creation'}; + } + + return 0; } sub getoldestsnapshot { From 7062b7347eea637e68b915bf8a65bb0a85a67f88 Mon Sep 17 00:00:00 2001 From: Christoph Klaffl Date: Fri, 24 Jan 2025 14:01:59 +0100 Subject: [PATCH 122/130] blacklist encryption property from preserving --- syncoid | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/syncoid b/syncoid index 79ad45f..9dce246 100755 --- a/syncoid +++ b/syncoid @@ -1501,7 +1501,8 @@ sub getlocalzfsvalues { "receive_resume_token", "redact_snaps", "referenced", "refcompressratio", "snapshot_count", "type", "used", "usedbychildren", "usedbydataset", "usedbyrefreservation", "usedbysnapshots", "userrefs", "snapshots_changed", "volblocksize", "written", - "version", "volsize", "casesensitivity", "normalization", "utf8only" + "version", "volsize", "casesensitivity", "normalization", "utf8only", + "encryption" ); my %blacklisthash = map {$_ => 1} @blacklist; From 6beef5fee67deb2c17f160244953bd5a1983e1ad Mon Sep 17 00:00:00 2001 From: Jim Salter Date: Tue, 11 Feb 2025 19:54:53 -0500 Subject: [PATCH 123/130] Update INSTALL.md add cd /tmp to Debian and Ubuntu package build instructions, to avoid newbies getting confused by limited permissions under eg /root add reference to dependency on Capture::Tiny in sanoid and syncoid --- INSTALL.md | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/INSTALL.md b/INSTALL.md index 12b6734..2133f3f 100644 --- a/INSTALL.md +++ b/INSTALL.md @@ -26,9 +26,10 @@ apt install debhelper libcapture-tiny-perl libconfig-inifiles-perl pv lzop mbuff ``` -Clone this repo, build the debian package and install it (alternatively you can skip the package and do it manually like described below for CentOS): +Clone this repo under /tmp (to make sure the apt user has access to the unpacked clone), build the debian 
package and install it (alternatively you can skip the package and do it manually like described below for CentOS): ```bash +cd /tmp git clone https://github.com/jimsalterjrs/sanoid.git cd sanoid # checkout latest stable release or stay on master for bleeding edge stuff (but expect bugs!) @@ -73,6 +74,7 @@ cpan # answer the questions and paste the following lines: Clone this repo, then put the executables and config files into the appropriate directories: ```bash +cd /tmp # Download the repo as root to avoid changing permissions later sudo git clone https://github.com/jimsalterjrs/sanoid.git cd sanoid @@ -225,9 +227,9 @@ sudo launchctl load /Library/LaunchDaemons/net.openoid.Sanoid.plist ## Other OSes -**Sanoid** depends on the Perl module Config::IniFiles and will not operate without it. Config::IniFiles may be installed from CPAN, though the project strongly recommends using your distribution's repositories instead. +**Sanoid** depends on the Perl modules Config::IniFiles and Capture::Tiny and will not operate without them. These modules may be installed from CPAN, though the project strongly recommends using your distribution's repositories instead. -**Syncoid** depends on ssh, pv, gzip, lzop, and mbuffer. It can run with reduced functionality in the absence of any or all of the above. SSH is only required for remote synchronization. On newer FreeBSD and Ubuntu Xenial chacha20-poly1305@openssh.com, on other distributions arcfour crypto is the default for SSH transport since v1.4.6. Syncoid runs will fail if one of them is not available on either end of the transport. +**Syncoid** depends on ssh, pv, gzip, lzop, and mbuffer as well as sharing sanoid's dependency on Capture::Tiny. Capture::Tiny is mandatory, but syncoid can function with reduced functionality without any or all of the command-line dependencies. SSH is only required for remote synchronization. On newer FreeBSD and Ubuntu Xenial chacha20-poly1305@openssh.com, on other distributions arcfour crypto is the default for SSH transport since v1.4.6. Syncoid runs will fail if one of them is not available on either end of the transport. ### General outline for installation From 749490830f74def5dddd32244fa19b39b4e61de1 Mon Sep 17 00:00:00 2001 From: Jernej Jakob Date: Mon, 31 Mar 2025 14:54:46 +0200 Subject: [PATCH 124/130] syncoid: fix directtimeout in directmbuffer mode If --insecure-direct-connection contained 4 parts (including the ',mbuffer' at the end), the 3rd part (timeout) was silently ignored and left at the default 60s. Do not ignore the timeout part even in directmbuffer mode. 
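For illustration, assuming placeholder addresses, ports, and dataset names, a four-part value of the form

    syncoid --insecure-direct-connection=192.0.2.10:9000,0.0.0.0:9000,30,mbuffer pool/data root@target:pool/data

now honors the third part (30) as the direct connection timeout in seconds even though the trailing ',mbuffer' part is present.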
--- syncoid | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/syncoid b/syncoid index 79ad45f..91bc277 100755 --- a/syncoid +++ b/syncoid @@ -178,7 +178,7 @@ if (length $args{'insecure-direct-connection'}) { $directlisten = $args{'insecure-direct-connection'}; } - if (scalar @parts == 3) { + if (scalar @parts >= 3) { $directtimeout = $parts[2]; } From 27fc1794907c95570cd0c8d3d6cd11301d5e5a53 Mon Sep 17 00:00:00 2001 From: Christoph Klaffl Date: Thu, 5 Jun 2025 21:59:30 +0200 Subject: [PATCH 125/130] implemented adding of taken snapshot to the cache file and a new parameter for setting a custom cache expire time --- README.md | 4 +++ sanoid | 86 +++++++++++++++++++++++++++++++++++++++++++++++++++++-- 2 files changed, 87 insertions(+), 3 deletions(-) diff --git a/README.md b/README.md index f2ce10c..fadffb4 100644 --- a/README.md +++ b/README.md @@ -100,6 +100,10 @@ For more full details on sanoid.conf settings see [Wiki page](https://github.com This clears out sanoid's zfs snapshot listing cache. This is normally not needed. ++ --cache-ttl=SECONDS + + Set custom cache expire time in seconds (default: 20 minutes). + + --version This prints the version number, and exits. diff --git a/sanoid b/sanoid index c3242a1..57a942f 100755 --- a/sanoid +++ b/sanoid @@ -12,6 +12,7 @@ use warnings; use Config::IniFiles; # read samba-style conf file use Data::Dumper; # debugging - print contents of hash use File::Path 'make_path'; +use File::Copy; use Getopt::Long qw(:config auto_version auto_help); use Pod::Usage; # pod2usage use Time::Local; # to parse dates in reverse @@ -26,7 +27,7 @@ GetOptions(\%args, "verbose", "debug", "cron", "readonly", "quiet", "configdir=s", "cache-dir=s", "run-dir=s", "monitor-health", "force-update", "monitor-snapshots", "take-snapshots", "prune-snapshots", "force-prune", - "monitor-capacity" + "monitor-capacity", "cache-ttl=i" ) or pod2usage(2); # If only config directory (or nothing) has been specified, default to --cron --verbose @@ -54,6 +55,13 @@ make_path($run_dir); my $cacheTTL = 1200; # 20 minutes +if ($args{'cache-ttl'}) { + if ($args{'cache-ttl'} < 0) { + die "ERROR: cache-ttl needs to be positive!\n"; + } + $cacheTTL = $args{'cache-ttl'}; +} + # Allow a much older snapshot cache file than default if _only_ "--monitor-*" action commands are given # (ignore "--verbose", "--configdir" etc) if ( @@ -67,6 +75,7 @@ if ( || $args{'take-snapshots'} || $args{'prune-snapshots'} || $args{'force-prune'} + || $args{'cache-ttl'} ) ) { # The command combination above must not assert true for any command that takes or prunes snapshots @@ -86,6 +95,7 @@ my %config = init($conf_file,$default_conf_file); my %pruned; my %capacitycache; +my %taken; my %snaps; my %snapsbytype; @@ -592,6 +602,7 @@ sub take_snapshots { } if (%newsnapsgroup) { + $forcecacheupdate = 0; while ((my $path, my $snapData) = each(%newsnapsgroup)) { my $recursiveFlag = $snapData->{recursive}; my $dstHandling = $snapData->{handleDst}; @@ -662,9 +673,17 @@ sub take_snapshots { } }; + if ($exit == 0) { + $taken{$snap} = { + 'time' => time(), + 'recursive' => $recursiveFlag + }; + } + $exit == 0 or do { if ($dstHandling) { if ($stderr =~ /already exists/) { + $forcecacheupdate = 1; $exit = 0; $snap =~ s/_([a-z]+)$/dst_$1/g; if ($args{'verbose'}) { print "taking dst snapshot $snap$extraMessage\n"; } @@ -714,8 +733,8 @@ sub take_snapshots { } } } - $forcecacheupdate = 1; - %snaps = getsnaps(%config,$cacheTTL,$forcecacheupdate); + addcachedsnapshots(); + %snaps =
getsnaps(\%config,$cacheTTL,$forcecacheupdate); } } @@ -1740,6 +1759,11 @@ sub removecachedsnapshots { print FH $snapline unless ( exists($pruned{$snap}) ); } close FH; + + # preserve mtime of cache for expire check + my ($dev, $ino, $mode, $nlink, $uid, $gid, $rdev, $size, $atime, $mtime, $ctime, $blksize, $blocks) = stat($cache); + utime($atime, $mtime, "$cache.tmp"); + rename("$cache.tmp", "$cache") or die "Could not rename to $cache!\n"; removelock('sanoid_cacheupdate'); @@ -1753,6 +1777,61 @@ sub removecachedsnapshots { #######################################################################################################################3 #######################################################################################################################3 +sub addcachedsnapshots { + if (not %taken) { + return; + } + + my $unlocked = checklock('sanoid_cacheupdate'); + + # wait until we can get a lock to do our cache changes + while (not $unlocked) { + if ($args{'verbose'}) { print "INFO: waiting for cache update lock held by another sanoid process.\n"; } + sleep(10); + $unlocked = checklock('sanoid_cacheupdate'); + } + + writelock('sanoid_cacheupdate'); + + if ($args{'verbose'}) { + print "INFO: adding taken snapshots to cache.\n"; + } + + copy($cache, "$cache.tmp") or die "Could not copy to $cache.tmp!\n"; + + open FH, ">> $cache.tmp" or die "Could not write to $cache.tmp!\n"; + while((my $snap, my $details) = each(%taken)) { + my @parts = split("@", $snap, 2); + + my $suffix = $parts[1] . "\tcreation\t" . $details->{time} . "\t-"; + my $dataset = $parts[0]; + + print FH "${dataset}\@${suffix}\n"; + + if ($details->{recursive}) { + my @datasets = getchilddatasets($dataset); + + foreach my $dataset(@datasets) { + print FH "${dataset}\@${suffix}\n"; + } + } + } + + close FH; + + # preserve mtime of cache for expire check + my ($dev, $ino, $mode, $nlink, $uid, $gid, $rdev, $size, $atime, $mtime, $ctime, $blksize, $blocks) = stat($cache); + utime($atime, $mtime, "$cache.tmp"); + + rename("$cache.tmp", "$cache") or die "Could not rename to $cache!\n"; + + removelock('sanoid_cacheupdate'); +} + +#######################################################################################################################3 +#######################################################################################################################3 +#######################################################################################################################3 + sub runscript { my $key=shift; my $dataset=shift; @@ -1851,6 +1930,7 @@ Options: --take-snapshots Creates snapshots as specified in sanoid.conf --prune-snapshots Purges expired snapshots as specified in sanoid.conf --force-prune Purges expired snapshots even if a send/recv is in progress + --cache-ttl=SECONDS Set custom cache expire time in seconds (default: 20 minutes) --help Prints this helptext --version Prints the version number From b794da6f145fdeb8ff1e48d267629cc51193470c Mon Sep 17 00:00:00 2001 From: Christoph Klaffl Date: Thu, 5 Jun 2025 23:23:30 +0200 Subject: [PATCH 126/130] Revert "Merge pull request #818 from Deltik/fix/815" This reverts commit 7c225a1d7b2350817468c97d0e33a599b9d02344, reversing changes made to acdc0938c9cf33878787244d194e9379ee8d5bb1. 
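With the revert in place, snapshot ordering again relies on the second-resolution 'creation' property (disambiguated by a three-digit counter for same-second snapshots) rather than on createtxg, and the restored getsnaps() parses the two-lines-per-snapshot output of a command of this shape (dataset name is a placeholder):

    zfs get -Hpd 1 -t snapshot guid,creation pool/dataset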
--- syncoid | 304 +++++++++++------- tests/run-tests.sh | 2 +- .../011_sync_out-of-order_snapshots/run.sh | 50 --- tests/syncoid/run-tests.sh | 2 +- 4 files changed, 186 insertions(+), 172 deletions(-) delete mode 100755 tests/syncoid/011_sync_out-of-order_snapshots/run.sh diff --git a/syncoid b/syncoid index 5dc6436..bd041ae 100755 --- a/syncoid +++ b/syncoid @@ -415,10 +415,10 @@ sub syncdataset { if (!defined($receivetoken)) { # build hashes of the snaps on the source and target filesystems. - %snaps = getsnaps('source',$sourcehost,$sourcefs,$sourceisroot,0); + %snaps = getsnaps('source',$sourcehost,$sourcefs,$sourceisroot); if ($targetexists) { - my %targetsnaps = getsnaps('target',$targethost,$targetfs,$targetisroot,0); + my %targetsnaps = getsnaps('target',$targethost,$targetfs,$targetisroot); my %sourcesnaps = %snaps; %snaps = (%sourcesnaps, %targetsnaps); } @@ -438,7 +438,7 @@ sub syncdataset { # Don't send the sync snap if it's filtered out by --exclude-snaps or # --include-snaps if (!snapisincluded($newsyncsnap)) { - $newsyncsnap = getnewestsnapshot(\%snaps); + $newsyncsnap = getnewestsnapshot($sourcehost,$sourcefs,$sourceisroot); if ($newsyncsnap eq 0) { writelog('WARN', "CRITICAL: no snapshots exist on source $sourcefs, and you asked for --no-sync-snap."); if ($exitcode < 1) { $exitcode = 1; } @@ -447,7 +447,7 @@ sub syncdataset { } } else { # we don't want sync snapshots created, so use the newest snapshot we can find. - $newsyncsnap = getnewestsnapshot(\%snaps); + $newsyncsnap = getnewestsnapshot($sourcehost,$sourcefs,$sourceisroot); if ($newsyncsnap eq 0) { writelog('WARN', "CRITICAL: no snapshots exist on source $sourcefs, and you asked for --no-sync-snap."); if ($exitcode < 1) { $exitcode = 1; } @@ -575,7 +575,8 @@ sub syncdataset { my $targetsize = getzfsvalue($targethost,$targetfs,$targetisroot,'-p used'); - my %bookmark = (); + my $bookmark = 0; + my $bookmarkcreation = 0; $matchingsnap = getmatchingsnapshot($sourcefs, $targetfs, \%snaps); if (! $matchingsnap) { @@ -583,18 +584,19 @@ sub syncdataset { my %bookmarks = getbookmarks($sourcehost,$sourcefs,$sourceisroot); # check for matching guid of source bookmark and target snapshot (oldest first) - foreach my $snap ( sort { sortsnapshots(\%snaps, $b, $a) } keys %{ $snaps{'target'} }) { + foreach my $snap ( sort { $snaps{'target'}{$b}{'creation'}<=>$snaps{'target'}{$a}{'creation'} } keys %{ $snaps{'target'} }) { my $guid = $snaps{'target'}{$snap}{'guid'}; if (defined $bookmarks{$guid}) { # found a match - %bookmark = %{ $bookmarks{$guid} }; + $bookmark = $bookmarks{$guid}{'name'}; + $bookmarkcreation = $bookmarks{$guid}{'creation'}; $matchingsnap = $snap; last; } } - if (! %bookmark) { + if (! 
$bookmark) { # force delete is not possible for the root dataset if ($args{'force-delete'} && index($targetfs, '/') != -1) { writelog('INFO', "Removing $targetfs because no matching snapshots were found"); @@ -667,18 +669,15 @@ sub syncdataset { my $nextsnapshot = 0; - if (%bookmark) { + if ($bookmark) { + my $bookmarkescaped = escapeshellparam($bookmark); if (!defined $args{'no-stream'}) { # if intermediate snapshots are needed we need to find the next oldest snapshot, # do an replication to it and replicate as always from oldest to newest # because bookmark sends doesn't support intermediates directly - foreach my $snap ( sort { sortsnapshots(\%snaps, $a, $b) } keys %{ $snaps{'source'} }) { - my $comparisonkey = 'creation'; - if (defined $snaps{'source'}{$snap}{'createtxg'} && defined $bookmark{'createtxg'}) { - $comparisonkey = 'createtxg'; - } - if ($snaps{'source'}{$snap}{$comparisonkey} >= $bookmark{$comparisonkey}) { + foreach my $snap ( sort { $snaps{'source'}{$a}{'creation'}<=>$snaps{'source'}{$b}{'creation'} } keys %{ $snaps{'source'} }) { + if ($snaps{'source'}{$snap}{'creation'} >= $bookmarkcreation) { $nextsnapshot = $snap; last; } @@ -686,13 +685,13 @@ sub syncdataset { } if ($nextsnapshot) { - ($exit, $stdout) = syncbookmark($sourcehost, $sourcefs, $targethost, $targetfs, $bookmark{'name'}, $nextsnapshot); + ($exit, $stdout) = syncbookmark($sourcehost, $sourcefs, $targethost, $targetfs, $bookmark, $nextsnapshot); $exit == 0 or do { if (!$resume && $stdout =~ /\Qcontains partially-complete state\E/) { writelog('WARN', "resetting partially receive state"); resetreceivestate($targethost,$targetfs,$targetisroot); - (my $ret) = syncbookmark($sourcehost, $sourcefs, $targethost, $targetfs, $bookmark{'name'}, $nextsnapshot); + (my $ret) = syncbookmark($sourcehost, $sourcefs, $targethost, $targetfs, $bookmark, $nextsnapshot); $ret == 0 or do { if ($exitcode < 2) { $exitcode = 2; } return 0; @@ -706,13 +705,13 @@ sub syncdataset { $matchingsnap = $nextsnapshot; $matchingsnapescaped = escapeshellparam($matchingsnap); } else { - ($exit, $stdout) = syncbookmark($sourcehost, $sourcefs, $targethost, $targetfs, $bookmark{'name'}, $newsyncsnap); + ($exit, $stdout) = syncbookmark($sourcehost, $sourcefs, $targethost, $targetfs, $bookmark, $newsyncsnap); $exit == 0 or do { if (!$resume && $stdout =~ /\Qcontains partially-complete state\E/) { writelog('WARN', "resetting partially receive state"); resetreceivestate($targethost,$targetfs,$targetisroot); - (my $ret) = syncbookmark($sourcehost, $sourcefs, $targethost, $targetfs, $bookmark{'name'}, $newsyncsnap); + (my $ret) = syncbookmark($sourcehost, $sourcefs, $targethost, $targetfs, $bookmark, $newsyncsnap); $ret == 0 or do { if ($exitcode < 2) { $exitcode = 2; } return 0; @@ -727,7 +726,7 @@ sub syncdataset { # do a normal replication if bookmarks aren't used or if previous # bookmark replication was only done to the next oldest snapshot - if (!%bookmark || $nextsnapshot) { + if (!$bookmark || $nextsnapshot) { if ($matchingsnap eq $newsyncsnap) { # edge case: bookmark replication used the latest snapshot return 0; @@ -854,19 +853,9 @@ sub syncdataset { if (defined $args{'delete-target-snapshots'}) { # Find the snapshots that exist on the target, filter with # those that exist on the source. Remaining are the snapshots - # that are only on the target. Then sort to remove the oldest - # snapshots first. 
- - # regather snapshots on source and target - %snaps = getsnaps('source',$sourcehost,$sourcefs,$sourceisroot,0); - - if ($targetexists) { - my %targetsnaps = getsnaps('target',$targethost,$targetfs,$targetisroot,0); - my %sourcesnaps = %snaps; - %snaps = (%sourcesnaps, %targetsnaps); - } - - my @to_delete = sort { sortsnapshots(\%snaps, $a, $b) } grep {!exists $snaps{'source'}{$_}} keys %{ $snaps{'target'} }; + # that are only on the target. Then sort by creation date, as + # to remove the oldest snapshots first. + my @to_delete = sort { $snaps{'target'}{$a}{'creation'}<=>$snaps{'target'}{$b}{'creation'} } grep {!exists $snaps{'source'}{$_}} keys %{ $snaps{'target'} }; while (@to_delete) { # Create batch of snapshots to remove my $snaps = join ',', splice(@to_delete, 0, 50); @@ -1531,22 +1520,9 @@ sub readablebytes { return $disp; } -sub sortsnapshots { - my ($snaps, $left, $right) = @_; - if (defined $snaps->{'source'}{$left}{'createtxg'} && defined $snaps->{'source'}{$right}{'createtxg'}) { - return $snaps->{'source'}{$left}{'createtxg'} <=> $snaps->{'source'}{$right}{'createtxg'}; - } - - if (defined $snaps->{'source'}{$left}{'creation'} && defined $snaps->{'source'}{$right}{'creation'}) { - return $snaps->{'source'}{$left}{'creation'} <=> $snaps->{'source'}{$right}{'creation'}; - } - - return 0; -} - sub getoldestsnapshot { my $snaps = shift; - foreach my $snap (sort { sortsnapshots($snaps, $a, $b) } keys %{ $snaps{'source'} }) { + foreach my $snap ( sort { $snaps{'source'}{$a}{'creation'}<=>$snaps{'source'}{$b}{'creation'} } keys %{ $snaps{'source'} }) { # return on first snap found - it's the oldest return $snap; } @@ -1560,7 +1536,7 @@ sub getoldestsnapshot { sub getnewestsnapshot { my $snaps = shift; - foreach my $snap (sort { sortsnapshots($snaps, $b, $a) } keys %{ $snaps{'source'} }) { + foreach my $snap ( sort { $snaps{'source'}{$b}{'creation'}<=>$snaps{'source'}{$a}{'creation'} } keys %{ $snaps{'source'} }) { # return on first snap found - it's the newest writelog('DEBUG', "NEWEST SNAPSHOT: $snap"); return $snap; @@ -1739,7 +1715,7 @@ sub pruneoldsyncsnaps { sub getmatchingsnapshot { my ($sourcefs, $targetfs, $snaps) = @_; - foreach my $snap ( sort { sortsnapshots($snaps, $b, $a) } keys %{ $snaps{'source'} }) { + foreach my $snap ( sort { $snaps{'source'}{$b}{'creation'}<=>$snaps{'source'}{$a}{'creation'} } keys %{ $snaps{'source'} }) { if (defined $snaps{'target'}{$snap}) { if ($snaps{'source'}{$snap}{'guid'} == $snaps{'target'}{$snap}{'guid'}) { return $snap; @@ -1874,8 +1850,88 @@ sub dumphash() { writelog('INFO', Dumper($hash)); } -sub getsnaps { - my ($type,$rhost,$fs,$isroot,$use_fallback,%snaps) = @_; +sub getsnaps() { + my ($type,$rhost,$fs,$isroot,%snaps) = @_; + my $mysudocmd; + my $fsescaped = escapeshellparam($fs); + if ($isroot) { $mysudocmd = ''; } else { $mysudocmd = $sudocmd; } + + my $rhostOriginal = $rhost; + + if ($rhost ne '') { + $rhost = "$sshcmd $rhost"; + # double escaping needed + $fsescaped = escapeshellparam($fsescaped); + } + + my $getsnapcmd = "$rhost $mysudocmd $zfscmd get -Hpd 1 -t snapshot guid,creation $fsescaped"; + if ($debug) { + $getsnapcmd = "$getsnapcmd |"; + writelog('DEBUG', "getting list of snapshots on $fs using $getsnapcmd..."); + } else { + $getsnapcmd = "$getsnapcmd 2>/dev/null |"; + } + open FH, $getsnapcmd; + my @rawsnaps = ; + close FH or do { + # fallback (solaris for example doesn't support the -t option) + return getsnapsfallback($type,$rhostOriginal,$fs,$isroot,%snaps); + }; + + # this is a little obnoxious. 
get guid,creation returns guid,creation on two separate lines + # as though each were an entirely separate get command. + + my %creationtimes=(); + + foreach my $line (@rawsnaps) { + $line =~ /\Q$fs\E\@(\S*)/; + my $snapname = $1; + + if (!snapisincluded($snapname)) { next; } + + # only import snap guids from the specified filesystem + if ($line =~ /\Q$fs\E\@.*\tguid/) { + chomp $line; + my $guid = $line; + $guid =~ s/^.*\tguid\t*(\d*).*/$1/; + my $snap = $line; + $snap =~ s/^.*\@(.*)\tguid.*$/$1/; + $snaps{$type}{$snap}{'guid'}=$guid; + } + # only import snap creations from the specified filesystem + elsif ($line =~ /\Q$fs\E\@.*\tcreation/) { + chomp $line; + my $creation = $line; + $creation =~ s/^.*\tcreation\t*(\d*).*/$1/; + my $snap = $line; + $snap =~ s/^.*\@(.*)\tcreation.*$/$1/; + + # the accuracy of the creation timestamp is only for a second, but + # snapshots in the same second are highly likely. The list command + # has an ordered output so we append another three digit running number + # to the creation timestamp and make sure those are ordered correctly + # for snapshot with the same creation timestamp + my $counter = 0; + my $creationsuffix; + while ($counter < 999) { + $creationsuffix = sprintf("%s%03d", $creation, $counter); + if (!defined $creationtimes{$creationsuffix}) { + $creationtimes{$creationsuffix} = 1; + last; + } + $counter += 1; + } + + $snaps{$type}{$snap}{'creation'}=$creationsuffix; + } + } + + return %snaps; +} + +sub getsnapsfallback() { + # fallback (solaris for example doesn't support the -t option) + my ($type,$rhost,$fs,$isroot,%snaps) = @_; my $mysudocmd; my $fsescaped = escapeshellparam($fs); if ($isroot) { $mysudocmd = ''; } else { $mysudocmd = $sudocmd; } @@ -1886,67 +1942,73 @@ sub getsnaps { $fsescaped = escapeshellparam($fsescaped); } - my $getsnapcmd = $use_fallback - ? 
"$rhost $mysudocmd $zfscmd get -Hpd 1 all $fsescaped" - : "$rhost $mysudocmd $zfscmd get -Hpd 1 -t snapshot all $fsescaped"; - - if ($debug) { - $getsnapcmd = "$getsnapcmd |"; - writelog('DEBUG', "getting list of snapshots on $fs using $getsnapcmd..."); - } else { - $getsnapcmd = "$getsnapcmd 2>/dev/null |"; - } + my $getsnapcmd = "$rhost $mysudocmd $zfscmd get -Hpd 1 type,guid,creation $fsescaped |"; + writelog('WARN', "snapshot listing failed, trying fallback command"); + writelog('DEBUG', "FALLBACK, getting list of snapshots on $fs using $getsnapcmd..."); open FH, $getsnapcmd; my @rawsnaps = ; - close FH or do { - if (!$use_fallback) { - writelog('WARN', "snapshot listing failed, trying fallback command"); - return getsnaps($type, $rhost, $fs, $isroot, 1, %snaps); + close FH or die "CRITICAL ERROR: snapshots couldn't be listed for $fs (exit code $?)"; + + my %creationtimes=(); + + my $state = 0; + foreach my $line (@rawsnaps) { + if ($state < 0) { + $state++; + next; } - die "CRITICAL ERROR: snapshots couldn't be listed for $fs (exit code $?)"; - }; - my %snap_data; - my %creationtimes; + if ($state eq 0) { + if ($line !~ /\Q$fs\E\@.*\ttype\s*snapshot/) { + # skip non snapshot type object + $state = -2; + next; + } + } elsif ($state eq 1) { + if ($line !~ /\Q$fs\E\@.*\tguid/) { + die "CRITICAL ERROR: snapshots couldn't be listed for $fs (guid parser error)"; + } - for my $line (@rawsnaps) { - chomp $line; - my ($dataset, $property, $value) = split /\t/, $line; - die "CRITICAL ERROR: Unexpected line format in $line" unless defined $value; + chomp $line; + my $guid = $line; + $guid =~ s/^.*\tguid\t*(\d*).*/$1/; + my $snap = $line; + $snap =~ s/^.*\@(.*)\tguid.*$/$1/; + if (!snapisincluded($snap)) { next; } + $snaps{$type}{$snap}{'guid'}=$guid; + } elsif ($state eq 2) { + if ($line !~ /\Q$fs\E\@.*\tcreation/) { + die "CRITICAL ERROR: snapshots couldn't be listed for $fs (creation parser error)"; + } - my (undef, $snap) = split /@/, $dataset; - die "CRITICAL ERROR: Unexpected dataset format in $line" unless $snap; + chomp $line; + my $creation = $line; + $creation =~ s/^.*\tcreation\t*(\d*).*/$1/; + my $snap = $line; + $snap =~ s/^.*\@(.*)\tcreation.*$/$1/; + if (!snapisincluded($snap)) { next; } - if (!snapisincluded($snap)) { next; } - - $snap_data{$snap}{$property} = $value; - - # the accuracy of the creation timestamp is only for a second, but - # snapshots in the same second are highly likely. The list command - # has an ordered output so we append another three digit running number - # to the creation timestamp and make sure those are ordered correctly - # for snapshot with the same creation timestamp - if ($property eq 'creation') { + # the accuracy of the creation timestamp is only for a second, but + # snapshots in the same second are highly likely. 
+            # has an ordered output, so we append a three-digit running number
+            # to the creation timestamp to keep snapshots with the same creation
+            # timestamp ordered correctly
             my $counter = 0;
             my $creationsuffix;
             while ($counter < 999) {
-                $creationsuffix = sprintf("%s%03d", $value, $counter);
+                $creationsuffix = sprintf("%s%03d", $creation, $counter);
                 if (!defined $creationtimes{$creationsuffix}) {
                     $creationtimes{$creationsuffix} = 1;
                     last;
                 }
                 $counter += 1;
             }
 
-            $snap_data{$snap}{'creation'} = $creationsuffix;
-        }
-    }
-
-    for my $snap (keys %snap_data) {
-        if (!$use_fallback || $snap_data{$snap}{'type'} eq 'snapshot') {
-            $snaps{$type}{$snap}{'guid'} = $snap_data{$snap}{'guid'};
-            $snaps{$type}{$snap}{'createtxg'} = $snap_data{$snap}{'createtxg'};
-            $snaps{$type}{$snap}{'creation'} = $snap_data{$snap}{'creation'};
+            $snaps{$type}{$snap}{'creation'}=$creationsuffix;
+            $state = -1;
         }
+
+        $state++;
     }
 
     return %snaps;
 }
 
@@ -1965,7 +2027,7 @@ sub getbookmarks() {
     }
 
     my $error = 0;
-    my $getbookmarkcmd = "$rhost $mysudocmd $zfscmd get -Hpd 1 -t bookmark all $fsescaped 2>&1 |";
+    my $getbookmarkcmd = "$rhost $mysudocmd $zfscmd get -Hpd 1 -t bookmark guid,creation $fsescaped 2>&1 |";
     writelog('DEBUG', "getting list of bookmarks on $fs using $getbookmarkcmd...");
     open FH, $getbookmarkcmd;
     my @rawbookmarks = <FH>;
@@ -1980,44 +2042,46 @@
         die "CRITICAL ERROR: bookmarks couldn't be listed for $fs (exit code $?)";
     }
 
-    my %bookmark_data;
-    my %creationtimes;
+    # this is a little obnoxious. get guid,creation returns guid,creation on two separate lines
+    # as though each were an entirely separate get command.
 
-    for my $line (@rawbookmarks) {
-        chomp $line;
-        my ($dataset, $property, $value) = split /\t/, $line;
-        die "CRITICAL ERROR: Unexpected line format in $line" unless defined $value;
+    my $lastguid;
+    my %creationtimes=();
 
-        my (undef, $bookmark) = split /#/, $dataset;
-        die "CRITICAL ERROR: Unexpected dataset format in $line" unless $bookmark;
+    foreach my $line (@rawbookmarks) {
+        # only import bookmark guids, creation from the specified filesystem
+        if ($line =~ /\Q$fs\E\#.*\tguid/) {
+            chomp $line;
+            $lastguid = $line;
+            $lastguid =~ s/^.*\tguid\t*(\d*).*/$1/;
+            my $bookmark = $line;
+            $bookmark =~ s/^.*\#(.*)\tguid.*$/$1/;
+            $bookmarks{$lastguid}{'name'}=$bookmark;
+        } elsif ($line =~ /\Q$fs\E\#.*\tcreation/) {
+            chomp $line;
+            my $creation = $line;
+            $creation =~ s/^.*\tcreation\t*(\d*).*/$1/;
+            my $bookmark = $line;
+            $bookmark =~ s/^.*\#(.*)\tcreation.*$/$1/;
 
-        $bookmark_data{$bookmark}{$property} = $value;
-
-        # the accuracy of the creation timestamp is only for a second, but
-        # bookmarks in the same second are possible. The list command
-        # has an ordered output so we append another three digit running number
-        # to the creation timestamp and make sure those are ordered correctly
-        # for bookmarks with the same creation timestamp
-        if ($property eq 'creation') {
+            # the creation timestamp is only accurate to one second, but
+            # bookmarks in the same second are possible. The list command
+            # has an ordered output, so we append a three-digit running number
+            # to the creation timestamp to keep bookmarks with the same creation
+            # timestamp ordered correctly
             my $counter = 0;
             my $creationsuffix;
             while ($counter < 999) {
-                $creationsuffix = sprintf("%s%03d", $value, $counter);
+                $creationsuffix = sprintf("%s%03d", $creation, $counter);
                 if (!defined $creationtimes{$creationsuffix}) {
                     $creationtimes{$creationsuffix} = 1;
                     last;
                 }
                 $counter += 1;
             }
 
-            $bookmark_data{$bookmark}{'creation'} = $creationsuffix;
-        }
-    }
-
-    for my $bookmark (keys %bookmark_data) {
-        my $guid = $bookmark_data{$bookmark}{'guid'};
-        $bookmarks{$guid}{'name'} = $bookmark;
-        $bookmarks{$guid}{'creation'} = $bookmark_data{$bookmark}{'creation'};
-        $bookmarks{$guid}{'createtxg'} = $bookmark_data{$bookmark}{'createtxg'};
+            $bookmarks{$lastguid}{'creation'}=$creationsuffix;
+        }
     }
 
     return %bookmarks;
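Both parsers above lean on the same three-digit suffix trick, which is easiest to see with concrete numbers. A standalone Perl sketch (timestamps invented):

    use strict;
    use warnings;

    # three snapshots within second 1700000000, one a second later
    my %creationtimes;
    foreach my $creation (1700000000, 1700000000, 1700000000, 1700000001) {
        my $creationsuffix;
        for (my $counter = 0; $counter < 999; $counter++) {
            $creationsuffix = sprintf("%s%03d", $creation, $counter);
            # claim the first suffix not already taken for this second
            if (!defined $creationtimes{$creationsuffix}) {
                $creationtimes{$creationsuffix} = 1;
                last;
            }
        }
        print "$creation -> $creationsuffix\n";
    }

This prints 1700000000000, 1700000000001, 1700000000002, 1700000001000: a plain numeric sort of the suffixed values reproduces the original listing order.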
diff --git a/tests/run-tests.sh b/tests/run-tests.sh
index ec14721..34813d1 100755
--- a/tests/run-tests.sh
+++ b/tests/run-tests.sh
@@ -2,7 +2,7 @@
 
 # run's all the available tests
 
-for test in $(find . -mindepth 1 -maxdepth 1 -type d -printf "%P\n" | sort -g); do
+for test in */; do
     if [ ! -x "${test}/run.sh" ]; then
         continue
     fi
diff --git a/tests/syncoid/011_sync_out-of-order_snapshots/run.sh b/tests/syncoid/011_sync_out-of-order_snapshots/run.sh
deleted file mode 100755
index bb96ad0..0000000
--- a/tests/syncoid/011_sync_out-of-order_snapshots/run.sh
+++ /dev/null
@@ -1,50 +0,0 @@
-#!/bin/bash
-
-# test verifying snapshots with out-of-order snapshot creation datetimes
-
-set -x
-set -e
-
-. ../../common/lib.sh
-
-if [ -z "$ALLOW_INVASIVE_TESTS" ]; then
-    exit 130
-fi
-
-POOL_IMAGE="/tmp/syncoid-test-11.zpool"
-POOL_SIZE="64M"
-POOL_NAME="syncoid-test-11"
-
-truncate -s "${POOL_SIZE}" "${POOL_IMAGE}"
-
-zpool create -m none -f "${POOL_NAME}" "${POOL_IMAGE}"
-
-function cleanUp {
-    zpool export "${POOL_NAME}"
-    rm -f "${POOL_IMAGE}"
-}
-
-# export pool and remove the image in any case
-trap cleanUp EXIT
-
-zfs create "${POOL_NAME}"/before
-zfs snapshot "${POOL_NAME}"/before@this-snapshot-should-make-it-into-the-after-dataset
-
-disableTimeSync
-setdate 1155533696
-zfs snapshot "${POOL_NAME}"/before@oldest-snapshot
-
-zfs snapshot "${POOL_NAME}"/before@another-snapshot-does-not-matter
-../../../syncoid --sendoptions="Lec" "${POOL_NAME}"/before "${POOL_NAME}"/after
-
-# verify
-saveSnapshotList "${POOL_NAME}" "snapshot-list.txt"
-
-grep "${POOL_NAME}/before@this-snapshot-should-make-it-into-the-after-dataset" "snapshot-list.txt" || exit $?
-grep "${POOL_NAME}/after@this-snapshot-should-make-it-into-the-after-dataset" "snapshot-list.txt" || exit $?
-grep "${POOL_NAME}/before@oldest-snapshot" "snapshot-list.txt" || exit $?
-grep "${POOL_NAME}/after@oldest-snapshot" "snapshot-list.txt" || exit $?
-grep "${POOL_NAME}/before@another-snapshot-does-not-matter" "snapshot-list.txt" || exit $?
-grep "${POOL_NAME}/after@another-snapshot-does-not-matter" "snapshot-list.txt" || exit $?
-
-exit 0
diff --git a/tests/syncoid/run-tests.sh b/tests/syncoid/run-tests.sh
index 8307413..0e7570e 100755
--- a/tests/syncoid/run-tests.sh
+++ b/tests/syncoid/run-tests.sh
@@ -2,7 +2,7 @@
 
 # run's all the available tests
 
-for test in $(find . -mindepth 1 -maxdepth 1 -type d -printf "%P\n" | sort -g); do
+for test in */; do
     if [ ! -x "${test}/run.sh" ]; then
         continue
     fi
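A note on the runner change above: the old `find ... | sort -g` pipeline ordered test directories numerically, while shell glob expansion is lexicographic (and yields names with a trailing slash). The two orderings agree here because the sanoid test names are single-digit and the syncoid ones are zero-padded; roughly:

    # */ expands in lexicographic order, so 001_, 002_, ..., 011_ keep
    # the numeric order sort -g produced; an unpadded 10_ would sort
    # before 2_ and break that equivalence
    for test in */; do
        echo "would run ${test}run.sh"
    done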
-x "${test}/run.sh" ]; then continue fi From aa2c693e622036c6d6c472b115dea1273b3ca41a Mon Sep 17 00:00:00 2001 From: Adam Fulton Date: Mon, 1 Apr 2024 11:53:45 -0500 Subject: [PATCH 127/130] fix(syncoid): regather $snaps on --delete-target-snapshots flag --- syncoid | 14 ++++++++++++-- 1 file changed, 12 insertions(+), 2 deletions(-) diff --git a/syncoid b/syncoid index bd041ae..b7a1c13 100755 --- a/syncoid +++ b/syncoid @@ -853,8 +853,18 @@ sub syncdataset { if (defined $args{'delete-target-snapshots'}) { # Find the snapshots that exist on the target, filter with # those that exist on the source. Remaining are the snapshots - # that are only on the target. Then sort by creation date, as - # to remove the oldest snapshots first. + # that are only on the target. Then sort to remove the oldest + # snapshots first. + + # regather snapshots on source and target + %snaps = getsnaps('source',$sourcehost,$sourcefs,$sourceisroot); + + if ($targetexists) { + my %targetsnaps = getsnaps('target',$targethost,$targetfs,$targetisroot); + my %sourcesnaps = %snaps; + %snaps = (%sourcesnaps, %targetsnaps); + } + my @to_delete = sort { $snaps{'target'}{$a}{'creation'}<=>$snaps{'target'}{$b}{'creation'} } grep {!exists $snaps{'source'}{$_}} keys %{ $snaps{'target'} }; while (@to_delete) { # Create batch of snapshots to remove From becec663207b792f15e5f47a16438899664dd560 Mon Sep 17 00:00:00 2001 From: Christoph Klaffl Date: Mon, 9 Jun 2025 23:21:44 +0200 Subject: [PATCH 128/130] prepare v2.3.0 --- CHANGELIST | 20 ++++++++++++++++++++ VERSION | 2 +- findoid | 2 +- packages/debian/changelog | 24 ++++++++++++++++++++++++ packages/rhel/sanoid.spec | 4 +++- sanoid | 2 +- syncoid | 2 +- tests/1_one_year/run.sh | 2 +- tests/2_dst_handling/run.sh | 2 +- tests/common/lib.sh | 10 +++++++++- 10 files changed, 62 insertions(+), 8 deletions(-) diff --git a/CHANGELIST b/CHANGELIST index 314e431..de00f66 100644 --- a/CHANGELIST +++ b/CHANGELIST @@ -1,3 +1,23 @@ +2.3.0 [overall] documentation updates, small fixes (@thecatontheflat, @mjeanson, @jiawen, @EchterAgo, @jan-krieg, @dlangille, @rightaditya, @MynaITLabs, @ossimoi, @alexgarel, @TopherIsSwell, @jimsalterjrs, @phreaker0) + [sanoid] implemented adding of taken snapshots to the cache file and a new parameter for setting an custom cache expire time (@phreaker0) + [sanoid] ignore duplicate template keys (@phreaker0) + [packaging] fix debian packaging with debian 12 and ubuntu 24.04 (@phreaker0) + [syncoid] fix typo preventing resumed transfer with --sendoptions (@Deltik) + [sanoid] remove iszfsbusy check to boost performance (@sdettmer) + [sanoid] write cache files in an atomic way to prevent race conditions (@phreaker0) + [sanoid] improve performance (especially for monitor commands) by caching the dataset list (@phreaker0) + [syncoid] add zstdmt compress options (@0xFelix) + [syncoid] added missing status information about what is done and provide more details (@phreaker0) + [syncoid] rename ssh control socket to avoid problem with length limits and conflicts (@phreaker0) + [syncoid] support relative paths (@phreaker0) + [syncoid] regather snapshots on --delete-target-snapshots flag (@Adam Fulton) + [sanoid] allow monitor commands to be run without root by using only the cache file (@Pajkastare) + [syncoid] add --include-snaps and --exclude-snaps options (@mr-vinn, @phreaker0) + [syncoid] escape property key and value pair in case of property preservation (@phreaker0) + [syncoid] prevent destroying of root dataset which leads to infinite loop because it can't be 
From becec663207b792f15e5f47a16438899664dd560 Mon Sep 17 00:00:00 2001
From: Christoph Klaffl
Date: Mon, 9 Jun 2025 23:21:44 +0200
Subject: [PATCH 128/130] prepare v2.3.0

---
 CHANGELIST                  | 20 ++++++++++++++++++++
 VERSION                     |  2 +-
 findoid                     |  2 +-
 packages/debian/changelog   | 24 ++++++++++++++++++++++++
 packages/rhel/sanoid.spec   |  4 +++-
 sanoid                      |  2 +-
 syncoid                     |  2 +-
 tests/1_one_year/run.sh     |  2 +-
 tests/2_dst_handling/run.sh |  2 +-
 tests/common/lib.sh         | 10 +++++++++-
 10 files changed, 62 insertions(+), 8 deletions(-)

diff --git a/CHANGELIST b/CHANGELIST
index 314e431..de00f66 100644
--- a/CHANGELIST
+++ b/CHANGELIST
@@ -1,3 +1,23 @@
+2.3.0   [overall] documentation updates, small fixes (@thecatontheflat, @mjeanson, @jiawen, @EchterAgo, @jan-krieg, @dlangille, @rightaditya, @MynaITLabs, @ossimoi, @alexgarel, @TopherIsSwell, @jimsalterjrs, @phreaker0)
+        [sanoid] implemented adding of taken snapshots to the cache file and a new parameter for setting a custom cache expire time (@phreaker0)
+        [sanoid] ignore duplicate template keys (@phreaker0)
+        [packaging] fix debian packaging with debian 12 and ubuntu 24.04 (@phreaker0)
+        [syncoid] fix typo preventing resumed transfer with --sendoptions (@Deltik)
+        [sanoid] remove iszfsbusy check to boost performance (@sdettmer)
+        [sanoid] write cache files in an atomic way to prevent race conditions (@phreaker0)
+        [sanoid] improve performance (especially for monitor commands) by caching the dataset list (@phreaker0)
+        [syncoid] add zstdmt compress options (@0xFelix)
+        [syncoid] added missing status information about what is done and provide more details (@phreaker0)
+        [syncoid] rename ssh control socket to avoid problems with length limits and conflicts (@phreaker0)
+        [syncoid] support relative paths (@phreaker0)
+        [syncoid] regather snapshots on --delete-target-snapshots flag (@Adam Fulton)
+        [sanoid] allow monitor commands to be run without root by using only the cache file (@Pajkastare)
+        [syncoid] add --include-snaps and --exclude-snaps options (@mr-vinn, @phreaker0)
+        [syncoid] escape property key and value pair in case of property preservation (@phreaker0)
+        [syncoid] prevent destroying of the root dataset, which leads to an infinite loop because it can't be destroyed (@phreaker0)
+        [syncoid] modify zfs-get argument order for portability (@Rantherhin)
+        [sanoid] trim config values (@phreaker0)
+
 2.2.0   [overall] documentation updates, small fixes (@azmodude, @deviantintegral, @jimsalterjrs, @alexhaydock, @cbreak-black, @kd8bny, @JavaScriptDude, @veeableful, @rsheasby, @Topslakr, @mavhc, @adam-stamand, @joelishness, @jsoref, @dodexahedron, @phreaker0)
         [syncoid] implemented flag for preserving properties without the zfs -p flag (@phreaker0)
         [syncoid] implemented target snapshot deletion (@mat813)
diff --git a/VERSION b/VERSION
index ccbccc3..276cbf9 100644
--- a/VERSION
+++ b/VERSION
@@ -1 +1 @@
-2.2.0
+2.3.0
diff --git a/findoid b/findoid
index 2561246..adda5cc 100755
--- a/findoid
+++ b/findoid
@@ -4,7 +4,7 @@
 # from http://www.gnu.org/licenses/gpl-3.0.html on 2014-11-17. A copy should also be available in this
 # project's Git repository at https://github.com/jimsalterjrs/sanoid/blob/master/LICENSE.
 
-$::VERSION = '2.2.0';
+$::VERSION = '2.3.0';
 
 use strict;
 use warnings;
diff --git a/packages/debian/changelog b/packages/debian/changelog
index 00c0c07..ba369a1 100644
--- a/packages/debian/changelog
+++ b/packages/debian/changelog
@@ -1,3 +1,27 @@
+sanoid (2.3.0) unstable; urgency=medium
+
+  [overall] documentation updates, small fixes (@thecatontheflat, @mjeanson, @jiawen, @EchterAgo, @jan-krieg, @dlangille, @rightaditya, @MynaITLabs, @ossimoi, @alexgarel, @TopherIsSwell, @jimsalterjrs, @phreaker0)
+  [sanoid] implemented adding of taken snapshots to the cache file and a new parameter for setting a custom cache expire time (@phreaker0)
+  [sanoid] ignore duplicate template keys (@phreaker0)
+  [packaging] fix debian packaging with debian 12 and ubuntu 24.04 (@phreaker0)
+  [syncoid] fix typo preventing resumed transfer with --sendoptions (@Deltik)
+  [sanoid] remove iszfsbusy check to boost performance (@sdettmer)
+  [sanoid] write cache files in an atomic way to prevent race conditions (@phreaker0)
+  [sanoid] improve performance (especially for monitor commands) by caching the dataset list (@phreaker0)
+  [syncoid] add zstdmt compress options (@0xFelix)
+  [syncoid] added missing status information about what is done and provide more details (@phreaker0)
+  [syncoid] rename ssh control socket to avoid problems with length limits and conflicts (@phreaker0)
+  [syncoid] support relative paths (@phreaker0)
+  [syncoid] regather snapshots on --delete-target-snapshots flag (@Adam Fulton)
+  [sanoid] allow monitor commands to be run without root by using only the cache file (@Pajkastare)
+  [syncoid] add --include-snaps and --exclude-snaps options (@mr-vinn, @phreaker0)
+  [syncoid] escape property key and value pair in case of property preservation (@phreaker0)
+  [syncoid] prevent destroying of the root dataset, which leads to an infinite loop because it can't be destroyed (@phreaker0)
+  [syncoid] modify zfs-get argument order for portability (@Rantherhin)
+  [sanoid] trim config values (@phreaker0)
+
+ -- Jim Salter  Tue, 05 Jun 2025 22:47:00 +0200
+
 sanoid (2.2.0) unstable; urgency=medium
 
   [overall] documentation updates, small fixes (@azmodude, @deviantintegral, @jimsalterjrs, @alexhaydock, @cbreak-black, @kd8bny, @JavaScriptDude, @veeableful, @rsheasby, @Topslakr, @mavhc, @adam-stamand, @joelishness, @jsoref, @dodexahedron, @phreaker0)
diff --git a/packages/rhel/sanoid.spec b/packages/rhel/sanoid.spec
index 218f52d..ce48247 100644
--- a/packages/rhel/sanoid.spec
+++ b/packages/rhel/sanoid.spec
@@ -1,4 +1,4 @@
-%global version 2.2.0
+%global version 2.3.0
 %global git_tag v%{version}
 
 # Enable with systemctl "enable sanoid.timer"
@@ -111,6 +111,8 @@ echo "* * * * * root %{_sbindir}/sanoid --cron" > %{buildroot}%{_docdir}/%{name}
 %endif
 
 %changelog
+* Tue Jun 05 2025 Christoph Klaffl - 2.3.0
+- Bump to 2.3.0
 * Tue Jul 18 2023 Christoph Klaffl - 2.2.0
 - Bump to 2.2.0
 * Tue Nov 24 2020 Christoph Klaffl - 2.1.0
diff --git a/sanoid b/sanoid
index c33451c..4a71319 100755
--- a/sanoid
+++ b/sanoid
@@ -4,7 +4,7 @@
 # from http://www.gnu.org/licenses/gpl-3.0.html on 2014-11-17. A copy should also be available in this
 # project's Git repository at https://github.com/jimsalterjrs/sanoid/blob/master/LICENSE.
 
-$::VERSION = '2.2.0';
+$::VERSION = '2.3.0';
 my $MINIMUM_DEFAULTS_VERSION = 2;
 
 use strict;
diff --git a/syncoid b/syncoid
index b7a1c13..956f3e7 100755
--- a/syncoid
+++ b/syncoid
@@ -4,7 +4,7 @@
 # from http://www.gnu.org/licenses/gpl-3.0.html on 2014-11-17. A copy should also be available in this
 # project's Git repository at https://github.com/jimsalterjrs/sanoid/blob/master/LICENSE.
 
-$::VERSION = '2.2.0';
+$::VERSION = '2.3.0';
 
 use strict;
 use warnings;
diff --git a/tests/1_one_year/run.sh b/tests/1_one_year/run.sh
index fe76946..48b3c7b 100755
--- a/tests/1_one_year/run.sh
+++ b/tests/1_one_year/run.sh
@@ -39,7 +39,7 @@ function cleanUp {
 trap cleanUp EXIT
 
 while [ $timestamp -le $END ]; do
-    setdate $timestamp; date; "${SANOID}" --cron --verbose
+    setdate $timestamp; date; "${SANOID}" --cron --verbose --cache-ttl=2592000
     timestamp=$((timestamp+3600))
 done
diff --git a/tests/2_dst_handling/run.sh b/tests/2_dst_handling/run.sh
index 3231631..2c3fd3e 100755
--- a/tests/2_dst_handling/run.sh
+++ b/tests/2_dst_handling/run.sh
@@ -42,7 +42,7 @@ function cleanUp {
 trap cleanUp EXIT
 
 while [ $timestamp -le $END ]; do
-    setdate $timestamp; date; "${SANOID}" --cron --verbose
+    setdate $timestamp; date; "${SANOID}" --cron --verbose --cache-ttl=2592000
     timestamp=$((timestamp+900))
 done
diff --git a/tests/common/lib.sh b/tests/common/lib.sh
index 9c88eff..84b2c63 100644
--- a/tests/common/lib.sh
+++ b/tests/common/lib.sh
@@ -10,7 +10,10 @@
     export SANOID="../../sanoid"
 
     # make sure that there is no cache file
-    rm -f /var/cache/sanoidsnapshots.txt
+    rm -f /var/cache/sanoid/snapshots.txt
+    rm -f /var/cache/sanoid/datasets.txt
+
+    mkdir -p /etc/sanoid
 
     # install needed sanoid configuration files
     [ -f sanoid.conf ] && cp sanoid.conf /etc/sanoid/sanoid.conf
@@ -51,6 +54,11 @@ function disableTimeSync {
     if [ $? -eq 0 ]; then
         timedatectl set-ntp 0
     fi
+
+    which systemctl > /dev/null
+    if [ $? -eq 0 ]; then
+        systemctl is-active virtualbox-guest-utils.service && systemctl stop virtualbox-guest-utils.service
+    fi
 }
 
 function saveSnapshotList {
From 680194fa330819b844c20ef30b93708739699c02 Mon Sep 17 00:00:00 2001
From: aabccd021
Date: Thu, 12 Jun 2025 12:58:26 +0700
Subject: [PATCH 129/130] Fix readme formatting

---
 README.md | 1 +
 1 file changed, 1 insertion(+)

diff --git a/README.md b/README.md
index c8ce6cb..aff3b68 100644
--- a/README.md
+++ b/README.md
@@ -330,6 +330,7 @@ As of 1.4.18, syncoid also automatically supports and enables resume of interrup
 
   This argument tells syncoid to create a zfs bookmark for the newest snapshot after it got replicated successfully. The bookmark name will be equal to the snapshot name. Only works in combination with the --no-sync-snap option. This can be very useful for irregular replication where the last matching snapshot on the source was already deleted but the bookmark remains so a replication is still possible.
 
 + --use-hold
 
+  This argument tells syncoid to add a hold to the newest snapshot on the source and target after replication succeeds and to remove the hold after the next successful replication. Setting a hold prevents the snapshots from being destroyed. The hold name includes the identifier if set. This allows for separate holds in case of replication to multiple targets.
 
 + --preserve-recordsize
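The --use-hold behavior documented above maps onto plain `zfs hold` semantics; a quick illustration (hold tag and snapshot name invented — syncoid derives its actual tag from the identifier):

    # a held snapshot cannot be destroyed until every hold on it is released
    zfs hold syncoid_hold tank/data@autosnap_2025-06-09_00:00:01_daily
    zfs destroy tank/data@autosnap_2025-06-09_00:00:01_daily   # fails: dataset is busy
    zfs holds tank/data@autosnap_2025-06-09_00:00:01_daily     # lists the tag
    zfs release syncoid_hold tank/data@autosnap_2025-06-09_00:00:01_daily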
From efd52f416d1ecf95c9e7db225476c6a5fd1d1808 Mon Sep 17 00:00:00 2001
From: Christoph Klaffl
Date: Tue, 12 Aug 2025 14:19:47 +0200
Subject: [PATCH 130/130] fixed file handle conflict

---
 sanoid | 15 +++++++--------
 1 file changed, 7 insertions(+), 8 deletions(-)

diff --git a/sanoid b/sanoid
index 4a71319..8e0d186 100755
--- a/sanoid
+++ b/sanoid
@@ -1084,11 +1084,9 @@ sub init {
         @datasets = getchilddatasets($config{$section}{'path'});
         DATASETS: foreach my $dataset(@datasets) {
             if (! @cachedatasets) {
-                push (@updatedatasets, $dataset);
+                push (@updatedatasets, "$dataset\n");
             }
 
-            chomp $dataset;
-
             if ($zfsRecursive) {
                 # don't try to take the snapshot ourself, recursive zfs snapshot will take care of that
                 $config{$dataset}{'autosnap'} = 0;
@@ -1691,7 +1689,7 @@ sub getchilddatasets {
     my $getchildrencmd = "$mysudocmd $zfs list -o name -t filesystem,volume -Hr $fs |";
     if ($args{'debug'}) { print "DEBUG: getting list of child datasets on $fs using $getchildrencmd...\n"; }
     open FH, $getchildrencmd;
-    my @children = <FH>;
+    chomp( my @children = <FH> );
     close FH;
 
     # parent dataset is the first element
@@ -1781,25 +1779,26 @@ sub addcachedsnapshots {
 
     copy($cache, "$cache.tmp") or die "Could not copy to $cache.tmp!\n";
 
-    open FH, ">> $cache.tmp" or die "Could not write to $cache.tmp!\n";
+    open my $fh, ">> $cache.tmp" or die "Could not write to $cache.tmp!\n";
     while((my $snap, my $details) = each(%taken)) {
         my @parts = split("@", $snap, 2);
         my $suffix = $parts[1] . "\tcreation\t" . $details->{time} . "\t-";
         my $dataset = $parts[0];
-        print FH "${dataset}\@${suffix}\n";
+        print $fh "${dataset}\@${suffix}\n";
 
         if ($details->{recursive}) {
             my @datasets = getchilddatasets($dataset);
             foreach my $dataset(@datasets) {
-                print FH "${dataset}\@${suffix}\n";
+                print "${dataset}\@${suffix}\n";
+                print $fh "${dataset}\@${suffix}\n";
             }
         }
     }
-    close FH;
+    close $fh;
 
     # preserve mtime of cache for expire check
     my ($dev, $ino, $mode, $nlink, $uid, $gid, $rdev, $size, $atime, $mtime, $ctime, $blksize, $blocks) = stat($cache);
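The conflict fixed in this last patch is inherent to bareword filehandles: FH names a package-global handle, and addcachedsnapshots kept its handle open across calls to getchilddatasets, which opens and closes FH itself. A minimal sketch of the failure mode and the lexical-handle fix (paths invented):

    use strict;
    use warnings;

    # bareword handles live in the package symbol table, so a nested
    # open of FH silently replaces the caller's handle
    sub helper {
        open FH, '<', '/etc/hostname' or die "open: $!";
        my @lines = <FH>;
        close FH;
        return @lines;
    }

    open FH, '>', '/tmp/outer.txt' or die "open: $!";
    helper();                       # reopens and closes the shared FH
    print FH "lost\n" or warn "outer FH was clobbered\n";

    # a lexical handle is private to its scope; the helper can't touch it
    open my $fh, '>', '/tmp/outer2.txt' or die "open: $!";
    helper();
    print $fh "safe\n";             # still lands in /tmp/outer2.txt
    close $fh;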