From dc2371775afe08af799d3097d47b48182d1716eb Mon Sep 17 00:00:00 2001 From: Christoph Klaffl Date: Thu, 6 Jun 2019 08:25:18 +0200 Subject: [PATCH 01/35] don't use hardcoded paths --- sanoid | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/sanoid b/sanoid index c28a9de..44ed37f 100755 --- a/sanoid +++ b/sanoid @@ -29,10 +29,13 @@ if (keys %args < 2) { $args{'verbose'} = 1; } -my $pscmd = '/bin/ps'; +# for compatibility reasons, older versions used hardcoded command paths +$ENV{'PATH'} = $ENV{'PATH'} . ":/bin:/sbin"; -my $zfs = '/sbin/zfs'; -my $zpool = '/sbin/zpool'; +my $pscmd = 'ps'; + +my $zfs = 'zfs'; +my $zpool = 'zpool'; my $conf_file = "$args{'configdir'}/sanoid.conf"; my $default_conf_file = "$args{'configdir'}/sanoid.defaults.conf"; From 6252cbf51494e2393f15555182c8e30142e82f38 Mon Sep 17 00:00:00 2001 From: tiedotguy Date: Sat, 2 Nov 2019 09:28:34 +1100 Subject: [PATCH 02/35] Restructure to pass hashes around in take_snapshots This commit makes it easier to pass structured data between the loop which decides what to snapshot, and the loop performing the actual snapshot. 
--- sanoid | 49 ++++++++++++++++--------------------------------- 1 file changed, 16 insertions(+), 33 deletions(-) diff --git a/sanoid b/sanoid index a17b91d..56fcd96 100755 --- a/sanoid +++ b/sanoid @@ -503,47 +503,30 @@ sub take_snapshots { # update to most current possible datestamp %datestamp = get_date(); # print "we should have had a $type snapshot of $path $maxage seconds ago; most recent is $newestage seconds old.\n"; - - my $flags = ""; - # use zfs (atomic) recursion if specified in config - if ($config{$section}{'zfs_recursion'}) { - $flags .= "r"; - } - if ($handleDst) { - $flags .= "d"; - } - - if ($flags ne "") { - push(@newsnaps, "$path\@autosnap_$datestamp{'sortable'}_$type\@$flags"); - } else { - push(@newsnaps, "$path\@autosnap_$datestamp{'sortable'}_$type"); - } + my $snap = { + 'dataset' => $path, + 'snapshot' => "autosnap_$datestamp{'sortable'}_$type", + 'recursive' => $config{$section}{'zfs_recrsion'}, # use zfs (atomic) recursion if specified in config + 'handleDst' => $handleDst, + }; + push(@newsnaps, $snap); } } } } if ( (scalar(@newsnaps)) > 0) { - foreach my $snap ( @newsnaps ) { + foreach my $snapData ( @newsnaps ) { + my $dataset = $snapData->{dataset}; + my $snapname = $snapData->{snapshot}; + my $recursiveFlag = $snapData->{recursive}; + my $dstHandling = $snapData->{handleDst}; my $extraMessage = ""; - my @split = split '@', $snap, -1; - my $recursiveFlag = 0; - my $dstHandling = 0; - if (scalar(@split) == 3) { - my $flags = $split[2]; - if (index($flags, "r") != -1) { - $recursiveFlag = 1; - $extraMessage = " (zfs recursive)"; - chop $snap; - } - if (index($flags, "d") != -1) { - $dstHandling = 1; - chop $snap; - } - chop $snap; + if ($recursiveFlag) { + $extraMessage = " (zfs recursive)"; } - my $dataset = $split[0]; - my $snapname = $split[1]; + my $snap = "$dataset\@$snapname"; + my $presnapshotfailure = 0; my $ret = 0; if ($config{$dataset}{'pre_snapshot_script'}) { From 46a640859f21beeba2922f5d08ed1057abab7547 Mon Sep 17 
00:00:00 2001 From: tiedotguy Date: Sat, 2 Nov 2019 12:24:05 +1100 Subject: [PATCH 03/35] Collect and pass additional information to script execution This will collect information about what snapshots are being taken in a single batch (multiple snapshot types taken on a single dataset at the same time) and pass it to pre/post scripts. It also passes what type of script (pre, post, prune), and what type of snapshot is being taken. --- sanoid | 24 +++++++++++++++++++++++- 1 file changed, 23 insertions(+), 1 deletion(-) diff --git a/sanoid b/sanoid index 56fcd96..a7c3cb0 100755 --- a/sanoid +++ b/sanoid @@ -317,11 +317,13 @@ sub prune_snapshots { if ($config{$dataset}{'pruning_script'}) { $ENV{'SANOID_TARGET'} = $dataset; $ENV{'SANOID_SNAPNAME'} = $snapname; + $ENV{'SANOID_SCRIPT'} = 'prune'; if ($args{'verbose'}) { print "executing pruning_script '".$config{$dataset}{'pruning_script'}."' on dataset '$dataset'\n"; } my $ret = runscript('pruning_script',$dataset); delete $ENV{'SANOID_TARGET'}; delete $ENV{'SANOID_SNAPNAME'}; + delete $ENV{'SANOID_SCRIPT'}; } } else { warn "could not remove $snap : $?"; @@ -378,7 +380,8 @@ sub take_snapshots { if ($config{$section}{'process_children_only'}) { next; } my $path = $config{$section}{'path'}; - my @types = ('yearly','monthly','weekly','daily','hourly','frequently'); + my @types = ('yearly','monthly','weekly','daily','hourly','frequently'); + my @batch = (); foreach my $type (@types) { if ($config{$section}{$type} > 0) { @@ -508,7 +511,10 @@ sub take_snapshots { 'snapshot' => "autosnap_$datestamp{'sortable'}_$type", 'recursive' => $config{$section}{'zfs_recrsion'}, # use zfs (atomic) recursion if specified in config 'handleDst' => $handleDst, + 'type' => $type, + 'batch' => \@batch, # Reference the source array, because we may be adding to it in subsequent loops }; + push(@batch, $snap->{snapshot}); push(@newsnaps, $snap); } } @@ -521,6 +527,8 @@ sub take_snapshots { my $snapname = $snapData->{snapshot}; my 
$recursiveFlag = $snapData->{recursive}; my $dstHandling = $snapData->{handleDst}; + my $f = $snapData->{batch}; + my $batch = join(",", @$f); my $extraMessage = ""; if ($recursiveFlag) { $extraMessage = " (zfs recursive)"; @@ -532,6 +540,9 @@ sub take_snapshots { if ($config{$dataset}{'pre_snapshot_script'}) { $ENV{'SANOID_TARGET'} = $dataset; $ENV{'SANOID_SNAPNAME'} = $snapname; + $ENV{'SANOID_TYPE'} = $snapData->{type}; + $ENV{'SANOID_BATCH'} = $batch; + $ENV{'SANOID_SCRIPT'} = 'pre'; if ($args{'verbose'}) { print "executing pre_snapshot_script '".$config{$dataset}{'pre_snapshot_script'}."' on dataset '$dataset'\n"; } if (!$args{'readonly'}) { @@ -540,6 +551,9 @@ sub take_snapshots { delete $ENV{'SANOID_TARGET'}; delete $ENV{'SANOID_SNAPNAME'}; + delete $ENV{'SANOID_TYPE'}; + delete $ENV{'SANOID_BATCH'}; + delete $ENV{'SANOID_SCRIPT'}; if ($ret != 0) { # warning was already thrown by runscript function @@ -588,6 +602,10 @@ sub take_snapshots { if (!$presnapshotfailure or $config{$dataset}{'force_post_snapshot_script'}) { $ENV{'SANOID_TARGET'} = $dataset; $ENV{'SANOID_SNAPNAME'} = $snapname; + $ENV{'SANOID_TYPE'} = $snapData->{type}; + $ENV{'SANOID_BATCH'} = $batch; + $ENV{'SANOID_SCRIPT'} = 'post'; + $ENV{'SANOID_PRE_FAILURE'} = $presnapshotfailure; if ($args{'verbose'}) { print "executing post_snapshot_script '".$config{$dataset}{'post_snapshot_script'}."' on dataset '$dataset'\n"; } if (!$args{'readonly'}) { @@ -596,6 +614,10 @@ sub take_snapshots { delete $ENV{'SANOID_TARGET'}; delete $ENV{'SANOID_SNAPNAME'}; + delete $ENV{'SANOID_TYPE'}; + delete $ENV{'SANOID_BATCH'}; + delete $ENV{'SANOID_SCRIPT'}; + delete $ENV{'SANOID_PRE_FAILURE'}; } } } From d51c8ab2c85a37f0ef1e5abcca3053be3145055f Mon Sep 17 00:00:00 2001 From: tiedotguy Date: Sat, 2 Nov 2019 12:26:31 +1100 Subject: [PATCH 04/35] Document all the new script information passed --- README.md | 45 ++++++++++++++++++++++++++++++++++++++++++++ sanoid.conf | 4 ++-- sanoid.defaults.conf | 1 + 3 files changed, 
48 insertions(+), 2 deletions(-) diff --git a/README.md b/README.md index bc8ed83..c9ea974 100644 --- a/README.md +++ b/README.md @@ -101,6 +101,51 @@ Which would be enough to tell sanoid to take and keep 36 hourly snapshots, 30 da Show help message. +### Sanoid script hooks + +There are 3 scripts which can optionally be executed at various stages in the lifecycle of a snapshot: + +##### `pre_snapshot_script` + +This script will be executed before a snapshot is taken. The following environment variables with be passed: + +| Env vars | Description +| ----------------- | ----------- +| `SANOID_SCRIPT` | The type of script being executed, one of `pre`, `post`, or `prune`. Allows for one script to be used for multiple tasks +| `SANOID_TARGET` | The dataset about to be snapshot +| `SANOID_SNAPNAME` | The name of the snapshot that will be taken (does not include the dataset name) +| `SANOID_TYPE` | The type of snapshot to be taken (yearly, monthly, weekly, daily, hourly, frequently) +| `SANOID_BATCH` | All the snapshots which will be taken against this dataset (does not include the dataset name), joined by commas. Note that not all of the snapshots will have been taken. For example, monthly is taken before weekly, but weekly is still included when `SANOID_TYPE` is monthly. It is guaranteed to take snapshots in ascending frequency: yearly, monthly, ... frequently + +If the script returns a non-zero exit code, the snapshot will not be taken unless `no_inconsistent_snapshot` is false. + +##### `post_snapshot_script` + +This script will be executed when: + +- The pre-snapshot script succeeded or +- The pre-snapshot script failed and `force_post_snapshot_script` is true. 
+ +| Env vars | Description +| -------------------- | ----------- +| `SANOID_SCRIPT` | as above | +| `SANOID_TARGET` | as above | +| `SANOID_SNAPNAME` | as above | +| `SANOID_TYPE` | as above | +| `SANOID_BATCH` | as above | +| `SANOID_PRE_FAILURE` | This will indicate if the pre-snapshot script failed | + + +##### `pruning_script` + +This script will be executed after a snapshot is successfully deleted. The following environment variables will be passed: + +| Env vars | Description +| ----------------- | ----------- +| `SANOID_SCRIPT` | as above | +| `SANOID_TARGET` | as above | +| `SANOID_SNAPNAME` | as above | + ---------- # Syncoid diff --git a/sanoid.conf b/sanoid.conf index 6bd5c62..e55b698 100644 --- a/sanoid.conf +++ b/sanoid.conf @@ -91,8 +91,8 @@ daily_crit = 4d [template_scripts] - ### dataset and snapshot name will be supplied as environment variables - ### for all pre/post/prune scripts ($SANOID_TARGET, $SANOID_SNAPNAME) + ### information about the snapshot will be supplied as environment variables, + ### see the README.md file for details about what is passed when. ### run script before snapshot pre_snapshot_script = /path/to/script.sh ### run script after snapshot diff --git a/sanoid.defaults.conf b/sanoid.defaults.conf index a9ca382..23ded03 100644 --- a/sanoid.defaults.conf +++ b/sanoid.defaults.conf @@ -19,6 +19,7 @@ use_template = process_children_only = skip_children = +# See "Sanoid script hooks" in README.md for information about scripts. 
pre_snapshot_script = post_snapshot_script = pruning_script = From e01dceaee3b7bf6a9f272b184fc04cb6131f8e71 Mon Sep 17 00:00:00 2001 From: tiedotguy Date: Sat, 2 Nov 2019 22:35:58 +1100 Subject: [PATCH 05/35] Remove unnecessary temporary --- sanoid | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/sanoid b/sanoid index a7c3cb0..593b29f 100755 --- a/sanoid +++ b/sanoid @@ -527,8 +527,7 @@ sub take_snapshots { my $snapname = $snapData->{snapshot}; my $recursiveFlag = $snapData->{recursive}; my $dstHandling = $snapData->{handleDst}; - my $f = $snapData->{batch}; - my $batch = join(",", @$f); + my $batch = join(",", @{$snapData->{batch}}); my $extraMessage = ""; if ($recursiveFlag) { $extraMessage = " (zfs recursive)"; From 9e429de59d8810b90c10d6f8a33a567d21fc2ba6 Mon Sep 17 00:00:00 2001 From: tiedotguy Date: Sun, 3 Nov 2019 09:34:25 +1100 Subject: [PATCH 06/35] Fix typo in recursive --- sanoid | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/sanoid b/sanoid index 593b29f..5de6f46 100755 --- a/sanoid +++ b/sanoid @@ -509,7 +509,7 @@ sub take_snapshots { my $snap = { 'dataset' => $path, 'snapshot' => "autosnap_$datestamp{'sortable'}_$type", - 'recursive' => $config{$section}{'zfs_recrsion'}, # use zfs (atomic) recursion if specified in config + 'recursive' => $config{$section}{'zfs_recursion'}, # use zfs (atomic) recursion if specified in config 'handleDst' => $handleDst, 'type' => $type, 'batch' => \@batch, # Reference the source array, because we may be adding to it in subsequent loops From 353799ccb90822795ac4da1bc2ca5efe726e4fd4 Mon Sep 17 00:00:00 2001 From: Christoph Klaffl Date: Tue, 5 Nov 2019 17:36:36 +0100 Subject: [PATCH 07/35] reworked argument parsing and error out if file path is not provided --- findoid | 89 +++++++++++++++++++-------------------------------------- 1 file changed, 29 insertions(+), 60 deletions(-) diff --git a/findoid b/findoid index 48301a4..c83ae1a 100755 --- a/findoid +++ b/findoid @@ -4,16 +4,26 @@ 
# from http://www.gnu.org/licenses/gpl-3.0.html on 2014-11-17. A copy should also be available in this # project's Git repository at https://github.com/jimsalterjrs/sanoid/blob/master/LICENSE. +$::VERSION = '2.0.2'; use strict; use warnings; +use Getopt::Long qw(:config auto_version auto_help); +use Pod::Usage; my $zfs = '/sbin/zfs'; -my %args = getargs(@ARGV); +my %args = ('path' => ''); +GetOptions(\%args, "path=s") or pod2usage(2); -my $progversion = '1.4.7'; - -if ($args{'version'}) { print "$progversion\n"; exit 0; } +if ($args{'path'} eq '') { + if (scalar(@ARGV) < 1) { + warn "file path missing!\n"; + pod2usage(2); + exit 127; + } else { + $args{'path'} = $ARGV[0]; + } +} my $dataset = getdataset($args{'path'}); @@ -119,62 +129,21 @@ sub getdataset { return $bestmatch; } -sub getargs { - my @args = @_; - my %args; +__END__ - my %novaluearg; - my %validarg; - push my @validargs, ('debug','version'); - foreach my $item (@validargs) { $validarg{$item} = 1; } - push my @novalueargs, ('debug','version'); - foreach my $item (@novalueargs) { $novaluearg{$item} = 1; } +=head1 NAME - while (my $rawarg = shift(@args)) { - my $arg = $rawarg; - my $argvalue; - if ($rawarg =~ /=/) { - # user specified the value for a CLI argument with = - # instead of with blank space. separate appropriately. - $argvalue = $arg; - $arg =~ s/=.*$//; - $argvalue =~ s/^.*=//; - } - if ($rawarg =~ /^--/) { - # doubledash arg - $arg =~ s/^--//; - if (! $validarg{$arg}) { die "ERROR: don't understand argument $rawarg.\n"; } - if ($novaluearg{$arg}) { - $args{$arg} = 1; - } else { - # if this CLI arg takes a user-specified value and - # we don't already have it, then the user must have - # specified with a space, so pull in the next value - # from the array as this value rather than as the - # next argument. - if ($argvalue eq '') { $argvalue = shift(@args); } - $args{$arg} = $argvalue; - } - } elsif ($arg =~ /^-/) { - # singledash arg - $arg =~ s/^-//; - if (! 
$validarg{$arg}) { die "ERROR: don't understand argument $rawarg.\n"; } - if ($novaluearg{$arg}) { - $args{$arg} = 1; - } else { - # if this CLI arg takes a user-specified value and - # we don't already have it, then the user must have - # specified with a space, so pull in the next value - # from the array as this value rather than as the - # next argument. - if ($argvalue eq '') { $argvalue = shift(@args); } - $args{$arg} = $argvalue; - } - } else { - # bare arg - $args{'path'} = $arg; - } - } +findoid - ZFS file version listing tool - return %args; -} +=head1 SYNOPSIS + + findoid [options] FILE + + FILE local path to file for version listing + +Options: + + --path=FILE alternative to specify file path to list versions for + + --help Prints this helptext + --version Prints the version number From a1f5e4c0c006e16a5047a16fc65c9b3663adb81e Mon Sep 17 00:00:00 2001 From: Ben Wolsieffer Date: Wed, 4 Dec 2019 22:19:29 -0500 Subject: [PATCH 08/35] Add cache-dir option. --- README.md | 4 ++++ sanoid | 17 +++++++++++++---- 2 files changed, 17 insertions(+), 4 deletions(-) diff --git a/README.md b/README.md index bc8ed83..d5e143d 100644 --- a/README.md +++ b/README.md @@ -49,6 +49,10 @@ Which would be enough to tell sanoid to take and keep 36 hourly snapshots, 30 da Specify a location for the config file named sanoid.conf. Defaults to /etc/sanoid ++ --cache-dir + + Specify a directory to store the zfs snapshot cache. Defaults to /var/cache/sanoid + + --take-snapshots This will process your sanoid.conf file, create snapshots, but it will NOT purge expired ones. (Note that snapshots taken are atomic in an individual dataset context, not a global context - snapshots of pool/dataset1 and pool/dataset2 will each be internally consistent and atomic, but one may be a few filesystem transactions "newer" than the other.) 
diff --git a/sanoid b/sanoid index 2b566ef..fd39f3a 100755 --- a/sanoid +++ b/sanoid @@ -11,15 +11,19 @@ use strict; use warnings; use Config::IniFiles; # read samba-style conf file use Data::Dumper; # debugging - print contents of hash -use File::Path; # for rmtree command in use_prune +use File::Path 'make_path'; use Getopt::Long qw(:config auto_version auto_help); use Pod::Usage; # pod2usage use Time::Local; # to parse dates in reverse use Capture::Tiny ':all'; -my %args = ("configdir" => "/etc/sanoid"); +my %args = ( + "configdir" => "/etc/sanoid", + "cache-dir" => "/var/cache/sanoid" +); GetOptions(\%args, "verbose", "debug", "cron", "readonly", "quiet", - "monitor-health", "force-update", "configdir=s", + "configdir=s", "cache-dir=s", + "monitor-health", "force-update", "monitor-snapshots", "take-snapshots", "prune-snapshots", "force-prune", "monitor-capacity" ) or pod2usage(2); @@ -41,9 +45,13 @@ my $default_conf_file = "$args{'configdir'}/sanoid.defaults.conf"; # parse config file my %config = init($conf_file,$default_conf_file); +my $cache_dir = $args{'cache-dir'}; + +make_path($cache_dir); + # if we call getsnaps(%config,1) it will forcibly update the cache, TTL or no TTL my $forcecacheupdate = 0; -my $cache = '/var/cache/sanoidsnapshots.txt'; +my $cache = "$cache_dir/snapshots.txt"; my $cacheTTL = 900; # 15 minutes my %snaps = getsnaps( \%config, $cacheTTL, $forcecacheupdate ); my %pruned; @@ -1659,6 +1667,7 @@ Assumes --cron --verbose if no other arguments (other than configdir) are specif Options: --configdir=DIR Specify a directory to find config file sanoid.conf + --cache-dir=DIR Specify a directory to store the zfs snapshot cache --cron Creates snapshots and purges expired snapshots --verbose Prints out additional information during a sanoid run From 59a07f92b4920952cc9137b03c1533656f48b121 Mon Sep 17 00:00:00 2001 From: Ben Wolsieffer Date: Fri, 6 Dec 2019 14:28:37 -0500 Subject: [PATCH 09/35] Add run-dir option. 
--- README.md | 4 ++++ sanoid | 24 ++++++++++++++---------- 2 files changed, 18 insertions(+), 10 deletions(-) diff --git a/README.md b/README.md index d5e143d..c2b502b 100644 --- a/README.md +++ b/README.md @@ -53,6 +53,10 @@ Which would be enough to tell sanoid to take and keep 36 hourly snapshots, 30 da Specify a directory to store the zfs snapshot cache. Defaults to /var/cache/sanoid ++ --run-dir + + Specify a directory for temporary files such as lock files. Defaults to /var/run/sanoid + + --take-snapshots This will process your sanoid.conf file, create snapshots, but it will NOT purge expired ones. (Note that snapshots taken are atomic in an individual dataset context, not a global context - snapshots of pool/dataset1 and pool/dataset2 will each be internally consistent and atomic, but one may be a few filesystem transactions "newer" than the other.) diff --git a/sanoid b/sanoid index fd39f3a..7f9c2fa 100755 --- a/sanoid +++ b/sanoid @@ -19,10 +19,11 @@ use Capture::Tiny ':all'; my %args = ( "configdir" => "/etc/sanoid", - "cache-dir" => "/var/cache/sanoid" + "cache-dir" => "/var/cache/sanoid", + "run-dir" => "/var/run/sanoid" ); GetOptions(\%args, "verbose", "debug", "cron", "readonly", "quiet", - "configdir=s", "cache-dir=s", + "configdir=s", "cache-dir=s", "run-dir=s", "monitor-health", "force-update", "monitor-snapshots", "take-snapshots", "prune-snapshots", "force-prune", "monitor-capacity" @@ -46,8 +47,10 @@ my $default_conf_file = "$args{'configdir'}/sanoid.defaults.conf"; my %config = init($conf_file,$default_conf_file); my $cache_dir = $args{'cache-dir'}; +my $run_dir = $args{'run-dir'}; make_path($cache_dir); +make_path($run_dir); # if we call getsnaps(%config,1) it will forcibly update the cache, TTL or no TTL my $forcecacheupdate = 0; @@ -1373,10 +1376,10 @@ sub get_zpool_capacity { sub checklock { # take argument $lockname. # - # read /var/run/$lockname.lock for a pid on first line and a mutex on second line. 
+ # read $run_dir/$lockname.lock for a pid on first line and a mutex on second line. # - # check process list to see if the pid from /var/run/$lockname.lock is still active with - # the original mutex found in /var/run/$lockname.lock. + # check process list to see if the pid from $run_dir/$lockname.lock is still active with + # the original mutex found in $run_dir/$lockname.lock. # # return: # 0 if lock is present and valid for another process @@ -1388,7 +1391,7 @@ sub checklock { # my $lockname = shift; - my $lockfile = "/var/run/$lockname.lock"; + my $lockfile = "$run_dir/$lockname.lock"; if (! -e $lockfile) { # no lockfile @@ -1441,11 +1444,11 @@ sub checklock { sub removelock { # take argument $lockname. # - # make sure /var/run/$lockname.lock actually belongs to me (contains my pid and mutex) + # make sure $run_dir/$lockname.lock actually belongs to me (contains my pid and mutex) # and remove it if it does, die if it doesn't. my $lockname = shift; - my $lockfile = "/var/run/$lockname.lock"; + my $lockfile = "$run_dir/$lockname.lock"; if (checklock($lockname) == 2) { unlink $lockfile; @@ -1460,11 +1463,11 @@ sub removelock { sub writelock { # take argument $lockname. # - # write a lockfile to /var/run/$lockname.lock with first line + # write a lockfile to $run_dir/$lockname.lock with first line # being my pid and second line being my mutex. my $lockname = shift; - my $lockfile = "/var/run/$lockname.lock"; + my $lockfile = "$run_dir/$lockname.lock"; # die honorably rather than overwriting a valid, existing lock if (! 
checklock($lockname)) { @@ -1668,6 +1671,7 @@ Options: --configdir=DIR Specify a directory to find config file sanoid.conf --cache-dir=DIR Specify a directory to store the zfs snapshot cache + --run-dir=DIR Specify a directory for temporary files such as lock files --cron Creates snapshots and purges expired snapshots --verbose Prints out additional information during a sanoid run From 2de006072f0dc3b40f4caf2021253b1763079eba Mon Sep 17 00:00:00 2001 From: Christoph Klaffl Date: Fri, 17 Jan 2020 01:04:16 +0100 Subject: [PATCH 10/35] fixed missing ; --- sanoid | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/sanoid b/sanoid index 2720f60..ece2380 100755 --- a/sanoid +++ b/sanoid @@ -1402,7 +1402,7 @@ sub checklock { close FH; # if we didn't get exactly 2 items from the lock file there is a problem if (scalar(@lock) != 2) { - warn "WARN: deleting invalid $lockfile\n" + warn "WARN: deleting invalid $lockfile\n"; unlink $lockfile; return 1 } From bb30496d19affedace5180e282f37b39c151ac84 Mon Sep 17 00:00:00 2001 From: croadfeldt Date: Sat, 18 Jan 2020 01:52:12 -0600 Subject: [PATCH 11/35] Fixed Typo in arguments. Removed errant A in zfs list command line arguments. 
--- syncoid | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/syncoid b/syncoid index bfbff7d..2eef326 100755 --- a/syncoid +++ b/syncoid @@ -1470,7 +1470,7 @@ sub getsnaps() { $fsescaped = escapeshellparam($fsescaped); } - my $getsnapcmd = "$rhost $mysudocmd $zfscmd get A-Hpd 1 -t snapshot guid,creation $fsescaped"; + my $getsnapcmd = "$rhost $mysudocmd $zfscmd get -Hpd 1 -t snapshot guid,creation $fsescaped"; if ($debug) { $getsnapcmd = "$getsnapcmd |"; print "DEBUG: getting list of snapshots on $fs using $getsnapcmd...\n"; From d3309d311f26b029753f98f67212c76c073a0937 Mon Sep 17 00:00:00 2001 From: Christoph Klaffl Date: Wed, 22 Jan 2020 17:22:44 +0100 Subject: [PATCH 12/35] post install script for debian package to remove old unused snapshot cache file --- packages/debian/postinst | 4 ++++ 1 file changed, 4 insertions(+) create mode 100755 packages/debian/postinst diff --git a/packages/debian/postinst b/packages/debian/postinst new file mode 100755 index 0000000..d23e6bc --- /dev/null +++ b/packages/debian/postinst @@ -0,0 +1,4 @@ +#!/bin/bash + +# remove old cache file +[ -f /var/cache/sanoidsnapshots.txt ] && rm /var/cache/sanoidsnapshots.txt From dedf35df85b8ab788bc5722cf16ec33ac9ff4220 Mon Sep 17 00:00:00 2001 From: Christoph Klaffl Date: Fri, 31 Jan 2020 09:13:18 +0100 Subject: [PATCH 13/35] fix debian postinst in the case of non existing legacy cache file --- packages/debian/postinst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/packages/debian/postinst b/packages/debian/postinst index d23e6bc..0d6142f 100755 --- a/packages/debian/postinst +++ b/packages/debian/postinst @@ -1,4 +1,4 @@ #!/bin/bash # remove old cache file -[ -f /var/cache/sanoidsnapshots.txt ] && rm /var/cache/sanoidsnapshots.txt +[ -f /var/cache/sanoidsnapshots.txt ] && rm /var/cache/sanoidsnapshots.txt || true From 0b582f6200181b09d0fd09d1e797fa848393ba1e Mon Sep 17 00:00:00 2001 From: Christoph Klaffl Date: Wed, 12 Feb 2020 17:48:42 +0100 Subject: 
[PATCH 14/35] group snapshot creation together so pre/post scripts are only run once per dataset and prepare for future atomic grouping feature --- README.md | 107 ++++++++++++++++++++++++++++----------- sanoid | 147 +++++++++++++++++++++++++++++++----------------------- 2 files changed, 162 insertions(+), 92 deletions(-) diff --git a/README.md b/README.md index e35ef27..6f631df 100644 --- a/README.md +++ b/README.md @@ -111,48 +111,97 @@ Which would be enough to tell sanoid to take and keep 36 hourly snapshots, 30 da ### Sanoid script hooks -There are 3 scripts which can optionally be executed at various stages in the lifecycle of a snapshot: +There are three script types which can optionally be executed at various stages in the lifecycle of a snapshot: -##### `pre_snapshot_script` +#### `pre_snapshot_script` -This script will be executed before a snapshot is taken. The following environment variables with be passed: +Will be executed before the snapshot(s) of a single dataset are taken. The following environment variables are passed: -| Env vars | Description -| ----------------- | ----------- -| `SANOID_SCRIPT` | The type of script being executed, one of `pre`, `post`, or `prune`. Allows for one script to be used for multiple tasks -| `SANOID_TARGET` | The dataset about to be snapshot -| `SANOID_SNAPNAME` | The name of the snapshot that will be taken (does not include the dataset name) -| `SANOID_TYPE` | The type of snapshot to be taken (yearly, monthly, weekly, daily, hourly, frequently) -| `SANOID_BATCH` | All the snapshots which will be taken against this dataset (does not include the dataset name), joined by commas. Note that not all of the snapshots will have been taken. For example, monthly is taken before weekly, but weekly is still included when `SANOID_TYPE` is monthly. It is guaranteed to take snapshots in ascending frequency: yearly, monthly, ... 
frequently +| Env vars | Description | +| ----------------- | ----------- | +| `SANOID_SCRIPT` | The type of script being executed, one of `pre`, `post`, or `prune`. Allows for one script to be used for multiple tasks | +| `SANOID_TARGET` | **DEPRECATED** The dataset about to be snapshot (only the first dataset will be provided) | +| `SANOID_TARGETS` | Comma separated list of all datasets to be snapshoted (currently only a single dataset, multiple datasets will be possible later with atomic groups) | +| `SANOID_SNAPNAME` | **DEPRECATED** The name of the snapshot that will be taken (only the first name will be provided, does not include the dataset name) | +| `SANOID_SNAPNAMES` | Comma separated list of all snapshot names that will be taken (does not include the dataset name) | +| `SANOID_TYPES` | Comma separated list of all snapshot types to be taken (yearly, monthly, weekly, daily, hourly, frequently) | -If the script returns a non-zero exit code, the snapshot will not be taken unless `no_inconsistent_snapshot` is false. +If the script returns a non-zero exit code, the snapshot(s) will not be taken unless `no_inconsistent_snapshot` is false. -##### `post_snapshot_script` +#### `post_snapshot_script` -This script will be executed when: +Will be executed when: - The pre-snapshot script succeeded or - The pre-snapshot script failed and `force_post_snapshot_script` is true. 
-| Env vars | Description -| -------------------- | ----------- -| `SANOID_SCRIPT` | as above | -| `SANOID_TARGET` | as above | -| `SANOID_SNAPNAME` | as above | -| `SANOID_TYPE` | as above | -| `SANOID_BATCH` | as above | +| Env vars | Description | +| -------------------- | ----------- | +| `SANOID_SCRIPT` | as above | +| `SANOID_TARGET` | **DEPRECATED** as above | +| `SANOID_TARGETS` | as above | +| `SANOID_SNAPNAME` | **DEPRECATED** as above | +| `SANOID_SNAPNAMES` | as above | +| `SANOID_TYPES` | as above | | `SANOID_PRE_FAILURE` | This will indicate if the pre-snapshot script failed | +#### `pruning_script` -##### `pruning_script` +Will be executed after a snapshot is successfully deleted. The following environment variables will be passed: -This script will be executed after a snapshot is successfully deleted. The following environment variables will be passed: +| Env vars | Description | +| ----------------- | ----------- | +| `SANOID_SCRIPT` | as above | +| `SANOID_TARGET` | as above | +| `SANOID_SNAPNAME` | as above | -| Env vars | Description -| ----------------- | ----------- -| `SANOID_SCRIPT` | as above | -| `SANOID_TARGET` | as above | -| `SANOID_SNAPNAME` | as above | + +#### example + +**sanoid.conf**: +``` +... +[sanoid-test-0] + use_template = production + recursive = yes + pre_snapshot_script = /tmp/debug.sh + post_snapshot_script = /tmp/debug.sh + pruning_script = /tmp/debug.sh +... +``` + +**verbose sanoid output**: +``` +... +executing pre_snapshot_script '/tmp/debug.sh' on dataset 'sanoid-test-0' +taking snapshot sanoid-test-0@autosnap_2020-02-12_14:49:33_yearly +taking snapshot sanoid-test-0@autosnap_2020-02-12_14:49:33_monthly +taking snapshot sanoid-test-0@autosnap_2020-02-12_14:49:33_daily +taking snapshot sanoid-test-0@autosnap_2020-02-12_14:49:33_hourly +executing post_snapshot_script '/tmp/debug.sh' on dataset 'sanoid-test-0' +... 
+``` + +**pre script env variables**: +``` +SANOID_SCRIPT=pre +SANOID_TARGET=sanoid-test-0/b/bb +SANOID_TARGETS=sanoid-test-0/b/bb +SANOID_SNAPNAME=autosnap_2020-02-12_14:49:32_yearly +SANOID_SNAPNAMES=autosnap_2020-02-12_14:49:32_yearly,autosnap_2020-02-12_14:49:32_monthly,autosnap_2020-02-12_14:49:32_daily,autosnap_2020-02-12_14:49:32_hourly +SANOID_TYPES=yearly,monthly,daily,hourly +``` + +**post script env variables**: +``` +SANOID_SCRIPT=post +SANOID_TARGET=sanoid-test-0/b/bb +SANOID_TARGETS=sanoid-test-0/b/bb +SANOID_SNAPNAME=autosnap_2020-02-12_14:49:32_yearly +SANOID_SNAPNAMES=autosnap_2020-02-12_14:49:32_yearly,autosnap_2020-02-12_14:49:32_monthly,autosnap_2020-02-12_14:49:32_daily,autosnap_2020-02-12_14:49:32_hourly +SANOID_TYPES=yearly,monthly,daily,hourly +SANOID_PRE_FAILURE=0 +``` ---------- @@ -237,7 +286,7 @@ As of 1.4.18, syncoid also automatically supports and enables resume of interrup + --source-bwlimit - This is the bandwidth limit in bytes (kbytes, mbytes, etc) per second imposed upon the source. This is mainly used if the target does not have mbuffer installed, but bandwidth limits are desired. + This is the bandwidth limit in bytes (kbytes, mbytes, etc) per second imposed upon the source. This is mainly used if the target does not have mbuffer installed, but bandwidth limits are desired. + --target-bw-limit @@ -257,7 +306,7 @@ As of 1.4.18, syncoid also automatically supports and enables resume of interrup + --create-bookmark - This argument tells syncoid to create a zfs bookmark for the newest snapshot after it got replicated successfully. The bookmark name will be equal to the snapshot name. Only works in combination with the --no-sync-snap option. This can be very useful for irregular replication where the last matching snapshot on the source was already deleted but the bookmark remains so a replication is still possible. + This argument tells syncoid to create a zfs bookmark for the newest snapshot after it got replicated successfully. 
The bookmark name will be equal to the snapshot name. Only works in combination with the --no-sync-snap option. This can be very useful for irregular replication where the last matching snapshot on the source was already deleted but the bookmark remains so a replication is still possible. + --no-clone-rollback diff --git a/sanoid b/sanoid index 0b41b18..ea2711a 100755 --- a/sanoid +++ b/sanoid @@ -369,7 +369,7 @@ sub take_snapshots { my %datestamp = get_date(); my $forcecacheupdate = 0; - my @newsnaps; + my %newsnapsgroup; # get utc timestamp of the current day for DST check my $daystartUtc = timelocal(0, 0, 0, $datestamp{'mday'}, ($datestamp{'mon'}-1), $datestamp{'year'}); @@ -392,9 +392,8 @@ sub take_snapshots { my $path = $config{$section}{'path'}; my @types = ('yearly','monthly','weekly','daily','hourly','frequently'); - my @batch = (); - foreach my $type (@types) { + foreach my $type (@types) { if ($config{$section}{$type} > 0) { my $newestage; # in seconds @@ -514,44 +513,57 @@ sub take_snapshots { my $maxage = time()-$lastpreferred; if ( $newestage > $maxage ) { - # update to most current possible datestamp - %datestamp = get_date(); # print "we should have had a $type snapshot of $path $maxage seconds ago; most recent is $newestage seconds old.\n"; - my $snap = { - 'dataset' => $path, - 'snapshot' => "autosnap_$datestamp{'sortable'}_$type", - 'recursive' => $config{$section}{'zfs_recursion'}, # use zfs (atomic) recursion if specified in config - 'handleDst' => $handleDst, - 'type' => $type, - 'batch' => \@batch, # Reference the source array, because we may be adding to it in subsequent loops - }; - push(@batch, $snap->{snapshot}); - push(@newsnaps, $snap); + if (!exists $newsnapsgroup{$path}) { + $newsnapsgroup{$path} = { + 'recursive' => $config{$section}{'zfs_recursion'}, + 'handleDst' => $handleDst, + 'datasets' => [$path], # for later atomic grouping, currently only a one element array + 'types' => [] + }; + } + + push(@{$newsnapsgroup{$path}{'types'}}, 
$type); } } } } - if ( (scalar(@newsnaps)) > 0) { - foreach my $snapData ( @newsnaps ) { - my $dataset = $snapData->{dataset}; - my $snapname = $snapData->{snapshot}; + if (%newsnapsgroup) { + while ((my $path, my $snapData) = each(%newsnapsgroup)) { my $recursiveFlag = $snapData->{recursive}; my $dstHandling = $snapData->{handleDst}; - my $batch = join(",", @{$snapData->{batch}}); + + my @datasets = @{$snapData->{datasets}}; + my $dataset = $datasets[0]; + my @types = @{$snapData->{types}}; + + # same timestamp for all snapshots types (daily, hourly, ...) + my %datestamp = get_date(); + my @snapshots; + + foreach my $type (@types) { + my $snapname = "autosnap_$datestamp{'sortable'}_$type"; + push(@snapshots, $snapname); + } + + my $datasetString = join(",", @datasets); + my $typeString = join(",", @types); + my $snapshotString = join(",", @snapshots); + my $extraMessage = ""; if ($recursiveFlag) { $extraMessage = " (zfs recursive)"; } - my $snap = "$dataset\@$snapname"; my $presnapshotfailure = 0; my $ret = 0; if ($config{$dataset}{'pre_snapshot_script'}) { $ENV{'SANOID_TARGET'} = $dataset; - $ENV{'SANOID_SNAPNAME'} = $snapname; - $ENV{'SANOID_TYPE'} = $snapData->{type}; - $ENV{'SANOID_BATCH'} = $batch; + $ENV{'SANOID_TARGETS'} = $datasetString; + $ENV{'SANOID_SNAPNAME'} = @snapshots[0]; + $ENV{'SANOID_SNAPNAMES'} = $snapshotString; + $ENV{'SANOID_TYPES'} = $typeString; $ENV{'SANOID_SCRIPT'} = 'pre'; if ($args{'verbose'}) { print "executing pre_snapshot_script '".$config{$dataset}{'pre_snapshot_script'}."' on dataset '$dataset'\n"; } @@ -560,9 +572,10 @@ sub take_snapshots { } delete $ENV{'SANOID_TARGET'}; + delete $ENV{'SANOID_TARGETS'}; delete $ENV{'SANOID_SNAPNAME'}; - delete $ENV{'SANOID_TYPE'}; - delete $ENV{'SANOID_BATCH'}; + delete $ENV{'SANOID_SNAPNAMES'}; + delete $ENV{'SANOID_TYPES'}; delete $ENV{'SANOID_SCRIPT'}; if ($ret != 0) { @@ -571,49 +584,56 @@ sub take_snapshots { $presnapshotfailure = 1; } } - if ($args{'verbose'}) { print "taking snapshot 
$snap$extraMessage\n"; } - if (!$args{'readonly'}) { - my $stderr; - my $exit; - ($stderr, $exit) = tee_stderr { - if ($recursiveFlag) { - system($zfs, "snapshot", "-r", "$snap"); - } else { - system($zfs, "snapshot", "$snap"); - } - }; - $exit == 0 or do { - if ($dstHandling) { - if ($stderr =~ /already exists/) { - $exit = 0; - $snap =~ s/_([a-z]+)$/dst_$1/g; - if ($args{'verbose'}) { print "taking dst snapshot $snap$extraMessage\n"; } - if ($recursiveFlag) { - system($zfs, "snapshot", "-r", "$snap") == 0 - or warn "CRITICAL ERROR: $zfs snapshot -r $snap failed, $?"; - } else { - system($zfs, "snapshot", "$snap") == 0 - or warn "CRITICAL ERROR: $zfs snapshot $snap failed, $?"; + foreach my $snap (@snapshots) { + $snap = "$dataset\@$snap"; + if ($args{'verbose'}) { print "taking snapshot $snap$extraMessage\n"; } + + if (!$args{'readonly'}) { + my $stderr; + my $exit; + ($stderr, $exit) = tee_stderr { + if ($recursiveFlag) { + system($zfs, "snapshot", "-r", "$snap"); + } else { + system($zfs, "snapshot", "$snap"); + } + }; + + $exit == 0 or do { + if ($dstHandling) { + if ($stderr =~ /already exists/) { + $exit = 0; + $snap =~ s/_([a-z]+)$/dst_$1/g; + if ($args{'verbose'}) { print "taking dst snapshot $snap$extraMessage\n"; } + if ($recursiveFlag) { + system($zfs, "snapshot", "-r", "$snap") == 0 + or warn "CRITICAL ERROR: $zfs snapshot -r $snap failed, $?"; + } else { + system($zfs, "snapshot", "$snap") == 0 + or warn "CRITICAL ERROR: $zfs snapshot $snap failed, $?"; + } } } - } - }; + }; - $exit == 0 or do { - if ($recursiveFlag) { - warn "CRITICAL ERROR: $zfs snapshot -r $snap failed, $?"; - } else { - warn "CRITICAL ERROR: $zfs snapshot $snap failed, $?"; - } - }; + $exit == 0 or do { + if ($recursiveFlag) { + warn "CRITICAL ERROR: $zfs snapshot -r $snap failed, $?"; + } else { + warn "CRITICAL ERROR: $zfs snapshot $snap failed, $?"; + } + }; + } } + if ($config{$dataset}{'post_snapshot_script'}) { if (!$presnapshotfailure or 
$config{$dataset}{'force_post_snapshot_script'}) { $ENV{'SANOID_TARGET'} = $dataset; - $ENV{'SANOID_SNAPNAME'} = $snapname; - $ENV{'SANOID_TYPE'} = $snapData->{type}; - $ENV{'SANOID_BATCH'} = $batch; + $ENV{'SANOID_TARGETS'} = $datasetString; + $ENV{'SANOID_SNAPNAME'} = @snapshots[0]; + $ENV{'SANOID_SNAPNAMES'} = $snapshotString; + $ENV{'SANOID_TYPES'} = $typeString; $ENV{'SANOID_SCRIPT'} = 'post'; $ENV{'SANOID_PRE_FAILURE'} = $presnapshotfailure; if ($args{'verbose'}) { print "executing post_snapshot_script '".$config{$dataset}{'post_snapshot_script'}."' on dataset '$dataset'\n"; } @@ -623,9 +643,10 @@ sub take_snapshots { } delete $ENV{'SANOID_TARGET'}; + delete $ENV{'SANOID_TARGETS'}; delete $ENV{'SANOID_SNAPNAME'}; - delete $ENV{'SANOID_TYPE'}; - delete $ENV{'SANOID_BATCH'}; + delete $ENV{'SANOID_SNAPNAMES'}; + delete $ENV{'SANOID_TYPES'}; delete $ENV{'SANOID_SCRIPT'}; delete $ENV{'SANOID_PRE_FAILURE'}; } From 1bad3cd25b9282947e03ee1d2d6962ee326933b3 Mon Sep 17 00:00:00 2001 From: Christoph Klaffl Date: Wed, 12 Feb 2020 18:24:08 +0100 Subject: [PATCH 15/35] append timezone offset to the syncoid snapshot name to fix DST collisions --- syncoid | 17 +++++++++++++++-- 1 file changed, 15 insertions(+), 2 deletions(-) diff --git a/syncoid b/syncoid index 2eef326..2663bea 100755 --- a/syncoid +++ b/syncoid @@ -1748,7 +1748,19 @@ sub getsendsize { } sub getdate { - my ($sec,$min,$hour,$mday,$mon,$year,$wday,$yday,$isdst) = localtime(time); + my @time = localtime(time); + + # get timezone info + my $offset = timegm(@time) - timelocal(@time); + my $sign = ''; # + is not allowed in a snapshot name + if ($offset < 0) { + $sign = '-'; + $offset = abs($offset); + } + my $hours = int($offset / 3600); + my $minutes = int($offset / 60) - $hours * 60; + + my ($sec,$min,$hour,$mday,$mon,$year,$wday,$yday,$isdst) = @time; $year += 1900; my %date; $date{'unix'} = (((((((($year - 1971) * 365) + $yday) * 24) + $hour) * 60) + $min) * 60) + $sec; @@ -1758,7 +1770,8 @@ sub getdate { 
$date{'hour'} = sprintf ("%02u", $hour); $date{'mday'} = sprintf ("%02u", $mday); $date{'mon'} = sprintf ("%02u", ($mon + 1)); - $date{'stamp'} = "$date{'year'}-$date{'mon'}-$date{'mday'}:$date{'hour'}:$date{'min'}:$date{'sec'}"; + $date{'tzoffset'} = sprintf ("GMT%s%02d:%02u", $sign, $hours, $minutes); + $date{'stamp'} = "$date{'year'}-$date{'mon'}-$date{'mday'}:$date{'hour'}:$date{'min'}:$date{'sec'}-$date{'tzoffset'}"; return %date; } From ef38110d69c771607978a2a4b49103551846ff48 Mon Sep 17 00:00:00 2001 From: Christoph Klaffl Date: Wed, 12 Feb 2020 18:35:03 +0100 Subject: [PATCH 16/35] instruct to use the latest stable release instead of master --- INSTALL.md | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/INSTALL.md b/INSTALL.md index bc56558..57f225b 100644 --- a/INSTALL.md +++ b/INSTALL.md @@ -31,6 +31,8 @@ Clone this repo, build the debian package and install it (alternatively you can # Download the repo as root to avoid changing permissions later sudo git clone https://github.com/jimsalterjrs/sanoid.git cd sanoid +# checkout latest stable release or stay on master for bleeding edge stuff (but expect bugs!) +git checkout $(git tag | grep "^v" | tail -n 1) ln -s packages/debian . dpkg-buildpackage -uc -us apt install ../sanoid_*_all.deb @@ -60,6 +62,8 @@ Clone this repo, then put the executables and config files into the appropriate # Download the repo as root to avoid changing permissions later sudo git clone https://github.com/jimsalterjrs/sanoid.git cd sanoid +# checkout latest stable release or stay on master for bleeding edge stuff (but expect bugs!) 
+git checkout $(git tag | grep "^v" | tail -n 1) # Install the executables sudo cp sanoid syncoid findoid sleepymutex /usr/local/sbin # Create the config directory From a3f242d0d411b2e08643c16f7385fd022a798038 Mon Sep 17 00:00:00 2001 From: Christoph Klaffl Date: Wed, 12 Feb 2020 20:10:28 +0100 Subject: [PATCH 17/35] added cron setup example --- INSTALL.md | 11 +++++++++++ README.md | 6 +++--- 2 files changed, 14 insertions(+), 3 deletions(-) diff --git a/INSTALL.md b/INSTALL.md index bc56558..b29987b 100644 --- a/INSTALL.md +++ b/INSTALL.md @@ -167,6 +167,17 @@ pkg install p5-Config-Inifiles p5-Capture-Tiny pv mbuffer lzop 3. Create the config directory `/etc/sanoid` and put `sanoid.defaults.conf` in there, and create `sanoid.conf` in it too 4. Create a cron job or a systemd timer that runs `sanoid --cron` once per minute +## cron + +If you use cron there is the need to ensure that only one instance of sanoid is run at any time (or else there will be funny error messages about missing snapshots, ...). It's also good practice to separate the snapshot taking and pruning so the latter won't block the former in case of long running pruning operations. Following is the recommended setup for a standard install: + +``` +*/15 * * * * root flock -n /var/run/sanoid/cron-take.lock -c "TZ=UTC sanoid --take-snapshots" +*/15 * * * * root flock -n /var/run/sanoid/cron-prune.lock -c "sanoid --prune-snapshots" +``` + +Adapt the timer interval to the lowest configured snapshot interval. + # Configuration **Sanoid** won't do anything useful unless you tell it how to handle your ZFS datasets in `/etc/sanoid/sanoid.conf`. diff --git a/README.md b/README.md index c2b502b..d9e42ab 100644 --- a/README.md +++ b/README.md @@ -4,7 +4,7 @@

sanoid rollback demo
(Real time demo: rolling back a full-scale cryptomalware infection in seconds!)

-More prosaically, you can use Sanoid to create, automatically thin, and monitor snapshots and pool health from a single eminently human-readable TOML config file at /etc/sanoid/sanoid.conf. (Sanoid also requires a "defaults" file located at /etc/sanoid/sanoid.defaults.conf, which is not user-editable.) A typical Sanoid system would have a single cron job: +More prosaically, you can use Sanoid to create, automatically thin, and monitor snapshots and pool health from a single eminently human-readable TOML config file at /etc/sanoid/sanoid.conf. (Sanoid also requires a "defaults" file located at /etc/sanoid/sanoid.defaults.conf, which is not user-editable.) A typical Sanoid system would have a single cron job but see INSTALL.md for more details: ``` * * * * * TZ=UTC /usr/local/bin/sanoid --cron ``` @@ -192,7 +192,7 @@ As of 1.4.18, syncoid also automatically supports and enables resume of interrup + --source-bwlimit - This is the bandwidth limit in bytes (kbytes, mbytes, etc) per second imposed upon the source. This is mainly used if the target does not have mbuffer installed, but bandwidth limits are desired. + This is the bandwidth limit in bytes (kbytes, mbytes, etc) per second imposed upon the source. This is mainly used if the target does not have mbuffer installed, but bandwidth limits are desired. + --target-bw-limit @@ -212,7 +212,7 @@ As of 1.4.18, syncoid also automatically supports and enables resume of interrup + --create-bookmark - This argument tells syncoid to create a zfs bookmark for the newest snapshot after it got replicated successfully. The bookmark name will be equal to the snapshot name. Only works in combination with the --no-sync-snap option. This can be very useful for irregular replication where the last matching snapshot on the source was already deleted but the bookmark remains so a replication is still possible. + This argument tells syncoid to create a zfs bookmark for the newest snapshot after it got replicated successfully.
The bookmark name will be equal to the snapshot name. Only works in combination with the --no-sync-snap option. This can be very useful for irregular replication where the last matching snapshot on the source was already deleted but the bookmark remains so a replication is still possible. + --no-clone-rollback From 3a7f468939f5fdcd9bd97860539836ef41cb014d Mon Sep 17 00:00:00 2001 From: Christoph Klaffl Date: Wed, 12 Feb 2020 20:20:46 +0100 Subject: [PATCH 18/35] extended install instructions for centos if perl dependencies can't be found --- INSTALL.md | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/INSTALL.md b/INSTALL.md index bc56558..021b5b7 100644 --- a/INSTALL.md +++ b/INSTALL.md @@ -52,6 +52,11 @@ Install prerequisite software: sudo yum install -y epel-release git # Install the packages that Sanoid depends on: sudo yum install -y perl-Config-IniFiles perl-Data-Dumper perl-Capture-Tiny lzop mbuffer mhash pv +# if the perl depencies can't be found in the configured repositories you can install them from CPAN manually: +sudo dnf install perl-CPAN perl-CPAN +cpan # answer the questions and past the following lines +# install Capture::Tiny +# install Config::IniFiles ``` Clone this repo, then put the executables and config files into the appropriate directories: From d1a161ca722bed2992d04c87a75c44a5028f3974 Mon Sep 17 00:00:00 2001 From: Christoph Klaffl Date: Wed, 12 Feb 2020 20:30:52 +0100 Subject: [PATCH 19/35] fix typo in argument description --- README.md | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/README.md b/README.md index c2b502b..23179c4 100644 --- a/README.md +++ b/README.md @@ -192,9 +192,9 @@ As of 1.4.18, syncoid also automatically supports and enables resume of interrup + --source-bwlimit - This is the bandwidth limit in bytes (kbytes, mbytes, etc) per second imposed upon the source. This is mainly used if the target does not have mbuffer installed, but bandwidth limits are desired. 
+ This is the bandwidth limit in bytes (kbytes, mbytes, etc) per second imposed upon the source. This is mainly used if the target does not have mbuffer installed, but bandwidth limits are desired. -+ --target-bw-limit ++ --target-bwlimit This is the bandwidth limit in bytes (kbytes, mbytesm etc) per second imposed upon the target. This is mainly used if the source does not have mbuffer installed, but bandwidth limits are desired. @@ -212,7 +212,7 @@ As of 1.4.18, syncoid also automatically supports and enables resume of interrup + --create-bookmark - This argument tells syncoid to create a zfs bookmark for the newest snapshot after it got replicated successfully. The bookmark name will be equal to the snapshot name. Only works in combination with the --no-sync-snap option. This can be very useful for irregular replication where the last matching snapshot on the source was already deleted but the bookmark remains so a replication is still possible. + This argument tells syncoid to create a zfs bookmark for the newest snapshot after it got replicated successfully. The bookmark name will be equal to the snapshot name. Only works in combination with the --no-sync-snap option. This can be very useful for irregular replication where the last matching snapshot on the source was already deleted but the bookmark remains so a replication is still possible. + --no-clone-rollback From dd95496cd0b8e091e2efed7322fc58396ead4cc4 Mon Sep 17 00:00:00 2001 From: Christoph Klaffl Date: Wed, 12 Feb 2020 20:38:21 +0100 Subject: [PATCH 20/35] document the atomic way recursively take snapshots --- sanoid.conf | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/sanoid.conf b/sanoid.conf index 6bd5c62..db146dd 100644 --- a/sanoid.conf +++ b/sanoid.conf @@ -28,6 +28,11 @@ use_template = demo +# you can also handle datasets recursively in an atomic way without the possibility to override settings for child datasets. 
+[zpoolname/parent2] + use_template = production + recursive = zfs + ############################# From cf7ab69e69c7cdf4a63d876d89633d768fc6a262 Mon Sep 17 00:00:00 2001 From: Christoph Klaffl Date: Wed, 12 Feb 2020 21:14:08 +0100 Subject: [PATCH 21/35] added note about capacity limit overriding --- sanoid.defaults.conf | 1 + 1 file changed, 1 insertion(+) diff --git a/sanoid.defaults.conf b/sanoid.defaults.conf index a9ca382..589d4cd 100644 --- a/sanoid.defaults.conf +++ b/sanoid.defaults.conf @@ -108,5 +108,6 @@ yearly_warn = 0 yearly_crit = 0 # default limits for capacity checks (if set to 0, limit will not be checked) +# for overriding these values one needs to specify them in a root pool section! ([tank]\n ...) capacity_warn = 80 capacity_crit = 95 From eabe3aa791100e6be76c4487b63750faa44ae038 Mon Sep 17 00:00:00 2001 From: Christoph Klaffl Date: Wed, 12 Feb 2020 21:31:21 +0100 Subject: [PATCH 22/35] added note about the default options if some interval types aren't specified --- README.md | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/README.md b/README.md index c2b502b..f27c347 100644 --- a/README.md +++ b/README.md @@ -39,6 +39,8 @@ And its /etc/sanoid/sanoid.conf might look something like this: Which would be enough to tell sanoid to take and keep 36 hourly snapshots, 30 dailies, 3 monthlies, and no yearlies for all datasets under data/images (but not data/images itself, since process_children_only is set). Except in the case of data/images/win7, which follows the same template (since it's a child of data/images) but only keeps 4 hourlies for whatever reason.
+**Note**: Be aware that if you don't specify some interval options the defaults will be used (from /etc/sanoid/sanoid.defaults.conf) + ##### Sanoid Command Line Options + --cron @@ -192,7 +194,7 @@ As of 1.4.18, syncoid also automatically supports and enables resume of interrup + --source-bwlimit - This is the bandwidth limit in bytes (kbytes, mbytes, etc) per second imposed upon the source. This is mainly used if the target does not have mbuffer installed, but bandwidth limits are desired. + This is the bandwidth limit in bytes (kbytes, mbytes, etc) per second imposed upon the source. This is mainly used if the target does not have mbuffer installed, but bandwidth limits are desired. + --target-bw-limit @@ -212,7 +214,7 @@ As of 1.4.18, syncoid also automatically supports and enables resume of interrup + --create-bookmark - This argument tells syncoid to create a zfs bookmark for the newest snapshot after it got replicated successfully. The bookmark name will be equal to the snapshot name. Only works in combination with the --no-sync-snap option. This can be very useful for irregular replication where the last matching snapshot on the source was already deleted but the bookmark remains so a replication is still possible. + This argument tells syncoid to create a zfs bookmark for the newest snapshot after it got replicated successfully. The bookmark name will be equal to the snapshot name. Only works in combination with the --no-sync-snap option. This can be very useful for irregular replication where the last matching snapshot on the source was already deleted but the bookmark remains so a replication is still possible. 
+ --no-clone-rollback From 13ba2b43fa35c838dcf5793ba279a3f1903ec416 Mon Sep 17 00:00:00 2001 From: Christoph Klaffl Date: Thu, 13 Feb 2020 01:37:54 +0100 Subject: [PATCH 23/35] fix iszfsbusy check for similar dataset names --- syncoid | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/syncoid b/syncoid index 2eef326..4a336f1 100755 --- a/syncoid +++ b/syncoid @@ -1107,7 +1107,7 @@ sub iszfsbusy { foreach my $process (@processes) { # if ($debug) { print "DEBUG: checking process $process...\n"; } - if ($process =~ /zfs *(receive|recv).*\Q$fs\E/) { + if ($process =~ /zfs *(receive|recv).*\Q$fs\E\Z/) { # there's already a zfs receive process for our target filesystem - return true if ($debug) { print "DEBUG: process $process matches target $fs!\n"; } return 1; From b09d2b66fce49620c8872e9b89e5fdbf72683d9c Mon Sep 17 00:00:00 2001 From: Christoph Klaffl Date: Thu, 13 Feb 2020 09:26:15 +0100 Subject: [PATCH 24/35] typo --- INSTALL.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/INSTALL.md b/INSTALL.md index 021b5b7..8dd96eb 100644 --- a/INSTALL.md +++ b/INSTALL.md @@ -52,7 +52,7 @@ Install prerequisite software: sudo yum install -y epel-release git # Install the packages that Sanoid depends on: sudo yum install -y perl-Config-IniFiles perl-Data-Dumper perl-Capture-Tiny lzop mbuffer mhash pv -# if the perl depencies can't be found in the configured repositories you can install them from CPAN manually: +# if the perl dependencies can't be found in the configured repositories you can install them from CPAN manually: sudo dnf install perl-CPAN perl-CPAN cpan # answer the questions and past the following lines # install Capture::Tiny From 4ebdc9d1738ee7f9f5c0b3822e47bdab2bfa798f Mon Sep 17 00:00:00 2001 From: Christoph Klaffl Date: Thu, 13 Feb 2020 18:31:56 +0100 Subject: [PATCH 25/35] check for an invalid argument combination --- syncoid | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/syncoid b/syncoid index 2eef326..eddfec7 
100755 --- a/syncoid +++ b/syncoid @@ -38,6 +38,16 @@ if (length $args{'sendoptions'}) { pod2usage(2); exit 127; } + + if (defined $args{'recursive'}) { + foreach my $option(@sendoptions) { + if ($option->{option} eq 'R') { + warn "invalid argument combination, zfs send -R and --recursive aren't compatible!"; + pod2usage(2); + exit 127; + } + } + } } my @recvoptions = (); From 909d3f37653302d685d6c2bf8a7ed7df7a500cda Mon Sep 17 00:00:00 2001 From: Christoph Klaffl Date: Fri, 14 Feb 2020 09:37:38 +0100 Subject: [PATCH 26/35] added note for busybox based distributions --- INSTALL.md | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/INSTALL.md b/INSTALL.md index 57f225b..5adbb20 100644 --- a/INSTALL.md +++ b/INSTALL.md @@ -8,6 +8,7 @@ - [Debian/Ubuntu](#debianubuntu) - [CentOS](#centos) - [FreeBSD](#freebsd) + - [Alpine Linux / busybox](#alpine-Linux-busybox-based-distributions) - [Other OSes](#other-oses) - [Configuration](#configuration) - [Sanoid](#sanoid) @@ -158,6 +159,13 @@ pkg install p5-Config-Inifiles p5-Capture-Tiny pv mbuffer lzop * See note about mbuffer and other things in FREEBSD.readme +## Alpine Linux / busybox based distributions + +The busybox implementation of ps is lacking needed arguments so a proper ps program needs to be installed. +For Alpine Linux this can be done with: + +`apk --no-cache add procps` + ## Other OSes **Sanoid** depends on the Perl module Config::IniFiles and will not operate without it. Config::IniFiles may be installed from CPAN, though the project strongly recommends using your distribution's repositories instead. 
From b808a74e2e7d509b3231c41784df4920468e9709 Mon Sep 17 00:00:00 2001 From: Christoph Klaffl Date: Fri, 14 Feb 2020 18:38:38 +0100 Subject: [PATCH 27/35] catch another case where the resume state isn't available anymore --- syncoid | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/syncoid b/syncoid index 2eef326..5e083b5 100755 --- a/syncoid +++ b/syncoid @@ -550,7 +550,10 @@ sub syncdataset { } $exit == 0 or do { - if ($stdout =~ /\Qused in the initial send no longer exists\E/) { + if ( + $stdout =~ /\Qused in the initial send no longer exists\E/ || + $stdout =~ /incremental source [0-9xa-f]+ no longer exists/ + ) { if (!$quiet) { print "WARN: resetting partially receive state because the snapshot source no longer exists\n"; } resetreceivestate($targethost,$targetfs,$targetisroot); # do an normal sync cycle From adc99d034986b399a6fe47cadf0c769441c36a2f Mon Sep 17 00:00:00 2001 From: Christoph Klaffl Date: Mon, 17 Feb 2020 17:43:23 +0100 Subject: [PATCH 28/35] fixed minor warning --- sanoid | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/sanoid b/sanoid index ea2711a..7b972f0 100755 --- a/sanoid +++ b/sanoid @@ -561,7 +561,7 @@ sub take_snapshots { if ($config{$dataset}{'pre_snapshot_script'}) { $ENV{'SANOID_TARGET'} = $dataset; $ENV{'SANOID_TARGETS'} = $datasetString; - $ENV{'SANOID_SNAPNAME'} = @snapshots[0]; + $ENV{'SANOID_SNAPNAME'} = $snapshots[0]; $ENV{'SANOID_SNAPNAMES'} = $snapshotString; $ENV{'SANOID_TYPES'} = $typeString; $ENV{'SANOID_SCRIPT'} = 'pre'; @@ -631,7 +631,7 @@ sub take_snapshots { if (!$presnapshotfailure or $config{$dataset}{'force_post_snapshot_script'}) { $ENV{'SANOID_TARGET'} = $dataset; $ENV{'SANOID_TARGETS'} = $datasetString; - $ENV{'SANOID_SNAPNAME'} = @snapshots[0]; + $ENV{'SANOID_SNAPNAME'} = $snapshots[0]; $ENV{'SANOID_SNAPNAMES'} = $snapshotString; $ENV{'SANOID_TYPES'} = $typeString; $ENV{'SANOID_SCRIPT'} = 'post'; From 0e807386c44dfb8c9ee1d497cdf291bbcf927850 Mon Sep 17 00:00:00 2001 From:
Christoph Klaffl Date: Mon, 20 Apr 2020 19:20:28 +0200 Subject: [PATCH 29/35] use sudo if necessary for checking pool capabilities regarding resumable send --- syncoid | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/syncoid b/syncoid index 2eef326..5c93fe4 100755 --- a/syncoid +++ b/syncoid @@ -1067,11 +1067,11 @@ sub checkcommands { my $resumechkcmd = "$zpoolcmd get -o value -H feature\@extensible_dataset"; if ($debug) { print "DEBUG: checking availability of zfs resume feature on source...\n"; } - $avail{'sourceresume'} = system("$sourcessh $resumechkcmd $srcpool 2>/dev/null | grep '\\(active\\|enabled\\)' >/dev/null 2>&1"); + $avail{'sourceresume'} = system("$sourcessh $sourcesudocmd $resumechkcmd $srcpool 2>/dev/null | grep '\\(active\\|enabled\\)' >/dev/null 2>&1"); $avail{'sourceresume'} = $avail{'sourceresume'} == 0 ? 1 : 0; if ($debug) { print "DEBUG: checking availability of zfs resume feature on target...\n"; } - $avail{'targetresume'} = system("$targetssh $resumechkcmd $dstpool 2>/dev/null | grep '\\(active\\|enabled\\)' >/dev/null 2>&1"); + $avail{'targetresume'} = system("$targetssh $targetsudocmd $resumechkcmd $dstpool 2>/dev/null | grep '\\(active\\|enabled\\)' >/dev/null 2>&1"); $avail{'targetresume'} = $avail{'targetresume'} == 0 ?
1 : 0; if ($avail{'sourceresume'} == 0 || $avail{'targetresume'} == 0) { From d77cfdf4c75688f1732be9b6272d1124cedb1f51 Mon Sep 17 00:00:00 2001 From: Christoph Klaffl Date: Fri, 1 May 2020 17:20:07 +0200 Subject: [PATCH 30/35] implemented option for keeping sync snaps --- syncoid | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/syncoid b/syncoid index 2eef326..702b1c3 100755 --- a/syncoid +++ b/syncoid @@ -25,7 +25,7 @@ GetOptions(\%args, "no-command-checks", "monitor-version", "compress=s", "dumpsn "source-bwlimit=s", "target-bwlimit=s", "sshkey=s", "sshport=i", "sshcipher|c=s", "sshoption|o=s@", "debug", "quiet", "no-stream", "no-sync-snap", "no-resume", "exclude=s@", "skip-parent", "identifier=s", "no-clone-handling", "no-privilege-elevation", "force-delete", "no-clone-rollback", "no-rollback", - "create-bookmark", "pv-options=s" => \$pvoptions, + "create-bookmark", "pv-options=s" => \$pvoptions, "keep-sync-snap", "mbuffer-size=s" => \$mbuffer_size) or pod2usage(2); my %compressargs = %{compressargset($args{'compress'} || 'default')}; # Can't be done with GetOptions arg, as default still needs to be set @@ -837,9 +837,11 @@ sub syncdataset { }; } } else { - # prune obsolete sync snaps on source and target (only if this run created ones). - pruneoldsyncsnaps($sourcehost,$sourcefs,$newsyncsnap,$sourceisroot,keys %{ $snaps{'source'}}); - pruneoldsyncsnaps($targethost,$targetfs,$newsyncsnap,$targetisroot,keys %{ $snaps{'target'}}); + if (!defined $args{'keep-sync-snap'}) { + # prune obsolete sync snaps on source and target (only if this run created ones). 
+ pruneoldsyncsnaps($sourcehost,$sourcefs,$newsyncsnap,$sourceisroot,keys %{ $snaps{'source'}}); + pruneoldsyncsnaps($targethost,$targetfs,$newsyncsnap,$targetisroot,keys %{ $snaps{'target'}}); + } } } # end syncdataset() From 9ab57e9bb437c8f3857c88a94779c96c13c67e19 Mon Sep 17 00:00:00 2001 From: Christoph Klaffl Date: Fri, 1 May 2020 17:26:54 +0200 Subject: [PATCH 31/35] added documentation regarding --keep-sync-snap option --- README.md | 8 ++++++-- syncoid | 1 + 2 files changed, 7 insertions(+), 2 deletions(-) diff --git a/README.md b/README.md index c2b502b..89ddd4a 100644 --- a/README.md +++ b/README.md @@ -192,7 +192,7 @@ As of 1.4.18, syncoid also automatically supports and enables resume of interrup + --source-bwlimit - This is the bandwidth limit in bytes (kbytes, mbytes, etc) per second imposed upon the source. This is mainly used if the target does not have mbuffer installed, but bandwidth limits are desired. + This is the bandwidth limit in bytes (kbytes, mbytes, etc) per second imposed upon the source. This is mainly used if the target does not have mbuffer installed, but bandwidth limits are desired. + --target-bw-limit @@ -210,9 +210,13 @@ As of 1.4.18, syncoid also automatically supports and enables resume of interrup This argument tells syncoid to restrict itself to existing snapshots, instead of creating a semi-ephemeral syncoid snapshot at execution time. Especially useful in multi-target (A->B, A->C) replication schemes, where you might otherwise accumulate a large number of foreign syncoid snapshots. ++ --keep-sync-snap + + This argument tells syncoid to skip pruning old snapshots created and used by syncoid for replication if '--no-sync-snap' isn't specified. + + --create-bookmark - This argument tells syncoid to create a zfs bookmark for the newest snapshot after it got replicated successfully. The bookmark name will be equal to the snapshot name. Only works in combination with the --no-sync-snap option. 
This can be very useful for irregular replication where the last matching snapshot on the source was already deleted but the bookmark remains so a replication is still possible. + This argument tells syncoid to create a zfs bookmark for the newest snapshot after it got replicated successfully. The bookmark name will be equal to the snapshot name. Only works in combination with the --no-sync-snap option. This can be very useful for irregular replication where the last matching snapshot on the source was already deleted but the bookmark remains so a replication is still possible. + --no-clone-rollback diff --git a/syncoid b/syncoid index 702b1c3..d9be7b4 100755 --- a/syncoid +++ b/syncoid @@ -1903,6 +1903,7 @@ Options: --pv-options=OPTIONS Configure how pv displays the progress bar, default '-p -t -e -r -b' --no-stream Replicates using newest snapshot instead of intermediates --no-sync-snap Does not create new snapshot, only transfers existing + --keep-sync-snap Don't destroy created sync snapshots --create-bookmark Creates a zfs bookmark for the newest snapshot on the source after replication succeeds (only works with --no-sync-snap) --no-clone-rollback Does not rollback clones on target --no-rollback Does not rollback clones or snapshots on target (it probably requires a readonly target) From 9544ea7e6a1e21608cc39b81d897980f06f36307 Mon Sep 17 00:00:00 2001 From: jimsalterjrs Date: Sat, 23 May 2020 16:27:29 -0400 Subject: [PATCH 32/35] typo in syncoid help --- syncoid | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/syncoid b/syncoid index 2eef326..a4a5879 100755 --- a/syncoid +++ b/syncoid @@ -1905,8 +1905,8 @@ Options: --no-clone-rollback Does not rollback clones on target --no-rollback Does not rollback clones or snapshots on target (it probably requires a readonly target) --exclude=REGEX Exclude specific datasets which match the given regular expression. 
Can be specified multiple times - --sendoptions=OPTIONS Use advanced options for zfs send (the arguments are filterd as needed), e.g. syncoid --sendoptions="Lc e" sets zfs send -L -c -e ... - --recvoptions=OPTIONS Use advanced options for zfs receive (the arguments are filterd as needed), e.g. syncoid --recvoptions="ux recordsize o compression=lz4" sets zfs receive -u -x recordsize -o compression=lz4 ... + --sendoptions=OPTIONS Use advanced options for zfs send (the arguments are filtered as needed), e.g. syncoid --sendoptions="Lc e" sets zfs send -L -c -e ... + --recvoptions=OPTIONS Use advanced options for zfs receive (the arguments are filtered as needed), e.g. syncoid --recvoptions="ux recordsize o compression=lz4" sets zfs receive -u -x recordsize -o compression=lz4 ... --sshkey=FILE Specifies a ssh key to use to connect --sshport=PORT Connects to remote on a particular port --sshcipher|c=CIPHER Passes CIPHER to ssh to use a particular cipher set From 2c3de61b2b7de9aa30484caa30b0eb0b158f0381 Mon Sep 17 00:00:00 2001 From: Jim Perkins Date: Mon, 3 Aug 2020 02:58:06 -0500 Subject: [PATCH 33/35] Update sanoid.conf keeps system from running sanoid on non existent datasets --- sanoid.conf | 46 +++++++++++++++++++++++----------------------- 1 file changed, 23 insertions(+), 23 deletions(-) diff --git a/sanoid.conf b/sanoid.conf index 6bd5c62..cea5e25 100644 --- a/sanoid.conf +++ b/sanoid.conf @@ -3,29 +3,29 @@ # It should go in /etc/sanoid. # ###################################### -# name your backup modules with the path to their ZFS dataset - no leading slash. -[zpoolname/datasetname] - # pick one or more templates - they're defined (and editable) below. Comma separated, processed in order. - # in this example, template_demo's daily value overrides template_production's daily value. - use_template = production,demo - - # if you want to, you can override settings in the template directly inside module definitions like this. 
- # in this example, we override the template to only keep 12 hourly and 1 monthly snapshot for this dataset. - hourly = 12 - monthly = 1 - -# you can also handle datasets recursively. -[zpoolname/parent] - use_template = production - recursive = yes - # if you want sanoid to manage the child datasets but leave this one alone, set process_children_only. - process_children_only = yes - -# you can selectively override settings for child datasets which already fall under a recursive definition. -[zpoolname/parent/child] - # child datasets already initialized won't be wiped out, so if you use a new template, it will - # only override the values already set by the parent template, not replace it completely. - use_template = demo +## name your backup modules with the path to their ZFS dataset - no leading slash. +#[zpoolname/datasetname] +# # pick one or more templates - they're defined (and editable) below. Comma separated, processed in order. +# # in this example, template_demo's daily value overrides template_production's daily value. +# use_template = production,demo +# +# # if you want to, you can override settings in the template directly inside module definitions like this. +# # in this example, we override the template to only keep 12 hourly and 1 monthly snapshot for this dataset. +# hourly = 12 +# monthly = 1 +# +## you can also handle datasets recursively. +#[zpoolname/parent] +# use_template = production +# recursive = yes +# # if you want sanoid to manage the child datasets but leave this one alone, set process_children_only. +# process_children_only = yes +# +## you can selectively override settings for child datasets which already fall under a recursive definition. +#[zpoolname/parent/child] +# # child datasets already initialized won't be wiped out, so if you use a new template, it will +# # only override the values already set by the parent template, not replace it completely. 
+# use_template = demo From b31a7ba7d8a2b9af46da4a4f70d724c3880bf547 Mon Sep 17 00:00:00 2001 From: kr4z33 Date: Mon, 17 Aug 2020 19:16:26 -1000 Subject: [PATCH 34/35] Update INSTALL.md "build-essential" was required for me to complete the Debian/Ubuntu instructions. --- INSTALL.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/INSTALL.md b/INSTALL.md index 57f225b..9b227d5 100644 --- a/INSTALL.md +++ b/INSTALL.md @@ -21,7 +21,7 @@ Install prerequisite software: ```bash -apt install debhelper libcapture-tiny-perl libconfig-inifiles-perl pv lzop mbuffer +apt install debhelper libcapture-tiny-perl libconfig-inifiles-perl pv lzop mbuffer build-essential ``` From f397687968b5490b85ff269745654a2bbd2bea0e Mon Sep 17 00:00:00 2001 From: Antonio Russo Date: Sun, 1 Nov 2020 10:55:50 -0700 Subject: [PATCH 35/35] Do not require user to be specified for syncoid Extends syncoid remote capabilities to match that of ssh as closely as possible: allow a remote dataset to be specified without a username. - Detect if a remote reference is possible by looking for a : before any / characters. - Check if there are any pool names that might conflict with this name. E.g., 'weird:symbol/ds' might refer to the pool "symbol" on host "weird", and dataset ds. OR it might refer to the local pool "weird:symbol" and dataset ds. - Prefer local pools, matching existing behavior. No preexisting functioning configurations will break. - The name of the control socket is changed slightly. - A bug in the handling of remote datasets with colons in the name is addressed. 
Signed-off-by: Antonio Russo --- syncoid | 49 +++++++++++++++++++++++++++++++++++++++++-------- 1 file changed, 41 insertions(+), 8 deletions(-) diff --git a/syncoid b/syncoid index a4a5879..68587ce 100755 --- a/syncoid +++ b/syncoid @@ -1420,20 +1420,53 @@ sub targetexists { sub getssh { my $fs = shift; - my $rhost; + my $rhost = ""; my $isroot; my $socket; + my $remoteuser = ""; # if we got passed something with an @ in it, we assume it's an ssh connection, eg root@myotherbox if ($fs =~ /\@/) { $rhost = $fs; - $fs =~ s/^\S*\@\S*://; + $fs =~ s/^[^\@:]*\@[^\@:]*://; $rhost =~ s/:\Q$fs\E$//; - my $remoteuser = $rhost; - $remoteuser =~ s/\@.*$//; + $remoteuser = $rhost; + $remoteuser =~ s/\@.*$//; + # do not require a username to be specified + $rhost =~ s/^@//; + } elsif ($fs =~ m{^[^/]*:}) { + # if we got passed something with an : in it, BEFORE any forward slash + # (i.e., not in a dataset name) it MAY be an ssh connection + # but we need to check if there is a pool with that name + my $pool = $fs; + $pool =~ s%/.*$%%; + my ($pools, $error, $exit) = capture { + system("$zfscmd list -d0 -H -oname"); + }; + $rhost = $fs; + if ($exit != 0) { + warn "Unable to enumerate pools (is zfs available?)"; + } else { + foreach (split(/\n/,$pools)) { + if ($_ eq $pool) { + # there's a pool with this name. + $rhost = ""; + last; + } + } + } + if ($rhost ne "") { + # there's no pool that might conflict with this + $rhost =~ s/:.*$//; + $fs =~ s/\Q$rhost\E://; + } + } + + if ($rhost ne "") { if ($remoteuser eq 'root' || $args{'no-privilege-elevation'}) { $isroot = 1; } else { $isroot = 0; } # now we need to establish a persistent master SSH connection - $socket = "/tmp/syncoid-$remoteuser-$rhost-" . time(); + $socket = "/tmp/syncoid-$rhost-" . time(); + open FH, "$sshcmd -M -S $socket -o ControlPersist=1m $args{'sshport'} $rhost exit |"; close FH; @@ -1882,9 +1915,9 @@ syncoid - ZFS snapshot replication tool =head1 SYNOPSIS syncoid [options]... 
SOURCE TARGET - or syncoid [options]... SOURCE USER@HOST:TARGET - or syncoid [options]... USER@HOST:SOURCE TARGET - or syncoid [options]... USER@HOST:SOURCE USER@HOST:TARGET + or syncoid [options]... SOURCE [[USER]@]HOST:TARGET + or syncoid [options]... [[USER]@]HOST:SOURCE TARGET + or syncoid [options]... [[USER]@]HOST:SOURCE [[USER]@]HOST:TARGET SOURCE Source ZFS dataset. Can be either local or remote TARGET Target ZFS dataset. Can be either local or remote