diff --git a/INSTALL b/INSTALL index f0de17b..15c4896 100644 --- a/INSTALL +++ b/INSTALL @@ -30,4 +30,4 @@ strongly recommends using your distribution's repositories instead. On Ubuntu: apt install libconfig-inifiles-perl On CentOS: yum install perl-Config-IniFiles -On FreeBSD: pkg install p5-Config-Inifiles +On FreeBSD: pkg install p5-Config-IniFiles diff --git a/README.md b/README.md index b833dec..395eb73 100644 --- a/README.md +++ b/README.md @@ -28,6 +28,7 @@ And its /etc/sanoid/sanoid.conf might look something like this: ############################# [template_production] + frequently = 0 hourly = 36 daily = 30 monthly = 3 @@ -168,7 +169,7 @@ As of 1.4.18, syncoid also automatically supports and enables resume of interrup + --compress - Currently accepted options: gzip, pigz-fast, pigz-slow, lzo (default) & none. If the selected compression method is unavailable on the source and destination, no compression will be used. + Currently accepted options: gzip, pigz-fast, pigz-slow, zstd-fast, zstd-slow, lz4, lzo (default) & none. If the selected compression method is unavailable on the source and destination, no compression will be used. + --source-bwlimit @@ -198,6 +199,11 @@ As of 1.4.18, syncoid also automatically supports and enables resume of interrup This argument tells syncoid to not use resumeable zfs send/receive streams. ++ --no-clone-handling + + This argument tells syncoid not to recreate clones on the target on the initial sync, and to do a normal replication instead. + + + --dumpsnaps This prints a list of snapshots during the run. diff --git a/packages/debian/rules b/packages/debian/rules index 83eb475..ddd77b0 100755 --- a/packages/debian/rules +++ b/packages/debian/rules @@ -16,4 +16,14 @@ override_dh_auto_install: @mkdir -p $(DESTDIR)/usr/share/doc/sanoid; \ cp sanoid.conf $(DESTDIR)/usr/share/doc/sanoid/sanoid.conf.example; @mkdir -p $(DESTDIR)/lib/systemd/system; \ - cp debian/sanoid.timer $(DESTDIR)/lib/systemd/system; + cp debian/sanoid-prune.service $(DESTDIR)/lib/systemd/system; + +override_dh_installinit: + dh_installinit --noscripts + +override_dh_systemd_enable: + dh_systemd_enable sanoid.timer + dh_systemd_enable sanoid-prune.service + +override_dh_systemd_start: + dh_systemd_start sanoid.timer diff --git a/packages/debian/sanoid-prune.service b/packages/debian/sanoid-prune.service new file mode 100644 index 0000000..c956bd5 --- /dev/null +++ b/packages/debian/sanoid-prune.service @@ -0,0 +1,13 @@ +[Unit] +Description=Cleanup ZFS Pool +Requires=zfs.target +After=zfs.target sanoid.service +ConditionFileNotEmpty=/etc/sanoid/sanoid.conf + +[Service] +Environment=TZ=UTC +Type=oneshot +ExecStart=/usr/sbin/sanoid --prune-snapshots + +[Install] +WantedBy=sanoid.service diff --git a/packages/debian/sanoid.service b/packages/debian/sanoid.service index 2d01bbf..e146354 100644 --- a/packages/debian/sanoid.service +++ b/packages/debian/sanoid.service @@ -7,4 +7,4 @@ ConditionFileNotEmpty=/etc/sanoid/sanoid.conf [Service] Environment=TZ=UTC Type=oneshot -ExecStart=/usr/sbin/sanoid --cron +ExecStart=/usr/sbin/sanoid --take-snapshots diff --git a/packages/rhel/sanoid.spec b/packages/rhel/sanoid.spec index 3a9412f..7d4995d 100644 --- a/packages/rhel/sanoid.spec +++ b/packages/rhel/sanoid.spec @@ -14,7 +14,7 @@ License: GPLv3 URL: https://github.com/jimsalterjrs/sanoid Source0: https://github.com/jimsalterjrs/%{name}/archive/%{git_tag}/%{name}-%{version}.tar.gz -Requires: perl, mbuffer, lzop, pv +Requires: perl, mbuffer, lzop, pv, perl-Config-IniFiles %if 0%{?_with_systemd}
Requires: systemd >= 212 diff --git a/sanoid b/sanoid index 69562f7..027bf4b 100755 --- a/sanoid +++ b/sanoid @@ -5,6 +5,7 @@ # project's Git repository at https://github.com/jimsalterjrs/sanoid/blob/master/LICENSE. $::VERSION = '1.4.18'; +my $MINIMUM_DEFAULTS_VERSION = 2; use strict; use warnings; @@ -31,6 +32,7 @@ if (keys %args < 2) { my $pscmd = '/bin/ps'; my $zfs = '/sbin/zfs'; +my $zpool = '/sbin/zpool'; my $conf_file = "$args{'configdir'}/sanoid.conf"; my $default_conf_file = "$args{'configdir'}/sanoid.defaults.conf"; @@ -44,6 +46,7 @@ my $cache = '/var/cache/sanoidsnapshots.txt'; my $cacheTTL = 900; # 15 minutes my %snaps = getsnaps( \%config, $cacheTTL, $forcecacheupdate ); my %pruned; +my %capacitycache; my %snapsbytype = getsnapsbytype( \%config, \%snaps ); @@ -125,12 +128,13 @@ sub monitor_snapshots { my $path = $config{$section}{'path'}; push @paths, $path; - my @types = ('yearly','monthly','daily','hourly'); + my @types = ('yearly','monthly','daily','hourly','frequently'); foreach my $type (@types) { my $smallerperiod = 0; # we need to set the period length in seconds first - if ($type eq 'hourly') { $smallerperiod = 60; } + if ($type eq 'frequently') { $smallerperiod = 1; } + elsif ($type eq 'hourly') { $smallerperiod = 60; } elsif ($type eq 'daily') { $smallerperiod = 60*60; } elsif ($type eq 'monthly') { $smallerperiod = 60*60*24; } elsif ($type eq 'yearly') { $smallerperiod = 60*60*24; } @@ -254,12 +258,17 @@ sub prune_snapshots { my $path = $config{$section}{'path'}; my $period = 0; + if (check_prune_defer($config, $section)) { + if ($args{'verbose'}) { print "INFO: deferring snapshot pruning ($section)...\n"; } + next; + } foreach my $type (keys %{ $config{$section} }){ unless ($type =~ /ly$/) { next; } # we need to set the period length in seconds first - if ($type eq 'hourly') { $period = 60*60; } + if ($type eq 'frequently') { $period = 60 * $config{$section}{'frequent_period'}; } + elsif ($type eq 'hourly') { $period = 60*60; } elsif ($type eq 'daily') { $period = 60*60*24; } elsif ($type eq 'monthly') { $period = 60*60*24*31; } elsif ($type eq 'yearly') { $period = 60*60*24*365.25; } @@ -384,7 +393,18 @@ sub take_snapshots { # to avoid duplicates with DST my $dateSuffix = ""; - if ($type eq 'hourly') { + if ($type eq 'frequently') { + my $frequentslice = int($datestamp{'min'} / $config{$section}{'frequent_period'}); + + push @preferredtime,0; # try to hit 0 seconds + push @preferredtime,$frequentslice * $config{$section}{'frequent_period'}; + push @preferredtime,$datestamp{'hour'}; + push @preferredtime,$datestamp{'mday'}; + push @preferredtime,($datestamp{'mon'}-1); # january is month 0 + push @preferredtime,$datestamp{'year'}; + $lastpreferred = timelocal(@preferredtime); + if ($lastpreferred > time()) { $lastpreferred -= 60 * $config{$section}{'frequent_period'}; } # preferred time is later this frequent period - so look at last frequent period + } elsif ($type eq 'hourly') { push @preferredtime,0; # try to hit 0 seconds push @preferredtime,$config{$section}{'hourly_min'}; push @preferredtime,$datestamp{'hour'}; @@ -701,10 +721,21 @@ sub init { tie my %ini, 'Config::IniFiles', ( -file => $conf_file ) or die "FATAL: cannot load $conf_file - please create a valid local config file before running sanoid!"; # we'll use these later to normalize potentially true and false values on any toggle keys - my @toggles = 
('autosnap','autoprune','monitor_dont_warn','monitor_dont_crit','monitor','recursive','process_children_only','no_inconsistent_snapshot','force_post_snapshot_script'); + my @toggles = ('autosnap','autoprune','monitor_dont_warn','monitor_dont_crit','monitor','recursive','process_children_only','skip_children','no_inconsistent_snapshot','force_post_snapshot_script'); my @istrue=(1,"true","True","TRUE","yes","Yes","YES","on","On","ON"); my @isfalse=(0,"false","False","FALSE","no","No","NO","off","Off","OFF"); + # check if default configuration file is up to date + my $defaults_version = 1; + if (defined $defaults{'version'}{'version'}) { + $defaults_version = $defaults{'version'}{'version'}; + delete $defaults{'version'}; + } + + if ($defaults_version < $MINIMUM_DEFAULTS_VERSION) { + die "FATAL: you're using sanoid.defaults.conf v$defaults_version - this version of sanoid requires at least sanoid.defaults.conf v$MINIMUM_DEFAULTS_VERSION"; + } + foreach my $section (keys %ini) { # first up - die with honor if unknown parameters are set in any modules or templates by the user. @@ -758,7 +789,7 @@ sub init { # override with any locally set values in the module itself foreach my $key (keys %{$ini{$section}} ) { - if (! ($key =~ /template|recursive/)) { + if (! ($key =~ /template|recursive|skip_children/)) { if ($args{'debug'}) { print "DEBUG: overriding $key on $section with value directly set in module.\n"; } $config{$section}{$key} = $ini{$section}{$key}; } @@ -783,10 +814,17 @@ sub init { # how 'bout some recursion? =) my @datasets; - if ($ini{$section}{'recursive'}) { + if ($ini{$section}{'recursive'} || $ini{$section}{'skip_children'}) { @datasets = getchilddatasets($config{$section}{'path'}); - foreach my $dataset(@datasets) { + DATASETS: foreach my $dataset(@datasets) { chomp $dataset; + + if ($ini{$section}{'skip_children'}) { + if ($args{'debug'}) { print "DEBUG: ignoring $dataset.\n"; } + delete $config{$dataset}; + next DATASETS; + } + foreach my $key (keys %{$config{$section}} ) { if (! ($key =~ /template|recursive|children_only/)) { if ($args{'debug'}) { print "DEBUG: recursively setting $key from $section to $dataset.\n"; } @@ -912,7 +950,7 @@ sub check_zpool() { exit $ERRORS{$state}; } - my $statcommand="/sbin/zpool list -o name,size,cap,health,free $pool"; + my $statcommand="$zpool list -o name,size,cap,health,free $pool"; if (! open STAT, "$statcommand|") { print ("$state '$statcommand' command returns no result! NOTE: This plugin needs OS support for ZFS, and execution with root privileges.\n"); @@ -960,7 +998,7 @@ sub check_zpool() { ## flag to detect section of zpool status involving our zpool my $poolfind=0; - $statcommand="/sbin/zpool status $pool"; + $statcommand="$zpool status $pool"; if (! open STAT, "$statcommand|") { $state = 'CRITICAL'; print ("$state '$statcommand' command returns no result! NOTE: This plugin needs OS support for ZFS, and execution with root privileges.\n"); @@ -1068,7 +1106,7 @@ sub check_zpool() { return ($ERRORS{$state},$msg); } # end check_zpool() -sub check_capacity_limit() { +sub check_capacity_limit { my $value = shift; if (!defined($value) || $value !~ /^\d+\z/) { @@ -1091,7 +1129,7 @@ sub check_zpool_capacity() { my $capacitylimitsref=shift; my %capacitylimits=%$capacitylimitsref; - my $statcommand="/sbin/zpool list -H -o cap $pool"; + my $statcommand="$zpool list -H -o cap $pool"; if (!
open STAT, "$statcommand|") { print ("$state '$statcommand' command returns no result!\n"); @@ -1136,6 +1174,60 @@ sub check_zpool_capacity() { return ($ERRORS{$state},$msg); } # end check_zpool_capacity() +sub check_prune_defer { + my ($config, $section) = @_; + + my $limit = $config{$section}{"prune_defer"}; + + if (!check_capacity_limit($limit)) { + die "ERROR: invalid prune_defer limit!\n"; + } + + if ($limit eq 0) { + return 0; + } + + my @parts = split /\//, $section, 2; + my $pool = $parts[0]; + + if (exists $capacitycache{$pool}) { + } else { + $capacitycache{$pool} = get_zpool_capacity($pool); + } + + if ($limit < $capacitycache{$pool}) { + return 0; + } + + return 1; +} + +sub get_zpool_capacity { + my $pool = shift; + + my $statcommand="$zpool list -H -o cap $pool"; + + if (! open STAT, "$statcommand|") { + die "ERROR: '$statcommand' command returns no result!\n"; + } + + my $line = ; + close(STAT); + + chomp $line; + my @row = split(/ +/, $line); + my $cap=$row[0]; + + ## check for valid capacity value + if ($cap !~ m/^[0-9]{1,3}%$/ ) { + die "ERROR: '$statcommand' command returned invalid capacity value ($cap)!\n"; + } + + $cap =~ s/\D//g; + + return $cap; +} + ###################################################################################################### ###################################################################################################### ###################################################################################################### @@ -1297,6 +1389,9 @@ sub getchilddatasets { my @children = ; close FH; + # parent dataset is the first element + shift @children; + return @children; } diff --git a/sanoid.conf b/sanoid.conf index feb2237..9f13105 100644 --- a/sanoid.conf +++ b/sanoid.conf @@ -40,6 +40,7 @@ daily = 60 [template_production] + frequently = 0 hourly = 36 daily = 30 monthly = 3 @@ -49,6 +50,7 @@ [template_backup] autoprune = yes + frequently = 0 hourly = 30 daily = 90 monthly = 12 diff --git a/sanoid.defaults.conf b/sanoid.defaults.conf index d8e428a..2ca4586 100644 --- a/sanoid.defaults.conf +++ b/sanoid.defaults.conf @@ -5,6 +5,8 @@ # # # you have been warned. # ################################################################################### +[version] +version = 2 [template_default] @@ -15,6 +17,8 @@ path = recursive = use_template = process_children_only = +skip_children = + pre_snapshot_script = post_snapshot_script = pruning_script = @@ -22,17 +26,32 @@ script_timeout = 5 no_inconsistent_snapshot = force_post_snapshot_script = +# for snapshots shorter than one hour, the period duration must be defined +# in minutes. Because they are executed within a full hour, the selected +# value should divide 60 minutes without remainder so taken snapshots +# are apart in equal intervals. Values larger than 59 aren't practical +# as only one snapshot will be taken on each full hour in this case. +# examples: +# frequent_period = 15 -> four snapshot each hour 15 minutes apart +# frequent_period = 5 -> twelve snapshots each hour 5 minutes apart +# frequent_period = 45 -> two snapshots each hour with different time gaps +# between them: 45 minutes and 15 minutes in this case +frequent_period = 15 + # If any snapshot type is set to 0, we will not take snapshots for it - and will immediately # prune any of those type snapshots already present. # # Otherwise, if autoprune is set, we will prune any snapshots of that type which are older # than (setting * periodicity) - so if daily = 90, we'll prune any dailies older than 90 days. 
autoprune = yes +frequently = 0 hourly = 48 daily = 90 monthly = 6 yearly = 0 -min_percent_free = 10 +# pruning can be skipped based on the used capacity of the pool +# (0: always prune, 1-100: only prune if used capacity is greater than this value) +prune_defer = 0 # We will automatically take snapshots if autosnap is on, at the desired times configured # below (or immediately, if we don't have one since the last preferred time for that type). @@ -68,6 +87,8 @@ yearly_min = 0 monitor = yes monitor_dont_warn = no monitor_dont_crit = no +frequently_warn = 0 +frequently_crit = 0 hourly_warn = 90 hourly_crit = 360 daily_warn = 28 diff --git a/syncoid b/syncoid index 30aef49..c5ef3fd 100755 --- a/syncoid +++ b/syncoid @@ -19,7 +19,8 @@ use Sys::Hostname; my %args = ('sshkey' => '', 'sshport' => '', 'sshcipher' => '', 'sshoption' => [], 'target-bwlimit' => '', 'source-bwlimit' => ''); GetOptions(\%args, "no-command-checks", "monitor-version", "compress=s", "dumpsnaps", "recursive|r", "source-bwlimit=s", "target-bwlimit=s", "sshkey=s", "sshport=i", "sshcipher|c=s", "sshoption|o=s@", - "debug", "quiet", "no-stream", "no-sync-snap", "no-resume", "exclude=s@", "skip-parent", "identifier=s") or pod2usage(2); + "debug", "quiet", "no-stream", "no-sync-snap", "no-resume", "exclude=s@", "skip-parent", "identifier=s", + "no-clone-handling") or pod2usage(2); my %compressargs = %{compressargset($args{'compress'} || 'default')}; # Can't be done with GetOptions arg, as default still needs to be set @@ -104,17 +105,59 @@ my $exitcode = 0; ## replication ## if (!defined $args{'recursive'}) { - syncdataset($sourcehost, $sourcefs, $targethost, $targetfs); + syncdataset($sourcehost, $sourcefs, $targethost, $targetfs, undef); } else { if ($debug) { print "DEBUG: recursive sync of $sourcefs.\n"; } my @datasets = getchilddatasets($sourcehost, $sourcefs, $sourceisroot); - foreach my $dataset(@datasets) { + + my @deferred; + + foreach my $datasetProperties(@datasets) { + my $dataset = $datasetProperties->{'name'}; + my $origin = $datasetProperties->{'origin'}; + if ($origin eq "-" || defined $args{'no-clone-handling'}) { + $origin = undef; + } else { + # check if clone source is replicated too + my @values = split(/@/, $origin, 2); + my $srcdataset = $values[0]; + + my $found = 0; + foreach my $datasetProperties(@datasets) { + if ($datasetProperties->{'name'} eq $srcdataset) { + $found = 1; + last; + } + } + + if ($found == 0) { + # clone source is not replicated, do a full replication + $origin = undef; + } else { + # clone source is replicated, so defer this dataset until all non-clone datasets are replicated + push @deferred, $datasetProperties; + next; + } + } + $dataset =~ s/\Q$sourcefs\E//; chomp $dataset; my $childsourcefs = $sourcefs . $dataset; my $childtargetfs = $targetfs . $dataset; # print "syncdataset($sourcehost, $childsourcefs, $targethost, $childtargetfs); \n"; - syncdataset($sourcehost, $childsourcefs, $targethost, $childtargetfs); + syncdataset($sourcehost, $childsourcefs, $targethost, $childtargetfs, $origin); + } + + # replicate the deferred cloned datasets and, if this is the initial run, recreate them on the target + foreach my $datasetProperties(@deferred) { + my $dataset = $datasetProperties->{'name'}; + my $origin = $datasetProperties->{'origin'}; + + $dataset =~ s/\Q$sourcefs\E//; + chomp $dataset; + my $childsourcefs = $sourcefs . $dataset; + my $childtargetfs = $targetfs .
$dataset; + syncdataset($sourcehost, $childsourcefs, $targethost, $childtargetfs, $origin); } } @@ -147,37 +190,50 @@ sub getchilddatasets { $fsescaped = escapeshellparam($fsescaped); } - my $getchildrencmd = "$rhost $mysudocmd $zfscmd list -o name -t filesystem,volume -Hr $fsescaped |"; + my $getchildrencmd = "$rhost $mysudocmd $zfscmd list -o name,origin -t filesystem,volume -Hr $fsescaped |"; if ($debug) { print "DEBUG: getting list of child datasets on $fs using $getchildrencmd...\n"; } - open FH, $getchildrencmd; - my @children = <FH>; - close FH; - - if (defined $args{'skip-parent'}) { - # parent dataset is the first element - shift @children; + if (! open FH, $getchildrencmd) { + die "ERROR: list command failed!\n"; } - if (defined $args{'exclude'}) { - my $excludes = $args{'exclude'}; - foreach (@$excludes) { - for my $i ( 0 .. $#children ) { - if ($children[$i] =~ /$_/) { - if ($debug) { print "DEBUG: excluded $children[$i] because of $_\n"; } - undef $children[$i] + my @children; + my $first = 1; + + DATASETS: while(<FH>) { + chomp; + + if (defined $args{'skip-parent'} && $first == 1) { + # parent dataset is the first element + $first = 0; + next; + } + + my ($dataset, $origin) = /^([^\t]+)\t([^\t]+)/; + + if (defined $args{'exclude'}) { + my $excludes = $args{'exclude'}; + foreach (@$excludes) { + if ($dataset =~ /$_/) { + if ($debug) { print "DEBUG: excluded $dataset because of $_\n"; } + next DATASETS; } } - - @children = grep{ defined }@children; } + + my %properties; + $properties{'name'} = $dataset; + $properties{'origin'} = $origin; + + push @children, \%properties; } + close FH; return @children; } sub syncdataset { - my ($sourcehost, $sourcefs, $targethost, $targetfs) = @_; + my ($sourcehost, $sourcefs, $targethost, $targetfs, $origin) = @_; my $sourcefsescaped = escapeshellparam($sourcefs); my $targetfsescaped = escapeshellparam($targetfs); @@ -305,11 +362,25 @@ sub syncdataset { my $sendcmd = "$sourcesudocmd $zfscmd send $sourcefsescaped\@$oldestsnapescaped"; my $recvcmd = "$targetsudocmd $zfscmd receive $receiveextraargs -F $targetfsescaped"; - my $pvsize = getsendsize($sourcehost,"$sourcefs\@$oldestsnap",0,$sourceisroot); + my $pvsize; + if (defined $origin) { + my $originescaped = escapeshellparam($origin); + $sendcmd = "$sourcesudocmd $zfscmd send -i $originescaped $sourcefsescaped\@$oldestsnapescaped"; + my $streamargBackup = $args{'streamarg'}; + $args{'streamarg'} = "-i"; + $pvsize = getsendsize($sourcehost,$origin,"$sourcefs\@$oldestsnap",$sourceisroot); + $args{'streamarg'} = $streamargBackup; + } else { + $pvsize = getsendsize($sourcehost,"$sourcefs\@$oldestsnap",0,$sourceisroot); + } + my $disp_pvsize = readablebytes($pvsize); if ($pvsize == 0) { $disp_pvsize = 'UNKNOWN'; } my $synccmd = buildsynccmd($sendcmd,$recvcmd,$pvsize,$sourceisroot,$targetisroot); if (!$quiet) { + if (defined $origin) { + print "INFO: Clone is recreated on target $targetfs based on $origin\n"; + } if (!defined ($args{'no-stream'}) ) { print "INFO: Sending oldest full snapshot $sourcefs\@$oldestsnap (~ $disp_pvsize) to new target filesystem:\n"; } else { @@ -396,7 +467,7 @@ sub syncdataset { # a resumed transfer will only be done to the next snapshot, # so do a normal sync cycle - return syncdataset($sourcehost, $sourcefs, $targethost, $targetfs); + return syncdataset($sourcehost, $sourcefs, $targethost, $targetfs, undef); } # find most recent matching snapshot and do an -I @@ -515,11 +586,17 @@ sub compressargset { decomrawcmd => '/usr/bin/lzop', decomargs => '-dfc',
}, + 'lz4' => { + rawcmd => '/usr/bin/lz4', + args => '', + decomrawcmd => '/usr/bin/lz4', + decomargs => '-dc', + }, ); if ($value eq 'default') { $value = $DEFAULT_COMPRESSION; - } elsif (!(grep $value eq $_, ('gzip', 'pigz-fast', 'pigz-slow', 'zstd-fast', 'zstd-slow', 'lzo', 'default', 'none'))) { + } elsif (!(grep $value eq $_, ('gzip', 'pigz-fast', 'pigz-slow', 'zstd-fast', 'zstd-slow', 'lz4', 'lzo', 'default', 'none'))) { warn "Unrecognised compression value $value, defaulting to $DEFAULT_COMPRESSION"; $value = $DEFAULT_COMPRESSION; } @@ -1154,6 +1231,11 @@ sub getsendsize { } chomp $sendsize; + # check for valid value + if ($sendsize !~ /^\d+$/) { + $sendsize = ''; + } + # to avoid confusion with a zero size pv, give sendsize # a minimum 4K value - or if empty, make sure it reads UNKNOWN if ($debug) { print "DEBUG: sendsize = $sendsize\n"; } @@ -1227,7 +1309,7 @@ syncoid - ZFS snapshot replication tool Options: - --compress=FORMAT Compresses data during transfer. Currently accepted options are gzip, pigz-fast, pigz-slow, lzo (default) & none + --compress=FORMAT Compresses data during transfer. Currently accepted options are gzip, pigz-fast, pigz-slow, zstd-fast, zstd-slow, lz4, lzo (default) & none --identifier=EXTRA Extra identifier which is included in the snapshot name. Can be used for replicating to multiple targets. --recursive|r Also transfers child datasets --skip-parent Skips syncing of the parent dataset. Does nothing without '--recursive' option. @@ -1250,3 +1332,4 @@ Options: --dumpsnaps Dumps a list of snapshots during the run --no-command-checks Do not check command existence before attempting transfer. Not recommended --no-resume Don't use the ZFS resume feature if available + --no-clone-handling Don't try to recreate clones on target diff --git a/tests/1_one_year/run.sh b/tests/1_one_year/run.sh index 7cec813..1cae7b4 100755 --- a/tests/1_one_year/run.sh +++ b/tests/1_one_year/run.sh @@ -10,7 +10,7 @@ set -x POOL_NAME="sanoid-test-1" POOL_TARGET="" # root RESULT="/tmp/sanoid_test_result" -RESULT_CHECKSUM="aa15e5595b0ed959313289ecb70323dad9903328ac46e881da5c4b0f871dd7cf" +RESULT_CHECKSUM="68c67161a59d0e248094a66061972f53613067c9db52ad981030f36bc081fed7" # UTC timestamp of start and end START="1483225200" @@ -46,10 +46,4 @@ done saveSnapshotList "${POOL_NAME}" "${RESULT}" # hourly daily monthly -verifySnapshotList "${RESULT}" 8759 366 12 "${RESULT_CHECKSUM}" - -# hourly count should be 8760 but one hour get's lost because of DST - -# daily count should be 365 but one additional daily is taken -# because the DST change leads to a day with 25 hours -# which will trigger an additional daily snapshot +verifySnapshotList "${RESULT}" 8760 365 12 "${RESULT_CHECKSUM}"
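
Editor's note: the examples below are illustrative sketches, not part of the patch. Assuming the patch above is applied, the new frequently, frequent_period, and prune_defer settings could be combined in a dataset section of /etc/sanoid/sanoid.conf like this (the tank/vm dataset name is hypothetical):

	[tank/vm]
		use_template = production
		recursive = yes
		# take a snapshot every 15 minutes, keep the 16 most recent (four hours of coverage)
		frequently = 16
		frequent_period = 15
		# defer pruning until the pool is more than 80% full
		prune_defer = 80

Because frequent_period should divide 60 without remainder, a value of 15 schedules snapshots at :00, :15, :30 and :45 of each hour.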
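
On the syncoid side, the new clone handling applies automatically during recursive runs: cloned child datasets are deferred until their origins have been replicated, then sent with zfs send -i from the origin so the clone relationship is recreated on the target. A hedged usage sketch (host, pool, and dataset names hypothetical):

	syncoid -r tank/vm root@backup:pool/vm
	syncoid -r --no-clone-handling tank/vm root@backup:pool/vm

The first command recreates clones on the target from their origins on the initial sync; the second opts out of clone handling and performs a normal full replication for each cloned dataset instead.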