diff --git a/README.md b/README.md index aff3b68..3f418bb 100644 --- a/README.md +++ b/README.md @@ -42,13 +42,14 @@ And its /etc/sanoid/sanoid.conf might look something like this: frequently = 0 hourly = 36 daily = 30 + weekly = 4 monthly = 3 yearly = 0 autosnap = yes autoprune = yes ``` -Which would be enough to tell sanoid to take and keep 36 hourly snapshots, 30 dailies, 3 monthlies, and no yearlies for all datasets under data/images (but not data/images itself, since process_children_only is set). Except in the case of data/images/win7, which follows the same template (since it's a child of data/images) but only keeps 4 hourlies for whatever reason. +Which would be enough to tell sanoid to take and keep 36 hourly snapshots, 30 dailies, 4 weeklies, 3 monthlies, and no yearlies for all datasets under data/images (but not data/images itself, since process_children_only is set). Except in the case of data/images/win7, which follows the same template (since it's a child of data/images) but only keeps 4 hourlies for whatever reason. For more full details on sanoid.conf settings see [Wiki page](https://github.com/jimsalterjrs/sanoid/wiki/Sanoid#options). 
diff --git a/packages/debian/changelog b/packages/debian/changelog index ba369a1..adbc36e 100644 --- a/packages/debian/changelog +++ b/packages/debian/changelog @@ -1,3 +1,9 @@ +sanoid (2.3.1-SNAPSHOT) unstable; urgency=medium + + SNAPSHOT + + -- Jim Salter Tue, 12 Aug 2025 14:43:00 +0200 + sanoid (2.3.0) unstable; urgency=medium [overall] documentation updates, small fixes (@thecatontheflat, @mjeanson, @jiawen, @EchterAgo, @jan-krieg, @dlangille, @rightaditya, @MynaITLabs, @ossimoi, @alexgarel, @TopherIsSwell, @jimsalterjrs, @phreaker0) diff --git a/sanoid b/sanoid index a5591b9..9a4bbda 100755 --- a/sanoid +++ b/sanoid @@ -144,6 +144,28 @@ if ($args{'cron'}) { exit 0; +#################################################################################### +#################################################################################### +#################################################################################### + +sub get_active_datasets { + my ($config, $snaps, $snapsbytype, $snapsbypath) = @_; + my @paths; + + foreach my $section (keys %config) { + if ($section =~ /^template/) { next; } + if ((! $config{$section}{'autoprune'}) and (! 
$config{$section}{'autosnap'})) { next; } + if ($config{$section}{'process_children_only'}) { next; } + + my $path = $config{$section}{'path'}; + push @paths, $path; + } + + my @sorted_paths = sort { lc($a) cmp lc($b) } @paths; + my $paths = join (" ", @sorted_paths); + return $paths +} + #################################################################################### #################################################################################### #################################################################################### @@ -870,6 +892,7 @@ sub getsnaps { my ($config, $cacheTTL, $forcecacheupdate) = @_; my @rawsnaps; + my $exitcode; my ($dev, $ino, $mode, $nlink, $uid, $gid, $rdev, $size, $atime, $mtime, $ctime, $blksize, $blocks) = stat($cache); @@ -882,11 +905,24 @@ sub getsnaps { } else { print "INFO: cache expired - updating from zfs list.\n"; } + if ($args{'debug'}) { + print "INFO: running: $zfs get -Hrpt snapshot creation " . get_active_datasets(@params) . "\n"; + } } - open FH, "$zfs get -Hrpt snapshot creation |"; + # just get snapshots from configured datasets + open FH, "$zfs get -Hrpt snapshot creation " . get_active_datasets(@params) . " |"; @rawsnaps = <FH>; close FH; + my $exitcode = $? 
>> 8; + + if ($exitcode != 0) { + print "INFO: zfs list snapshots with dataset names does not work, retrying without dataset names\n"; + open FH, "$zfs get -Hrpt snapshot creation |"; + @rawsnaps = <FH>; + close FH; + } + open FH, "> $cache.tmp" or die "Could not write to $cache.tmp!\n"; print FH @rawsnaps; close FH; @@ -1792,7 +1828,6 @@ sub addcachedsnapshots { my @datasets = getchilddatasets($dataset); foreach my $dataset(@datasets) { - print "${dataset}\@${suffix}\n"; print $fh "${dataset}\@${suffix}\n"; } } diff --git a/sanoid.conf b/sanoid.conf index 04e281f..496c109 100644 --- a/sanoid.conf +++ b/sanoid.conf @@ -55,6 +55,7 @@ frequently = 0 hourly = 36 daily = 30 + weekly = 4 monthly = 3 yearly = 0 autosnap = yes @@ -65,6 +66,7 @@ frequently = 0 hourly = 30 daily = 90 + weekly = 4 monthly = 12 yearly = 0 @@ -86,6 +88,7 @@ frequently = 0 hourly = 30 daily = 90 + weekly = 4 monthly = 3 yearly = 0 diff --git a/syncoid b/syncoid index 945e095..43e5107 100755 --- a/syncoid +++ b/syncoid @@ -194,6 +194,9 @@ if (length $args{'insecure-direct-connection'}) { # warn user of anything missing, then continue with sync. my %avail = checkcommands(); +# host => { supports_type_filter => 1/0, supported_properties => ['guid', 'creation', ...] 
} +my %host_zfs_get_features; + my %snaps; my $exitcode = 0; @@ -345,12 +348,6 @@ sub syncdataset { my $sourcefsescaped = escapeshellparam($sourcefs); my $targetfsescaped = escapeshellparam($targetfs); - # if no rollbacks are allowed, disable forced receive - my $forcedrecv = "-F"; - if (defined $args{'no-rollback'}) { - $forcedrecv = ""; - } - writelog('DEBUG', "syncing source $sourcefs to target $targetfs."); my ($sync, $error) = getzfsvalue($sourcehost,$sourcefs,$sourceisroot,'syncoid:sync'); @@ -438,7 +435,7 @@ sub syncdataset { # Don't send the sync snap if it's filtered out by --exclude-snaps or # --include-snaps if (!snapisincluded($newsyncsnap)) { - $newsyncsnap = getnewestsnapshot($sourcehost,$sourcefs,$sourceisroot); + $newsyncsnap = getnewestsnapshot(\%snaps); if ($newsyncsnap eq '') { writelog('WARN', "CRITICAL: no snapshots exist on source $sourcefs, and you asked for --no-sync-snap."); if ($exitcode < 1) { $exitcode = 1; } @@ -447,7 +444,7 @@ sub syncdataset { } } else { # we don't want sync snapshots created, so use the newest snapshot we can find. - $newsyncsnap = getnewestsnapshot($sourcehost,$sourcefs,$sourceisroot); + $newsyncsnap = getnewestsnapshot(\%snaps); if ($newsyncsnap eq '') { writelog('WARN', "CRITICAL: no snapshots exist on source $sourcefs, and you asked for --no-sync-snap."); if ($exitcode < 1) { $exitcode = 1; } @@ -575,28 +572,26 @@ sub syncdataset { my $targetsize = getzfsvalue($targethost,$targetfs,$targetisroot,'-p used'); - my $bookmark = ''; - my $bookmarkcreation = 0; + my %bookmark = (); $matchingsnap = getmatchingsnapshot($sourcefs, $targetfs, \%snaps); if (! 
$matchingsnap) { # no matching snapshots, check for bookmarks as fallback my %bookmarks = getbookmarks($sourcehost,$sourcefs,$sourceisroot); - # check for matching guid of source bookmark and target snapshot (oldest first) - foreach my $snap ( sort { $snaps{'target'}{$b}{'creation'}<=>$snaps{'target'}{$a}{'creation'} } keys %{ $snaps{'target'} }) { + # check for matching guid of source bookmark and target snapshot (newest first) + foreach my $snap ( sort { sortsnapshots($snaps{'target'}, $b, $a) } keys %{ $snaps{'target'} }) { my $guid = $snaps{'target'}{$snap}{'guid'}; if (defined $bookmarks{$guid}) { # found a match - $bookmark = $bookmarks{$guid}{'name'}; - $bookmarkcreation = $bookmarks{$guid}{'creation'}; + %bookmark = %{ $bookmarks{$guid} }; $matchingsnap = $snap; last; } } - if ($bookmark eq '') { + if (! %bookmark) { # force delete is not possible for the root dataset if ($args{'force-delete'} && index($targetfs, '/') != -1) { writelog('INFO', "Removing $targetfs because no matching snapshots were found"); @@ -669,15 +664,18 @@ sub syncdataset { my $nextsnapshot = 0; - if ($bookmark ne '') { - my $bookmarkescaped = escapeshellparam($bookmark); + if (%bookmark) { if (!defined $args{'no-stream'}) { # if intermediate snapshots are needed we need to find the next oldest snapshot, # do an replication to it and replicate as always from oldest to newest # because bookmark sends doesn't support intermediates directly - foreach my $snap ( sort { $snaps{'source'}{$a}{'creation'}<=>$snaps{'source'}{$b}{'creation'} } keys %{ $snaps{'source'} }) { - if ($snaps{'source'}{$snap}{'creation'} >= $bookmarkcreation) { + foreach my $snap ( sort { sortsnapshots($snaps{'source'}, $a, $b) } keys %{ $snaps{'source'} }) { + my $comparisonkey = 'creation'; + if (defined $snaps{'source'}{$snap}{'createtxg'} && defined $bookmark{'createtxg'}) { + $comparisonkey = 'createtxg'; + } + if ($snaps{'source'}{$snap}{$comparisonkey} >= $bookmark{$comparisonkey}) { $nextsnapshot = $snap; last; 
} @@ -685,13 +683,13 @@ sub syncdataset { } if ($nextsnapshot) { - ($exit, $stdout) = syncbookmark($sourcehost, $sourcefs, $targethost, $targetfs, $bookmark, $nextsnapshot); + ($exit, $stdout) = syncbookmark($sourcehost, $sourcefs, $targethost, $targetfs, $bookmark{'name'}, $nextsnapshot); $exit == 0 or do { if (!$resume && $stdout =~ /\Qcontains partially-complete state\E/) { writelog('WARN', "resetting partially receive state"); resetreceivestate($targethost,$targetfs,$targetisroot); - (my $ret) = syncbookmark($sourcehost, $sourcefs, $targethost, $targetfs, $bookmark, $nextsnapshot); + (my $ret) = syncbookmark($sourcehost, $sourcefs, $targethost, $targetfs, $bookmark{'name'}, $nextsnapshot); $ret == 0 or do { if ($exitcode < 2) { $exitcode = 2; } return 0; @@ -705,13 +703,13 @@ sub syncdataset { $matchingsnap = $nextsnapshot; $matchingsnapescaped = escapeshellparam($matchingsnap); } else { - ($exit, $stdout) = syncbookmark($sourcehost, $sourcefs, $targethost, $targetfs, $bookmark, $newsyncsnap); + ($exit, $stdout) = syncbookmark($sourcehost, $sourcefs, $targethost, $targetfs, $bookmark{'name'}, $newsyncsnap); $exit == 0 or do { if (!$resume && $stdout =~ /\Qcontains partially-complete state\E/) { writelog('WARN', "resetting partially receive state"); resetreceivestate($targethost,$targetfs,$targetisroot); - (my $ret) = syncbookmark($sourcehost, $sourcefs, $targethost, $targetfs, $bookmark, $newsyncsnap); + (my $ret) = syncbookmark($sourcehost, $sourcefs, $targethost, $targetfs, $bookmark{'name'}, $newsyncsnap); $ret == 0 or do { if ($exitcode < 2) { $exitcode = 2; } return 0; @@ -726,8 +724,8 @@ sub syncdataset { # do a normal replication if bookmarks aren't used or if previous # bookmark replication was only done to the next oldest snapshot - # edge case: skip repilcation if bookmark replication used the latest snapshot - if (($bookmark eq '' || $nextsnapshot) && !($matchingsnap eq $newsyncsnap)) { + # edge case: skip replication if bookmark replication used the 
latest snapshot + if ((!%bookmark || $nextsnapshot) && !($matchingsnap eq $newsyncsnap)) { ($exit, $stdout) = syncincremental($sourcehost, $sourcefs, $targethost, $targetfs, $matchingsnap, $newsyncsnap, defined($args{'no-stream'})); @@ -826,16 +824,25 @@ sub syncdataset { if (defined $args{'create-bookmark'}) { my $ret = createbookmark($sourcehost, $sourcefs, $newsyncsnap, $newsyncsnap); $ret == 0 or do { - # fallback: assume naming conflict and try again with guid based suffix + my %existingbookmarks = getbookmarks($sourcehost,$sourcefs,$sourceisroot); my $guid = $snaps{'source'}{$newsyncsnap}{'guid'}; - $guid = substr($guid, 0, 6); - writelog('INFO', "bookmark creation failed, retrying with guid based suffix ($guid)..."); + if (defined $existingbookmarks{$guid} && $existingbookmarks{$guid}{'name'} eq $newsyncsnap) { + writelog('INFO', "bookmark already exists, skipping creation"); + } else { + # fallback: assume naming conflict and try again with guid based suffix + my $suffix = substr($guid, 0, 6); - my $ret = createbookmark($sourcehost, $sourcefs, $newsyncsnap, "$newsyncsnap$guid"); - $ret == 0 or do { - if ($exitcode < 2) { $exitcode = 2; } - return 0; + writelog('INFO', "bookmark creation failed, retrying with guid based suffix ($suffix)..."); + + my $newsyncsnapsuffix = "$newsyncsnap$suffix"; + my $ret = createbookmark($sourcehost, $sourcefs, $newsyncsnap, $newsyncsnapsuffix); + $ret == 0 or do { + if (! 
defined $existingbookmarks{$guid} || $existingbookmarks{$guid}{'name'} ne $newsyncsnapsuffix) { + if ($exitcode < 2) { $exitcode = 2; } + return 0; + } + } } }; } @@ -862,7 +869,7 @@ sub syncdataset { %snaps = (%sourcesnaps, %targetsnaps); } - my @to_delete = sort { $snaps{'target'}{$a}{'creation'}<=>$snaps{'target'}{$b}{'creation'} } grep {!exists $snaps{'source'}{$_}} keys %{ $snaps{'target'} }; + my @to_delete = sort { sortsnapshots($snaps{'target'}, $a, $b) } grep {!exists $snaps{'source'}{$_}} keys %{ $snaps{'target'} }; while (@to_delete) { # Create batch of snapshots to remove my $snaps = join ',', splice(@to_delete, 0, 50); @@ -1389,6 +1396,47 @@ sub checkcommands { return %avail; } +sub check_zfs_get_features { + my ($rhost, $mysudocmd, $zfscmd) = @_; + my $host = $rhost ? (split(/\s+/, $rhost))[-1] : "localhost"; + + return $host_zfs_get_features{$host} if exists $host_zfs_get_features{$host}; + + writelog('DEBUG', "Checking `zfs get` features on host \"$host\"..."); + + $host_zfs_get_features{$host} = { + supports_type_filter => 0, + supported_properties => ['guid', 'creation'] + }; + + my $check_t_option_cmd = "$rhost $mysudocmd $zfscmd get -H -t snapshot '' ''"; + open my $fh_t, "$check_t_option_cmd 2>&1 |"; + my $output_t = <$fh_t>; + close $fh_t; + + if ($output_t !~ /^\Qinvalid option\E/) { + $host_zfs_get_features{$host}->{supports_type_filter} = 1; + } + + writelog('DEBUG', "Host \"$host\" has `zfs get -t`?: $host_zfs_get_features{$host}->{supports_type_filter}"); + + my @properties_to_check = ('createtxg'); + foreach my $prop (@properties_to_check) { + my $check_prop_cmd = "$rhost $mysudocmd $zfscmd get -H $prop ''"; + open my $fh_p, "$check_prop_cmd 2>&1 |"; + my $output_p = <$fh_p>; + close $fh_p; + + if ($output_p !~ /^\Qbad property list: invalid property\E/) { + push @{$host_zfs_get_features{$host}->{supported_properties}}, $prop; + } + } + + writelog('DEBUG', "Host \"$host\" ZFS properties: 
@{$host_zfs_get_features{$host}->{supported_properties}}"); + + return $host_zfs_get_features{$host}; +} + sub iszfsbusy { my ($rhost,$fs,$isroot) = @_; if ($rhost ne '') { $rhost = "$sshcmd $rhost"; } @@ -1399,7 +1447,7 @@ sub iszfsbusy { close PL; foreach my $process (@processes) { - if ($process =~ /zfs *(receive|recv)[^\/]*\Q$fs\E\Z/) { + if ($process =~ /zfs *(receive|recv)[^\/]*\s\Q$fs\E\Z/) { # there's already a zfs receive process for our target filesystem - return true writelog('DEBUG', "process $process matches target $fs!"); return 1; @@ -1527,9 +1575,22 @@ sub readablebytes { return $disp; } +sub sortsnapshots { + my ($snapdata, $left, $right) = @_; + if (defined $snapdata->{$left}{'createtxg'} && defined $snapdata->{$right}{'createtxg'}) { + return $snapdata->{$left}{'createtxg'} <=> $snapdata->{$right}{'createtxg'}; + } + + if (defined $snapdata->{$left}{'creation'} && defined $snapdata->{$right}{'creation'}) { + return $snapdata->{$left}{'creation'} <=> $snapdata->{$right}{'creation'}; + } + + return 0; +} + sub getoldestsnapshot { my $snaps = shift; - foreach my $snap ( sort { $snaps{'source'}{$a}{'creation'}<=>$snaps{'source'}{$b}{'creation'} } keys %{ $snaps{'source'} }) { + foreach my $snap (sort { sortsnapshots($snaps{'source'}, $a, $b) } keys %{ $snaps{'source'} }) { # return on first snap found - it's the oldest return $snap; } @@ -1543,7 +1604,7 @@ sub getoldestsnapshot { sub getnewestsnapshot { my $snaps = shift; - foreach my $snap ( sort { $snaps{'source'}{$b}{'creation'}<=>$snaps{'source'}{$a}{'creation'} } keys %{ $snaps{'source'} }) { + foreach my $snap (sort { sortsnapshots($snaps{'source'}, $b, $a) } keys %{ $snaps{'source'} }) { # return on first snap found - it's the newest writelog('DEBUG', "NEWEST SNAPSHOT: $snap"); return $snap; @@ -1722,7 +1783,7 @@ sub pruneoldsyncsnaps { sub getmatchingsnapshot { my ($sourcefs, $targetfs, $snaps) = @_; - foreach my $snap ( sort { 
$snaps{'source'}{$b}{'creation'}<=>$snaps{'source'}{$a}{'creation'} } keys %{ $snaps{'source'} }) { + foreach my $snap ( sort { sortsnapshots($snaps{'source'}, $b, $a) } keys %{ $snaps{'source'} }) { if (defined $snaps{'target'}{$snap}) { if ($snaps{'source'}{$snap}{'guid'} == $snaps{'target'}{$snap}{'guid'}) { return $snap; @@ -1857,21 +1918,30 @@ sub dumphash() { writelog('INFO', Dumper($hash)); } -sub getsnaps() { +sub getsnaps { my ($type,$rhost,$fs,$isroot,%snaps) = @_; my $mysudocmd; my $fsescaped = escapeshellparam($fs); if ($isroot) { $mysudocmd = ''; } else { $mysudocmd = $sudocmd; } - my $rhostOriginal = $rhost; - if ($rhost ne '') { $rhost = "$sshcmd $rhost"; # double escaping needed $fsescaped = escapeshellparam($fsescaped); } - my $getsnapcmd = "$rhost $mysudocmd $zfscmd get -Hpd 1 -t snapshot guid,creation $fsescaped"; + my $host_features = check_zfs_get_features($rhost, $mysudocmd, $zfscmd); + + my @properties = @{$host_features->{supported_properties}}; + my $type_filter = ""; + if ($host_features->{supports_type_filter}) { + $type_filter = "-t snapshot"; + } else { + push @properties, 'type'; + } + my $properties_string = join(',', @properties); + my $getsnapcmd = "$rhost $mysudocmd $zfscmd get -Hpd 1 $type_filter $properties_string $fsescaped"; + if ($debug) { $getsnapcmd = "$getsnapcmd |"; writelog('DEBUG', "getting list of snapshots on $fs using $getsnapcmd..."); @@ -1880,142 +1950,50 @@ sub getsnaps() { } open FH, $getsnapcmd; my @rawsnaps = ; - close FH or do { - # fallback (solaris for example doesn't support the -t option) - return getsnapsfallback($type,$rhostOriginal,$fs,$isroot,%snaps); - }; - - # this is a little obnoxious. get guid,creation returns guid,creation on two separate lines - # as though each were an entirely separate get command. 
- - my %creationtimes=(); - - foreach my $line (@rawsnaps) { - $line =~ /\Q$fs\E\@(\S*)/; - my $snapname = $1; - - if (!snapisincluded($snapname)) { next; } - - # only import snap guids from the specified filesystem - if ($line =~ /\Q$fs\E\@.*\tguid/) { - chomp $line; - my $guid = $line; - $guid =~ s/^.*\tguid\t*(\d*).*/$1/; - my $snap = $line; - $snap =~ s/^.*\@(.*)\tguid.*$/$1/; - $snaps{$type}{$snap}{'guid'}=$guid; - } - # only import snap creations from the specified filesystem - elsif ($line =~ /\Q$fs\E\@.*\tcreation/) { - chomp $line; - my $creation = $line; - $creation =~ s/^.*\tcreation\t*(\d*).*/$1/; - my $snap = $line; - $snap =~ s/^.*\@(.*)\tcreation.*$/$1/; - - # the accuracy of the creation timestamp is only for a second, but - # snapshots in the same second are highly likely. The list command - # has an ordered output so we append another three digit running number - # to the creation timestamp and make sure those are ordered correctly - # for snapshot with the same creation timestamp - my $counter = 0; - my $creationsuffix; - while ($counter < 999) { - $creationsuffix = sprintf("%s%03d", $creation, $counter); - if (!defined $creationtimes{$creationsuffix}) { - $creationtimes{$creationsuffix} = 1; - last; - } - $counter += 1; - } - - $snaps{$type}{$snap}{'creation'}=$creationsuffix; - } - } - - return %snaps; -} - -sub getsnapsfallback() { - # fallback (solaris for example doesn't support the -t option) - my ($type,$rhost,$fs,$isroot,%snaps) = @_; - my $mysudocmd; - my $fsescaped = escapeshellparam($fs); - if ($isroot) { $mysudocmd = ''; } else { $mysudocmd = $sudocmd; } - - if ($rhost ne '') { - $rhost = "$sshcmd $rhost"; - # double escaping needed - $fsescaped = escapeshellparam($fsescaped); - } - - my $getsnapcmd = "$rhost $mysudocmd $zfscmd get -Hpd 1 type,guid,creation $fsescaped |"; - writelog('WARN', "snapshot listing failed, trying fallback command"); - writelog('DEBUG', "FALLBACK, getting list of snapshots on $fs using $getsnapcmd..."); - 
open FH, $getsnapcmd; - my @rawsnaps = ; close FH or die "CRITICAL ERROR: snapshots couldn't be listed for $fs (exit code $?)"; - my %creationtimes=(); + my %snap_data; + my %creationtimes; - my $state = 0; - foreach my $line (@rawsnaps) { - if ($state < 0) { - $state++; - next; - } + for my $line (@rawsnaps) { + chomp $line; + my ($dataset, $property, $value) = split /\t/, $line; + next unless defined $value; - if ($state eq 0) { - if ($line !~ /\Q$fs\E\@.*\ttype\s*snapshot/) { - # skip non snapshot type object - $state = -2; - next; - } - } elsif ($state eq 1) { - if ($line !~ /\Q$fs\E\@.*\tguid/) { - die "CRITICAL ERROR: snapshots couldn't be listed for $fs (guid parser error)"; - } + my (undef, $snap) = split /@/, $dataset; + next unless length $snap; - chomp $line; - my $guid = $line; - $guid =~ s/^.*\tguid\t*(\d*).*/$1/; - my $snap = $line; - $snap =~ s/^.*\@(.*)\tguid.*$/$1/; - if (!snapisincluded($snap)) { next; } - $snaps{$type}{$snap}{'guid'}=$guid; - } elsif ($state eq 2) { - if ($line !~ /\Q$fs\E\@.*\tcreation/) { - die "CRITICAL ERROR: snapshots couldn't be listed for $fs (creation parser error)"; - } + if (!snapisincluded($snap)) { next; } + $snap_data{$snap}{$property} = $value; - chomp $line; - my $creation = $line; - $creation =~ s/^.*\tcreation\t*(\d*).*/$1/; - my $snap = $line; - $snap =~ s/^.*\@(.*)\tcreation.*$/$1/; - if (!snapisincluded($snap)) { next; } - - # the accuracy of the creation timestamp is only for a second, but - # snapshots in the same second are highly likely. The list command - # has an ordered output so we append another three digit running number - # to the creation timestamp and make sure those are ordered correctly - # for snapshot with the same creation timestamp + # the accuracy of the creation timestamp is only for a second, but + # snapshots in the same second are highly likely. 
The list command + # has an ordered output so we append another three digit running number + # to the creation timestamp and make sure those are ordered correctly + # for snapshot with the same creation timestamp + if ($property eq 'creation') { my $counter = 0; my $creationsuffix; while ($counter < 999) { - $creationsuffix = sprintf("%s%03d", $creation, $counter); + $creationsuffix = sprintf("%s%03d", $value, $counter); if (!defined $creationtimes{$creationsuffix}) { $creationtimes{$creationsuffix} = 1; last; } $counter += 1; } - - $snaps{$type}{$snap}{'creation'}=$creationsuffix; - $state = -1; + $snap_data{$snap}{'creation'} = $creationsuffix; } + } - $state++; + for my $snap (keys %snap_data) { + if (length $type_filter || $snap_data{$snap}{'type'} eq 'snapshot') { + foreach my $prop (@{$host_features->{supported_properties}}) { + if (exists $snap_data{$snap}{$prop}) { + $snaps{$type}{$snap}{$prop} = $snap_data{$snap}{$prop}; + } + } + } } return %snaps; @@ -2033,8 +2011,12 @@ sub getbookmarks() { $fsescaped = escapeshellparam($fsescaped); } + my $host_features = check_zfs_get_features($rhost, $mysudocmd, $zfscmd); + my @properties = @{$host_features->{supported_properties}}; + my $properties_string = join(',', @properties); + my $error = 0; - my $getbookmarkcmd = "$rhost $mysudocmd $zfscmd get -Hpd 1 -t bookmark guid,creation $fsescaped 2>&1 |"; + my $getbookmarkcmd = "$rhost $mysudocmd $zfscmd get -Hpd 1 -t bookmark $properties_string $fsescaped 2>&1 |"; writelog('DEBUG', "getting list of bookmarks on $fs using $getbookmarkcmd..."); open FH, $getbookmarkcmd; my @rawbookmarks = ; @@ -2049,48 +2031,46 @@ sub getbookmarks() { die "CRITICAL ERROR: bookmarks couldn't be listed for $fs (exit code $?)"; } - # this is a little obnoxious. get guid,creation returns guid,creation on two separate lines - # as though each were an entirely separate get command. 
+ my %bookmark_data; + my %creationtimes; - my $lastguid; - my %creationtimes=(); + for my $line (@rawbookmarks) { + chomp $line; + my ($dataset, $property, $value) = split /\t/, $line; + next unless defined $value; - foreach my $line (@rawbookmarks) { - # only import bookmark guids, creation from the specified filesystem - if ($line =~ /\Q$fs\E\#.*\tguid/) { - chomp $line; - $lastguid = $line; - $lastguid =~ s/^.*\tguid\t*(\d*).*/$1/; - my $bookmark = $line; - $bookmark =~ s/^.*\#(.*)\tguid.*$/$1/; - $bookmarks{$lastguid}{'name'}=$bookmark; - } elsif ($line =~ /\Q$fs\E\#.*\tcreation/) { - chomp $line; - my $creation = $line; - $creation =~ s/^.*\tcreation\t*(\d*).*/$1/; - my $bookmark = $line; - $bookmark =~ s/^.*\#(.*)\tcreation.*$/$1/; + my (undef, $bookmark) = split /#/, $dataset; + next unless length $bookmark; - # the accuracy of the creation timestamp is only for a second, but - # bookmarks in the same second are possible. The list command - # has an ordered output so we append another three digit running number - # to the creation timestamp and make sure those are ordered correctly - # for bookmarks with the same creation timestamp + $bookmark_data{$bookmark}{$property} = $value; + + # the accuracy of the creation timestamp is only for a second, but + # bookmarks in the same second are possible. 
The list command + # has an ordered output so we append another three digit running number + # to the creation timestamp and make sure those are ordered correctly + # for bookmarks with the same creation timestamp + if ($property eq 'creation') { my $counter = 0; my $creationsuffix; while ($counter < 999) { - $creationsuffix = sprintf("%s%03d", $creation, $counter); + $creationsuffix = sprintf("%s%03d", $value, $counter); if (!defined $creationtimes{$creationsuffix}) { $creationtimes{$creationsuffix} = 1; last; } $counter += 1; } - - $bookmarks{$lastguid}{'creation'}=$creationsuffix; + $bookmark_data{$bookmark}{'creation'} = $creationsuffix; } } + for my $bookmark (keys %bookmark_data) { + my $guid = $bookmark_data{$bookmark}{'guid'}; + $bookmarks{$guid}{'name'} = $bookmark; + $bookmarks{$guid}{'creation'} = $bookmark_data{$bookmark}{'creation'}; + $bookmarks{$guid}{'createtxg'} = $bookmark_data{$bookmark}{'createtxg'}; + } + return %bookmarks; } @@ -2194,7 +2174,7 @@ sub getdate { $date{'mday'} = sprintf ("%02u", $mday); $date{'mon'} = sprintf ("%02u", ($mon + 1)); $date{'tzoffset'} = sprintf ("GMT%s%02d:%02u", $sign, $hours, $minutes); - $date{'stamp'} = "$date{'year'}-$date{'mon'}-$date{'mday'}:$date{'hour'}:$date{'min'}:$date{'sec'}-$date{'tzoffset'}"; + $date{'stamp'} = "$date{'year'}-$date{'mon'}-$date{'mday'}_$date{'hour'}:$date{'min'}:$date{'sec'}-$date{'tzoffset'}"; return %date; } diff --git a/tests/syncoid/009_preserve_properties/run.sh b/tests/syncoid/009_preserve_properties/run.sh index 6ec9c4e..9724ff8 100755 --- a/tests/syncoid/009_preserve_properties/run.sh +++ b/tests/syncoid/009_preserve_properties/run.sh @@ -23,7 +23,7 @@ function cleanUp { # export pool in any case trap cleanUp EXIT -zfs create -o recordsize=16k -o xattr=on -o mountpoint=none -o primarycache=none "${POOL_NAME}"/src +zfs create -o recordsize=16k -o xattr=sa -o mountpoint=none -o primarycache=none "${POOL_NAME}"/src zfs create -V 100M -o volblocksize=8k "${POOL_NAME}"/src/zvol8 
zfs create -V 100M -o volblocksize=16k -o primarycache=all "${POOL_NAME}"/src/zvol16 zfs create -V 100M -o volblocksize=64k "${POOL_NAME}"/src/zvol64 @@ -33,7 +33,6 @@ zfs set 'net.openoid:var-name'='with whitespace and !"ยง$%&/()= symbols' "${POOL ../../../syncoid --preserve-properties --recursive --debug --compress=none "${POOL_NAME}"/src "${POOL_NAME}"/dst - if [ "$(zfs get -H -o value -t filesystem recordsize "${POOL_NAME}"/dst)" != "16K" ]; then exit 1 fi @@ -42,7 +41,7 @@ if [ "$(zfs get -H -o value -t filesystem mountpoint "${POOL_NAME}"/dst)" != "no exit 1 fi -if [ "$(zfs get -H -o value -t filesystem xattr "${POOL_NAME}"/dst)" != "on" ]; then +if [ "$(zfs get -H -o value -t filesystem xattr "${POOL_NAME}"/dst)" != "sa" ]; then exit 1 fi diff --git a/tests/syncoid/011_sync_out-of-order_snapshots/run.sh b/tests/syncoid/011_sync_out-of-order_snapshots/run.sh new file mode 100755 index 0000000..d111d3b --- /dev/null +++ b/tests/syncoid/011_sync_out-of-order_snapshots/run.sh @@ -0,0 +1,50 @@ +#!/bin/bash + +# test verifying snapshots with out-of-order snapshot creation datetimes + +set -x +set -e + +. 
../../common/lib.sh + +if [ "$INVASIVE_TESTS" != "1" ]; then + exit 130 +fi + +POOL_IMAGE="/tmp/syncoid-test-11.zpool" +POOL_SIZE="64M" +POOL_NAME="syncoid-test-11" + +truncate -s "${POOL_SIZE}" "${POOL_IMAGE}" + +zpool create -m none -f "${POOL_NAME}" "${POOL_IMAGE}" + +function cleanUp { + zpool export "${POOL_NAME}" + rm -f "${POOL_IMAGE}" +} + +# export pool and remove the image in any case +trap cleanUp EXIT + +zfs create "${POOL_NAME}"/before +zfs snapshot "${POOL_NAME}"/before@this-snapshot-should-make-it-into-the-after-dataset + +disableTimeSync +setdate 1155533696 +zfs snapshot "${POOL_NAME}"/before@oldest-snapshot + +zfs snapshot "${POOL_NAME}"/before@another-snapshot-does-not-matter +../../../syncoid --sendoptions="Lec" "${POOL_NAME}"/before "${POOL_NAME}"/after + +# verify +saveSnapshotList "${POOL_NAME}" "snapshot-list.txt" + +grep "${POOL_NAME}/before@this-snapshot-should-make-it-into-the-after-dataset" "snapshot-list.txt" || exit $? +grep "${POOL_NAME}/after@this-snapshot-should-make-it-into-the-after-dataset" "snapshot-list.txt" || exit $? +grep "${POOL_NAME}/before@oldest-snapshot" "snapshot-list.txt" || exit $? +grep "${POOL_NAME}/after@oldest-snapshot" "snapshot-list.txt" || exit $? +grep "${POOL_NAME}/before@another-snapshot-does-not-matter" "snapshot-list.txt" || exit $? +grep "${POOL_NAME}/after@another-snapshot-does-not-matter" "snapshot-list.txt" || exit $? + +exit 0 diff --git a/tests/syncoid/014_bookmark_check_guid_on_existing/run.sh b/tests/syncoid/014_bookmark_check_guid_on_existing/run.sh new file mode 100755 index 0000000..ac47e55 --- /dev/null +++ b/tests/syncoid/014_bookmark_check_guid_on_existing/run.sh @@ -0,0 +1,93 @@ +#!/bin/bash + +# test if guid of existing bookmark matches new guid + +set -x +set -e + +. 
../../common/lib.sh + +POOL_IMAGE="/tmp/syncoid-test-014.zpool" +MOUNT_TARGET="/tmp/syncoid-test-014.mount" +POOL_SIZE="1000M" +POOL_NAME="syncoid-test-014" + +truncate -s "${POOL_SIZE}" "${POOL_IMAGE}" + +zpool create -m "${MOUNT_TARGET}" -f "${POOL_NAME}" "${POOL_IMAGE}" + +function cleanUp { + zpool export "${POOL_NAME}" +} + +function getGuid { + zfs get -H guid "$1" | awk '{print $3}' +} + +# export pool in any case +trap cleanUp EXIT + +zfs create "${POOL_NAME}/a" +zfs snapshot "${POOL_NAME}/a@s0" + +# This fully replicates a to b +../../../syncoid --debug --no-sync-snap --no-rollback --create-bookmark "${POOL_NAME}"/a "${POOL_NAME}"/b + +# This fully replicates a to c +../../../syncoid --debug --no-sync-snap --no-rollback --create-bookmark "${POOL_NAME}"/a "${POOL_NAME}"/c + +bookmark_guid=$(getGuid "${POOL_NAME}/a#s0") +snap_a_guid=$(getGuid "${POOL_NAME}/a@s0") +snap_b_guid=$(getGuid "${POOL_NAME}/b@s0") +snap_c_guid=$(getGuid "${POOL_NAME}/c@s0") + +# Bookmark guid should equal guid of all snapshots +if [ "${bookmark_guid}" != "${snap_a_guid}" ] || \ + [ "${bookmark_guid}" != "${snap_b_guid}" ] || \ + [ "${bookmark_guid}" != "${snap_c_guid}" ]; then + exit 1 +fi + +bookmark_suffix="${bookmark_guid:0:6}" +fallback_bookmark="${POOL_NAME}/a#s0${bookmark_suffix}" + +# Fallback bookmark should not exist +if zfs get guid "${fallback_bookmark}"; then + exit 1 +fi + +zfs snapshot "${POOL_NAME}/a@s1" + +# Create bookmark so syncoid is forced to create fallback bookmark +zfs bookmark "${POOL_NAME}/a@s0" "${POOL_NAME}/a#s1" + +# This incrementally replicates from a@s0 to a@s1 and should create a +# bookmark with fallback suffix +../../../syncoid --debug --no-sync-snap --no-rollback --create-bookmark "${POOL_NAME}"/a "${POOL_NAME}"/b + +snap_guid=$(getGuid "${POOL_NAME}/a@s1") +bookmark_suffix="${snap_guid:0:6}" +fallback_bookmark="${POOL_NAME}/a#s1${bookmark_suffix}" + +# Fallback bookmark guid should equal guid of snapshot +if [ "$(getGuid "${fallback_bookmark}")" 
!= "${snap_guid}" ]; then + exit 1 +fi + +zfs snapshot "${POOL_NAME}/a@s2" + +snap_guid=$(getGuid "${POOL_NAME}/a@s2") +bookmark_suffix="${snap_guid:0:6}" +fallback_bookmark="${POOL_NAME}/a#s2${bookmark_suffix}" + +# Create bookmark and fallback bookmark so syncoid should fail +zfs bookmark "${POOL_NAME}/a@s0" "${POOL_NAME}/a#s2" +zfs bookmark "${POOL_NAME}/a@s0" "${fallback_bookmark}" + +# This incrementally replicates from a@s1 to a@s2 and should fail to create a +# bookmark with fallback suffix +if ../../../syncoid --debug --no-sync-snap --no-rollback --create-bookmark "${POOL_NAME}"/a "${POOL_NAME}"/b; then + exit 1 +fi + +exit 0 diff --git a/tests/syncoid/run-tests.sh b/tests/syncoid/run-tests.sh index 0e7570e..194b34f 100755 --- a/tests/syncoid/run-tests.sh +++ b/tests/syncoid/run-tests.sh @@ -2,6 +2,8 @@ # run's all the available tests +# set INVASIVE_TESTS=1 to also run invasive test which manipulate the system time + for test in */; do if [ ! -x "${test}/run.sh" ]; then continue