Merge branch 'master' into pre-post-scripts

Christoph Klaffl, 2018-12-04 21:31:13 +01:00
commit e542cdcbde
11 changed files with 276 additions and 52 deletions

View File

@ -30,4 +30,4 @@ strongly recommends using your distribution's repositories instead.
On Ubuntu: apt install libconfig-inifiles-perl
On CentOS: yum install perl-Config-IniFiles
-On FreeBSD: pkg install p5-Config-Inifiles
+On FreeBSD: pkg install p5-Config-IniFiles

View File

@ -28,6 +28,7 @@ And its /etc/sanoid/sanoid.conf might look something like this:
#############################
[template_production]
frequently = 0
hourly = 36
daily = 30
monthly = 3
@ -168,7 +169,7 @@ As of 1.4.18, syncoid also automatically supports and enables resume of interrup
+ --compress <compression type>
-Currently accepted options: gzip, pigz-fast, pigz-slow, lzo (default) & none. If the selected compression method is unavailable on the source and destination, no compression will be used.
+Currently accepted options: gzip, pigz-fast, pigz-slow, zstd-fast, zstd-slow, lz4, lzo (default) & none. If the selected compression method is unavailable on the source and destination, no compression will be used.
+ --source-bwlimit <limit t|g|m|k>
@ -198,6 +199,11 @@ As of 1.4.18, syncoid also automatically supports and enables resume of interrup
This argument tells syncoid to not use resumeable zfs send/receive streams.
+ --no-clone-handling
This argument tells syncoid to not recreate clones on the target on initial sync, and to do a normal replication instead.
+ --dumpsnaps
This prints a list of snapshots during the run.
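For orientation, a hypothetical invocation combining the options documented above (host, pool and dataset names are placeholders) could look like this:

    # recursive replication with zstd-fast compression, clones sent as full streams
    syncoid --recursive --compress zstd-fast --no-clone-handling root@source:tank/data backuppool/data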

View File

@ -16,4 +16,14 @@ override_dh_auto_install:
@mkdir -p $(DESTDIR)/usr/share/doc/sanoid; \
cp sanoid.conf $(DESTDIR)/usr/share/doc/sanoid/sanoid.conf.example;
@mkdir -p $(DESTDIR)/lib/systemd/system; \
-cp debian/sanoid.timer $(DESTDIR)/lib/systemd/system;
+cp debian/sanoid-prune.service $(DESTDIR)/lib/systemd/system;
override_dh_installinit:
dh_installinit --noscripts
override_dh_systemd_enable:
dh_systemd_enable sanoid.timer
dh_systemd_enable sanoid-prune.service
override_dh_systemd_start:
dh_systemd_start sanoid.timer

View File

@ -0,0 +1,13 @@
[Unit]
Description=Cleanup ZFS Pool
Requires=zfs.target
After=zfs.target sanoid.service
ConditionFileNotEmpty=/etc/sanoid/sanoid.conf
[Service]
Environment=TZ=UTC
Type=oneshot
ExecStart=/usr/sbin/sanoid --prune-snapshots
[Install]
WantedBy=sanoid.service
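Because the unit is WantedBy=sanoid.service rather than driven by its own timer, pruning piggybacks on every sanoid.service run. A minimal sketch of enabling the chain by hand, assuming the units are already installed under /lib/systemd/system:

    systemctl daemon-reload
    systemctl enable sanoid-prune.service
    systemctl enable --now sanoid.timer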

View File

@ -7,4 +7,4 @@ ConditionFileNotEmpty=/etc/sanoid/sanoid.conf
[Service]
Environment=TZ=UTC
Type=oneshot
-ExecStart=/usr/sbin/sanoid --cron
+ExecStart=/usr/sbin/sanoid --take-snapshots

View File

@ -14,7 +14,7 @@ License: GPLv3
URL: https://github.com/jimsalterjrs/sanoid
Source0: https://github.com/jimsalterjrs/%{name}/archive/%{git_tag}/%{name}-%{version}.tar.gz
-Requires: perl, mbuffer, lzop, pv
+Requires: perl, mbuffer, lzop, pv, perl-Config-IniFiles
%if 0%{?_with_systemd}
Requires: systemd >= 212

sanoid
View File

@ -5,6 +5,7 @@
# project's Git repository at https://github.com/jimsalterjrs/sanoid/blob/master/LICENSE.
$::VERSION = '1.4.18';
my $MINIMUM_DEFAULTS_VERSION = 2;
use strict;
use warnings;
@ -31,6 +32,7 @@ if (keys %args < 2) {
my $pscmd = '/bin/ps';
my $zfs = '/sbin/zfs';
my $zpool = '/sbin/zpool';
my $conf_file = "$args{'configdir'}/sanoid.conf";
my $default_conf_file = "$args{'configdir'}/sanoid.defaults.conf";
@ -44,6 +46,7 @@ my $cache = '/var/cache/sanoidsnapshots.txt';
my $cacheTTL = 900; # 15 minutes
my %snaps = getsnaps( \%config, $cacheTTL, $forcecacheupdate );
my %pruned;
my %capacitycache;
my %snapsbytype = getsnapsbytype( \%config, \%snaps );
@ -125,12 +128,13 @@ sub monitor_snapshots {
my $path = $config{$section}{'path'};
push @paths, $path;
-my @types = ('yearly','monthly','daily','hourly');
+my @types = ('yearly','monthly','daily','hourly','frequently');
foreach my $type (@types) {
my $smallerperiod = 0;
# we need to set the period length in seconds first
-if ($type eq 'hourly') { $smallerperiod = 60; }
+if ($type eq 'frequently') { $smallerperiod = 1; }
elsif ($type eq 'hourly') { $smallerperiod = 60; }
elsif ($type eq 'daily') { $smallerperiod = 60*60; }
elsif ($type eq 'monthly') { $smallerperiod = 60*60*24; }
elsif ($type eq 'yearly') { $smallerperiod = 60*60*24; }
@ -254,12 +258,17 @@ sub prune_snapshots {
my $path = $config{$section}{'path'};
my $period = 0;
if (check_prune_defer($config, $section)) {
if ($args{'verbose'}) { print "INFO: deferring snapshot pruning ($section)...\n"; }
next;
}
foreach my $type (keys %{ $config{$section} }){
unless ($type =~ /ly$/) { next; }
# we need to set the period length in seconds first
-if ($type eq 'hourly') { $period = 60*60; }
+if ($type eq 'frequently') { $period = 60 * $config{$section}{'frequent_period'}; }
elsif ($type eq 'hourly') { $period = 60*60; }
elsif ($type eq 'daily') { $period = 60*60*24; }
elsif ($type eq 'monthly') { $period = 60*60*24*31; }
elsif ($type eq 'yearly') { $period = 60*60*24*365.25; }
@ -384,7 +393,18 @@ sub take_snapshots {
# to avoid duplicates with DST
my $dateSuffix = "";
-if ($type eq 'hourly') {
+if ($type eq 'frequently') {
my $frequentslice = int($datestamp{'min'} / $config{$section}{'frequent_period'});
push @preferredtime,0; # try to hit 0 seconds
push @preferredtime,$frequentslice * $config{$section}{'frequent_period'};
push @preferredtime,$datestamp{'hour'};
push @preferredtime,$datestamp{'mday'};
push @preferredtime,($datestamp{'mon'}-1); # january is month 0
push @preferredtime,$datestamp{'year'};
$lastpreferred = timelocal(@preferredtime);
if ($lastpreferred > time()) { $lastpreferred -= 60 * $config{$section}{'frequent_period'}; } # preferred time is later this frequent period - so look at last frequent period
} elsif ($type eq 'hourly') {
push @preferredtime,0; # try to hit 0 seconds
push @preferredtime,$config{$section}{'hourly_min'};
push @preferredtime,$datestamp{'hour'};
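To make the slice arithmetic above concrete: with frequent_period = 15 and the clock at minute 37, the last preferred "frequently" slot is minute 30 of the current hour. A standalone sketch with made-up values, variable names mirroring the hunk:

    my $frequent_period = 15;                               # minutes, from the config
    my $minute = 37;                                        # current minute of the hour
    my $frequentslice = int($minute / $frequent_period);    # 2
    print $frequentslice * $frequent_period, "\n";          # 30, the preferred minute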
@ -701,10 +721,21 @@ sub init {
tie my %ini, 'Config::IniFiles', ( -file => $conf_file ) or die "FATAL: cannot load $conf_file - please create a valid local config file before running sanoid!";
# we'll use these later to normalize potentially true and false values on any toggle keys
-my @toggles = ('autosnap','autoprune','monitor_dont_warn','monitor_dont_crit','monitor','recursive','process_children_only','no_inconsistent_snapshot','force_post_snapshot_script');
+my @toggles = ('autosnap','autoprune','monitor_dont_warn','monitor_dont_crit','monitor','recursive','process_children_only','skip_children','no_inconsistent_snapshot','force_post_snapshot_script');
my @istrue=(1,"true","True","TRUE","yes","Yes","YES","on","On","ON");
my @isfalse=(0,"false","False","FALSE","no","No","NO","off","Off","OFF");
# check if default configuration file is up to date
my $defaults_version = 1;
if (defined $defaults{'version'}{'version'}) {
$defaults_version = $defaults{'version'}{'version'};
delete $defaults{'version'};
}
if ($defaults_version < $MINIMUM_DEFAULTS_VERSION) {
die "FATAL: you're using sanoid.defaults.conf v$defaults_version, this version of sanoid requires a minimum sanoid.defaults.conf v$MINIMUM_DEFAULTS_VERSION";
}
foreach my $section (keys %ini) {
# first up - die with honor if unknown parameters are set in any modules or templates by the user.
@ -758,7 +789,7 @@ sub init {
# override with any locally set values in the module itself
foreach my $key (keys %{$ini{$section}} ) {
-if (! ($key =~ /template|recursive/)) {
+if (! ($key =~ /template|recursive|skip_children/)) {
if ($args{'debug'}) { print "DEBUG: overriding $key on $section with value directly set in module.\n"; }
$config{$section}{$key} = $ini{$section}{$key};
}
@ -783,10 +814,17 @@ sub init {
# how 'bout some recursion? =)
my @datasets;
-if ($ini{$section}{'recursive'}) {
+if ($ini{$section}{'recursive'} || $ini{$section}{'skip_children'}) {
@datasets = getchilddatasets($config{$section}{'path'});
-foreach my $dataset(@datasets) {
+DATASETS: foreach my $dataset(@datasets) {
chomp $dataset;
if ($ini{$section}{'skip_children'}) {
if ($args{'debug'}) { print "DEBUG: ignoring $dataset.\n"; }
delete $config{$dataset};
next DATASETS;
}
foreach my $key (keys %{$config{$section}} ) {
if (! ($key =~ /template|recursive|children_only/)) {
if ($args{'debug'}) { print "DEBUG: recursively setting $key from $section to $dataset.\n"; }
@ -912,7 +950,7 @@ sub check_zpool() {
exit $ERRORS{$state};
}
-my $statcommand="/sbin/zpool list -o name,size,cap,health,free $pool";
+my $statcommand="$zpool list -o name,size,cap,health,free $pool";
if (! open STAT, "$statcommand|") {
print ("$state '$statcommand' command returns no result! NOTE: This plugin needs OS support for ZFS, and execution with root privileges.\n");
@ -960,7 +998,7 @@ sub check_zpool() {
## flag to detect section of zpool status involving our zpool
my $poolfind=0;
-$statcommand="/sbin/zpool status $pool";
+$statcommand="$zpool status $pool";
if (! open STAT, "$statcommand|") {
$state = 'CRITICAL';
print ("$state '$statcommand' command returns no result! NOTE: This plugin needs OS support for ZFS, and execution with root privileges.\n");
@ -1068,7 +1106,7 @@ sub check_zpool() {
return ($ERRORS{$state},$msg);
} # end check_zpool()
-sub check_capacity_limit() {
+sub check_capacity_limit {
my $value = shift;
if (!defined($value) || $value !~ /^\d+\z/) {
@ -1091,7 +1129,7 @@ sub check_zpool_capacity() {
my $capacitylimitsref=shift;
my %capacitylimits=%$capacitylimitsref;
-my $statcommand="/sbin/zpool list -H -o cap $pool";
+my $statcommand="$zpool list -H -o cap $pool";
if (! open STAT, "$statcommand|") {
print ("$state '$statcommand' command returns no result!\n");
@ -1136,6 +1174,60 @@ sub check_zpool_capacity() {
return ($ERRORS{$state},$msg);
} # end check_zpool_capacity()
sub check_prune_defer {
my ($config, $section) = @_;
my $limit = $config{$section}{"prune_defer"};
if (!check_capacity_limit($limit)) {
die "ERROR: invalid prune_defer limit!\n";
}
if ($limit eq 0) {
return 0;
}
my @parts = split /\//, $section, 2;
my $pool = $parts[0];
if (exists $capacitycache{$pool}) {
} else {
$capacitycache{$pool} = get_zpool_capacity($pool);
}
if ($limit < $capacitycache{$pool}) {
return 0;
}
return 1;
}
sub get_zpool_capacity {
my $pool = shift;
my $statcommand="$zpool list -H -o cap $pool";
if (! open STAT, "$statcommand|") {
die "ERROR: '$statcommand' command returns no result!\n";
}
my $line = <STAT>;
close(STAT);
chomp $line;
my @row = split(/ +/, $line);
my $cap=$row[0];
## check for valid capacity value
if ($cap !~ m/^[0-9]{1,3}%$/ ) {
die "ERROR: '$statcommand' command returned invalid capacity value ($cap)!\n";
}
$cap =~ s/\D//g;
return $cap;
}
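For reference, `zpool list -H -o cap` prints a single value such as `42%`; the regex check and the `s/\D//g` above strip it down to the bare integer that check_prune_defer compares against the configured limit. A quick standalone check of the same parsing, with the pool name as a placeholder:

    my $cap = `zpool list -H -o cap tank`;
    chomp $cap;
    die "unexpected capacity value: $cap\n" unless $cap =~ m/^[0-9]{1,3}%$/;
    $cap =~ s/\D//g;
    print "pool is ${cap}% full\n";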
######################################################################################################
######################################################################################################
######################################################################################################
@ -1297,6 +1389,9 @@ sub getchilddatasets {
my @children = <FH>;
close FH;
# parent dataset is the first element
shift @children;
return @children;
}

View File

@ -40,6 +40,7 @@
daily = 60
[template_production]
frequently = 0
hourly = 36
daily = 30
monthly = 3
@ -49,6 +50,7 @@
[template_backup]
autoprune = yes
frequently = 0
hourly = 30
daily = 90
monthly = 12

View File

@ -5,6 +5,8 @@
# #
# you have been warned. #
###################################################################################
[version]
version = 2
[template_default]
@ -15,6 +17,8 @@ path =
recursive =
use_template =
process_children_only =
skip_children =
pre_snapshot_script =
post_snapshot_script =
pruning_script =
@ -22,17 +26,32 @@ script_timeout = 5
no_inconsistent_snapshot =
force_post_snapshot_script =
# for snapshot periods shorter than one hour, the period duration must be defined
# in minutes. Because these snapshots are scheduled within each full hour, the selected
# value should divide 60 minutes without remainder, so that the snapshots taken
# are spaced at equal intervals. Values larger than 59 aren't practical,
# as only one snapshot will be taken on each full hour in that case.
# examples:
# frequent_period = 15 -> four snapshots each hour, 15 minutes apart
# frequent_period = 5 -> twelve snapshots each hour, 5 minutes apart
# frequent_period = 45 -> two snapshots each hour with different time gaps
# between them: 45 minutes and 15 minutes in this case
frequent_period = 15
# If any snapshot type is set to 0, we will not take snapshots for it - and will immediately
# prune any of those type snapshots already present.
#
# Otherwise, if autoprune is set, we will prune any snapshots of that type which are older
# than (setting * periodicity) - so if daily = 90, we'll prune any dailies older than 90 days.
autoprune = yes
frequently = 0
hourly = 48
daily = 90
monthly = 6
yearly = 0
-min_percent_free = 10
+# pruning can be skipped based on the used capacity of the pool
# (0: always prune, 1-100: only prune if used capacity is greater than this value)
prune_defer = 0
# We will automatically take snapshots if autosnap is on, at the desired times configured
# below (or immediately, if we don't have one since the last preferred time for that type).
@ -68,6 +87,8 @@ yearly_min = 0
monitor = yes
monitor_dont_warn = no
monitor_dont_crit = no
frequently_warn = 0
frequently_crit = 0
hourly_warn = 90
hourly_crit = 360
daily_warn = 28
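Taken together, a hypothetical sanoid.conf module using the new frequently, frequent_period and prune_defer settings introduced above might look like this (the dataset name is a placeholder):

    [tank/vms]
    use_template = production
    recursive = yes
    frequently = 8
    frequent_period = 15
    # skip pruning until the pool is more than 80% full
    prune_defer = 80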

syncoid
View File

@ -19,7 +19,8 @@ use Sys::Hostname;
my %args = ('sshkey' => '', 'sshport' => '', 'sshcipher' => '', 'sshoption' => [], 'target-bwlimit' => '', 'source-bwlimit' => '');
GetOptions(\%args, "no-command-checks", "monitor-version", "compress=s", "dumpsnaps", "recursive|r",
"source-bwlimit=s", "target-bwlimit=s", "sshkey=s", "sshport=i", "sshcipher|c=s", "sshoption|o=s@",
-"debug", "quiet", "no-stream", "no-sync-snap", "no-resume", "exclude=s@", "skip-parent", "identifier=s") or pod2usage(2);
+"debug", "quiet", "no-stream", "no-sync-snap", "no-resume", "exclude=s@", "skip-parent", "identifier=s",
"no-clone-handling") or pod2usage(2);
my %compressargs = %{compressargset($args{'compress'} || 'default')}; # Can't be done with GetOptions arg, as default still needs to be set
@ -104,17 +105,59 @@ my $exitcode = 0;
## replication ##
if (!defined $args{'recursive'}) {
-syncdataset($sourcehost, $sourcefs, $targethost, $targetfs);
+syncdataset($sourcehost, $sourcefs, $targethost, $targetfs, undef);
} else {
if ($debug) { print "DEBUG: recursive sync of $sourcefs.\n"; }
my @datasets = getchilddatasets($sourcehost, $sourcefs, $sourceisroot);
-foreach my $dataset(@datasets) {
my @deferred;
foreach my $datasetProperties(@datasets) {
my $dataset = $datasetProperties->{'name'};
my $origin = $datasetProperties->{'origin'};
if ($origin eq "-" || defined $args{'no-clone-handling'}) {
$origin = undef;
} else {
# check if clone source is replicated too
my @values = split(/@/, $origin, 2);
my $srcdataset = $values[0];
my $found = 0;
foreach my $datasetProperties(@datasets) {
if ($datasetProperties->{'name'} eq $srcdataset) {
$found = 1;
last;
}
}
if ($found == 0) {
# clone source is not replicated, do a full replication
$origin = undef;
} else {
# clone source is replicated, defer until all non clones are replicated
push @deferred, $datasetProperties;
next;
}
}
$dataset =~ s/\Q$sourcefs\E//;
chomp $dataset;
my $childsourcefs = $sourcefs . $dataset;
my $childtargetfs = $targetfs . $dataset;
# print "syncdataset($sourcehost, $childsourcefs, $targethost, $childtargetfs); \n";
-syncdataset($sourcehost, $childsourcefs, $targethost, $childtargetfs);
+syncdataset($sourcehost, $childsourcefs, $targethost, $childtargetfs, $origin);
}
# replicate cloned datasets and if this is the initial run, recreate them on the target
foreach my $datasetProperties(@deferred) {
my $dataset = $datasetProperties->{'name'};
my $origin = $datasetProperties->{'origin'};
$dataset =~ s/\Q$sourcefs\E//;
chomp $dataset;
my $childsourcefs = $sourcefs . $dataset;
my $childtargetfs = $targetfs . $dataset;
syncdataset($sourcehost, $childsourcefs, $targethost, $childtargetfs, $origin);
}
}
@ -147,37 +190,51 @@ sub getchilddatasets {
$fsescaped = escapeshellparam($fsescaped);
}
-my $getchildrencmd = "$rhost $mysudocmd $zfscmd list -o name -t filesystem,volume -Hr $fsescaped |";
+my $getchildrencmd = "$rhost $mysudocmd $zfscmd list -o name,origin -t filesystem,volume -Hr $fsescaped |";
if ($debug) { print "DEBUG: getting list of child datasets on $fs using $getchildrencmd...\n"; }
-open FH, $getchildrencmd;
-my @children = <FH>;
-close FH;
-if (defined $args{'skip-parent'}) {
-# parent dataset is the first element
-shift @children;
+if (! open FH, $getchildrencmd) {
+die "ERROR: list command failed!\n";
}
-if (defined $args{'exclude'}) {
-my $excludes = $args{'exclude'};
-foreach (@$excludes) {
-for my $i ( 0 .. $#children ) {
-if ($children[$i] =~ /$_/) {
-if ($debug) { print "DEBUG: excluded $children[$i] because of $_\n"; }
-undef $children[$i]
+my @children;
+my $first = 1;
+DATASETS: while(<FH>) {
+chomp;
+if (defined $args{'skip-parent'} && $first eq 1) {
# parent dataset is the first element
$first = 0;
next;
}
my ($dataset, $origin) = /^([^\t]+)\t([^\t]+)/;
if (defined $args{'exclude'}) {
my $excludes = $args{'exclude'};
foreach (@$excludes) {
print("$dataset\n");
if ($dataset =~ /$_/) {
if ($debug) { print "DEBUG: excluded $dataset because of $_\n"; }
next DATASETS;
}
}
-@children = grep{ defined }@children;
}
my %properties;
$properties{'name'} = $dataset;
$properties{'origin'} = $origin;
push @children, \%properties;
}
close FH;
return @children;
}
sub syncdataset {
-my ($sourcehost, $sourcefs, $targethost, $targetfs) = @_;
+my ($sourcehost, $sourcefs, $targethost, $targetfs, $origin) = @_;
my $sourcefsescaped = escapeshellparam($sourcefs);
my $targetfsescaped = escapeshellparam($targetfs);
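The rewritten getchilddatasets above parses the tab-separated output of `zfs list -H -o name,origin`, in which datasets that are not clones report `-` as their origin. A made-up example of what the DATASETS loop consumes:

    tank/data	-
    tank/data/base	-
    tank/data/web01	tank/data/base@golden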
@ -305,11 +362,25 @@ sub syncdataset {
my $sendcmd = "$sourcesudocmd $zfscmd send $sourcefsescaped\@$oldestsnapescaped";
my $recvcmd = "$targetsudocmd $zfscmd receive $receiveextraargs -F $targetfsescaped";
-my $pvsize = getsendsize($sourcehost,"$sourcefs\@$oldestsnap",0,$sourceisroot);
+my $pvsize;
if (defined $origin) {
my $originescaped = escapeshellparam($origin);
$sendcmd = "$sourcesudocmd $zfscmd send -i $originescaped $sourcefsescaped\@$oldestsnapescaped";
my $streamargBackup = $args{'streamarg'};
$args{'streamarg'} = "-i";
$pvsize = getsendsize($sourcehost,$origin,"$sourcefs\@$oldestsnap",$sourceisroot);
$args{'streamarg'} = $streamargBackup;
} else {
$pvsize = getsendsize($sourcehost,"$sourcefs\@$oldestsnap",0,$sourceisroot);
}
my $disp_pvsize = readablebytes($pvsize);
if ($pvsize == 0) { $disp_pvsize = 'UNKNOWN'; }
my $synccmd = buildsynccmd($sendcmd,$recvcmd,$pvsize,$sourceisroot,$targetisroot);
if (!$quiet) {
if (defined $origin) {
print "INFO: Clone is recreated on target $targetfs based on $origin\n";
}
if (!defined ($args{'no-stream'}) ) {
print "INFO: Sending oldest full snapshot $sourcefs\@$oldestsnap (~ $disp_pvsize) to new target filesystem:\n";
} else {
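At the ZFS level, recreating a clone this way amounts to an incremental send from the origin snapshot rather than a full send, roughly equivalent to the following (dataset and snapshot names are made up):

    zfs send -i tank/data/base@golden tank/data/web01@syncoid_sync | zfs receive backuppool/data/web01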
@ -396,7 +467,7 @@ sub syncdataset {
# a resumed transfer will only be done to the next snapshot,
# so do an normal sync cycle
-return syncdataset($sourcehost, $sourcefs, $targethost, $targetfs);
+return syncdataset($sourcehost, $sourcefs, $targethost, $targetfs, undef);
}
# find most recent matching snapshot and do an -I
@ -515,11 +586,17 @@ sub compressargset {
decomrawcmd => '/usr/bin/lzop',
decomargs => '-dfc',
},
'lz4' => {
rawcmd => '/usr/bin/lz4',
args => '',
decomrawcmd => '/usr/bin/lz4',
decomargs => '-dc',
},
);
if ($value eq 'default') {
$value = $DEFAULT_COMPRESSION;
-} elsif (!(grep $value eq $_, ('gzip', 'pigz-fast', 'pigz-slow', 'zstd-fast', 'zstd-slow', 'lzo', 'default', 'none'))) {
+} elsif (!(grep $value eq $_, ('gzip', 'pigz-fast', 'pigz-slow', 'zstd-fast', 'zstd-slow', 'lz4', 'lzo', 'default', 'none'))) {
warn "Unrecognised compression value $value, defaulting to $DEFAULT_COMPRESSION";
$value = $DEFAULT_COMPRESSION;
}
@ -1154,6 +1231,11 @@ sub getsendsize {
}
chomp $sendsize;
# check for valid value
if ($sendsize !~ /^\d+$/) {
$sendsize = '';
}
# to avoid confusion with a zero size pv, give sendsize
# a minimum 4K value - or if empty, make sure it reads UNKNOWN
if ($debug) { print "DEBUG: sendsize = $sendsize\n"; }
@ -1227,7 +1309,7 @@ syncoid - ZFS snapshot replication tool
Options:
- --compress=FORMAT Compresses data during transfer. Currently accepted options are gzip, pigz-fast, pigz-slow, lzo (default) & none
+ --compress=FORMAT Compresses data during transfer. Currently accepted options are gzip, pigz-fast, pigz-slow, zstd-fast, zstd-slow, lz4, lzo (default) & none
--identifier=EXTRA Extra identifier which is included in the snapshot name. Can be used for replicating to multiple targets.
--recursive|r Also transfers child datasets
--skip-parent Skips syncing of the parent dataset. Does nothing without '--recursive' option.
@ -1250,3 +1332,4 @@ Options:
--dumpsnaps Dumps a list of snapshots during the run
--no-command-checks Do not check command existence before attempting transfer. Not recommended
--no-resume Don't use the ZFS resume feature if available
--no-clone-handling Don't try to recreate clones on target

View File

@ -10,7 +10,7 @@ set -x
POOL_NAME="sanoid-test-1"
POOL_TARGET="" # root
RESULT="/tmp/sanoid_test_result"
-RESULT_CHECKSUM="aa15e5595b0ed959313289ecb70323dad9903328ac46e881da5c4b0f871dd7cf"
+RESULT_CHECKSUM="68c67161a59d0e248094a66061972f53613067c9db52ad981030f36bc081fed7"
# UTC timestamp of start and end
START="1483225200"
@ -46,10 +46,4 @@ done
saveSnapshotList "${POOL_NAME}" "${RESULT}"
# hourly daily monthly
-verifySnapshotList "${RESULT}" 8759 366 12 "${RESULT_CHECKSUM}"
-# hourly count should be 8760 but one hour get's lost because of DST
-# daily count should be 365 but one additional daily is taken
-# because the DST change leads to a day with 25 hours
-# which will trigger an additional daily snapshot
+verifySnapshotList "${RESULT}" 8760 365 12 "${RESULT_CHECKSUM}"
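The expected checksum changes because the list of snapshots produced over the simulated year is now different. Assuming verifySnapshotList hashes the saved list with SHA-256 (the helper itself is not shown in this diff), a new reference value could be regenerated along the lines of:

    sha256sum /tmp/sanoid_test_result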