Merge branch 'master' into force-delete

This commit is contained in:
Christoph Klaffl 2018-12-04 22:15:54 +01:00
commit 0c217ced8b
No known key found for this signature in database
GPG Key ID: FC1C525C2A47CC28
11 changed files with 482 additions and 83 deletions

View File

@ -30,4 +30,4 @@ strongly recommends using your distribution's repositories instead.
On Ubuntu: apt install libconfig-inifiles-perl
On CentOS: yum install perl-Config-IniFiles
On FreeBSD: pkg install p5-Config-Inifiles
On FreeBSD: pkg install p5-Config-IniFiles

View File

@ -28,6 +28,7 @@ And its /etc/sanoid/sanoid.conf might look something like this:
#############################
[template_production]
frequently = 0
hourly = 36
daily = 30
monthly = 3
@ -36,7 +37,7 @@ And its /etc/sanoid/sanoid.conf might look something like this:
autoprune = yes
```
Which would be enough to tell sanoid to take and keep 36 hourly snapshots, 30 dailies, 3 monthlies, and no yearlies for all datasets under data/images (but not data/images itself, since process_children_only is set). Except in the case of data/images/win7-spice, which follows the same template (since it's a child of data/images) but only keeps 4 hourlies for whatever reason.
Which would be enough to tell sanoid to take and keep 36 hourly snapshots, 30 dailies, 3 monthlies, and no yearlies for all datasets under data/images (but not data/images itself, since process_children_only is set). Except in the case of data/images/win7, which follows the same template (since it's a child of data/images) but only keeps 4 hourlies for whatever reason.
##### Sanoid Command Line Options
@ -88,6 +89,13 @@ Which would be enough to tell sanoid to take and keep 36 hourly snapshots, 30 da
This prints out quite a lot of additional information during a sanoid run, and is normally not needed.
+ --readonly
Skip creation/deletion of snapshots (Simulate).
+ --help
Show help message.
----------
@ -168,7 +176,7 @@ As of 1.4.18, syncoid also automatically supports and enables resume of interrup
+ --compress <compression type>
Currently accepted options: gzip, pigz-fast, pigz-slow, lzo (default) & none. If the selected compression method is unavailable on the source and destination, no compression will be used.
Currently accepted options: gzip, pigz-fast, pigz-slow, zstd-fast, zstd-slow, lz4, xz, lzo (default) & none. If the selected compression method is unavailable on the source and destination, no compression will be used.
+ --source-bwlimit <limit t|g|m|k>
@ -202,14 +210,30 @@ As of 1.4.18, syncoid also automatically supports and enables resume of interrup
Remove target datasets recursively (WARNING: this will also affect child datasets with matching snapshots/bookmarks), if there are no matching snapshots/bookmarks.
+ --no-clone-handling
This argument tells syncoid to not recreate clones on the target on initial sync, and to do a normal replication instead.
+ --dumpsnaps
This prints a list of snapshots during the run.
+ --no-privilege-elevation
Bypass the root check and assume syncoid has the necessary permissions (for use with ZFS permission delegation).
+ --sshport
Allow sync to/from boxes running SSH on non-standard ports.
+ --sshcipher
Instruct ssh to use a particular cipher set.
+ --sshoption
Passes option to ssh. This argument can be specified multiple times.
+ --sshkey
Use specified identity file as per ssh -i.
@ -222,6 +246,10 @@ As of 1.4.18, syncoid also automatically supports and enables resume of interrup
This prints out quite a lot of additional information during a syncoid run, and is normally not needed.
+ --help
Show help message.
+ --version
Print the version and exit.

View File

@ -16,4 +16,14 @@ override_dh_auto_install:
@mkdir -p $(DESTDIR)/usr/share/doc/sanoid; \
cp sanoid.conf $(DESTDIR)/usr/share/doc/sanoid/sanoid.conf.example;
@mkdir -p $(DESTDIR)/lib/systemd/system; \
cp debian/sanoid.timer $(DESTDIR)/lib/systemd/system;
cp debian/sanoid-prune.service $(DESTDIR)/lib/systemd/system;
override_dh_installinit:
dh_installinit --noscripts
override_dh_systemd_enable:
dh_systemd_enable sanoid.timer
dh_systemd_enable sanoid-prune.service
override_dh_systemd_start:
dh_systemd_start sanoid.timer

View File

@ -0,0 +1,13 @@
[Unit]
Description=Cleanup ZFS Pool
Requires=zfs.target
After=zfs.target sanoid.service
ConditionFileNotEmpty=/etc/sanoid/sanoid.conf
[Service]
Environment=TZ=UTC
Type=oneshot
ExecStart=/usr/sbin/sanoid --prune-snapshots
[Install]
WantedBy=sanoid.service

View File

@ -7,4 +7,4 @@ ConditionFileNotEmpty=/etc/sanoid/sanoid.conf
[Service]
Environment=TZ=UTC
Type=oneshot
ExecStart=/usr/sbin/sanoid --cron
ExecStart=/usr/sbin/sanoid --take-snapshots

View File

@ -14,7 +14,7 @@ License: GPLv3
URL: https://github.com/jimsalterjrs/sanoid
Source0: https://github.com/jimsalterjrs/%{name}/archive/%{git_tag}/%{name}-%{version}.tar.gz
Requires: perl, mbuffer, lzop, pv
Requires: perl, mbuffer, lzop, pv, perl-Config-IniFiles
%if 0%{?_with_systemd}
Requires: systemd >= 212

281
sanoid
View File

@ -5,6 +5,7 @@
# project's Git repository at https://github.com/jimsalterjrs/sanoid/blob/master/LICENSE.
$::VERSION = '1.4.18';
my $MINIMUM_DEFAULTS_VERSION = 2;
use strict;
use warnings;
@ -31,6 +32,7 @@ if (keys %args < 2) {
my $pscmd = '/bin/ps';
my $zfs = '/sbin/zfs';
my $zpool = '/sbin/zpool';
my $conf_file = "$args{'configdir'}/sanoid.conf";
my $default_conf_file = "$args{'configdir'}/sanoid.defaults.conf";
@ -44,6 +46,7 @@ my $cache = '/var/cache/sanoidsnapshots.txt';
my $cacheTTL = 900; # 15 minutes
my %snaps = getsnaps( \%config, $cacheTTL, $forcecacheupdate );
my %pruned;
my %capacitycache;
my %snapsbytype = getsnapsbytype( \%config, \%snaps );
@ -125,15 +128,18 @@ sub monitor_snapshots {
my $path = $config{$section}{'path'};
push @paths, $path;
my @types = ('yearly','monthly','daily','hourly');
my @types = ('yearly','monthly','weekly','daily','hourly','frequently');
foreach my $type (@types) {
if ($config{$section}{$type} == 0) { next; }
my $smallerperiod = 0;
# we need to set the period length in seconds first
if ($type eq 'hourly') { $smallerperiod = 60; }
if ($type eq 'frequently') { $smallerperiod = 1; }
elsif ($type eq 'hourly') { $smallerperiod = 60; }
elsif ($type eq 'daily') { $smallerperiod = 60*60; }
elsif ($type eq 'monthly') { $smallerperiod = 60*60*24; }
elsif ($type eq 'yearly') { $smallerperiod = 60*60*24; }
elsif ($type eq 'weekly') { $smallerperiod = 60*60*24; }
elsif ($type eq 'monthly') { $smallerperiod = 60*60*24*7; }
elsif ($type eq 'yearly') { $smallerperiod = 60*60*24*31; }
my $typewarn = $type . '_warn';
my $typecrit = $type . '_crit';
@ -254,13 +260,19 @@ sub prune_snapshots {
my $path = $config{$section}{'path'};
my $period = 0;
if (check_prune_defer($config, $section)) {
if ($args{'verbose'}) { print "INFO: deferring snapshot pruning ($section)...\n"; }
next;
}
foreach my $type (keys %{ $config{$section} }){
unless ($type =~ /ly$/) { next; }
# we need to set the period length in seconds first
if ($type eq 'hourly') { $period = 60*60; }
if ($type eq 'frequently') { $period = 60 * $config{$section}{'frequent_period'}; }
elsif ($type eq 'hourly') { $period = 60*60; }
elsif ($type eq 'daily') { $period = 60*60*24; }
elsif ($type eq 'weekly') { $period = 60*60*24*7; }
elsif ($type eq 'monthly') { $period = 60*60*24*31; }
elsif ($type eq 'yearly') { $period = 60*60*24*365.25; }
@ -299,6 +311,17 @@ sub prune_snapshots {
if (! $args{'readonly'}) {
if (system($zfs, "destroy", $snap) == 0) {
$pruned{$snap} = 1;
my $dataset = (split '@', $snap)[0];
my $snapname = (split '@', $snap)[1];
if ($config{$dataset}{'pruning_script'}) {
$ENV{'SANOID_TARGET'} = $dataset;
$ENV{'SANOID_SNAPNAME'} = $snapname;
if ($args{'verbose'}) { print "executing pruning_script '".$config{$dataset}{'pruning_script'}."' on dataset '$dataset'\n"; }
my $ret = runscript('pruning_script',$dataset);
delete $ENV{'SANOID_TARGET'};
delete $ENV{'SANOID_SNAPNAME'};
}
} else {
warn "could not remove $snap : $?";
}
@ -373,7 +396,18 @@ sub take_snapshots {
# to avoid duplicates with DST
my $dateSuffix = "";
if ($type eq 'hourly') {
if ($type eq 'frequently') {
my $frequentslice = int($datestamp{'min'} / $config{$section}{'frequent_period'});
push @preferredtime,0; # try to hit 0 seconds
push @preferredtime,$frequentslice * $config{$section}{'frequent_period'};
push @preferredtime,$datestamp{'hour'};
push @preferredtime,$datestamp{'mday'};
push @preferredtime,($datestamp{'mon'}-1); # january is month 0
push @preferredtime,$datestamp{'year'};
$lastpreferred = timelocal(@preferredtime);
if ($lastpreferred > time()) { $lastpreferred -= 60 * $config{$section}{'frequent_period'}; } # preferred time is later this frequent period - so look at last frequent period
} elsif ($type eq 'hourly') {
push @preferredtime,0; # try to hit 0 seconds
push @preferredtime,$config{$section}{'hourly_min'};
push @preferredtime,$datestamp{'hour'};
@ -420,6 +454,24 @@ sub take_snapshots {
$lastpreferred -= 2*$dstOffset;
}
} # preferred time is later today - so look at yesterday's
} elsif ($type eq 'weekly') {
# calculate offset in seconds for the desired weekday
my $offset = 0;
if ($config{$section}{'weekly_wday'} < $datestamp{'wday'}) {
$offset += 7;
}
$offset += $config{$section}{'weekly_wday'} - $datestamp{'wday'};
$offset *= 60*60*24; # full day
push @preferredtime,0; # try to hit 0 seconds
push @preferredtime,$config{$section}{'weekly_min'};
push @preferredtime,$config{$section}{'weekly_hour'};
push @preferredtime,$datestamp{'mday'};
push @preferredtime,($datestamp{'mon'}-1); # january is month 0
push @preferredtime,$datestamp{'year'};
$lastpreferred = timelocal(@preferredtime);
$lastpreferred += $offset;
if ($lastpreferred > time()) { $lastpreferred -= 60*60*24*7; } # preferred time is later this week - so look at last week's
} elsif ($type eq 'monthly') {
push @preferredtime,0; # try to hit 0 seconds
push @preferredtime,$config{$section}{'monthly_min'};
@ -438,6 +490,9 @@ sub take_snapshots {
push @preferredtime,$datestamp{'year'};
$lastpreferred = timelocal(@preferredtime);
if ($lastpreferred > time()) { $lastpreferred -= 60*60*24*31*365.25; } # preferred time is later this year - so look at last year
} else {
warn "WARN: unknown interval type $type in config!";
next;
}
# reconstruct our human-formatted most recent preferred snapshot time into an epoch time, to compare with the epoch of our most recent snapshot
@ -455,12 +510,39 @@ sub take_snapshots {
if ( (scalar(@newsnaps)) > 0) {
foreach my $snap ( @newsnaps ) {
my $dataset = (split '@', $snap)[0];
my $snapname = (split '@', $snap)[1];
my $presnapshotfailure = 0;
if ($config{$dataset}{'pre_snapshot_script'} and !$args{'readonly'}) {
$ENV{'SANOID_TARGET'} = $dataset;
$ENV{'SANOID_SNAPNAME'} = $snapname;
if ($args{'verbose'}) { print "executing pre_snapshot_script '".$config{$dataset}{'pre_snapshot_script'}."' on dataset '$dataset'\n"; }
my $ret = runscript('pre_snapshot_script',$dataset);
delete $ENV{'SANOID_TARGET'};
delete $ENV{'SANOID_SNAPNAME'};
if ($ret != 0) {
# warning was already thrown by runscript function
$config{$dataset}{'no_inconsistent_snapshot'} and next;
$presnapshotfailure = 1;
}
}
if ($args{'verbose'}) { print "taking snapshot $snap\n"; }
if (!$args{'readonly'}) {
system($zfs, "snapshot", "$snap") == 0
or warn "CRITICAL ERROR: $zfs snapshot $snap failed, $?";
# make sure we don't end up with multiple snapshots with the same ctime
sleep 1;
}
if ($config{$dataset}{'post_snapshot_script'} and !$args{'readonly'}) {
if (!$presnapshotfailure or $config{$dataset}{'force_post_snapshot_script'}) {
$ENV{'SANOID_TARGET'} = $dataset;
$ENV{'SANOID_SNAPNAME'} = $snapname;
if ($args{'verbose'}) { print "executing post_snapshot_script '".$config{$dataset}{'post_snapshot_script'}."' on dataset '$dataset'\n"; }
runscript('post_snapshot_script',$dataset);
delete $ENV{'SANOID_TARGET'};
delete $ENV{'SANOID_SNAPNAME'};
}
}
}
$forcecacheupdate = 1;
@ -492,16 +574,20 @@ sub blabber {
my $path = $config{$section}{'path'};
print "Filesystem $path has:\n";
print " $snapsbypath{$path}{'numsnaps'} total snapshots ";
print "(newest: ";
my $newest = sprintf("%.1f",$snapsbypath{$path}{'newest'} / 60 / 60);
print "$newest hours old)\n";
if ($snapsbypath{$path}{'numsnaps'} == 0) {
print "(no current snapshots)"
} else {
print "(newest: ";
my $newest = sprintf("%.1f",$snapsbypath{$path}{'newest'} / 60 / 60);
print "$newest hours old)\n";
foreach my $type (keys %{ $snapsbytype{$path} }){
print " $snapsbytype{$path}{$type}{'numsnaps'} $type\n";
print " desired: $config{$section}{$type}\n";
print " newest: ";
my $newest = sprintf("%.1f",($snapsbytype{$path}{$type}{'newest'} / 60 / 60));
print "$newest hours old, named $snapsbytype{$path}{$type}{'newestname'}\n";
foreach my $type (keys %{ $snapsbytype{$path} }){
print " $snapsbytype{$path}{$type}{'numsnaps'} $type\n";
print " desired: $config{$section}{$type}\n";
print " newest: ";
my $newest = sprintf("%.1f",($snapsbytype{$path}{$type}{'newest'} / 60 / 60));
print "$newest hours old, named $snapsbytype{$path}{$type}{'newestname'}\n";
}
}
print "\n\n";
}
@ -661,10 +747,21 @@ sub init {
tie my %ini, 'Config::IniFiles', ( -file => $conf_file ) or die "FATAL: cannot load $conf_file - please create a valid local config file before running sanoid!";
# we'll use these later to normalize potentially true and false values on any toggle keys
my @toggles = ('autosnap','autoprune','monitor_dont_warn','monitor_dont_crit','monitor','recursive','process_children_only');
my @toggles = ('autosnap','autoprune','monitor_dont_warn','monitor_dont_crit','monitor','recursive','process_children_only','skip_children','no_inconsistent_snapshot','force_post_snapshot_script');
my @istrue=(1,"true","True","TRUE","yes","Yes","YES","on","On","ON");
my @isfalse=(0,"false","False","FALSE","no","No","NO","off","Off","OFF");
# check if default configuration file is up to date
my $defaults_version = 1;
if (defined $defaults{'version'}{'version'}) {
$defaults_version = $defaults{'version'}{'version'};
delete $defaults{'version'};
}
if ($defaults_version < $MINIMUM_DEFAULTS_VERSION) {
die "FATAL: you're using sanoid.defaults.conf v$defaults_version, this version of sanoid requires a minimum sanoid.defaults.conf v$MINIMUM_DEFAULTS_VERSION";
}
foreach my $section (keys %ini) {
# first up - die with honor if unknown parameters are set in any modules or templates by the user.
@ -691,10 +788,12 @@ sub init {
# override with values from user-defined default template, if any
foreach my $key (keys %{$ini{'template_default'}}) {
if (! ($key =~ /template|recursive/)) {
if ($args{'debug'}) { print "DEBUG: overriding $key on $section with value from user-defined default template.\n"; }
$config{$section}{$key} = $ini{'template_default'}{$key};
if ($key =~ /template|recursive/) {
warn "ignored key '$key' from user-defined default template.\n";
next;
}
if ($args{'debug'}) { print "DEBUG: overriding $key on $section with value from user-defined default template.\n"; }
$config{$section}{$key} = $ini{'template_default'}{$key};
}
}
@ -708,17 +807,19 @@ sub init {
my $template = 'template_'.$rawtemplate;
foreach my $key (keys %{$ini{$template}}) {
if (! ($key =~ /template|recursive/)) {
if ($args{'debug'}) { print "DEBUG: overriding $key on $section with value from user-defined template $template.\n"; }
$config{$section}{$key} = $ini{$template}{$key};
if ($key =~ /template|recursive/) {
warn "ignored key '$key' from '$rawtemplate' template.\n";
next;
}
if ($args{'debug'}) { print "DEBUG: overriding $key on $section with value from user-defined template $template.\n"; }
$config{$section}{$key} = $ini{$template}{$key};
}
}
}
# override with any locally set values in the module itself
foreach my $key (keys %{$ini{$section}} ) {
if (! ($key =~ /template|recursive/)) {
if (! ($key =~ /template|recursive|skip_children/)) {
if ($args{'debug'}) { print "DEBUG: overriding $key on $section with value directly set in module.\n"; }
$config{$section}{$key} = $ini{$section}{$key};
}
@ -743,10 +844,17 @@ sub init {
# how 'bout some recursion? =)
my @datasets;
if ($ini{$section}{'recursive'}) {
if (grep( /^$ini{$section}{'recursive'}$/, @istrue ) || grep( /^$ini{$section}{'skip_children'}$/, @istrue )) {
@datasets = getchilddatasets($config{$section}{'path'});
foreach my $dataset(@datasets) {
DATASETS: foreach my $dataset(@datasets) {
chomp $dataset;
if (grep( /^$ini{$section}{'skip_children'}$/, @istrue )) {
if ($args{'debug'}) { print "DEBUG: ignoring $dataset.\n"; }
delete $config{$dataset};
next DATASETS;
}
foreach my $key (keys %{$config{$section}} ) {
if (! ($key =~ /template|recursive|children_only/)) {
if ($args{'debug'}) { print "DEBUG: recursively setting $key from $section to $dataset.\n"; }
@ -872,7 +980,7 @@ sub check_zpool() {
exit $ERRORS{$state};
}
my $statcommand="/sbin/zpool list -o name,size,cap,health,free $pool";
my $statcommand="$zpool list -o name,size,cap,health,free $pool";
if (! open STAT, "$statcommand|") {
print ("$state '$statcommand' command returns no result! NOTE: This plugin needs OS support for ZFS, and execution with root privileges.\n");
@ -920,7 +1028,7 @@ sub check_zpool() {
## flag to detect section of zpool status involving our zpool
my $poolfind=0;
$statcommand="/sbin/zpool status $pool";
$statcommand="$zpool status $pool";
if (! open STAT, "$statcommand|") {
$state = 'CRITICAL';
print ("$state '$statcommand' command returns no result! NOTE: This plugin needs OS support for ZFS, and execution with root privileges.\n");
@ -974,7 +1082,7 @@ sub check_zpool() {
}
## other cases
my ($dev, $sta) = /^\s+(\S+)\s+(\S+)/;
my ($dev, $sta, $read, $write, $cksum) = /^\s+(\S+)\s+(\S+)\s+(\S+)\s+(\S+)\s+(\S+)/;
if (!defined($sta)) {
# cache and logs are special and don't have a status
@ -994,8 +1102,21 @@ sub check_zpool() {
## no display for verbose level 1
next if ($verbose==1);
## don't display working devices for verbose level 2
next if ($verbose==2 && $state eq "OK");
next if ($verbose==2 && ($sta eq "ONLINE" || $sta eq "AVAIL" || $sta eq "INUSE"));
if ($verbose==2 && ($state eq "OK" || $sta eq "ONLINE" || $sta eq "AVAIL" || $sta eq "INUSE")) {
# check for io/checksum errors
my @vdeverr = ();
if ($read != 0) { push @vdeverr, "read" };
if ($write != 0) { push @vdeverr, "write" };
if ($cksum != 0) { push @vdeverr, "cksum" };
if (scalar @vdeverr) {
$dmge=$dmge . "(" . $dev . ":" . join(", ", @vdeverr) . " errors) ";
if ($state eq "OK") { $state = "WARNING" };
}
next;
}
## show everything else
if (/^\s{3}(\S+)/) {
@ -1015,7 +1136,7 @@ sub check_zpool() {
return ($ERRORS{$state},$msg);
} # end check_zpool()
sub check_capacity_limit() {
sub check_capacity_limit {
my $value = shift;
if (!defined($value) || $value !~ /^\d+\z/) {
@ -1038,7 +1159,7 @@ sub check_zpool_capacity() {
my $capacitylimitsref=shift;
my %capacitylimits=%$capacitylimitsref;
my $statcommand="/sbin/zpool list -H -o cap $pool";
my $statcommand="$zpool list -H -o cap $pool";
if (! open STAT, "$statcommand|") {
print ("$state '$statcommand' command returns no result!\n");
@ -1083,6 +1204,60 @@ sub check_zpool_capacity() {
return ($ERRORS{$state},$msg);
} # end check_zpool_capacity()
# Decide whether pruning for a config section should be deferred based on
# pool capacity. The section's "prune_defer" setting is a percentage
# (0: always prune; 1-100: only prune once the pool's used capacity
# exceeds that value).
#
# Arguments: ($config, $section) - config hashref and the section name.
# NOTE(review): the body reads the file-scoped lexical %config rather than
# dereferencing the $config parameter; this works because callers pass a ref
# to that same hash, but confirm before relying on the parameter.
#
# Returns 1 if pruning should be deferred, 0 if pruning should proceed.
# Dies if the configured limit is not a valid capacity percentage.
sub check_prune_defer {
	my ($config, $section) = @_;

	my $limit = $config{$section}{"prune_defer"};

	if (!check_capacity_limit($limit)) {
		die "ERROR: invalid prune_defer limit!\n";
	}

	# numeric comparison: a limit of 0 disables deferral entirely
	# (the original used string 'eq', which mishandles values like "00")
	if ($limit == 0) {
		return 0;
	}

	# the pool is the first path component of the section name
	my @parts = split /\//, $section, 2;
	my $pool = $parts[0];

	# query each pool's capacity at most once per run
	unless (exists $capacitycache{$pool}) {
		$capacitycache{$pool} = get_zpool_capacity($pool);
	}

	if ($limit < $capacitycache{$pool}) {
		return 0;
	}

	return 1;
}
# Return the used capacity of a zpool as an integer percentage (0-100),
# as reported by `zpool list -H -o cap`.
# Dies if the command produces no output or prints something that is not
# a percentage (e.g. "42%").
sub get_zpool_capacity {
	my $pool = shift;

	my $statcommand = "$zpool list -H -o cap $pool";

	# lexical filehandle with 3-arg open: the bareword STAT used by the
	# check_zpool* subs is a shared global and could be clobbered here
	if (! open my $stat_fh, '-|', $statcommand) {
		die "ERROR: '$statcommand' command returns no result!\n";
	} else {
		my $line = <$stat_fh>;
		close($stat_fh);

		chomp $line;
		my @row = split(/ +/, $line);
		my $cap = $row[0];

		## check for valid capacity value
		if ($cap !~ m/^[0-9]{1,3}%$/ ) {
			die "ERROR: '$statcommand' command returned invalid capacity value ($cap)!\n";
		}

		# strip the trailing '%' so callers get a plain integer
		$cap =~ s/\D//g;

		return $cap;
	}
}
######################################################################################################
######################################################################################################
######################################################################################################
@ -1244,6 +1419,9 @@ sub getchilddatasets {
my @children = <FH>;
close FH;
# parent dataset is the first element
shift @children;
return @children;
}
@ -1296,6 +1474,41 @@ sub removecachedsnapshots {
undef %pruned;
}
#######################################################################################################################3
#######################################################################################################################3
#######################################################################################################################3
# Run the configured helper script (pre_snapshot_script,
# post_snapshot_script or pruning_script) for a dataset, enforcing the
# per-dataset 'script_timeout' via SIGALRM.
#
# $key     - config key naming which script to run
# $dataset - dataset whose config supplies the script path and timeout
#
# Returns the raw system() status of the script (0 on success), or -1 if
# the script timed out or the eval died for another reason. Warnings are
# emitted on timeout and on non-zero exit.
sub runscript {
	my $key=shift;
	my $dataset=shift;

	my $timeout=$config{$dataset}{'script_timeout'};

	my $ret;
	eval {
		# only arm the watchdog for a positive timeout; <= 0 means "no limit".
		# numeric '>' here - the original 'gt' compared the values as strings.
		if ($timeout > 0) {
			local $SIG{ALRM} = sub { die "alarm\n" };
			alarm $timeout;
		}
		$ret = system($config{$dataset}{$key});
		alarm 0;
	};
	if ($@) {
		if ($@ eq "alarm\n") {
			warn "WARN: $key didn't finish in the allowed time!";
		} else {
			warn "CRITICAL ERROR: $@";
		}
		return -1;
	} else {
		if ($ret != 0) {
			warn "WARN: $key failed, $?";
		}
	}

	return $ret;
}
__END__
=head1 NAME

View File

@ -40,6 +40,7 @@
daily = 60
[template_production]
frequently = 0
hourly = 36
daily = 30
monthly = 3
@ -49,6 +50,7 @@
[template_backup]
autoprune = yes
frequently = 0
hourly = 30
daily = 90
monthly = 12
@ -67,6 +69,21 @@
daily_warn = 48
daily_crit = 60
[template_scripts]
### dataset and snapshot name will be supplied as environment variables
### for all pre/post/prune scripts ($SANOID_TARGET, $SANOID_SNAPNAME)
### run script before snapshot
pre_snapshot_script = /path/to/script.sh
### run script after snapshot
post_snapshot_script = /path/to/script.sh
### run script after pruning snapshot
pruning_script = /path/to/script.sh
### don't take an inconsistent snapshot (skip if pre script fails)
#no_inconsistent_snapshot = yes
### run post_snapshot_script when pre_snapshot_script is failing
#force_post_snapshot_script = yes
### limit allowed execution time of scripts before continuing (<= 0: infinite)
script_timeout = 5
[template_ignore]
autoprune = no

View File

@ -5,6 +5,8 @@
# #
# you have been warned. #
###################################################################################
[version]
version = 2
[template_default]
@ -15,6 +17,26 @@ path =
recursive =
use_template =
process_children_only =
skip_children =
pre_snapshot_script =
post_snapshot_script =
pruning_script =
script_timeout = 5
no_inconsistent_snapshot =
force_post_snapshot_script =
# for snapshots shorter than one hour, the period duration must be defined
# in minutes. Because they are executed within a full hour, the selected
# value should divide 60 minutes without remainder so taken snapshots
# are apart in equal intervals. Values larger than 59 aren't practical
# as only one snapshot will be taken on each full hour in this case.
# examples:
# frequent_period = 15 -> four snapshots each hour 15 minutes apart
# frequent_period = 5 -> twelve snapshots each hour 5 minutes apart
# frequent_period = 45 -> two snapshots each hour with different time gaps
# between them: 45 minutes and 15 minutes in this case
frequent_period = 15
# If any snapshot type is set to 0, we will not take snapshots for it - and will immediately
# prune any of those type snapshots already present.
@ -22,11 +44,15 @@ process_children_only =
# Otherwise, if autoprune is set, we will prune any snapshots of that type which are older
# than (setting * periodicity) - so if daily = 90, we'll prune any dailies older than 90 days.
autoprune = yes
frequently = 0
hourly = 48
daily = 90
weekly = 0
monthly = 6
yearly = 0
min_percent_free = 10
# pruning can be skipped based on the used capacity of the pool
# (0: always prune, 1-100: only prune if used capacity is greater than this value)
prune_defer = 0
# We will automatically take snapshots if autosnap is on, at the desired times configured
# below (or immediately, if we don't have one since the last preferred time for that type).
@ -40,6 +66,10 @@ hourly_min = 0
# daily - at 23:59 (most people expect a daily to contain everything done DURING that day)
daily_hour = 23
daily_min = 59
# weekly - at 23:30 each Monday
weekly_wday = 1
weekly_hour = 23
weekly_min = 30
# monthly - immediately at the beginning of the month (ie 00:00 of day 1)
monthly_mday = 1
monthly_hour = 0
@ -62,12 +92,16 @@ yearly_min = 0
monitor = yes
monitor_dont_warn = no
monitor_dont_crit = no
frequently_warn = 0
frequently_crit = 0
hourly_warn = 90
hourly_crit = 360
daily_warn = 28
daily_crit = 32
monthly_warn = 32
monthly_crit = 35
weekly_warn = 0
weekly_crit = 0
monthly_warn = 5
monthly_crit = 6
yearly_warn = 0
yearly_crit = 0

154
syncoid
View File

@ -20,7 +20,7 @@ my %args = ('sshkey' => '', 'sshport' => '', 'sshcipher' => '', 'sshoption' => [
GetOptions(\%args, "no-command-checks", "monitor-version", "compress=s", "dumpsnaps", "recursive|r",
"source-bwlimit=s", "target-bwlimit=s", "sshkey=s", "sshport=i", "sshcipher|c=s", "sshoption|o=s@",
"debug", "quiet", "no-stream", "no-sync-snap", "no-resume", "exclude=s@", "skip-parent", "identifier=s",
"force-delete") or pod2usage(2);
"no-clone-handling", "no-privilege-elevation", "force-delete") or pod2usage(2);
my %compressargs = %{compressargset($args{'compress'} || 'default')}; # Can't be done with GetOptions arg, as default still needs to be set
@ -105,17 +105,59 @@ my $exitcode = 0;
## replication ##
if (!defined $args{'recursive'}) {
syncdataset($sourcehost, $sourcefs, $targethost, $targetfs);
syncdataset($sourcehost, $sourcefs, $targethost, $targetfs, undef);
} else {
if ($debug) { print "DEBUG: recursive sync of $sourcefs.\n"; }
my @datasets = getchilddatasets($sourcehost, $sourcefs, $sourceisroot);
foreach my $dataset(@datasets) {
my @deferred;
foreach my $datasetProperties(@datasets) {
my $dataset = $datasetProperties->{'name'};
my $origin = $datasetProperties->{'origin'};
if ($origin eq "-" || defined $args{'no-clone-handling'}) {
$origin = undef;
} else {
# check if clone source is replicated too
my @values = split(/@/, $origin, 2);
my $srcdataset = $values[0];
my $found = 0;
foreach my $datasetProperties(@datasets) {
if ($datasetProperties->{'name'} eq $srcdataset) {
$found = 1;
last;
}
}
if ($found == 0) {
# clone source is not replicated, do a full replication
$origin = undef;
} else {
# clone source is replicated, defer until all non clones are replicated
push @deferred, $datasetProperties;
next;
}
}
$dataset =~ s/\Q$sourcefs\E//;
chomp $dataset;
my $childsourcefs = $sourcefs . $dataset;
my $childtargetfs = $targetfs . $dataset;
# print "syncdataset($sourcehost, $childsourcefs, $targethost, $childtargetfs); \n";
syncdataset($sourcehost, $childsourcefs, $targethost, $childtargetfs);
syncdataset($sourcehost, $childsourcefs, $targethost, $childtargetfs, $origin);
}
# replicate cloned datasets and if this is the initial run, recreate them on the target
foreach my $datasetProperties(@deferred) {
my $dataset = $datasetProperties->{'name'};
my $origin = $datasetProperties->{'origin'};
$dataset =~ s/\Q$sourcefs\E//;
chomp $dataset;
my $childsourcefs = $sourcefs . $dataset;
my $childtargetfs = $targetfs . $dataset;
syncdataset($sourcehost, $childsourcefs, $targethost, $childtargetfs, $origin);
}
}
@ -148,37 +190,51 @@ sub getchilddatasets {
$fsescaped = escapeshellparam($fsescaped);
}
my $getchildrencmd = "$rhost $mysudocmd $zfscmd list -o name -t filesystem,volume -Hr $fsescaped |";
my $getchildrencmd = "$rhost $mysudocmd $zfscmd list -o name,origin -t filesystem,volume -Hr $fsescaped |";
if ($debug) { print "DEBUG: getting list of child datasets on $fs using $getchildrencmd...\n"; }
open FH, $getchildrencmd;
my @children = <FH>;
close FH;
if (defined $args{'skip-parent'}) {
# parent dataset is the first element
shift @children;
if (! open FH, $getchildrencmd) {
die "ERROR: list command failed!\n";
}
if (defined $args{'exclude'}) {
my $excludes = $args{'exclude'};
foreach (@$excludes) {
for my $i ( 0 .. $#children ) {
if ($children[$i] =~ /$_/) {
if ($debug) { print "DEBUG: excluded $children[$i] because of $_\n"; }
undef $children[$i]
my @children;
my $first = 1;
DATASETS: while(<FH>) {
chomp;
if (defined $args{'skip-parent'} && $first eq 1) {
# parent dataset is the first element
$first = 0;
next;
}
my ($dataset, $origin) = /^([^\t]+)\t([^\t]+)/;
if (defined $args{'exclude'}) {
my $excludes = $args{'exclude'};
foreach (@$excludes) {
print("$dataset\n");
if ($dataset =~ /$_/) {
if ($debug) { print "DEBUG: excluded $dataset because of $_\n"; }
next DATASETS;
}
}
@children = grep{ defined }@children;
}
my %properties;
$properties{'name'} = $dataset;
$properties{'origin'} = $origin;
push @children, \%properties;
}
close FH;
return @children;
}
sub syncdataset {
my ($sourcehost, $sourcefs, $targethost, $targetfs, $skipsnapshot) = @_;
my ($sourcehost, $sourcefs, $targethost, $targetfs, $origin, $skipsnapshot) = @_;
my $sourcefsescaped = escapeshellparam($sourcefs);
my $targetfsescaped = escapeshellparam($targetfs);
@ -306,11 +362,25 @@ sub syncdataset {
my $sendcmd = "$sourcesudocmd $zfscmd send $sourcefsescaped\@$oldestsnapescaped";
my $recvcmd = "$targetsudocmd $zfscmd receive $receiveextraargs -F $targetfsescaped";
my $pvsize = getsendsize($sourcehost,"$sourcefs\@$oldestsnap",0,$sourceisroot);
my $pvsize;
if (defined $origin) {
my $originescaped = escapeshellparam($origin);
$sendcmd = "$sourcesudocmd $zfscmd send -i $originescaped $sourcefsescaped\@$oldestsnapescaped";
my $streamargBackup = $args{'streamarg'};
$args{'streamarg'} = "-i";
$pvsize = getsendsize($sourcehost,$origin,"$sourcefs\@$oldestsnap",$sourceisroot);
$args{'streamarg'} = $streamargBackup;
} else {
$pvsize = getsendsize($sourcehost,"$sourcefs\@$oldestsnap",0,$sourceisroot);
}
my $disp_pvsize = readablebytes($pvsize);
if ($pvsize == 0) { $disp_pvsize = 'UNKNOWN'; }
my $synccmd = buildsynccmd($sendcmd,$recvcmd,$pvsize,$sourceisroot,$targetisroot);
if (!$quiet) {
if (defined $origin) {
print "INFO: Clone is recreated on target $targetfs based on $origin\n";
}
if (!defined ($args{'no-stream'}) ) {
print "INFO: Sending oldest full snapshot $sourcefs\@$oldestsnap (~ $disp_pvsize) to new target filesystem:\n";
} else {
@ -397,7 +467,7 @@ sub syncdataset {
# a resumed transfer will only be done to the next snapshot,
# so do an normal sync cycle
return syncdataset($sourcehost, $sourcefs, $targethost, $targetfs);
return syncdataset($sourcehost, $sourcefs, $targethost, $targetfs, undef);
}
# find most recent matching snapshot and do an -I
@ -453,7 +523,7 @@ sub syncdataset {
warn "WARNING: $rcommand $prunecmd failed: $?";
} else {
# redo sync and skip snapshot creation (already taken)
return syncdataset($sourcehost, $sourcefs, $targethost, $targetfs, 1);
return syncdataset($sourcehost, $sourcefs, $targethost, $targetfs, undef, 1);
}
}
@ -629,17 +699,29 @@ sub compressargset {
decomrawcmd => '/usr/bin/zstd',
decomargs => '-dc',
},
'xz' => {
rawcmd => '/usr/bin/xz',
args => '',
decomrawcmd => '/usr/bin/xz',
decomargs => '-d',
},
'lzo' => {
rawcmd => '/usr/bin/lzop',
args => '',
decomrawcmd => '/usr/bin/lzop',
decomargs => '-dfc',
},
'lz4' => {
rawcmd => '/usr/bin/lz4',
args => '',
decomrawcmd => '/usr/bin/lz4',
decomargs => '-dc',
},
);
if ($value eq 'default') {
$value = $DEFAULT_COMPRESSION;
} elsif (!(grep $value eq $_, ('gzip', 'pigz-fast', 'pigz-slow', 'zstd-fast', 'zstd-slow', 'lzo', 'default', 'none'))) {
} elsif (!(grep $value eq $_, ('gzip', 'pigz-fast', 'pigz-slow', 'zstd-fast', 'zstd-slow', 'lz4', 'xz', 'lzo', 'default', 'none'))) {
warn "Unrecognised compression value $value, defaulting to $DEFAULT_COMPRESSION";
$value = $DEFAULT_COMPRESSION;
}
@ -1134,7 +1216,7 @@ sub getssh {
$rhost =~ s/:\Q$fs\E$//;
my $remoteuser = $rhost;
$remoteuser =~ s/\@.*$//;
if ($remoteuser eq 'root') { $isroot = 1; } else { $isroot = 0; }
if ($remoteuser eq 'root' || $args{'no-privilege-elevation'}) { $isroot = 1; } else { $isroot = 0; }
# now we need to establish a persistent master SSH connection
$socket = "/tmp/syncoid-$remoteuser-$rhost-" . time();
open FH, "$sshcmd -M -S $socket -o ControlPersist=1m $args{'sshport'} $rhost exit |";
@ -1142,7 +1224,7 @@ sub getssh {
$rhost = "-S $socket $rhost";
} else {
my $localuid = $<;
if ($localuid == 0) { $isroot = 1; } else { $isroot = 0; }
if ($localuid == 0 || $args{'no-privilege-elevation'}) { $isroot = 1; } else { $isroot = 0; }
}
# if ($isroot) { print "this user is root.\n"; } else { print "this user is not root.\n"; }
return ($rhost,$fs,$isroot);
@ -1311,6 +1393,11 @@ sub getsendsize {
}
chomp $sendsize;
# check for valid value
if ($sendsize !~ /^\d+$/) {
$sendsize = '';
}
# to avoid confusion with a zero size pv, give sendsize
# a minimum 4K value - or if empty, make sure it reads UNKNOWN
if ($debug) { print "DEBUG: sendsize = $sendsize\n"; }
@ -1375,16 +1462,16 @@ syncoid - ZFS snapshot replication tool
=head1 SYNOPSIS
syncoid [options]... SOURCE TARGET
or syncoid [options]... SOURCE [USER@]HOST:TARGET
or syncoid [options]... [USER@]HOST:SOURCE [TARGET]
or syncoid [options]... [USER@]HOST:SOURCE [USER@]HOST:TARGET
or syncoid [options]... SOURCE USER@HOST:TARGET
or syncoid [options]... USER@HOST:SOURCE TARGET
or syncoid [options]... USER@HOST:SOURCE USER@HOST:TARGET
SOURCE Source ZFS dataset. Can be either local or remote
TARGET Target ZFS dataset. Can be either local or remote
Options:
--compress=FORMAT Compresses data during transfer. Currently accepted options are gzip, pigz-fast, pigz-slow, lzo (default) & none
--compress=FORMAT Compresses data during transfer. Currently accepted options are gzip, pigz-fast, pigz-slow, zstd-fast, zstd-slow, lz4, xz, lzo (default) & none
--identifier=EXTRA Extra identifier which is included in the snapshot name. Can be used for replicating to multiple targets.
--recursive|r Also transfers child datasets
--skip-parent Skips syncing of the parent dataset. Does nothing without '--recursive' option.
@ -1407,4 +1494,7 @@ Options:
--dumpsnaps Dumps a list of snapshots during the run
--no-command-checks Do not check command existence before attempting transfer. Not recommended
--no-resume Don't use the ZFS resume feature if available
--no-clone-handling Don't try to recreate clones on target
--no-privilege-elevation Bypass the root check, for use with ZFS permission delegation
--force-delete Remove target datasets recursively, if there are no matching snapshots/bookmarks

View File

@ -10,7 +10,7 @@ set -x
POOL_NAME="sanoid-test-1"
POOL_TARGET="" # root
RESULT="/tmp/sanoid_test_result"
RESULT_CHECKSUM="aa15e5595b0ed959313289ecb70323dad9903328ac46e881da5c4b0f871dd7cf"
RESULT_CHECKSUM="68c67161a59d0e248094a66061972f53613067c9db52ad981030f36bc081fed7"
# UTC timestamp of start and end
START="1483225200"
@ -46,10 +46,4 @@ done
saveSnapshotList "${POOL_NAME}" "${RESULT}"
# hourly daily monthly
verifySnapshotList "${RESULT}" 8759 366 12 "${RESULT_CHECKSUM}"
# hourly count should be 8760 but one hour gets lost because of DST
# daily count should be 365 but one additional daily is taken
# because the DST change leads to a day with 25 hours
# which will trigger an additional daily snapshot
verifySnapshotList "${RESULT}" 8760 365 12 "${RESULT_CHECKSUM}"