mirror of https://github.com/jimsalterjrs/sanoid
Merge branch 'master' into dev/sde/remove-force-prune
commit 7ba73acea9

findoid (3 lines changed)
--- a/findoid
+++ b/findoid
@@ -25,6 +25,9 @@ if ($args{'path'} eq '') {
 }
 }

+# resolve given path to a canonical one
+$args{'path'} = Cwd::realpath($args{'path'});
+
 my $dataset = getdataset($args{'path'});

 my %versions = getversions($args{'path'}, $dataset);

sanoid (151 lines changed)
--- a/sanoid
+++ b/sanoid
@@ -35,17 +35,6 @@ if (keys %args < 4) {
     $args{'verbose'} = 1;
 }

-my $cacheTTL = 900; # 15 minutes
-
-# Allow a much older snapshot cache file than default if _only_ "--monitor-*" action commands are given
-# (ignore "--verbose", "--configdir" etc)
-if (($args{'monitor-snapshots'} || $args{'monitor-health'} || $args{'monitor-capacity'}) && ! ($args{'cron'} || $args{'force-update'} || $args{'take-snapshots'} || $args{'prune-snapshots'})) {
-    # The command combination above must not assert true for any command that takes or prunes snapshots
-    $cacheTTL = 18000; # 5 hours
-    if ($args{'debug'}) { print "DEBUG: command combo means that the cache file (provided it exists) will be allowed to be older than default.\n"; }
-}
-
 # for compatibility reasons, older versions used hardcoded command paths
 $ENV{'PATH'} = $ENV{'PATH'} . ":/bin:/sbin";
@@ -57,25 +46,69 @@ my $zpool = 'zpool';
 my $conf_file = "$args{'configdir'}/sanoid.conf";
 my $default_conf_file = "$args{'configdir'}/sanoid.defaults.conf";

-# parse config file
-my %config = init($conf_file,$default_conf_file);
-
 my $cache_dir = $args{'cache-dir'};
 my $run_dir = $args{'run-dir'};

 make_path($cache_dir);
 make_path($run_dir);

-# if we call getsnaps(%config,1) it will forcibly update the cache, TTL or no TTL
-my $forcecacheupdate = 0;
+my $cacheTTL = 1200; # 20 minutes
+
+# Allow a much older snapshot cache file than default if _only_ "--monitor-*" action commands are given
+# (ignore "--verbose", "--configdir" etc)
+if (
+    (
+        $args{'monitor-snapshots'}
+        || $args{'monitor-health'}
+        || $args{'monitor-capacity'}
+    ) && ! (
+        $args{'cron'}
+        || $args{'force-update'}
+        || $args{'take-snapshots'}
+        || $args{'prune-snapshots'}
+    )
+) {
+    # The command combination above must not assert true for any command that takes or prunes snapshots
+    $cacheTTL = 18000; # 5 hours
+    if ($args{'debug'}) { print "DEBUG: command combo means that the cache file (provided it exists) will be allowed to be older than default.\n"; }
+}
+
+# snapshot cache
 my $cache = "$cache_dir/snapshots.txt";
-my %snaps = getsnaps( \%config, $cacheTTL, $forcecacheupdate );
+
+# configured dataset cache
+my $cachedatasetspath = "$cache_dir/datasets.txt";
+my @cachedatasets;
+
+# parse config file
+my %config = init($conf_file,$default_conf_file);

 my %pruned;
 my %capacitycache;

-my %snapsbytype = getsnapsbytype( \%config, \%snaps );
+my %snaps;
+my %snapsbytype;
+my %snapsbypath;

-my %snapsbypath = getsnapsbypath( \%config, \%snaps );
+# get snapshot list only if needed
+if ($args{'monitor-snapshots'}
+    || $args{'monitor-health'}
+    || $args{'cron'}
+    || $args{'take-snapshots'}
+    || $args{'prune-snapshots'}
+    || $args{'force-update'}
+    || $args{'debug'}
+) {
+    my $forcecacheupdate = 0;
+    if ($args{'force-update'}) {
+        $forcecacheupdate = 1;
+    }
+
+    %snaps = getsnaps( \%config, $cacheTTL, $forcecacheupdate);
+
+    %snapsbytype = getsnapsbytype( \%config, \%snaps );
+    %snapsbypath = getsnapsbypath( \%config, \%snaps );
+}

 # let's make it a little easier to be consistent passing these hashes in the same order to each sub
 my @params = ( \%config, \%snaps, \%snapsbytype, \%snapsbypath );
@@ -84,7 +117,6 @@ if ($args{'debug'}) { $args{'verbose'}=1; blabber (@params); }
 if ($args{'monitor-snapshots'}) { monitor_snapshots(@params); }
 if ($args{'monitor-health'}) { monitor_health(@params); }
 if ($args{'monitor-capacity'}) { monitor_capacity(@params); }
-if ($args{'force-update'}) { my $snaps = getsnaps( \%config, $cacheTTL, 1 ); }

 if ($args{'cron'}) {
     if ($args{'quiet'}) { $args{'verbose'} = 0; }
@@ -275,7 +307,6 @@ sub prune_snapshots {
     my ($config, $snaps, $snapsbytype, $snapsbypath) = @_;

     my %datestamp = get_date();
-    my $forcecacheupdate = 0;

     foreach my $section (keys %config) {
         if ($section =~ /^template/) { next; }
@@ -823,7 +854,7 @@ sub getsnaps {
     if (checklock('sanoid_cacheupdate')) {
         writelock('sanoid_cacheupdate');
         if ($args{'verbose'}) {
-            if ($args{'force-update'}) {
+            if ($forcecacheupdate) {
                 print "INFO: cache forcibly expired - updating from zfs list.\n";
             } else {
                 print "INFO: cache expired - updating from zfs list.\n";
@@ -833,9 +864,10 @@ sub getsnaps {
         @rawsnaps = <FH>;
         close FH;

-        open FH, "> $cache" or die 'Could not write to $cache!\n';
+        open FH, "> $cache.tmp" or die 'Could not write to $cache.tmp!\n';
         print FH @rawsnaps;
         close FH;
+        rename("$cache.tmp", "$cache") or die 'Could not rename to $cache!\n';
         removelock('sanoid_cacheupdate');
     } else {
         if ($args{'verbose'}) { print "INFO: deferring cache update - valid cache update lock held by another sanoid process.\n"; }
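
Aside, for readers unfamiliar with the pattern (an illustrative sketch, not code from this commit): writing the refreshed cache to a sidecar file and then rename()-ing it into place means another sanoid process reading the cache always sees either the old complete file or the new complete file, never a truncated one. The path below is hypothetical.

    use strict;
    use warnings;

    my $cache    = "/tmp/example-snapshot-cache.txt";   # hypothetical path, for illustration only
    my @rawsnaps = ("tank/data\@autosnap_2024-01-01_00:00:01_daily\n");

    # write the new contents to a temporary sidecar file first...
    open my $fh, '>', "$cache.tmp" or die "Could not write to $cache.tmp: $!\n";
    print $fh @rawsnaps;
    close $fh;

    # ...then atomically swap it into place (same filesystem, so rename() replaces the old file in one step)
    rename "$cache.tmp", $cache or die "Could not rename to $cache: $!\n";
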
@@ -898,6 +930,20 @@ sub init {
         die "FATAL: you're using sanoid.defaults.conf v$defaults_version, this version of sanoid requires a minimum sanoid.defaults.conf v$MINIMUM_DEFAULTS_VERSION";
     }

+    my @updatedatasets;
+
+    # load dataset cache if valid
+    if (!$args{'force-update'} && -f $cachedatasetspath) {
+        my ($dev, $ino, $mode, $nlink, $uid, $gid, $rdev, $size, $atime, $mtime, $ctime, $blksize, $blocks) = stat($cachedatasetspath);
+
+        if ((time() - $mtime) <= $cacheTTL) {
+            if ($args{'debug'}) { print "DEBUG: dataset cache not expired (" . (time() - $mtime) . " seconds old with TTL of $cacheTTL): pulling dataset list from cache.\n"; }
+            open FH, "< $cachedatasetspath";
+            @cachedatasets = <FH>;
+            close FH;
+        }
+    }
+
     foreach my $section (keys %ini) {

         # first up - die with honor if unknown parameters are set in any modules or templates by the user.
@@ -987,6 +1033,10 @@ sub init {
             $config{$section}{'path'} = $section;
         }

+        if (! @cachedatasets) {
+            push (@updatedatasets, "$config{$section}{'path'}\n");
+        }
+
         # how 'bout some recursion? =)
         if ($config{$section}{'zfs_recursion'} && $config{$section}{'zfs_recursion'} == 1 && $config{$section}{'autosnap'} == 1) {
             warn "ignored autosnap configuration for '$section' because it's part of a zfs recursion.\n";
@@ -1004,6 +1054,10 @@ sub init {

             @datasets = getchilddatasets($config{$section}{'path'});
             DATASETS: foreach my $dataset(@datasets) {
+                if (! @cachedatasets) {
+                    push (@updatedatasets, $dataset);
+                }
+
                 chomp $dataset;

                 if ($zfsRecursive) {
@@ -1035,9 +1089,27 @@ sub init {
                 $config{$dataset}{'initialized'} = 1;
             }
         }
+    }
+
+    # update dataset cache if it was unused
+    if (! @cachedatasets) {
+        if (checklock('sanoid_cachedatasetupdate')) {
+            writelock('sanoid_cachedatasetupdate');
+            if ($args{'verbose'}) {
+                if ($args{'force-update'}) {
+                    print "INFO: dataset cache forcibly expired - updating from zfs list.\n";
+                } else {
+                    print "INFO: dataset cache expired - updating from zfs list.\n";
+                }
+            }
+            open FH, "> $cachedatasetspath.tmp" or die 'Could not write to $cachedatasetspath.tmp!\n';
+            print FH @updatedatasets;
+            close FH;
+            rename("$cachedatasetspath.tmp", "$cachedatasetspath") or die 'Could not rename to $cachedatasetspath!\n';
+            removelock('sanoid_cachedatasetupdate');
+        } else {
+            if ($args{'verbose'}) { print "INFO: deferring dataset cache update - valid cache update lock held by another sanoid process.\n"; }
+        }
     }

     return %config;
@@ -1563,6 +1635,30 @@ sub getchilddatasets {
     my $fs = shift;
     my $mysudocmd = '';

+    # use dataset cache if available
+    if (@cachedatasets) {
+        my $foundparent = 0;
+        my @cachechildren = ();
+        foreach my $dataset (@cachedatasets) {
+            chomp $dataset;
+            my $ret = rindex $dataset, "${fs}/", 0;
+            if ($ret == 0) {
+                push (@cachechildren, $dataset);
+            } else {
+                if ($dataset eq $fs) {
+                    $foundparent = 1;
+                }
+            }
+        }
+
+        # sanity check
+        if ($foundparent) {
+            return @cachechildren;
+        }
+
+        # fallback if cache misses items for whatever reason
+    }
+
     my $getchildrencmd = "$mysudocmd $zfs list -o name -t filesystem,volume -Hr $fs |";
     if ($args{'debug'}) { print "DEBUG: getting list of child datasets on $fs using $getchildrencmd...\n"; }
     open FH, $getchildrencmd;
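
Aside (a sketch, not part of the commit): `rindex STRING, SUBSTR, 0` returns 0 only when STRING begins with SUBSTR, so the loop above is a prefix test that keeps every cached dataset living under "$fs/" while recording whether $fs itself appeared in the cache. A standalone sketch with made-up dataset names:

    use strict;
    use warnings;

    my $fs            = "tank/data";    # hypothetical parent dataset
    my @cachedatasets = ("tank/data\n", "tank/data/vm1\n", "tank/other\n");

    my @cachechildren;
    my $foundparent = 0;
    foreach my $dataset (@cachedatasets) {
        chomp $dataset;
        if (rindex($dataset, "${fs}/", 0) == 0) {   # starts-with test
            push @cachechildren, $dataset;          # descendant of $fs
        } elsif ($dataset eq $fs) {
            $foundparent = 1;                       # parent seen, so the cache covers this subtree
        }
    }
    print "children: @cachechildren (parent seen: $foundparent)\n";
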
@@ -1609,16 +1705,17 @@ sub removecachedsnapshots {
     my @rawsnaps = <FH>;
     close FH;

-    open FH, "> $cache" or die 'Could not write to $cache!\n';
+    open FH, "> $cache.tmp" or die 'Could not write to $cache.tmp!\n';
     foreach my $snapline ( @rawsnaps ) {
         my @columns = split("\t", $snapline);
         my $snap = $columns[0];
         print FH $snapline unless ( exists($pruned{$snap}) );
     }
     close FH;
+    rename("$cache.tmp", "$cache") or die 'Could not rename to $cache!\n';

     removelock('sanoid_cacheupdate');
-    %snaps = getsnaps(\%config,$cacheTTL,$forcecacheupdate);
+    %snaps = getsnaps(\%config,$cacheTTL,0);

     # clear hash
     undef %pruned;

syncoid (97 lines changed)
--- a/syncoid
+++ b/syncoid
@@ -498,7 +498,6 @@ sub syncdataset {

     my $ret;
     if (defined $origin) {
-        writelog('INFO', "Clone is recreated on target $targetfs based on $origin");
         ($ret, $stdout) = syncclone($sourcehost, $sourcefs, $origin, $targethost, $targetfs, $oldestsnap);
         if ($ret) {
             writelog('INFO', "clone creation failed, trying ordinary replication as fallback");
@@ -506,12 +505,6 @@ sub syncdataset {
             return 0;
         }
     } else {
-        if (!defined ($args{'no-stream'}) ) {
-            writelog('INFO', "Sending oldest full snapshot $sourcefs\@$oldestsnap to new target filesystem:");
-        } else {
-            writelog('INFO', "--no-stream selected; sending newest full snapshot $sourcefs\@$oldestsnap to new target filesystem:");
-        }
-
         ($ret, $stdout) = syncfull($sourcehost, $sourcefs, $targethost, $targetfs, $oldestsnap);
     }

@@ -532,8 +525,6 @@ sub syncdataset {
     # $originaltargetreadonly = getzfsvalue($targethost,$targetfs,$targetisroot,'readonly');
     # setzfsvalue($targethost,$targetfs,$targetisroot,'readonly','on');

-    writelog('INFO', "Updating new target filesystem with incremental $sourcefs\@$oldestsnap ... $newsyncsnap:");
-
     (my $ret, $stdout) = syncincremental($sourcehost, $sourcefs, $targethost, $targetfs, $oldestsnap, $newsyncsnap, 0);

     if ($ret != 0) {
@@ -865,6 +856,16 @@ sub syncdataset {
     # those that exist on the source. Remaining are the snapshots
     # that are only on the target. Then sort to remove the oldest
     # snapshots first.
+
+    # regather snapshots on source and target
+    %snaps = getsnaps('source',$sourcehost,$sourcefs,$sourceisroot,0);
+
+    if ($targetexists) {
+        my %targetsnaps = getsnaps('target',$targethost,$targetfs,$targetisroot,0);
+        my %sourcesnaps = %snaps;
+        %snaps = (%sourcesnaps, %targetsnaps);
+    }
+
     my @to_delete = sort { sortsnapshots(\%snaps, $a, $b) } grep {!exists $snaps{'source'}{$_}} keys %{ $snaps{'target'} };
     while (@to_delete) {
         # Create batch of snapshots to remove
@@ -898,7 +899,6 @@ sub runsynccmd {
     my $disp_pvsize = $pvsize == 0 ? 'UNKNOWN' : readablebytes($pvsize);
     my $sendoptions;
     if ($sendsource =~ / -t /) {
-        writelog('INFO', "Resuming interrupted zfs send/receive from $sourcefs to $targetfs (~ $disp_pvsize remaining):");
         $sendoptions = getoptionsline(\@sendoptions, ('P','V','e','v'));
     } elsif ($sendsource =~ /#/) {
         $sendoptions = getoptionsline(\@sendoptions, ('L','V','c','e','w'));
@@ -934,12 +934,13 @@ sub runsynccmd {
     my $recvcmd = "$targetsudocmd $zfscmd receive $recvoptions $targetfsescaped 2>&1";

     my $synccmd = buildsynccmd($sendcmd,$recvcmd,$pvsize,$sourceisroot,$targetisroot);
-    writelog('INFO', "Sync size: ~$disp_pvsize");
+    writelog('DEBUG', "sync size: ~$disp_pvsize");
     writelog('DEBUG', "$synccmd");

     # make sure target is (still) not currently in receive.
     if (iszfsbusy($targethost,$targetfs,$targetisroot)) {
-        writelog('WARN', "Cannot sync now: $targetfs is already target of a zfs receive process.");
+        my $targetname = buildnicename($targethost, $targetfs);
+        writelog('WARN', "Cannot sync now: $targetname is already target of a zfs receive process.");
         return (1, '');
     }
@@ -971,6 +972,16 @@ sub syncfull {
     my $sendsource = "$sourcefsescaped\@$snapescaped";
     my $pvsize = getsendsize($sourcehost,"$sourcefs\@$snapname",0,$sourceisroot);

+    my $srcname = buildnicename($sourcehost, $sourcefs, $snapname);
+    my $targetname = buildnicename($targethost, $targetfs);
+    my $disp_pvsize = $pvsize == 0 ? 'UNKNOWN' : readablebytes($pvsize);
+
+    if (!defined ($args{'no-stream'}) ) {
+        writelog('INFO', "Sending oldest full snapshot $srcname to new target filesystem $targetname (~ $disp_pvsize):");
+    } else {
+        writelog('INFO', "--no-stream selected; sending newest full snapshot $srcname to new target filesystem $targetname: (~ $disp_pvsize)");
+    }
+
     return runsynccmd($sourcehost, $sourcefs, $sendsource, $targethost, $targetfs, $pvsize);
 } # end syncfull()
@@ -1011,7 +1022,6 @@ sub syncincremental {
     foreach my $i (0..(scalar(@intsnaps) - 2)) {
         my $snapa = $intsnaps[$i];
         my $snapb = $intsnaps[$i + 1];
-        writelog('INFO', "Performing an incremental sync between '$snapa' and '$snapb'");
         (my $ret, my $stdout) = syncincremental($sourcehost, $sourcefs, $targethost, $targetfs, $snapa, $snapb, 1);

         if ($ret != 0) {
@@ -1030,6 +1040,12 @@ sub syncincremental {
     my $sendsource = "$streamarg $sourcefsescaped\@$fromsnapescaped $sourcefsescaped\@$tosnapescaped";
     my $pvsize = getsendsize($sourcehost,"$sourcefs\@$fromsnap","$sourcefs\@$tosnap",$sourceisroot);

+    my $srcname = buildnicename($sourcehost, $sourcefs, $fromsnap);
+    my $targetname = buildnicename($targethost, $targetfs);
+    my $disp_pvsize = $pvsize == 0 ? 'UNKNOWN' : readablebytes($pvsize);
+
+    writelog('INFO', "Sending incremental $srcname ... $tosnap to $targetname (~ $disp_pvsize):");
+
     return runsynccmd($sourcehost, $sourcefs, $sendsource, $targethost, $targetfs, $pvsize);
 } # end syncincremental()
@@ -1042,6 +1058,12 @@ sub syncclone {
     my $sendsource = "-i $originescaped $sourcefsescaped\@$tosnapescaped";
     my $pvsize = getsendsize($sourcehost,$origin,"$sourcefs\@$tosnap",$sourceisroot);

+    my $srcname = buildnicename($sourcehost, $origin);
+    my $targetname = buildnicename($targethost, $targetfs);
+    my $disp_pvsize = $pvsize == 0 ? 'UNKNOWN' : readablebytes($pvsize);
+
+    writelog('INFO', "Clone is recreated on target $targetname based on $srcname (~ $disp_pvsize):");
+
     return runsynccmd($sourcehost, $sourcefs, $sendsource, $targethost, $targetfs, $pvsize);
 } # end syncclone()
@@ -1051,6 +1073,12 @@ sub syncresume {
     my $sendsource = "-t $receivetoken";
     my $pvsize = getsendsize($sourcehost,"","",$sourceisroot,$receivetoken);

+    my $srcname = buildnicename($sourcehost, $sourcefs);
+    my $targetname = buildnicename($targethost, $targetfs);
+    my $disp_pvsize = $pvsize == 0 ? 'UNKNOWN' : readablebytes($pvsize);
+
+    writelog('INFO', "Resuming interrupted zfs send/receive from $srcname to $targetname (~ $disp_pvsize remaining):");
+
     return runsynccmd($sourcehost, $sourcefs, $sendsource, $targethost, $targetfs, $pvsize);
 } # end syncresume()
@@ -1062,6 +1090,11 @@ sub syncbookmark {
     my $tosnapescaped = escapeshellparam($tosnap);
     my $sendsource = "-i $sourcefsescaped#$bookmarkescaped $sourcefsescaped\@$tosnapescaped";

+    my $srcname = buildnicename($sourcehost, $sourcefs, '', $bookmark);
+    my $targetname = buildnicename($targethost, $targetfs);
+
+    writelog('INFO', "Sending incremental $srcname ... $tosnap to $targetname:");
+
     return runsynccmd($sourcehost, $sourcefs, $sendsource, $targethost, $targetfs, 0);
 } # end syncbookmark
@@ -1114,12 +1147,24 @@ sub compressargset {
         decomrawcmd => 'zstd',
         decomargs => '-dc',
     },
+    'zstdmt-fast' => {
+        rawcmd => 'zstdmt',
+        args => '-3',
+        decomrawcmd => 'zstdmt',
+        decomargs => '-dc',
+    },
     'zstd-slow' => {
         rawcmd => 'zstd',
         args => '-19',
         decomrawcmd => 'zstd',
         decomargs => '-dc',
     },
+    'zstdmt-slow' => {
+        rawcmd => 'zstdmt',
+        args => '-19',
+        decomrawcmd => 'zstdmt',
+        decomargs => '-dc',
+    },
     'xz' => {
         rawcmd => 'xz',
         args => '',
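
Aside (illustrative sketch only, not code from the commit): each entry in the compression table expands into a compressor and a decompressor command line, so the new 'zstdmt-fast' choice would drive the transfer pipeline with roughly the commands shown below.

    use strict;
    use warnings;

    # standalone copy of the hash entry added above, for illustration
    my %compressargs = (
        'zstdmt-fast' => {
            rawcmd      => 'zstdmt',
            args        => '-3',
            decomrawcmd => 'zstdmt',
            decomargs   => '-dc',
        },
    );

    my $c = $compressargs{'zstdmt-fast'};
    print "compress:   $c->{rawcmd} $c->{args}\n";           # zstdmt -3
    print "decompress: $c->{decomrawcmd} $c->{decomargs}\n";  # zstdmt -dc
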
@@ -1142,7 +1187,7 @@ sub compressargset {

     if ($value eq 'default') {
         $value = $DEFAULT_COMPRESSION;
-    } elsif (!(grep $value eq $_, ('gzip', 'pigz-fast', 'pigz-slow', 'zstd-fast', 'zstd-slow', 'lz4', 'xz', 'lzo', 'default', 'none'))) {
+    } elsif (!(grep $value eq $_, ('gzip', 'pigz-fast', 'pigz-slow', 'zstd-fast', 'zstdmt-fast', 'zstd-slow', 'zstdmt-slow', 'lz4', 'xz', 'lzo', 'default', 'none'))) {
         writelog('WARN', "Unrecognised compression value $value, defaulting to $DEFAULT_COMPRESSION");
         $value = $DEFAULT_COMPRESSION;
     }
@@ -1511,7 +1556,7 @@ sub getnewestsnapshot {
     my $snaps = shift;
     foreach my $snap (sort { sortsnapshots($snaps, $b, $a) } keys %{ $snaps{'source'} }) {
         # return on first snap found - it's the newest
-        writelog('INFO', "NEWEST SNAPSHOT: $snap");
+        writelog('DEBUG', "NEWEST SNAPSHOT: $snap");
         return $snap;
     }
     # must not have had any snapshots on source - looks like we'd better create one!
@@ -2237,6 +2282,26 @@ sub snapisincluded {
     return 1;
 }

+sub buildnicename {
+    my ($host,$fs,$snapname,$bookmarkname) = @_;
+
+    my $name;
+    if ($host) {
+        $host =~ s/-S \/tmp\/syncoid[a-zA-Z0-9-@]+ //g;
+        $name = "$host:$fs";
+    } else {
+        $name = "$fs";
+    }
+
+    if ($snapname) {
+        $name = "$name\@$snapname";
+    } elsif ($bookmarkname) {
+        $name = "$name#$bookmarkname";
+    }
+
+    return $name;
+}
+
 __END__

 =head1 NAME
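
Aside (usage sketch, assuming buildnicename() as defined in the hunk above; the hosts and datasets are made up): the helper strips any "-S /tmp/syncoid... " ssh control-socket argument from the host string, joins host and filesystem, and appends a snapshot or bookmark suffix for log messages.

    my $src = buildnicename('root@backuphost', 'tank/data', 'autosnap_2024-01-01_00:00:01_daily');
    # -> 'root@backuphost:tank/data@autosnap_2024-01-01_00:00:01_daily'

    my $tgt = buildnicename('', 'backup/tank/data');
    # -> 'backup/tank/data' (no host prefix when $host is empty)

    my $bmk = buildnicename('root@backuphost', 'tank/data', '', 'mybookmark');
    # -> 'root@backuphost:tank/data#mybookmark'
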
@@ -2255,7 +2320,7 @@ syncoid - ZFS snapshot replication tool

 Options:

---compress=FORMAT    Compresses data during transfer. Currently accepted options are gzip, pigz-fast, pigz-slow, zstd-fast, zstd-slow, lz4, xz, lzo (default) & none
+--compress=FORMAT    Compresses data during transfer. Currently accepted options are gzip, pigz-fast, pigz-slow, zstd-fast, zstdmt-fast, zstd-slow, zstdmt-slow, lz4, xz, lzo (default) & none
 --identifier=EXTRA   Extra identifier which is included in the snapshot name. Can be used for replicating to multiple targets.
 --recursive|r        Also transfers child datasets
 --skip-parent        Skips syncing of the parent dataset. Does nothing without '--recursive' option.
@@ -17,8 +17,11 @@ for test in $(find . -mindepth 1 -maxdepth 1 -type d -printf "%P\n" | sort -g);
     cd "${test}"
     echo -n y | bash run.sh > "${LOGFILE}" 2>&1

-    if [ $? -eq 0 ]; then
+    ret=$?
+    if [ $ret -eq 0 ]; then
         echo "[PASS]"
+    elif [ $ret -eq 130 ]; then
+        echo "[SKIPPED]"
     else
         echo "[FAILED] (see ${LOGFILE})"
     fi
@@ -28,6 +28,8 @@ zfs create -o mountpoint="${MOUNT_TARGET}" "${POOL_NAME}"/src
 dd if=/dev/urandom of="${MOUNT_TARGET}"/big_file bs=1M count=200

+sleep 1
+
 ../../../syncoid --debug --compress=none --source-bwlimit=2m "${POOL_NAME}"/src "${POOL_NAME}"/dst &
 syncoid_pid=$!
 sleep 5
@@ -28,6 +28,8 @@ zfs create -o mountpoint="${MOUNT_TARGET}" "${POOL_NAME}"/src
 dd if=/dev/urandom of="${MOUNT_TARGET}"/big_file bs=1M count=200

+sleep 1
+
 zfs snapshot "${POOL_NAME}"/src@big
 ../../../syncoid --debug --no-sync-snap --compress=none --source-bwlimit=2m "${POOL_NAME}"/src "${POOL_NAME}"/dst &
 syncoid_pid=$!
@@ -7,9 +7,13 @@ set -e
 . ../../common/lib.sh

-POOL_IMAGE="/tmp/jimsalterjrs_sanoid_815.img"
+if [ -z "$ALLOW_INVASIVE_TESTS" ]; then
+    exit 130
+fi
+
+POOL_IMAGE="/tmp/syncoid-test-11.zpool"
 POOL_SIZE="64M"
-POOL_NAME="jimsalterjrs_sanoid_815"
+POOL_NAME="syncoid-test-11"

 truncate -s "${POOL_SIZE}" "${POOL_IMAGE}"
@@ -17,8 +17,11 @@ for test in $(find . -mindepth 1 -maxdepth 1 -type d -printf "%P\n" | sort -g);
     cd "${test}"
     echo | bash run.sh > "${LOGFILE}" 2>&1

-    if [ $? -eq 0 ]; then
+    ret=$?
+    if [ $ret -eq 0 ]; then
         echo "[PASS]"
+    elif [ $ret -eq 130 ]; then
+        echo "[SKIPPED]"
     else
         echo "[FAILED] (see ${LOGFILE})"
     fi