mirror of https://github.com/jimsalterjrs/sanoid
Compare commits
28 Commits
cb2443d70a ... 41c448e2a5
Commits: 41c448e2a5, 8d4abf14b2, becec66320, aa2c693e62, b794da6f14, b9bcb6a9d3, f0a2b120d9, a546b7d162, b1f191ff8f, 003dd4635a, 1915ea29a2, 6bda64508b, 5109a51b68, 67b9dec294, 44a9b71d5f, 27fc179490, 6beef5fee6, 7062b7347e, 4a9db9541d, f4e425d682, 19f8877dcb, 3942254e30, b27b120c19, cf0ecb30ae, 7ba73acea9, 4d39e39217, fab4b4076c, 03c3db3d9a
CHANGELIST (20 changed lines)

@@ -1,3 +1,23 @@
+2.3.0 [overall] documentation updates, small fixes (@thecatontheflat, @mjeanson, @jiawen, @EchterAgo, @jan-krieg, @dlangille, @rightaditya, @MynaITLabs, @ossimoi, @alexgarel, @TopherIsSwell, @jimsalterjrs, @phreaker0)
+ [sanoid] implemented adding of taken snapshots to the cache file and a new parameter for setting a custom cache expire time (@phreaker0)
+ [sanoid] ignore duplicate template keys (@phreaker0)
+ [packaging] fix debian packaging with debian 12 and ubuntu 24.04 (@phreaker0)
+ [syncoid] fix typo preventing resumed transfer with --sendoptions (@Deltik)
+ [sanoid] remove iszfsbusy check to boost performance (@sdettmer)
+ [sanoid] write cache files in an atomic way to prevent race conditions (@phreaker0)
+ [sanoid] improve performance (especially for monitor commands) by caching the dataset list (@phreaker0)
+ [syncoid] add zstdmt compress options (@0xFelix)
+ [syncoid] added missing status information about what is done and provide more details (@phreaker0)
+ [syncoid] rename ssh control socket to avoid problems with length limits and conflicts (@phreaker0)
+ [syncoid] support relative paths (@phreaker0)
+ [syncoid] regather snapshots on --delete-target-snapshots flag (@Adam Fulton)
+ [sanoid] allow monitor commands to be run without root by using only the cache file (@Pajkastare)
+ [syncoid] add --include-snaps and --exclude-snaps options (@mr-vinn, @phreaker0)
+ [syncoid] escape property key and value pair in case of property preservation (@phreaker0)
+ [syncoid] prevent destroying of the root dataset, which leads to an infinite loop because it can't be destroyed (@phreaker0)
+ [syncoid] modify zfs-get argument order for portability (@Rantherhin)
+ [sanoid] trim config values (@phreaker0)
+
 2.2.0 [overall] documentation updates, small fixes (@azmodude, @deviantintegral, @jimsalterjrs, @alexhaydock, @cbreak-black, @kd8bny, @JavaScriptDude, @veeableful, @rsheasby, @Topslakr, @mavhc, @adam-stamand, @joelishness, @jsoref, @dodexahedron, @phreaker0)
 [syncoid] implemented flag for preserving properties without the zfs -p flag (@phreaker0)
 [syncoid] implemented target snapshot deletion (@mat813)
INSTALL.md (10 changed lines)
@@ -26,9 +26,10 @@ apt install debhelper libcapture-tiny-perl libconfig-inifiles-perl pv lzop mbuff
 ```
 
-Clone this repo, build the debian package and install it (alternatively you can skip the package and do it manually like described below for CentOS):
+Clone this repo under /tmp (to make sure the apt user has access to the unpacked clone), build the debian package and install it (alternatively you can skip the package and do it manually like described below for CentOS):
 
 ```bash
+cd /tmp
 git clone https://github.com/jimsalterjrs/sanoid.git
 cd sanoid
 # checkout latest stable release or stay on master for bleeding edge stuff (but expect bugs!)

@@ -59,6 +60,8 @@ sudo yum config-manager --set-enabled powertools
 sudo dnf config-manager --set-enabled powertools
 # On RHEL, instead of PowerTools, we need to enable the CodeReady Builder repo:
 sudo subscription-manager repos --enable=codeready-builder-for-rhel-8-x86_64-rpms
+# For Rocky Linux 9 or AlmaLinux 9 you need the CodeReady Builder repo, and it is labelled `crb`
+sudo dnf config-manager --set-enabled crb
 # Install the packages that Sanoid depends on:
 sudo yum install -y perl-Config-IniFiles perl-Data-Dumper perl-Capture-Tiny perl-Getopt-Long lzop mbuffer mhash pv
 # The repositories above should contain all the relevant Perl modules, but if you

@@ -73,6 +76,7 @@ cpan # answer the questions and paste the following lines:
 Clone this repo, then put the executables and config files into the appropriate directories:
 
 ```bash
+cd /tmp
 # Download the repo as root to avoid changing permissions later
 sudo git clone https://github.com/jimsalterjrs/sanoid.git
 cd sanoid

@@ -225,9 +229,9 @@ sudo launchctl load /Library/LaunchDaemons/net.openoid.Sanoid.plist
 ## Other OSes
 
-**Sanoid** depends on the Perl module Config::IniFiles and will not operate without it. Config::IniFiles may be installed from CPAN, though the project strongly recommends using your distribution's repositories instead.
+**Sanoid** depends on the Perl modules Config::IniFiles and Capture::Tiny and will not operate without them. These modules may be installed from CPAN, though the project strongly recommends using your distribution's repositories instead.
 
-**Syncoid** depends on ssh, pv, gzip, lzop, and mbuffer. It can run with reduced functionality in the absence of any or all of the above. SSH is only required for remote synchronization. On newer FreeBSD and Ubuntu Xenial chacha20-poly1305@openssh.com, on other distributions arcfour crypto is the default for SSH transport since v1.4.6. Syncoid runs will fail if one of them is not available on either end of the transport.
+**Syncoid** depends on ssh, pv, gzip, lzop, and mbuffer as well as sharing sanoid's dependency on Capture::Tiny. Capture::Tiny is mandatory, but syncoid can function with reduced functionality without any or all of the command-line dependencies. SSH is only required for remote synchronization. On newer FreeBSD and Ubuntu Xenial chacha20-poly1305@openssh.com, on other distributions arcfour crypto is the default for SSH transport since v1.4.6. Syncoid runs will fail if one of them is not available on either end of the transport.
 
 ### General outline for installation
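If your distribution's repositories do not carry these Perl modules, a CPAN install along the following lines should work; this is a sketch and assumes the `cpan` client is available and configured on the target host:

```bash
# install Sanoid's mandatory Perl dependencies from CPAN
# (prefer distribution packages such as libconfig-inifiles-perl / perl-Config-IniFiles when available)
sudo cpan Config::IniFiles Capture::Tiny
```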
README.md (12 changed lines)
@@ -80,10 +80,6 @@ For more full details on sanoid.conf settings see [Wiki page](https://github.com
 This will process your sanoid.conf file, it will NOT create snapshots, but it will purge expired ones.
 
-+ --force-prune
-
-Purges expired snapshots even if a send/recv is in progress
-
 + --monitor-snapshots
 
 This option is designed to be run by a Nagios monitoring system. It reports on the health of your snapshots.
@@ -100,6 +96,10 @@ For more full details on sanoid.conf settings see [Wiki page](https://github.com
 This clears out sanoid's zfs snapshot listing cache. This is normally not needed.
 
++ --cache-ttl=SECONDS
+
+Set custom cache expire time in seconds (default: 20 minutes).
+
 + --version
 
 This prints the version number, and exits.
@@ -126,7 +126,9 @@ For more full details on sanoid.conf settings see [Wiki page](https://github.com
 ### Sanoid script hooks
 
-There are three script types which can optionally be executed at various stages in the lifecycle of a snapshot:
+There are three script types which can optionally be executed at various stages in the lifecycle of a snapshot.
 
+**Note** that snapshot-related scripts are triggered only if you have `autosnap = yes`, and pruning scripts are triggered only if you have `autoprune = yes`.
+
 #### `pre_snapshot_script`
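As a sketch of what such a hook might look like: the pruning path in sanoid (shown further down in this diff) exports `SANOID_TARGET`, `SANOID_SNAPNAME`, and `SANOID_SCRIPT` into the hook's environment, so a minimal logging hook could be written as follows. Treat the exact variable set available to pre/post snapshot hooks as an assumption to verify against your sanoid version:

```bash
#!/bin/bash
# minimal illustrative hook script (e.g. referenced from sanoid.conf as pre_snapshot_script)
# SANOID_SCRIPT names the stage that fired; SANOID_TARGET/SANOID_SNAPNAME identify the snapshot
echo "$(date -Is) ${SANOID_SCRIPT}: dataset=${SANOID_TARGET} snapshot=${SANOID_SNAPNAME}" >> /var/log/sanoid-hooks.log
```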
findoid (2 changed lines)
@@ -4,7 +4,7 @@
 # from http://www.gnu.org/licenses/gpl-3.0.html on 2014-11-17. A copy should also be available in this
 # project's Git repository at https://github.com/jimsalterjrs/sanoid/blob/master/LICENSE.
 
-$::VERSION = '2.2.0';
+$::VERSION = '2.3.0';
 
 use strict;
 use warnings;
debian/changelog

@@ -1,3 +1,27 @@
+sanoid (2.3.0) unstable; urgency=medium
+
+  [overall] documentation updates, small fixes (@thecatontheflat, @mjeanson, @jiawen, @EchterAgo, @jan-krieg, @dlangille, @rightaditya, @MynaITLabs, @ossimoi, @alexgarel, @TopherIsSwell, @jimsalterjrs, @phreaker0)
+  [sanoid] implemented adding of taken snapshots to the cache file and a new parameter for setting a custom cache expire time (@phreaker0)
+  [sanoid] ignore duplicate template keys (@phreaker0)
+  [packaging] fix debian packaging with debian 12 and ubuntu 24.04 (@phreaker0)
+  [syncoid] fix typo preventing resumed transfer with --sendoptions (@Deltik)
+  [sanoid] remove iszfsbusy check to boost performance (@sdettmer)
+  [sanoid] write cache files in an atomic way to prevent race conditions (@phreaker0)
+  [sanoid] improve performance (especially for monitor commands) by caching the dataset list (@phreaker0)
+  [syncoid] add zstdmt compress options (@0xFelix)
+  [syncoid] added missing status information about what is done and provide more details (@phreaker0)
+  [syncoid] rename ssh control socket to avoid problems with length limits and conflicts (@phreaker0)
+  [syncoid] support relative paths (@phreaker0)
+  [syncoid] regather snapshots on --delete-target-snapshots flag (@Adam Fulton)
+  [sanoid] allow monitor commands to be run without root by using only the cache file (@Pajkastare)
+  [syncoid] add --include-snaps and --exclude-snaps options (@mr-vinn, @phreaker0)
+  [syncoid] escape property key and value pair in case of property preservation (@phreaker0)
+  [syncoid] prevent destroying of the root dataset, which leads to an infinite loop because it can't be destroyed (@phreaker0)
+  [syncoid] modify zfs-get argument order for portability (@Rantherhin)
+  [sanoid] trim config values (@phreaker0)
+
+ -- Jim Salter <github@jrs-s.net>  Tue, 05 Jun 2025 22:47:00 +0200
+
 sanoid (2.2.0) unstable; urgency=medium
 
   [overall] documentation updates, small fixes (@azmodude, @deviantintegral, @jimsalterjrs, @alexhaydock, @cbreak-black, @kd8bny, @JavaScriptDude, @veeableful, @rsheasby, @Topslakr, @mavhc, @adam-stamand, @joelishness, @jsoref, @dodexahedron, @phreaker0)
@@ -2,3 +2,5 @@
 # remove old cache file
 [ -f /var/cache/sanoidsnapshots.txt ] && rm /var/cache/sanoidsnapshots.txt || true
+[ -f /var/cache/sanoid/snapshots.txt ] && rm /var/cache/sanoid/snapshots.txt || true
+[ -f /var/cache/sanoid/datasets.txt ] && rm /var/cache/sanoid/datasets.txt || true
debian/rules

@@ -12,10 +12,6 @@ override_dh_auto_install:
 	install -d $(DESTDIR)/etc/sanoid
 	install -m 664 sanoid.defaults.conf $(DESTDIR)/etc/sanoid
 
-	install -d $(DESTDIR)/lib/systemd/system
-	install -m 664 debian/sanoid-prune.service debian/sanoid.timer \
-		$(DESTDIR)/lib/systemd/system
-
 	install -d $(DESTDIR)/usr/sbin
 	install -m 775 \
 		findoid sanoid sleepymutex syncoid \

@@ -25,6 +21,8 @@ override_dh_auto_install:
 	install -m 664 sanoid.conf \
 		$(DESTDIR)/usr/share/doc/sanoid/sanoid.conf.example
 
+	dh_installsystemd --name sanoid-prune
+
 override_dh_installinit:
 	dh_installinit --noscripts
sanoid.spec

@@ -1,4 +1,4 @@
-%global version 2.2.0
+%global version 2.3.0
 %global git_tag v%{version}
 
 # Enable with systemctl "enable sanoid.timer"

@@ -111,6 +111,8 @@ echo "* * * * * root %{_sbindir}/sanoid --cron" > %{buildroot}%{_docdir}/%{name}
 %endif
 
 %changelog
+* Tue Jun 05 2025 Christoph Klaffl <christoph@phreaker.eu> - 2.3.0
+- Bump to 2.3.0
 * Tue Jul 18 2023 Christoph Klaffl <christoph@phreaker.eu> - 2.2.0
 - Bump to 2.2.0
 * Tue Nov 24 2020 Christoph Klaffl <christoph@phreaker.eu> - 2.1.0
sanoid (157 changed lines)
@@ -4,7 +4,7 @@
 # from http://www.gnu.org/licenses/gpl-3.0.html on 2014-11-17. A copy should also be available in this
 # project's Git repository at https://github.com/jimsalterjrs/sanoid/blob/master/LICENSE.
 
-$::VERSION = '2.2.0';
+$::VERSION = '2.3.0';
 my $MINIMUM_DEFAULTS_VERSION = 2;
 
 use strict;

@@ -12,6 +12,7 @@ use warnings;
 use Config::IniFiles; # read samba-style conf file
 use Data::Dumper; # debugging - print contents of hash
 use File::Path 'make_path';
+use File::Copy;
 use Getopt::Long qw(:config auto_version auto_help);
 use Pod::Usage; # pod2usage
 use Time::Local; # to parse dates in reverse

@@ -26,7 +27,7 @@ GetOptions(\%args, "verbose", "debug", "cron", "readonly", "quiet",
 	"configdir=s", "cache-dir=s", "run-dir=s",
 	"monitor-health", "force-update",
 	"monitor-snapshots", "take-snapshots", "prune-snapshots", "force-prune",
-	"monitor-capacity"
+	"monitor-capacity", "cache-ttl=i"
 ) or pod2usage(2);
 
 # If only config directory (or nothing) has been specified, default to --cron --verbose

@@ -54,6 +55,17 @@ make_path($run_dir);
 
 my $cacheTTL = 1200; # 20 minutes
 
+if ($args{'force-prune'}) {
+	warn "WARN: --force-prune argument is deprecated and its behavior is now standard";
+}
+
+if ($args{'cache-ttl'}) {
+	if ($args{'cache-ttl'} < 0) {
+		die "ERROR: cache-ttl needs to be positive!\n";
+	}
+	$cacheTTL = $args{'cache-ttl'};
+}
+
 # Allow a much older snapshot cache file than default if _only_ "--monitor-*" action commands are given
 # (ignore "--verbose", "--configdir" etc)
 if (

@@ -66,7 +78,7 @@ if (
 	|| $args{'force-update'}
 	|| $args{'take-snapshots'}
 	|| $args{'prune-snapshots'}
-	|| $args{'force-prune'}
+	|| $args{'cache-ttl'}
 	)
 ) {
 	# The command combination above must not assert true for any command that takes or prunes snapshots

@@ -86,6 +98,7 @@ my %config = init($conf_file,$default_conf_file);
 
 my %pruned;
 my %capacitycache;
+my %taken;
 
 my %snaps;
 my %snapsbytype;
@@ -381,26 +394,23 @@ sub prune_snapshots {
 		}
 
 		if ($args{'verbose'}) { print "INFO: pruning $snap ... \n"; }
-		if (!$args{'force-prune'} && iszfsbusy($path)) {
-			if ($args{'verbose'}) { print "INFO: deferring pruning of $snap - $path is currently in zfs send or receive.\n"; }
-		} else {
-			if (! $args{'readonly'}) {
-				if (system($zfs, "destroy", $snap) == 0) {
-					$pruned{$snap} = 1;
-					if ($config{$dataset}{'pruning_script'}) {
-						$ENV{'SANOID_TARGET'} = $dataset;
-						$ENV{'SANOID_SNAPNAME'} = $snapname;
-						$ENV{'SANOID_SCRIPT'} = 'prune';
-						if ($args{'verbose'}) { print "executing pruning_script '".$config{$dataset}{'pruning_script'}."' on dataset '$dataset'\n"; }
-						my $ret = runscript('pruning_script',$dataset);
-
-						delete $ENV{'SANOID_TARGET'};
-						delete $ENV{'SANOID_SNAPNAME'};
-						delete $ENV{'SANOID_SCRIPT'};
-					}
-				} else {
-					warn "could not remove $snap : $?";
-				}
+		if (! $args{'readonly'}) {
+			if (system($zfs, "destroy", $snap) == 0) {
+				$pruned{$snap} = 1;
+				if ($config{$dataset}{'pruning_script'}) {
+					$ENV{'SANOID_TARGET'} = $dataset;
+					$ENV{'SANOID_SNAPNAME'} = $snapname;
+					$ENV{'SANOID_SCRIPT'} = 'prune';
+					if ($args{'verbose'}) { print "executing pruning_script '".$config{$dataset}{'pruning_script'}."' on dataset '$dataset'\n"; }
+					my $ret = runscript('pruning_script',$dataset);
+
+					delete $ENV{'SANOID_TARGET'};
+					delete $ENV{'SANOID_SNAPNAME'};
+					delete $ENV{'SANOID_SCRIPT'};
+				}
+			} else {
+				warn "could not remove $snap : $?";
+			}
 		}
 	}
@@ -592,6 +602,7 @@ sub take_snapshots {
 	}
 
 	if (%newsnapsgroup) {
+		$forcecacheupdate = 0;
 		while ((my $path, my $snapData) = each(%newsnapsgroup)) {
 			my $recursiveFlag = $snapData->{recursive};
 			my $dstHandling = $snapData->{handleDst};

@@ -662,9 +673,17 @@ sub take_snapshots {
 				}
 			};
 
+			if ($exit == 0) {
+				$taken{$snap} = {
+					'time' => time(),
+					'recursive' => $recursiveFlag
+				};
+			}
+
 			$exit == 0 or do {
 				if ($dstHandling) {
 					if ($stderr =~ /already exists/) {
 						$forcecacheupdate = 1;
 						$exit = 0;
 						$snap =~ s/_([a-z]+)$/dst_$1/g;
 						if ($args{'verbose'}) { print "taking dst snapshot $snap$extraMessage\n"; }

@@ -714,8 +733,8 @@ sub take_snapshots {
 			}
 		}
 	}
-	$forcecacheupdate = 1;
-	%snaps = getsnaps(%config,$cacheTTL,$forcecacheupdate);
+	addcachedsnapshots();
+	%snaps = getsnaps(\%config,$cacheTTL,$forcecacheupdate);
 }
 }
@@ -1008,6 +1027,12 @@ sub init {
 				}
 				if ($args{'debug'}) { print "DEBUG: overriding $key on $section with value from user-defined template $template.\n"; }
 				$config{$section}{$key} = $ini{$template}{$key};
+
+				my $value = $config{$section}{$key};
+				if (ref($value) eq 'ARRAY') {
+					# handle duplicates silently (warning was already printed above)
+					$config{$section}{$key} = $value->[0];
+				}
 			}
 		}
 	}

@@ -1630,30 +1655,6 @@ sub writelock {
 	close FH;
 }
 
-sub iszfsbusy {
-	# check to see if ZFS filesystem passed in as argument currently has a zfs send or zfs receive process referencing it.
-	# return true if busy (currently being sent or received), return false if not.
-
-	my $fs = shift;
-	# if (args{'debug'}) { print "DEBUG: checking to see if $fs on is already in zfs receive using $pscmd -Ao args= ...\n"; }
-
-	open PL, "$pscmd -Ao args= |";
-	my @processes = <PL>;
-	close PL;
-
-	foreach my $process (@processes) {
-		# if ($args{'debug'}) { print "DEBUG: checking process $process...\n"; }
-		if ($process =~ /zfs *(send|receive|recv).*$fs/) {
-			# there's already a zfs send/receive process for our target filesystem - return true
-			# if ($args{'debug'}) { print "DEBUG: process $process matches target $fs!\n"; }
-			return 1;
-		}
-	}
-
-	# no zfs receive processes for our target filesystem found - return false
-	return 0;
-}
-
 #######################################################################################################################3
 #######################################################################################################################3
 #######################################################################################################################3
@@ -1740,6 +1741,11 @@ sub removecachedsnapshots {
 		print FH $snapline unless ( exists($pruned{$snap}) );
 	}
 	close FH;
+
+	# preserve mtime of cache for expire check
+	my ($dev, $ino, $mode, $nlink, $uid, $gid, $rdev, $size, $atime, $mtime, $ctime, $blksize, $blocks) = stat($cache);
+	utime($atime, $mtime, "$cache.tmp");
+
 	rename("$cache.tmp", "$cache") or die "Could not rename to $cache!\n";
 
 	removelock('sanoid_cacheupdate');

@@ -1753,6 +1759,61 @@ sub removecachedsnapshots {
 #######################################################################################################################3
 #######################################################################################################################3
 
+sub addcachedsnapshots {
+	if (not %taken) {
+		return;
+	}
+
+	my $unlocked = checklock('sanoid_cacheupdate');
+
+	# wait until we can get a lock to do our cache changes
+	while (not $unlocked) {
+		if ($args{'verbose'}) { print "INFO: waiting for cache update lock held by another sanoid process.\n"; }
+		sleep(10);
+		$unlocked = checklock('sanoid_cacheupdate');
+	}
+
+	writelock('sanoid_cacheupdate');
+
+	if ($args{'verbose'}) {
+		print "INFO: adding taken snapshots to cache.\n";
+	}
+
+	copy($cache, "$cache.tmp") or die "Could not copy to $cache.tmp!\n";
+
+	open FH, ">> $cache.tmp" or die "Could not write to $cache.tmp!\n";
+	while((my $snap, my $details) = each(%taken)) {
+		my @parts = split("@", $snap, 2);
+
+		my $suffix = $parts[1] . "\tcreation\t" . $details->{time} . "\t-";
+		my $dataset = $parts[0];
+
+		print FH "${dataset}\@${suffix}\n";
+
+		if ($details->{recursive}) {
+			my @datasets = getchilddatasets($dataset);
+
+			foreach my $dataset(@datasets) {
+				print FH "${dataset}\@${suffix}\n";
+			}
+		}
+	}
+
+	close FH;
+
+	# preserve mtime of cache for expire check
+	my ($dev, $ino, $mode, $nlink, $uid, $gid, $rdev, $size, $atime, $mtime, $ctime, $blksize, $blocks) = stat($cache);
+	utime($atime, $mtime, "$cache.tmp");
+
+	rename("$cache.tmp", "$cache") or die "Could not rename to $cache!\n";
+
+	removelock('sanoid_cacheupdate');
+}
+
+#######################################################################################################################3
+#######################################################################################################################3
+#######################################################################################################################3
+
 sub runscript {
 	my $key=shift;
 	my $dataset=shift;
@@ -1850,7 +1911,7 @@ Options:
  --monitor-snapshots     Reports on snapshot "health", in a Nagios compatible format
  --take-snapshots        Creates snapshots as specified in sanoid.conf
  --prune-snapshots       Purges expired snapshots as specified in sanoid.conf
- --force-prune           Purges expired snapshots even if a send/recv is in progress
+ --cache-ttl=SECONDS     Set custom cache expire time in seconds (default: 20 minutes)
 
  --help                  Prints this helptext
  --version               Prints the version number
syncoid (302 changed lines)
@@ -4,7 +4,7 @@
 # from http://www.gnu.org/licenses/gpl-3.0.html on 2014-11-17. A copy should also be available in this
 # project's Git repository at https://github.com/jimsalterjrs/sanoid/blob/master/LICENSE.
 
-$::VERSION = '2.2.0';
+$::VERSION = '2.3.0';
 
 use strict;
 use warnings;

@@ -421,10 +421,10 @@ sub syncdataset {
 	if (!defined($receivetoken)) {
 		# build hashes of the snaps on the source and target filesystems.
 
-		%snaps = getsnaps('source',$sourcehost,$sourcefs,$sourceisroot,0);
+		%snaps = getsnaps('source',$sourcehost,$sourcefs,$sourceisroot);
 
 		if ($targetexists) {
-			my %targetsnaps = getsnaps('target',$targethost,$targetfs,$targetisroot,0);
+			my %targetsnaps = getsnaps('target',$targethost,$targetfs,$targetisroot);
 			my %sourcesnaps = %snaps;
 			%snaps = (%sourcesnaps, %targetsnaps);
 		}

@@ -444,7 +444,7 @@ sub syncdataset {
 			# Don't send the sync snap if it's filtered out by --exclude-snaps or
 			# --include-snaps
 			if (!snapisincluded($newsyncsnap)) {
-				$newsyncsnap = getnewestsnapshot(\%snaps);
+				$newsyncsnap = getnewestsnapshot($sourcehost,$sourcefs,$sourceisroot);
 				if ($newsyncsnap eq 0) {
 					writelog('WARN', "CRITICAL: no snapshots exist on source $sourcefs, and you asked for --no-sync-snap.");
 					if ($exitcode < 1) { $exitcode = 1; }

@@ -453,7 +453,7 @@ sub syncdataset {
 			}
 		} else {
 			# we don't want sync snapshots created, so use the newest snapshot we can find.
-			$newsyncsnap = getnewestsnapshot(\%snaps);
+			$newsyncsnap = getnewestsnapshot($sourcehost,$sourcefs,$sourceisroot);
 			if ($newsyncsnap eq 0) {
 				writelog('WARN', "CRITICAL: no snapshots exist on source $sourcefs, and you asked for --no-sync-snap.");
 				if ($exitcode < 1) { $exitcode = 1; }

@@ -581,7 +581,8 @@ sub syncdataset {
 
 		my $targetsize = getzfsvalue($targethost,$targetfs,$targetisroot,'-p used');
 
-		my %bookmark = ();
+		my $bookmark = 0;
+		my $bookmarkcreation = 0;
 
 		$matchingsnap = getmatchingsnapshot($sourcefs, $targetfs, \%snaps);
 		if (! $matchingsnap) {
@@ -589,18 +590,19 @@ sub syncdataset {
 			my %bookmarks = getbookmarks($sourcehost,$sourcefs,$sourceisroot);
 
 			# check for matching guid of source bookmark and target snapshot (oldest first)
-			foreach my $snap ( sort { sortsnapshots(\%snaps, $b, $a) } keys %{ $snaps{'target'} }) {
+			foreach my $snap ( sort { $snaps{'target'}{$b}{'creation'}<=>$snaps{'target'}{$a}{'creation'} } keys %{ $snaps{'target'} }) {
 				my $guid = $snaps{'target'}{$snap}{'guid'};
 
 				if (defined $bookmarks{$guid}) {
 					# found a match
-					%bookmark = %{ $bookmarks{$guid} };
+					$bookmark = $bookmarks{$guid}{'name'};
+					$bookmarkcreation = $bookmarks{$guid}{'creation'};
 					$matchingsnap = $snap;
 					last;
 				}
 			}
 
-			if (! %bookmark) {
+			if (! $bookmark) {
 				# force delete is not possible for the root dataset
 				if ($args{'force-delete'} && index($targetfs, '/') != -1) {
 					writelog('INFO', "Removing $targetfs because no matching snapshots were found");

@@ -673,18 +675,15 @@ sub syncdataset {
 
 		my $nextsnapshot = 0;
 
-		if (%bookmark) {
+		if ($bookmark) {
 			my $bookmarkescaped = escapeshellparam($bookmark);
 
 			if (!defined $args{'no-stream'}) {
 				# if intermediate snapshots are needed we need to find the next oldest snapshot,
 				# do an replication to it and replicate as always from oldest to newest
 				# because bookmark sends doesn't support intermediates directly
-				foreach my $snap ( sort { sortsnapshots(\%snaps, $a, $b) } keys %{ $snaps{'source'} }) {
-					my $comparisonkey = 'creation';
-					if (defined $snaps{'source'}{$snap}{'createtxg'} && defined $bookmark{'createtxg'}) {
-						$comparisonkey = 'createtxg';
-					}
-					if ($snaps{'source'}{$snap}{$comparisonkey} >= $bookmark{$comparisonkey}) {
+				foreach my $snap ( sort { $snaps{'source'}{$a}{'creation'}<=>$snaps{'source'}{$b}{'creation'} } keys %{ $snaps{'source'} }) {
+					if ($snaps{'source'}{$snap}{'creation'} >= $bookmarkcreation) {
 						$nextsnapshot = $snap;
 						last;
 					}

@@ -692,13 +691,13 @@ sub syncdataset {
 			}
 
 			if ($nextsnapshot) {
-				($exit, $stdout) = syncbookmark($sourcehost, $sourcefs, $targethost, $targetfs, $bookmark{'name'}, $nextsnapshot);
+				($exit, $stdout) = syncbookmark($sourcehost, $sourcefs, $targethost, $targetfs, $bookmark, $nextsnapshot);
 
 				$exit == 0 or do {
 					if (!$resume && $stdout =~ /\Qcontains partially-complete state\E/) {
 						writelog('WARN', "resetting partially receive state");
 						resetreceivestate($targethost,$targetfs,$targetisroot);
-						(my $ret) = syncbookmark($sourcehost, $sourcefs, $targethost, $targetfs, $bookmark{'name'}, $nextsnapshot);
+						(my $ret) = syncbookmark($sourcehost, $sourcefs, $targethost, $targetfs, $bookmark, $nextsnapshot);
 						$ret == 0 or do {
 							if ($exitcode < 2) { $exitcode = 2; }
 							return 0;

@@ -712,13 +711,13 @@ sub syncdataset {
 				$matchingsnap = $nextsnapshot;
 				$matchingsnapescaped = escapeshellparam($matchingsnap);
 			} else {
-				($exit, $stdout) = syncbookmark($sourcehost, $sourcefs, $targethost, $targetfs, $bookmark{'name'}, $newsyncsnap);
+				($exit, $stdout) = syncbookmark($sourcehost, $sourcefs, $targethost, $targetfs, $bookmark, $newsyncsnap);
 
 				$exit == 0 or do {
 					if (!$resume && $stdout =~ /\Qcontains partially-complete state\E/) {
 						writelog('WARN', "resetting partially receive state");
 						resetreceivestate($targethost,$targetfs,$targetisroot);
-						(my $ret) = syncbookmark($sourcehost, $sourcefs, $targethost, $targetfs, $bookmark{'name'}, $newsyncsnap);
+						(my $ret) = syncbookmark($sourcehost, $sourcefs, $targethost, $targetfs, $bookmark, $newsyncsnap);
 						$ret == 0 or do {
 							if ($exitcode < 2) { $exitcode = 2; }
 							return 0;

@@ -733,7 +732,7 @@ sub syncdataset {
 
 		# do a normal replication if bookmarks aren't used or if previous
 		# bookmark replication was only done to the next oldest snapshot
-		if (!%bookmark || $nextsnapshot) {
+		if (!$bookmark || $nextsnapshot) {
 			if ($matchingsnap eq $newsyncsnap) {
 				# edge case: bookmark replication used the latest snapshot
 				return 0;
@@ -864,15 +863,15 @@ sub syncdataset {
 			# snapshots first.
 
 			# regather snapshots on source and target
-			%snaps = getsnaps('source',$sourcehost,$sourcefs,$sourceisroot,0);
+			%snaps = getsnaps('source',$sourcehost,$sourcefs,$sourceisroot);
 
 			if ($targetexists) {
-				my %targetsnaps = getsnaps('target',$targethost,$targetfs,$targetisroot,0);
-				my %sourcesnaps = %snaps;
-				%snaps = (%sourcesnaps, %targetsnaps);
+				my %targetsnaps = getsnaps('target',$targethost,$targetfs,$targetisroot);
+				my %sourcesnaps = %snaps;
+				%snaps = (%sourcesnaps, %targetsnaps);
 			}
 
-			my @to_delete = sort { sortsnapshots(\%snaps, $a, $b) } grep {!exists $snaps{'source'}{$_}} keys %{ $snaps{'target'} };
+			my @to_delete = sort { $snaps{'target'}{$a}{'creation'}<=>$snaps{'target'}{$b}{'creation'} } grep {!exists $snaps{'source'}{$_}} keys %{ $snaps{'target'} };
 			while (@to_delete) {
 				# Create batch of snapshots to remove
 				my $snaps = join ',', splice(@to_delete, 0, 50);
@@ -904,7 +903,7 @@ sub runsynccmd {
 
 	my $disp_pvsize = $pvsize == 0 ? 'UNKNOWN' : readablebytes($pvsize);
 	my $sendoptions;
-	if ($sendsource =~ / -t /) {
+	if ($sendsource =~ /^-t /) {
 		$sendoptions = getoptionsline(\@sendoptions, ('P','V','e','v'));
 	} elsif ($sendsource =~ /#/) {
 		$sendoptions = getoptionsline(\@sendoptions, ('L','V','c','e','w'));

@@ -1570,7 +1569,8 @@ sub getlocalzfsvalues {
 		"receive_resume_token", "redact_snaps", "referenced", "refcompressratio", "snapshot_count",
 		"type", "used", "usedbychildren", "usedbydataset", "usedbyrefreservation",
 		"usedbysnapshots", "userrefs", "snapshots_changed", "volblocksize", "written",
-		"version", "volsize", "casesensitivity", "normalization", "utf8only"
+		"version", "volsize", "casesensitivity", "normalization", "utf8only",
+		"encryption"
 	);
 	my %blacklisthash = map {$_ => 1} @blacklist;
@@ -1599,17 +1599,9 @@ sub readablebytes {
 	return $disp;
 }
 
-sub sortsnapshots {
-	my ($snaps, $left, $right) = @_;
-	if (defined $snaps->{'source'}{$left}{'createtxg'} && defined $snaps->{'source'}{$right}{'createtxg'}) {
-		return $snaps->{'source'}{$left}{'createtxg'} <=> $snaps->{'source'}{$right}{'createtxg'};
-	}
-	return $snaps->{'source'}{$left}{'creation'} <=> $snaps->{'source'}{$right}{'creation'};
-}
-
 sub getoldestsnapshot {
 	my $snaps = shift;
-	foreach my $snap (sort { sortsnapshots($snaps, $a, $b) } keys %{ $snaps{'source'} }) {
+	foreach my $snap ( sort { $snaps{'source'}{$a}{'creation'}<=>$snaps{'source'}{$b}{'creation'} } keys %{ $snaps{'source'} }) {
 		# return on first snap found - it's the oldest
 		return $snap;
 	}

@@ -1623,7 +1615,7 @@ sub getoldestsnapshot {
 
 sub getnewestsnapshot {
 	my $snaps = shift;
-	foreach my $snap (sort { sortsnapshots($snaps, $b, $a) } keys %{ $snaps{'source'} }) {
+	foreach my $snap ( sort { $snaps{'source'}{$b}{'creation'}<=>$snaps{'source'}{$a}{'creation'} } keys %{ $snaps{'source'} }) {
 		# return on first snap found - it's the newest
 		writelog('DEBUG', "NEWEST SNAPSHOT: $snap");
 		return $snap;

@@ -1802,7 +1794,7 @@ sub pruneoldsyncsnaps {
 
 sub getmatchingsnapshot {
 	my ($sourcefs, $targetfs, $snaps) = @_;
-	foreach my $snap ( sort { sortsnapshots($snaps, $b, $a) } keys %{ $snaps{'source'} }) {
+	foreach my $snap ( sort { $snaps{'source'}{$b}{'creation'}<=>$snaps{'source'}{$a}{'creation'} } keys %{ $snaps{'source'} }) {
 		if (defined $snaps{'target'}{$snap}) {
 			if ($snaps{'source'}{$snap}{'guid'} == $snaps{'target'}{$snap}{'guid'}) {
 				return $snap;
@@ -1937,8 +1929,88 @@ sub dumphash() {
 	writelog('INFO', Dumper($hash));
 }
 
-sub getsnaps {
-	my ($type,$rhost,$fs,$isroot,$use_fallback,%snaps) = @_;
+sub getsnaps() {
+	my ($type,$rhost,$fs,$isroot,%snaps) = @_;
+	my $mysudocmd;
+	my $fsescaped = escapeshellparam($fs);
+	if ($isroot) { $mysudocmd = ''; } else { $mysudocmd = $sudocmd; }
+
+	my $rhostOriginal = $rhost;
+
+	if ($rhost ne '') {
+		$rhost = "$sshcmd $rhost";
+		# double escaping needed
+		$fsescaped = escapeshellparam($fsescaped);
+	}
+
+	my $getsnapcmd = "$rhost $mysudocmd $zfscmd get -Hpd 1 -t snapshot guid,creation $fsescaped";
+	if ($debug) {
+		$getsnapcmd = "$getsnapcmd |";
+		writelog('DEBUG', "getting list of snapshots on $fs using $getsnapcmd...");
+	} else {
+		$getsnapcmd = "$getsnapcmd 2>/dev/null |";
+	}
+	open FH, $getsnapcmd;
+	my @rawsnaps = <FH>;
+	close FH or do {
+		# fallback (solaris for example doesn't support the -t option)
+		return getsnapsfallback($type,$rhostOriginal,$fs,$isroot,%snaps);
+	};
+
+	# this is a little obnoxious. get guid,creation returns guid,creation on two separate lines
+	# as though each were an entirely separate get command.
+
+	my %creationtimes=();
+
+	foreach my $line (@rawsnaps) {
+		$line =~ /\Q$fs\E\@(\S*)/;
+		my $snapname = $1;
+
+		if (!snapisincluded($snapname)) { next; }
+
+		# only import snap guids from the specified filesystem
+		if ($line =~ /\Q$fs\E\@.*\tguid/) {
+			chomp $line;
+			my $guid = $line;
+			$guid =~ s/^.*\tguid\t*(\d*).*/$1/;
+			my $snap = $line;
+			$snap =~ s/^.*\@(.*)\tguid.*$/$1/;
+			$snaps{$type}{$snap}{'guid'}=$guid;
+		}
+		# only import snap creations from the specified filesystem
+		elsif ($line =~ /\Q$fs\E\@.*\tcreation/) {
+			chomp $line;
+			my $creation = $line;
+			$creation =~ s/^.*\tcreation\t*(\d*).*/$1/;
+			my $snap = $line;
+			$snap =~ s/^.*\@(.*)\tcreation.*$/$1/;
+
+			# the accuracy of the creation timestamp is only for a second, but
+			# snapshots in the same second are highly likely. The list command
+			# has an ordered output so we append another three digit running number
+			# to the creation timestamp and make sure those are ordered correctly
+			# for snapshot with the same creation timestamp
+			my $counter = 0;
+			my $creationsuffix;
+			while ($counter < 999) {
+				$creationsuffix = sprintf("%s%03d", $creation, $counter);
+				if (!defined $creationtimes{$creationsuffix}) {
+					$creationtimes{$creationsuffix} = 1;
+					last;
+				}
+				$counter += 1;
+			}
+
+			$snaps{$type}{$snap}{'creation'}=$creationsuffix;
+		}
+	}
+
+	return %snaps;
+}
+
+sub getsnapsfallback() {
+	# fallback (solaris for example doesn't support the -t option)
+	my ($type,$rhost,$fs,$isroot,%snaps) = @_;
 	my $mysudocmd;
 	my $fsescaped = escapeshellparam($fs);
 	if ($isroot) { $mysudocmd = ''; } else { $mysudocmd = $sudocmd; }

@@ -1949,67 +2021,73 @@
 		$fsescaped = escapeshellparam($fsescaped);
 	}
 
-	my $getsnapcmd = $use_fallback
-		? "$rhost $mysudocmd $zfscmd get -Hpd 1 all $fsescaped"
-		: "$rhost $mysudocmd $zfscmd get -Hpd 1 -t snapshot all $fsescaped";
-
-	if ($debug) {
-		$getsnapcmd = "$getsnapcmd |";
-		writelog('DEBUG', "getting list of snapshots on $fs using $getsnapcmd...");
-	} else {
-		$getsnapcmd = "$getsnapcmd 2>/dev/null |";
-	}
+	my $getsnapcmd = "$rhost $mysudocmd $zfscmd get -Hpd 1 type,guid,creation $fsescaped |";
+	writelog('WARN', "snapshot listing failed, trying fallback command");
+	writelog('DEBUG', "FALLBACK, getting list of snapshots on $fs using $getsnapcmd...");
 	open FH, $getsnapcmd;
 	my @rawsnaps = <FH>;
-	close FH or do {
-		if (!$use_fallback) {
-			writelog('WARN', "snapshot listing failed, trying fallback command");
-			return getsnaps($type, $rhost, $fs, $isroot, 1, %snaps);
-		}
-		die "CRITICAL ERROR: snapshots couldn't be listed for $fs (exit code $?)";
-	};
-
-	my %snap_data;
-	my %creationtimes;
-
-	for my $line (@rawsnaps) {
-		chomp $line;
-		my ($dataset, $property, $value) = split /\t/, $line;
-		die "CRITICAL ERROR: Unexpected line format in $line" unless defined $value;
-
-		my (undef, $snap) = split /@/, $dataset;
-		die "CRITICAL ERROR: Unexpected dataset format in $line" unless $snap;
-
-		if (!snapisincluded($snap)) { next; }
-
-		$snap_data{$snap}{$property} = $value;
-
-		if ($property eq 'creation') {
-			# the accuracy of the creation timestamp is only for a second, but
-			# snapshots in the same second are highly likely. The list command
-			# has an ordered output so we append another three digit running number
-			# to the creation timestamp and make sure those are ordered correctly
-			# for snapshot with the same creation timestamp
-			my $counter = 0;
-			my $creationsuffix;
-			while ($counter < 999) {
-				$creationsuffix = sprintf("%s%03d", $value, $counter);
-				if (!defined $creationtimes{$creationsuffix}) {
-					$creationtimes{$creationsuffix} = 1;
-					last;
-				}
-				$counter += 1;
-			}
-			$snap_data{$snap}{'creation'} = $creationsuffix;
-		}
-	}
-
-	for my $snap (keys %snap_data) {
-		if (!$use_fallback || $snap_data{$snap}{'type'} eq 'snapshot') {
-			$snaps{$type}{$snap}{'guid'} = $snap_data{$snap}{'guid'};
-			$snaps{$type}{$snap}{'createtxg'} = $snap_data{$snap}{'createtxg'};
-			$snaps{$type}{$snap}{'creation'} = $snap_data{$snap}{'creation'};
-		}
-	}
+	close FH or die "CRITICAL ERROR: snapshots couldn't be listed for $fs (exit code $?)";
+
+	my %creationtimes=();
+
+	my $state = 0;
+	foreach my $line (@rawsnaps) {
+		if ($state < 0) {
+			$state++;
+			next;
+		}
+
+		if ($state eq 0) {
+			if ($line !~ /\Q$fs\E\@.*\ttype\s*snapshot/) {
+				# skip non snapshot type object
+				$state = -2;
+				next;
+			}
+		} elsif ($state eq 1) {
+			if ($line !~ /\Q$fs\E\@.*\tguid/) {
+				die "CRITICAL ERROR: snapshots couldn't be listed for $fs (guid parser error)";
+			}
+
+			chomp $line;
+			my $guid = $line;
+			$guid =~ s/^.*\tguid\t*(\d*).*/$1/;
+			my $snap = $line;
+			$snap =~ s/^.*\@(.*)\tguid.*$/$1/;
+			if (!snapisincluded($snap)) { next; }
+			$snaps{$type}{$snap}{'guid'}=$guid;
+		} elsif ($state eq 2) {
+			if ($line !~ /\Q$fs\E\@.*\tcreation/) {
+				die "CRITICAL ERROR: snapshots couldn't be listed for $fs (creation parser error)";
+			}
+
+			chomp $line;
+			my $creation = $line;
+			$creation =~ s/^.*\tcreation\t*(\d*).*/$1/;
+			my $snap = $line;
+			$snap =~ s/^.*\@(.*)\tcreation.*$/$1/;
+			if (!snapisincluded($snap)) { next; }
+
+			# the accuracy of the creation timestamp is only for a second, but
+			# snapshots in the same second are highly likely. The list command
+			# has an ordered output so we append another three digit running number
+			# to the creation timestamp and make sure those are ordered correctly
+			# for snapshot with the same creation timestamp
+			my $counter = 0;
+			my $creationsuffix;
+			while ($counter < 999) {
+				$creationsuffix = sprintf("%s%03d", $creation, $counter);
+				if (!defined $creationtimes{$creationsuffix}) {
+					$creationtimes{$creationsuffix} = 1;
+					last;
+				}
+				$counter += 1;
+			}
+
+			$snaps{$type}{$snap}{'creation'}=$creationsuffix;
+			$state = -1;
+		}
+
+		$state++;
+	}
 
 	return %snaps;
@@ -2028,7 +2106,7 @@ sub getbookmarks() {
 	}
 
 	my $error = 0;
-	my $getbookmarkcmd = "$rhost $mysudocmd $zfscmd get -Hpd 1 -t bookmark all $fsescaped 2>&1 |";
+	my $getbookmarkcmd = "$rhost $mysudocmd $zfscmd get -Hpd 1 -t bookmark guid,creation $fsescaped 2>&1 |";
 	writelog('DEBUG', "getting list of bookmarks on $fs using $getbookmarkcmd...");
 	open FH, $getbookmarkcmd;
 	my @rawbookmarks = <FH>;
@@ -2043,44 +2121,46 @@ sub getbookmarks() {
 		die "CRITICAL ERROR: bookmarks couldn't be listed for $fs (exit code $?)";
 	}
 
-	my %bookmark_data;
-	my %creationtimes;
-
-	for my $line (@rawbookmarks) {
-		chomp $line;
-		my ($dataset, $property, $value) = split /\t/, $line;
-		die "CRITICAL ERROR: Unexpected line format in $line" unless defined $value;
-
-		my (undef, $bookmark) = split /#/, $dataset;
-		die "CRITICAL ERROR: Unexpected dataset format in $line" unless $bookmark;
-
-		$bookmark_data{$bookmark}{$property} = $value;
-
-		if ($property eq 'creation') {
-			# the accuracy of the creation timestamp is only for a second, but
-			# bookmarks in the same second are possible. The list command
-			# has an ordered output so we append another three digit running number
-			# to the creation timestamp and make sure those are ordered correctly
-			# for bookmarks with the same creation timestamp
-			my $counter = 0;
-			my $creationsuffix;
-			while ($counter < 999) {
-				$creationsuffix = sprintf("%s%03d", $value, $counter);
-				if (!defined $creationtimes{$creationsuffix}) {
-					$creationtimes{$creationsuffix} = 1;
-					last;
-				}
-				$counter += 1;
-			}
-			$bookmark_data{$bookmark}{'creation'} = $creationsuffix;
-		}
-	}
-
-	for my $bookmark (keys %bookmark_data) {
-		my $guid = $bookmark_data{$bookmark}{'guid'};
-		$bookmarks{$guid}{'name'} = $bookmark;
-		$bookmarks{$guid}{'creation'} = $bookmark_data{$bookmark}{'creation'};
-		$bookmarks{$guid}{'createtxg'} = $bookmark_data{$bookmark}{'createtxg'};
-	}
+	# this is a little obnoxious. get guid,creation returns guid,creation on two separate lines
+	# as though each were an entirely separate get command.
+
+	my $lastguid;
+	my %creationtimes=();
+
+	foreach my $line (@rawbookmarks) {
+		# only import bookmark guids, creation from the specified filesystem
+		if ($line =~ /\Q$fs\E\#.*\tguid/) {
+			chomp $line;
+			$lastguid = $line;
+			$lastguid =~ s/^.*\tguid\t*(\d*).*/$1/;
+			my $bookmark = $line;
+			$bookmark =~ s/^.*\#(.*)\tguid.*$/$1/;
+			$bookmarks{$lastguid}{'name'}=$bookmark;
+		} elsif ($line =~ /\Q$fs\E\#.*\tcreation/) {
+			chomp $line;
+			my $creation = $line;
+			$creation =~ s/^.*\tcreation\t*(\d*).*/$1/;
+			my $bookmark = $line;
+			$bookmark =~ s/^.*\#(.*)\tcreation.*$/$1/;
+
+			# the accuracy of the creation timestamp is only for a second, but
+			# bookmarks in the same second are possible. The list command
+			# has an ordered output so we append another three digit running number
+			# to the creation timestamp and make sure those are ordered correctly
+			# for bookmarks with the same creation timestamp
+			my $counter = 0;
+			my $creationsuffix;
+			while ($counter < 999) {
+				$creationsuffix = sprintf("%s%03d", $creation, $counter);
+				if (!defined $creationtimes{$creationsuffix}) {
+					$creationtimes{$creationsuffix} = 1;
+					last;
+				}
+				$counter += 1;
+			}
+
+			$bookmarks{$lastguid}{'creation'}=$creationsuffix;
+		}
+	}
 
 	return %bookmarks;
@@ -2244,7 +2324,7 @@ sub parsespecialoptions {
 			return undef;
 		}
 
-		if ($char eq 'o' || $char eq 'x') {
+		if ($char eq 'o' || $char eq 'x' || $char eq 'X') {
 			$lastOption = $char;
 			$optionValue = 1;
 		} else {
@@ -39,7 +39,7 @@ function cleanUp {
 trap cleanUp EXIT
 
 while [ $timestamp -le $END ]; do
-	setdate $timestamp; date; "${SANOID}" --cron --verbose
+	setdate $timestamp; date; "${SANOID}" --cron --verbose --cache-ttl=2592000
 	timestamp=$((timestamp+3600))
 done

@@ -42,7 +42,7 @@ function cleanUp {
 trap cleanUp EXIT
 
 while [ $timestamp -le $END ]; do
-	setdate $timestamp; date; "${SANOID}" --cron --verbose
+	setdate $timestamp; date; "${SANOID}" --cron --verbose --cache-ttl=2592000
 	timestamp=$((timestamp+900))
 done
@@ -10,7 +10,10 @@ function setup {
 export SANOID="../../sanoid"
 
 # make sure that there is no cache file
-rm -f /var/cache/sanoidsnapshots.txt
+rm -f /var/cache/sanoid/snapshots.txt
+rm -f /var/cache/sanoid/datasets.txt
+
+mkdir -p /etc/sanoid
 
 # install needed sanoid configuration files
 [ -f sanoid.conf ] && cp sanoid.conf /etc/sanoid/sanoid.conf

@@ -51,6 +54,11 @@ function disableTimeSync {
 	if [ $? -eq 0 ]; then
 		timedatectl set-ntp 0
 	fi
+
+	which systemctl > /dev/null
+	if [ $? -eq 0 ]; then
+		systemctl is-active virtualbox-guest-utils.service && systemctl stop virtualbox-guest-utils.service
+	fi
 }
 
 function saveSnapshotList {
@@ -2,7 +2,7 @@
 
 # run's all the available tests
 
-for test in $(find . -mindepth 1 -maxdepth 1 -type d -printf "%P\n" | sort -g); do
+for test in */; do
 	if [ ! -x "${test}/run.sh" ]; then
 		continue
 	fi
@@ -1,50 +0,0 @@
-#!/bin/bash
-
-# test verifying snapshots with out-of-order snapshot creation datetimes
-
-set -x
-set -e
-
-. ../../common/lib.sh
-
-if [ -z "$ALLOW_INVASIVE_TESTS" ]; then
-	exit 130
-fi
-
-POOL_IMAGE="/tmp/syncoid-test-11.zpool"
-POOL_SIZE="64M"
-POOL_NAME="syncoid-test-11"
-
-truncate -s "${POOL_SIZE}" "${POOL_IMAGE}"
-
-zpool create -m none -f "${POOL_NAME}" "${POOL_IMAGE}"
-
-function cleanUp {
-	zpool export "${POOL_NAME}"
-	rm -f "${POOL_IMAGE}"
-}
-
-# export pool and remove the image in any case
-trap cleanUp EXIT
-
-zfs create "${POOL_NAME}"/before
-zfs snapshot "${POOL_NAME}"/before@this-snapshot-should-make-it-into-the-after-dataset
-
-disableTimeSync
-setdate 1155533696
-zfs snapshot "${POOL_NAME}"/before@oldest-snapshot
-
-zfs snapshot "${POOL_NAME}"/before@another-snapshot-does-not-matter
-../../../syncoid --sendoptions="Lec" "${POOL_NAME}"/before "${POOL_NAME}"/after
-
-# verify
-saveSnapshotList "${POOL_NAME}" "snapshot-list.txt"
-
-grep "${POOL_NAME}/before@this-snapshot-should-make-it-into-the-after-dataset" "snapshot-list.txt" || exit $?
-grep "${POOL_NAME}/after@this-snapshot-should-make-it-into-the-after-dataset" "snapshot-list.txt" || exit $?
-grep "${POOL_NAME}/before@oldest-snapshot" "snapshot-list.txt" || exit $?
-grep "${POOL_NAME}/after@oldest-snapshot" "snapshot-list.txt" || exit $?
-grep "${POOL_NAME}/before@another-snapshot-does-not-matter" "snapshot-list.txt" || exit $?
-grep "${POOL_NAME}/after@another-snapshot-does-not-matter" "snapshot-list.txt" || exit $?
-
-exit 0
@@ -0,0 +1,55 @@
+#!/bin/bash
+
+# test verifying syncoid behavior with partial transfers
+
+set -x
+
+. ../../common/lib.sh
+
+POOL_IMAGE="/tmp/syncoid-test-012.zpool"
+POOL_SIZE="128M"
+POOL_NAME="syncoid-test-012"
+MOUNT_TARGET="/tmp/syncoid-test-012.mount"
+
+truncate -s "${POOL_SIZE}" "${POOL_IMAGE}"
+
+zpool create -O mountpoint="${MOUNT_TARGET}" -f "${POOL_NAME}" "${POOL_IMAGE}"
+
+function cleanUp {
+	zpool destroy "${POOL_NAME}"
+	rm -f "${POOL_IMAGE}"
+}
+
+# Clean up the pool and image file on exit
+trap cleanUp EXIT
+
+zfs create "${POOL_NAME}/source"
+zfs snap "${POOL_NAME}/source@empty"
+dd if=/dev/urandom of="${MOUNT_TARGET}/source/garbage.bin" bs=1M count=16
+zfs snap "${POOL_NAME}/source@something"
+
+# Simulate interrupted transfer
+zfs send -pwR "${POOL_NAME}/source@something" | head --bytes=8M | zfs recv -s "${POOL_NAME}/destination"
+
+# Using syncoid to continue interrupted transfer
+../../../syncoid --sendoptions="pw" "${POOL_NAME}/source" "${POOL_NAME}/destination"
+
+# Check if syncoid succeeded in handling the interrupted transfer
+if [ $? -eq 0 ]; then
+	echo "Syncoid resumed transfer successfully."
+
+	# Verify data integrity with sha256sum comparison
+	original_sum=$(sha256sum "${MOUNT_TARGET}/source/garbage.bin" | cut -d ' ' -f 1)
+	received_sum=$(sha256sum "${MOUNT_TARGET}/destination/garbage.bin" | cut -d ' ' -f 1)
+
+	if [ "${original_sum}" == "${received_sum}" ]; then
+		echo "Data integrity verified."
+		exit 0
+	else
+		echo "Data integrity check failed."
+		exit 1
+	fi
+else
+	echo "Regression detected: syncoid did not handle the resuming correctly."
+	exit 1
+fi
@@ -2,7 +2,7 @@
 
 # run's all the available tests
 
-for test in $(find . -mindepth 1 -maxdepth 1 -type d -printf "%P\n" | sort -g); do
+for test in */; do
 	if [ ! -x "${test}/run.sh" ]; then
 		continue
 	fi