Merge branch 'master' into fix/815

This commit is contained in:
Christoph Klaffl 2026-02-18 22:59:26 +01:00
commit 3b606768fa
No known key found for this signature in database
GPG Key ID: 8FC1D76EED4970D2
8 changed files with 191 additions and 33 deletions

View File

@ -8,7 +8,8 @@
- [Debian/Ubuntu](#debianubuntu) - [Debian/Ubuntu](#debianubuntu)
- [RHEL/CentOS/AlmaLinux](#RHEL/CentOS/AlmaLinux) - [RHEL/CentOS/AlmaLinux](#RHEL/CentOS/AlmaLinux)
- [FreeBSD](#freebsd) - [FreeBSD](#freebsd)
- [Alpine Linux / busybox](#alpine-Linux-busybox-based-distributions) - [Alpine Linux / busybox](#alpine-Linux-or-busybox-based-distributions)
- [OmniOS](#OmniOS)
- [Other OSes](#other-oses) - [Other OSes](#other-oses)
- [Configuration](#configuration) - [Configuration](#configuration)
- [Sanoid](#sanoid) - [Sanoid](#sanoid)
@ -175,13 +176,88 @@ pkg install p5-Config-Inifiles p5-Capture-Tiny pv mbuffer lzop sanoid
* See note about tcsh unpleasantness and other things in FREEBSD.readme * See note about tcsh unpleasantness and other things in FREEBSD.readme
## Alpine Linux / busybox based distributions ## OmniOS
The busybox implementation of ps is lacking needed arguments so a proper ps program needs to be installed. Used with OmniOS r34, r36 and r37 (with napp-it installed). Hence, we presume you have a standard perl installation etc.
For Alpine Linux this can be done with:
`apk --no-cache add procps` 1. Install prerequisites: Perl module Config::IniFiles, ssh, pv, gzip, lzop, and mbuffer
```bash
# install/update standard programs
pfexec pkg install openssh gzip mbuffer pipe-viewer
# include OpenCSW repository
pfexec pkg set-publisher -G '*' -g https://sfe.opencsw.org/localhostomnios localhostomnios
# install LZOP (from OpenCSW)
pfexec pkg install lzop
# install Perl modules
pfexec perl -MCPAN -e shell
install CPAN ## update CPAN
reload cpan ## reload
install inc::latest ## not sure if required
install IO::Scalar ## not sure if required
install Config::IniFiles
install Capture::Tiny
install Data::Dumper ## not sure if required, may be installed already
install File::Path ## not sure if required, may be installed already
install Getopt::Long ## not sure if required
install Pod::Usage ## not sure if required
install Time::Local ## not sure if required
exit
```
2. Download and clone the Sanoid repo:
```bash
# install git
pfexec pkg install git
# Tip: download the repo as root to avoid changing permissions later
pfexec git clone https://github.com/jimsalterjrs/sanoid.git
cd sanoid
# checkout latest stable release or stay on master for bleeding edge stuff (but expect bugs!)
pfexec git checkout $(git tag | grep "^v" | tail -n 1)
# patch syncoid, so that it correctly recognises the "zfs resume" capability under OmniOS (see https://github.com/jimsalterjrs/sanoid/issues/554)
<< $avail{'sourceresume'} = system("$sourcessh $resumechkcmd $srcpool 2>/dev/null | grep '\\(active\\|enabled\\)' >/dev/null 2>&1");
>> $avail{'sourceresume'} = system("$sourcessh $resumechkcmd $srcpool 2>/dev/null | grep -E '^(active|enabled)' >/dev/null 2>&1");
<< $avail{'targetresume'} = system("$targetssh $resumechkcmd $dstpool 2>/dev/null | grep '\\(active\\|enabled\\)' >/dev/null 2>&1");
>> $avail{'targetresume'} = system("$targetssh $resumechkcmd $dstpool 2>/dev/null | grep -E '^(active|enabled)' >/dev/null 2>&1");
# most likely not required, but make the executables eXecutable
pfexec chmod +x sanoid syncoid findoid sleepymutex
# Install the executables into /opt/sanoid
pfexec mkdir /opt/sanoid
pfexec cp sanoid syncoid findoid sleepymutex /opt/sanoid
# add symbolic links to executables to a directory in $path
pfexec ln -s /opt/sanoid/sanoid /usr/bin/sanoid && pfexec ln -s /opt/sanoid/syncoid /usr/bin/syncoid && pfexec ln -s /opt/sanoid/findoid /usr/bin/findoid && pfexec ln -s /opt/sanoid/sleepymutex /usr/bin/sleepymutex
```
3. Create the config directory /etc/sanoid, put default sanoid files there, and create and edit sanoid.conf:
```bash
# Create the config directory
pfexec mkdir /etc/sanoid
# Copy default config and sample config
pfexec cp sanoid.defaults.conf /etc/sanoid/ && pfexec cp sanoid.conf /etc/sanoid/sanoid.example.conf
# Create a blank config file
pfexec touch /etc/sanoid/sanoid.conf
## and edit it (using e.g. nano as editor):
pfexec nano /etc/sanoid/sanoid.conf
```
Further steps (not OmniOS specific):
- set up SSH connections between two remote hosts
- create a cron job that runs sanoid --cron --quiet periodically
## MacOS ## MacOS
Install prerequisite software: Install prerequisite software:

74
check_all_disk_space Normal file
View File

@ -0,0 +1,74 @@
#!/usr/bin/perl

# This is a Nagios-compatible script which checks all mounted filesystems--not just ZFS filesystems--to see
# if any are running dangerously low on space. It's generally bright enough to avoid weird mounts where
# free space checks don't apply, eg dev, tmpfs, CD or DVDs, and so forth.
#
# usage:
#    check_all_disk_space [warnlevel] [critlevel]
#
# example (levels are decimals, NOT percentages):
#    root@banshee:~/check_all_disk_space 0.80 0.90
#    OK
#
# exit codes follow the Nagios plugin convention: 0=OK, 1=WARNING, 2=CRITICAL, 3=UNKNOWN/ERROR

use strict;
use warnings;

# default levels: warn if a filesystem is 93% full, crit if it's 98% full
my $warnlevel = 0.93;
my $critlevel = 0.98;

# accept warnlevel and critlevel as arguments if passed. Must be passed as decimals, eg 0.4 for 40%.
# Both must be present and in [0,1] or the defaults are kept.
if (defined $ARGV[0] && defined $ARGV[1] && $ARGV[0] >= 0 && $ARGV[0] <= 1 && $ARGV[1] >= 0 && $ARGV[1] <= 1) {
	$warnlevel = $ARGV[0];
	$critlevel = $ARGV[1];
}

# declare ALL state lexically and initialized (the original `my $msg,$a,$b,$c;`
# only declared $msg; the rest were silently package globals, and $msg could be
# used uninitialized when every filesystem was healthy)
my ($msg, $warnfound, $critfound, $errfound) = ('', 0, 0, 0);

# get standard df output, but skip the tmpfs and devtmpfs crap - this should leave us with
# nothing but disks. we WILL also get nfs or other network filesystems here, since we
# didn't use the -l flag. Feature, not bug. also skip UDF so we don't check free space on CDs!
my @filesystems = `/bin/df -x tmpfs -x devtmpfs -x udf -x squashfs`;

# get rid of header line
shift @filesystems;

foreach my $fs (@filesystems) {
	chomp $fs;
	# compress runs of spaces so split() yields exactly the six df columns
	$fs =~ s/ +/ /g;
	my ($dev, $space, $used, $available, $percent, $mounted) = split(' ', $fs);
	# some pseudo/odd filesystems report 0 total blocks - skip them rather than divide by zero
	next if !defined $space || $space == 0;
	my $calcpercent = $used / $space;
	# '%%' (not a bare trailing '%') is the correct sprintf escape for a literal percent sign;
	# the original bare '%' printed the same but warned under `use warnings`
	my $prettypercent = sprintf('%.1f%%', $calcpercent * 100);
	if ($calcpercent > $critlevel) {
		$critfound = 1;
		$msg .= "$mounted: $prettypercent, ";
	} elsif ($calcpercent > $warnlevel) {
		$warnfound = 1;
		$msg .= "$mounted: $prettypercent, ";
	} elsif ($calcpercent < 0 || $calcpercent > 1) {
		# defensive sanity check: a ratio outside [0,1] means df gave us garbage
		$errfound = 1;
		$msg .= "$mounted: $prettypercent (WTF?), ";
	}
}

# strip the trailing ", " separator and terminate the message
$msg =~ s/, $//;
$msg .= "\n";

# report the most severe condition found, per Nagios conventions
if ($critfound) {
	print "CRITICAL: $msg";
	exit 2;
} elsif ($warnfound) {
	print "WARNING: $msg";
	exit 1;
} elsif ($errfound) {
	print "ERROR: $msg";
	exit 3;
}

print "OK\n";
exit 0;

View File

@ -1,3 +1,9 @@
sanoid (2.3.1-SNAPSHOT) unstable; urgency=medium
SNAPSHOT
-- Jim Salter <github@jrs-s.net> Tue, 12 Aug 2025 14:43:00 +0200
sanoid (2.3.0) unstable; urgency=medium sanoid (2.3.0) unstable; urgency=medium
[overall] documentation updates, small fixes (@thecatontheflat, @mjeanson, @jiawen, @EchterAgo, @jan-krieg, @dlangille, @rightaditya, @MynaITLabs, @ossimoi, @alexgarel, @TopherIsSwell, @jimsalterjrs, @phreaker0) [overall] documentation updates, small fixes (@thecatontheflat, @mjeanson, @jiawen, @EchterAgo, @jan-krieg, @dlangille, @rightaditya, @MynaITLabs, @ossimoi, @alexgarel, @TopherIsSwell, @jimsalterjrs, @phreaker0)

21
sanoid
View File

@ -1084,11 +1084,9 @@ sub init {
@datasets = getchilddatasets($config{$section}{'path'}); @datasets = getchilddatasets($config{$section}{'path'});
DATASETS: foreach my $dataset(@datasets) { DATASETS: foreach my $dataset(@datasets) {
if (! @cachedatasets) { if (! @cachedatasets) {
push (@updatedatasets, $dataset); push (@updatedatasets, "$dataset\n");
} }
chomp $dataset;
if ($zfsRecursive) { if ($zfsRecursive) {
# don't try to take the snapshot ourself, recursive zfs snapshot will take care of that # don't try to take the snapshot ourself, recursive zfs snapshot will take care of that
$config{$dataset}{'autosnap'} = 0; $config{$dataset}{'autosnap'} = 0;
@ -1377,9 +1375,9 @@ sub check_zpool() {
# check for io/checksum errors # check for io/checksum errors
my @vdeverr = (); my @vdeverr = ();
if ($read != 0) { push @vdeverr, "read" }; if ($read ne 0) { push @vdeverr, "read" };
if ($write != 0) { push @vdeverr, "write" }; if ($write ne 0) { push @vdeverr, "write" };
if ($cksum != 0) { push @vdeverr, "cksum" }; if ($cksum ne 0) { push @vdeverr, "cksum" };
if (scalar @vdeverr) { if (scalar @vdeverr) {
$dmge=$dmge . "(" . $dev . ":" . join(", ", @vdeverr) . " errors) "; $dmge=$dmge . "(" . $dev . ":" . join(", ", @vdeverr) . " errors) ";
@ -1691,7 +1689,7 @@ sub getchilddatasets {
my $getchildrencmd = "$mysudocmd $zfs list -o name -t filesystem,volume -Hr $fs |"; my $getchildrencmd = "$mysudocmd $zfs list -o name -t filesystem,volume -Hr $fs |";
if ($args{'debug'}) { print "DEBUG: getting list of child datasets on $fs using $getchildrencmd...\n"; } if ($args{'debug'}) { print "DEBUG: getting list of child datasets on $fs using $getchildrencmd...\n"; }
open FH, $getchildrencmd; open FH, $getchildrencmd;
my @children = <FH>; chomp( my @children = <FH> );
close FH; close FH;
# parent dataset is the first element # parent dataset is the first element
@ -1781,25 +1779,26 @@ sub addcachedsnapshots {
copy($cache, "$cache.tmp") or die "Could not copy to $cache.tmp!\n"; copy($cache, "$cache.tmp") or die "Could not copy to $cache.tmp!\n";
open FH, ">> $cache.tmp" or die "Could not write to $cache.tmp!\n"; open my $fh, ">> $cache.tmp" or die "Could not write to $cache.tmp!\n";
while((my $snap, my $details) = each(%taken)) { while((my $snap, my $details) = each(%taken)) {
my @parts = split("@", $snap, 2); my @parts = split("@", $snap, 2);
my $suffix = $parts[1] . "\tcreation\t" . $details->{time} . "\t-"; my $suffix = $parts[1] . "\tcreation\t" . $details->{time} . "\t-";
my $dataset = $parts[0]; my $dataset = $parts[0];
print FH "${dataset}\@${suffix}\n"; print $fh "${dataset}\@${suffix}\n";
if ($details->{recursive}) { if ($details->{recursive}) {
my @datasets = getchilddatasets($dataset); my @datasets = getchilddatasets($dataset);
foreach my $dataset(@datasets) { foreach my $dataset(@datasets) {
print FH "${dataset}\@${suffix}\n"; print "${dataset}\@${suffix}\n";
print $fh "${dataset}\@${suffix}\n";
} }
} }
} }
close FH; close $fh;
# preserve mtime of cache for expire check # preserve mtime of cache for expire check
my ($dev, $ino, $mode, $nlink, $uid, $gid, $rdev, $size, $atime, $mtime, $ctime, $blksize, $blocks) = stat($cache); my ($dev, $ino, $mode, $nlink, $uid, $gid, $rdev, $size, $atime, $mtime, $ctime, $blksize, $blocks) = stat($cache);

23
syncoid
View File

@ -178,7 +178,7 @@ if (length $args{'insecure-direct-connection'}) {
$directlisten = $args{'insecure-direct-connection'}; $directlisten = $args{'insecure-direct-connection'};
} }
if (scalar @parts == 3) { if (scalar @parts >= 3) {
$directtimeout = $parts[2]; $directtimeout = $parts[2];
} }
@ -434,7 +434,7 @@ sub syncdataset {
if (!defined $args{'no-sync-snap'} && !defined $skipsnapshot) { if (!defined $args{'no-sync-snap'} && !defined $skipsnapshot) {
# create a new syncoid snapshot on the source filesystem. # create a new syncoid snapshot on the source filesystem.
$newsyncsnap = newsyncsnap($sourcehost,$sourcefs,$sourceisroot); $newsyncsnap = newsyncsnap($sourcehost,$sourcefs,$sourceisroot);
if (!$newsyncsnap) { if ($newsyncsnap eq '') {
# we already whined about the error # we already whined about the error
return 0; return 0;
} }
@ -442,7 +442,7 @@ sub syncdataset {
# --include-snaps # --include-snaps
if (!snapisincluded($newsyncsnap)) { if (!snapisincluded($newsyncsnap)) {
$newsyncsnap = getnewestsnapshot(\%snaps); $newsyncsnap = getnewestsnapshot(\%snaps);
if ($newsyncsnap eq 0) { if ($newsyncsnap eq '') {
writelog('WARN', "CRITICAL: no snapshots exist on source $sourcefs, and you asked for --no-sync-snap."); writelog('WARN', "CRITICAL: no snapshots exist on source $sourcefs, and you asked for --no-sync-snap.");
if ($exitcode < 1) { $exitcode = 1; } if ($exitcode < 1) { $exitcode = 1; }
return 0; return 0;
@ -451,7 +451,7 @@ sub syncdataset {
} else { } else {
# we don't want sync snapshots created, so use the newest snapshot we can find. # we don't want sync snapshots created, so use the newest snapshot we can find.
$newsyncsnap = getnewestsnapshot(\%snaps); $newsyncsnap = getnewestsnapshot(\%snaps);
if ($newsyncsnap eq 0) { if ($newsyncsnap eq '') {
writelog('WARN', "CRITICAL: no snapshots exist on source $sourcefs, and you asked for --no-sync-snap."); writelog('WARN', "CRITICAL: no snapshots exist on source $sourcefs, and you asked for --no-sync-snap.");
if ($exitcode < 1) { $exitcode = 1; } if ($exitcode < 1) { $exitcode = 1; }
return 0; return 0;
@ -479,13 +479,13 @@ sub syncdataset {
writelog('DEBUG', "target $targetfs does not exist, and --no-stream selected. Finding newest available snapshot on source $sourcefs ..."); writelog('DEBUG', "target $targetfs does not exist, and --no-stream selected. Finding newest available snapshot on source $sourcefs ...");
} }
my $oldestsnap = getoldestsnapshot(\%snaps); my $oldestsnap = getoldestsnapshot(\%snaps);
if (! $oldestsnap) { if ($oldestsnap eq '') {
if (defined ($args{'no-sync-snap'}) ) { if (defined ($args{'no-sync-snap'}) ) {
# we already whined about the missing snapshots # we already whined about the missing snapshots
return 0; return 0;
} }
# getoldestsnapshot() returned false, so use new sync snapshot # getoldestsnapshot() returned null, so use new sync snapshot
writelog('DEBUG', "getoldestsnapshot() returned false, so using $newsyncsnap."); writelog('DEBUG', "getoldestsnapshot() returned false, so using $newsyncsnap.");
$oldestsnap = $newsyncsnap; $oldestsnap = $newsyncsnap;
} }
@ -730,11 +730,8 @@ sub syncdataset {
# do a normal replication if bookmarks aren't used or if previous # do a normal replication if bookmarks aren't used or if previous
# bookmark replication was only done to the next oldest snapshot # bookmark replication was only done to the next oldest snapshot
if (!%bookmark || $nextsnapshot) { # edge case: skip replication if bookmark replication used the latest snapshot
if ($matchingsnap eq $newsyncsnap) { if ((!%bookmark || $nextsnapshot) && !($matchingsnap eq $newsyncsnap)) {
# edge case: bookmark replication used the latest snapshot
return 0;
}
($exit, $stdout) = syncincremental($sourcehost, $sourcefs, $targethost, $targetfs, $matchingsnap, $newsyncsnap, defined($args{'no-stream'})); ($exit, $stdout) = syncincremental($sourcehost, $sourcefs, $targethost, $targetfs, $matchingsnap, $newsyncsnap, defined($args{'no-stream'}));
@ -1599,7 +1596,7 @@ sub getoldestsnapshot {
# well, actually we set --no-sync-snap, so no we *didn't* already make one. Whoops. # well, actually we set --no-sync-snap, so no we *didn't* already make one. Whoops.
writelog('CRITICAL', "--no-sync-snap is set, and getoldestsnapshot() could not find any snapshots on source!"); writelog('CRITICAL', "--no-sync-snap is set, and getoldestsnapshot() could not find any snapshots on source!");
} }
return 0; return '';
} }
sub getnewestsnapshot { sub getnewestsnapshot {
@ -1621,7 +1618,7 @@ sub getnewestsnapshot {
writelog('WARN', "--no-sync-snap is set, and getnewestsnapshot() could not find any snapshots on source for current dataset. Continuing."); writelog('WARN', "--no-sync-snap is set, and getnewestsnapshot() could not find any snapshots on source for current dataset. Continuing.");
if ($exitcode < 2) { $exitcode = 2; } if ($exitcode < 2) { $exitcode = 2; }
} }
return 0; return '';
} }
sub buildsynccmd { sub buildsynccmd {

View File

@ -57,7 +57,7 @@ function disableTimeSync {
which systemctl > /dev/null which systemctl > /dev/null
if [ $? -eq 0 ]; then if [ $? -eq 0 ]; then
systemctl is-active virtualbox-guest-utils.service && systemctl stop virtualbox-guest-utils.service || true systemctl is-active virtualbox-guest-utils.service && systemctl stop virtualbox-guest-utils.service
fi fi
} }

View File

@ -7,9 +7,13 @@ set -e
. ../../common/lib.sh . ../../common/lib.sh
POOL_IMAGE="/tmp/jimsalterjrs_sanoid_815.img" if [ "$INVASIVE_TESTS" != "1" ]; then
exit 130
fi
POOL_IMAGE="/tmp/syncoid-test-11.zpool"
POOL_SIZE="64M" POOL_SIZE="64M"
POOL_NAME="jimsalterjrs_sanoid_815" POOL_NAME="syncoid-test-11"
truncate -s "${POOL_SIZE}" "${POOL_IMAGE}" truncate -s "${POOL_SIZE}" "${POOL_IMAGE}"

View File

@ -2,6 +2,8 @@
# runs all the available tests
# set INVASIVE_TESTS=1 to also run invasive tests which manipulate the system time
for test in */; do for test in */; do
if [ ! -x "${test}/run.sh" ]; then if [ ! -x "${test}/run.sh" ]; then
continue continue