mirror of https://github.com/jimsalterjrs/sanoid
Compare commits
13 Commits
9707a2b779
...
fe49a8a99d
| Author | SHA1 | Date |
|---|---|---|
|
|
fe49a8a99d | |
|
|
2c98f0501c | |
|
|
0f3a9c94d9 | |
|
|
a6728e48de | |
|
|
393a4672e5 | |
|
|
efd52f416d | |
|
|
749490830f | |
|
|
114724b0a4 | |
|
|
d493ef7592 | |
|
|
876685a636 | |
|
|
b1c2bcf859 | |
|
|
41ee154d2c | |
|
|
a61808d3bf |
87
INSTALL.md
87
INSTALL.md
|
|
@ -8,7 +8,8 @@
|
|||
- [Debian/Ubuntu](#debianubuntu)
|
||||
- [RHEL/CentOS/AlmaLinux](#RHEL/CentOS/AlmaLinux)
|
||||
- [FreeBSD](#freebsd)
|
||||
- [Alpine Linux / busybox](#alpine-Linux-busybox-based-distributions)
|
||||
- [Alpine Linux / busybox](#alpine-Linux-or-busybox-based-distributions)
|
||||
- [OmniOS](#OmniOS)
|
||||
- [Other OSes](#other-oses)
|
||||
- [Configuration](#configuration)
|
||||
- [Sanoid](#sanoid)
|
||||
|
|
@ -175,13 +176,95 @@ pkg install p5-Config-Inifiles p5-Capture-Tiny pv mbuffer lzop sanoid
|
|||
|
||||
* See note about tcsh unpleasantness and other things in FREEBSD.readme
|
||||
|
||||
## Alpine Linux / busybox based distributions
|
||||
## Alpine Linux or busybox based distributions
|
||||
|
||||
The busybox implementation of ps lacks the needed arguments, so a proper ps program needs to be installed.
|
||||
For Alpine Linux this can be done with:
|
||||
|
||||
`apk --no-cache add procps`
|
||||
|
||||
## OmniOS
|
||||
|
||||
Used with OmniOS r34, r36 and r37 (with napp-it installed). Hence, we presume you have a standard perl installation etc.
|
||||
|
||||
1. Install prerequisites: Perl module Config::IniFiles, ssh, pv, gzip, lzop, and mbuffer
|
||||
|
||||
```bash
|
||||
# install/update standard programs
|
||||
pfexec pkg install openssh gzip mbuffer pipe-viewer
|
||||
|
||||
# include OpenCSW repository
|
||||
pfexec pkg set-publisher -G '*' -g https://sfe.opencsw.org/localhostomnios localhostomnios
|
||||
|
||||
# install LZOP (from OpenCSW)
|
||||
pfexec pkg install lzop
|
||||
|
||||
# install Perl modules
|
||||
pfexec perl -MCPAN -e shell
|
||||
install CPAN ## update CPAN
|
||||
reload cpan ## reload
|
||||
|
||||
install inc::latest ## not sure if required
|
||||
install IO::Scalar ## not sure if required
|
||||
install Config::IniFiles
|
||||
install Capture::Tiny
|
||||
install Data::Dumper ## not sure if required, may be installed already
|
||||
install File::Path ## not sure if required, may be installed already
|
||||
install Getopt::Long ## not sure if required
|
||||
install Pod::Usage ## not sure if required
|
||||
install Time::Local ## not sure if required
|
||||
exit
|
||||
```
|
||||
|
||||
2. Download and clone the Sanoid repo:
|
||||
|
||||
```bash
|
||||
# install git
|
||||
pfexec pkg install git
|
||||
|
||||
# Tip: download the repo as root to avoid changing permissions later
|
||||
pfexec git clone https://github.com/jimsalterjrs/sanoid.git
|
||||
cd sanoid
|
||||
|
||||
# checkout latest stable release or stay on master for bleeding edge stuff (but expect bugs!)
|
||||
pfexec git checkout $(git tag --sort=v:refname | grep "^v" | tail -n 1)
|
||||
|
||||
# patch syncoid, so that it correctly recognises the "zfs resume" capability under OmniOS (see https://github.com/jimsalterjrs/sanoid/issues/554)
|
||||
<< $avail{'sourceresume'} = system("$sourcessh $resumechkcmd $srcpool 2>/dev/null | grep '\\(active\\|enabled\\)' >/dev/null 2>&1");
|
||||
>> $avail{'sourceresume'} = system("$sourcessh $resumechkcmd $srcpool 2>/dev/null | grep -E '^(active|enabled)' >/dev/null 2>&1");
|
||||
<< $avail{'targetresume'} = system("$targetssh $resumechkcmd $dstpool 2>/dev/null | grep '\\(active\\|enabled\\)' >/dev/null 2>&1");
|
||||
>> $avail{'targetresume'} = system("$targetssh $resumechkcmd $dstpool 2>/dev/null | grep -E '^(active|enabled)' >/dev/null 2>&1");
|
||||
|
||||
# most likely not required, but make the executables eXecutable
|
||||
pfexec chmod +x sanoid syncoid findoid sleepymutex
|
||||
|
||||
# Install the executables into /opt/sanoid
|
||||
pfexec mkdir /opt/sanoid
|
||||
pfexec cp sanoid syncoid findoid sleepymutex /opt/sanoid
|
||||
|
||||
# add symbolic links to the executables in a directory that is in $PATH
|
||||
pfexec ln -s /opt/sanoid/sanoid /usr/bin/sanoid && pfexec ln -s /opt/sanoid/syncoid /usr/bin/syncoid && pfexec ln -s /opt/sanoid/findoid /usr/bin/findoid && pfexec ln -s /opt/sanoid/sleepymutex /usr/bin/sleepymutex
|
||||
```
|
||||
|
||||
3. Create the config directory /etc/sanoid, put default sanoid files there, and create and edit sanoid.conf:
|
||||
```bash
|
||||
# Create the config directory
|
||||
pfexec mkdir /etc/sanoid
|
||||
|
||||
# Copy default config and sample config
|
||||
pfexec cp sanoid.defaults.conf /etc/sanoid/
pfexec cp sanoid.conf /etc/sanoid/sanoid.example.conf
|
||||
|
||||
# Create a blank config file
|
||||
pfexec touch /etc/sanoid/sanoid.conf
|
||||
## and edit it (using e.g. nano as editor):
|
||||
pfexec nano /etc/sanoid/sanoid.conf
|
||||
```
|
||||
|
||||
Further steps (not OmniOS specific):
|
||||
- set up SSH connections between two remote hosts
|
||||
- create a cron job that runs sanoid --cron --quiet periodically
|
||||
|
||||
=======
|
||||
## MacOS
|
||||
|
||||
Install prerequisite software:
|
||||
|
|
|
|||
15
sanoid
15
sanoid
|
|
@ -1084,11 +1084,9 @@ sub init {
|
|||
@datasets = getchilddatasets($config{$section}{'path'});
|
||||
DATASETS: foreach my $dataset(@datasets) {
|
||||
if (! @cachedatasets) {
|
||||
push (@updatedatasets, $dataset);
|
||||
push (@updatedatasets, "$dataset\n");
|
||||
}
|
||||
|
||||
chomp $dataset;
|
||||
|
||||
if ($zfsRecursive) {
|
||||
# don't try to take the snapshot ourself, recursive zfs snapshot will take care of that
|
||||
$config{$dataset}{'autosnap'} = 0;
|
||||
|
|
@ -1691,7 +1689,7 @@ sub getchilddatasets {
|
|||
my $getchildrencmd = "$mysudocmd $zfs list -o name -t filesystem,volume -Hr $fs |";
|
||||
if ($args{'debug'}) { print "DEBUG: getting list of child datasets on $fs using $getchildrencmd...\n"; }
|
||||
open FH, $getchildrencmd;
|
||||
my @children = <FH>;
|
||||
chomp( my @children = <FH> );
|
||||
close FH;
|
||||
|
||||
# parent dataset is the first element
|
||||
|
|
@ -1781,25 +1779,26 @@ sub addcachedsnapshots {
|
|||
|
||||
copy($cache, "$cache.tmp") or die "Could not copy to $cache.tmp!\n";
|
||||
|
||||
open FH, ">> $cache.tmp" or die "Could not write to $cache.tmp!\n";
|
||||
open my $fh, ">> $cache.tmp" or die "Could not write to $cache.tmp!\n";
|
||||
while((my $snap, my $details) = each(%taken)) {
|
||||
my @parts = split("@", $snap, 2);
|
||||
|
||||
my $suffix = $parts[1] . "\tcreation\t" . $details->{time} . "\t-";
|
||||
my $dataset = $parts[0];
|
||||
|
||||
print FH "${dataset}\@${suffix}\n";
|
||||
print $fh "${dataset}\@${suffix}\n";
|
||||
|
||||
if ($details->{recursive}) {
|
||||
my @datasets = getchilddatasets($dataset);
|
||||
|
||||
foreach my $dataset(@datasets) {
|
||||
print FH "${dataset}\@${suffix}\n";
|
||||
print "${dataset}\@${suffix}\n";
|
||||
print $fh "${dataset}\@${suffix}\n";
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
close FH;
|
||||
close $fh;
|
||||
|
||||
# preserve mtime of cache for expire check
|
||||
my ($dev, $ino, $mode, $nlink, $uid, $gid, $rdev, $size, $atime, $mtime, $ctime, $blksize, $blocks) = stat($cache);
|
||||
|
|
|
|||
111
syncoid
111
syncoid
|
|
@ -178,7 +178,7 @@ if (length $args{'insecure-direct-connection'}) {
|
|||
$directlisten = $args{'insecure-direct-connection'};
|
||||
}
|
||||
|
||||
if (scalar @parts == 3) {
|
||||
if (scalar @parts >= 3) {
|
||||
$directtimeout = $parts[2];
|
||||
}
|
||||
|
||||
|
|
@ -215,53 +215,86 @@ if (!defined $args{'recursive'}) {
|
|||
$exitcode = 2;
|
||||
}
|
||||
|
||||
my @deferred;
|
||||
|
||||
foreach my $datasetProperties(@datasets) {
|
||||
my %datasetsByName;
|
||||
foreach my $datasetProperties (@datasets) {
|
||||
my $dataset = $datasetProperties->{'name'};
|
||||
my $origin = $datasetProperties->{'origin'};
|
||||
if ($origin eq "-" || defined $args{'no-clone-handling'}) {
|
||||
$origin = undef;
|
||||
} else {
|
||||
# check if clone source is replicated too
|
||||
my @values = split(/@/, $origin, 2);
|
||||
my $srcdataset = $values[0];
|
||||
$datasetsByName{$dataset} = $datasetProperties;
|
||||
|
||||
my $found = 0;
|
||||
foreach my $datasetProperties(@datasets) {
|
||||
if ($datasetProperties->{'name'} eq $srcdataset) {
|
||||
$found = 1;
|
||||
last;
|
||||
# Clean the 'origin' property
|
||||
# (we set 'origin' to undef whenever we don't want to handle it during sync)
|
||||
if ($origin eq "-" || defined $args{'no-clone-handling'}) {
|
||||
$datasetProperties->{'origin'} = undef;
|
||||
}
|
||||
}
|
||||
|
||||
my %synced;
|
||||
|
||||
foreach my $dataset1Properties (@datasets) {
|
||||
my $dataset1 = $dataset1Properties->{'name'};
|
||||
|
||||
# Collect all transitive dependencies of this dataset.
|
||||
# A dataset can have two dependencies:
|
||||
# - the parent dataset
|
||||
# - the origin (if it is a clone)
|
||||
my @todo = ($dataset1); # the datasets whose dependencies we still have to collect
|
||||
my @tosync; # the datasets we have to sync (in the correct order)
|
||||
my %tosyncSet; # set of synced datasets to check for dependency cycles
|
||||
while (@todo) {
|
||||
my $dataset = shift(@todo);
|
||||
|
||||
if (exists $synced{$dataset}) {
|
||||
# We already synced this dataset, thus also all its dependencies => skip
|
||||
next;
|
||||
}
|
||||
|
||||
if (exists $tosyncSet{$dataset}) {
|
||||
# We already processed this dataset once during this loop,
|
||||
# so we do not need to do it again.
|
||||
# This check is also necessary to break dependency cycles.
|
||||
#
|
||||
# NOTE:
|
||||
# If there is a cycle, multiple syncoid runs might be necessary to replicate all datasets,
|
||||
# and not all clone relationships will be preserved
|
||||
# (it seems like huge effort to handle this case properly, and it should be quite rare in practice)
|
||||
next;
|
||||
}
|
||||
|
||||
unshift @tosync, $dataset;
|
||||
$tosyncSet{$dataset} = 1;
|
||||
|
||||
my ($parent) = $dataset =~ /(.*)\/[^\/]+/;
|
||||
if (defined $parent) {
|
||||
# If parent is replicated too, sync it first
|
||||
if (exists $datasetsByName{$parent}) {
|
||||
push @todo, $parent;
|
||||
}
|
||||
}
|
||||
|
||||
if ($found == 0) {
|
||||
# clone source is not replicated, do a full replication
|
||||
$origin = undef;
|
||||
} else {
|
||||
# clone source is replicated, defer until all non clones are replicated
|
||||
push @deferred, $datasetProperties;
|
||||
next;
|
||||
my $origin = $datasetsByName{$dataset}->{'origin'};
|
||||
if (defined $origin) {
|
||||
# If clone source is replicated too, sync it first
|
||||
my @values = split(/@/, $origin, 2);
|
||||
my $srcdataset = $values[0];
|
||||
|
||||
if (exists $datasetsByName{$srcdataset}) {
|
||||
push @todo, $srcdataset;
|
||||
} else {
|
||||
$datasetsByName{$dataset}->{'origin'} = undef;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
$dataset =~ s/\Q$sourcefs\E//;
|
||||
chomp $dataset;
|
||||
my $childsourcefs = $sourcefs . $dataset;
|
||||
my $childtargetfs = $targetfs . $dataset;
|
||||
syncdataset($sourcehost, $childsourcefs, $targethost, $childtargetfs, $origin);
|
||||
}
|
||||
|
||||
# replicate cloned datasets and if this is the initial run, recreate them on the target
|
||||
foreach my $datasetProperties(@deferred) {
|
||||
my $dataset = $datasetProperties->{'name'};
|
||||
my $origin = $datasetProperties->{'origin'};
|
||||
|
||||
$dataset =~ s/\Q$sourcefs\E//;
|
||||
chomp $dataset;
|
||||
my $childsourcefs = $sourcefs . $dataset;
|
||||
my $childtargetfs = $targetfs . $dataset;
|
||||
syncdataset($sourcehost, $childsourcefs, $targethost, $childtargetfs, $origin);
|
||||
foreach my $dataset (@tosync) {
|
||||
my $origin = $datasetsByName{$dataset}->{'origin'};
|
||||
my $datasetPath = $dataset;
|
||||
$datasetPath =~ s/\Q$sourcefs\E//;
|
||||
chomp $datasetPath;
|
||||
my $childsourcefs = $sourcefs . $datasetPath;
|
||||
my $childtargetfs = $targetfs . $datasetPath;
|
||||
syncdataset($sourcehost, $childsourcefs, $targethost, $childtargetfs, $origin);
|
||||
$synced{$dataset} = 1;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
|||
Loading…
Reference in New Issue