mirror of https://github.com/jimsalterjrs/sanoid
Compare commits
3 commits: 73fab1b245...6419716093
| Author | SHA1 | Date |
|---|---|---|
| | 6419716093 | |
| | 6beef5fee6 | |
| | 114724b0a4 | |
````diff
@@ -26,9 +26,10 @@ apt install debhelper libcapture-tiny-perl libconfig-inifiles-perl pv lzop mbuff
 ```
 
-Clone this repo, build the debian package and install it (alternatively you can skip the package and do it manually as described below for CentOS):
+Clone this repo under /tmp (to make sure the apt user has access to the unpacked clone), build the debian package and install it (alternatively you can skip the package and do it manually as described below for CentOS):
 
 ```bash
+cd /tmp
 git clone https://github.com/jimsalterjrs/sanoid.git
 cd sanoid
 # checkout latest stable release or stay on master for bleeding edge stuff (but expect bugs!)
````
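The hunk is cut off before the build itself. For context, a hedged sketch of how the package build typically proceeds from the checked-out tree, assuming the debhelper tooling pulled in by the `apt install` line above; the `packages/debian` symlink step and the resulting package file name are assumptions, not part of this diff:

```bash
# Sketch only: build an unsigned .deb from the clone and install it.
# Assumes the repo keeps its debian/ packaging under packages/debian.
ln -s packages/debian .
dpkg-buildpackage -uc -us             # -uc -us: skip signing .changes/.dsc
sudo apt install ../sanoid_*_all.deb  # apt resolves the dependencies
```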
````diff
@@ -73,6 +74,7 @@ cpan # answer the questions and paste the following lines:
 Clone this repo, then put the executables and config files into the appropriate directories:
 
 ```bash
 cd /tmp
+# Download the repo as root to avoid changing permissions later
 sudo git clone https://github.com/jimsalterjrs/sanoid.git
 cd sanoid
````
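The hunk ends before showing which directories the executables and config files go to. A plausible continuation, with every destination path an assumption rather than something taken from this diff:

```bash
# Sketch only: destination paths are assumptions, check the repo's docs.
sudo cp sanoid syncoid /usr/local/sbin/
sudo mkdir -p /etc/sanoid
sudo cp sanoid.defaults.conf /etc/sanoid/
sudo cp sanoid.conf /etc/sanoid/sanoid.conf  # then edit to taste
```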
```diff
@@ -225,9 +227,9 @@ sudo launchctl load /Library/LaunchDaemons/net.openoid.Sanoid.plist
 
 ## Other OSes
 
-**Sanoid** depends on the Perl module Config::IniFiles and will not operate without it. Config::IniFiles may be installed from CPAN, though the project strongly recommends using your distribution's repositories instead.
+**Sanoid** depends on the Perl modules Config::IniFiles and Capture::Tiny and will not operate without them. These modules may be installed from CPAN, though the project strongly recommends using your distribution's repositories instead.
 
-**Syncoid** depends on ssh, pv, gzip, lzop, and mbuffer. It can run with reduced functionality in the absence of any or all of the above. SSH is only required for remote synchronization. Since v1.4.6, the default SSH transport cipher is chacha20-poly1305@openssh.com on newer FreeBSD and Ubuntu Xenial, and arcfour on other distributions; syncoid runs will fail if the default cipher is not available on either end of the transport.
+**Syncoid** depends on ssh, pv, gzip, lzop, and mbuffer, and also shares sanoid's dependency on Capture::Tiny. Capture::Tiny is mandatory, but syncoid can run with reduced functionality in the absence of any or all of the command-line tools. SSH is only required for remote synchronization. Since v1.4.6, the default SSH transport cipher is chacha20-poly1305@openssh.com on newer FreeBSD and Ubuntu Xenial, and arcfour on other distributions; syncoid runs will fail if the default cipher is not available on either end of the transport.
 
 ### General outline for installation
```
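For concreteness, both installation routes for the two modules. The Debian/Ubuntu package names below are the same ones the first hunk's `apt install` line already uses; the cpan fallback is the generic CPAN client invocation:

```bash
# Preferred: distribution packages (Debian/Ubuntu names, from the hunk above)
sudo apt install libconfig-inifiles-perl libcapture-tiny-perl

# Fallback: CPAN, for distributions that do not package the modules
sudo cpan Config::IniFiles Capture::Tiny
```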
syncoid
```diff
@@ -215,53 +215,86 @@ if (!defined $args{'recursive'}) {
         $exitcode = 2;
     }
 
-    my @deferred;
-
-    foreach my $datasetProperties(@datasets) {
+    my %datasetsByName;
+    foreach my $datasetProperties (@datasets) {
         my $dataset = $datasetProperties->{'name'};
         my $origin = $datasetProperties->{'origin'};
-        if ($origin eq "-" || defined $args{'no-clone-handling'}) {
-            $origin = undef;
-        } else {
-            # check if clone source is replicated too
-            my @values = split(/@/, $origin, 2);
-            my $srcdataset = $values[0];
-
-            my $found = 0;
-            foreach my $datasetProperties(@datasets) {
-                if ($datasetProperties->{'name'} eq $srcdataset) {
-                    $found = 1;
-                    last;
-                }
-            }
-
-            if ($found == 0) {
-                # clone source is not replicated, do a full replication
-                $origin = undef;
-            } else {
-                # clone source is replicated, defer until all non clones are replicated
-                push @deferred, $datasetProperties;
-                next;
-            }
-        }
-
-        $dataset =~ s/\Q$sourcefs\E//;
-        chomp $dataset;
-        my $childsourcefs = $sourcefs . $dataset;
-        my $childtargetfs = $targetfs . $dataset;
-        syncdataset($sourcehost, $childsourcefs, $targethost, $childtargetfs, $origin);
-    }
-
-    # replicate cloned datasets and if this is the initial run, recreate them on the target
-    foreach my $datasetProperties(@deferred) {
-        my $dataset = $datasetProperties->{'name'};
-        my $origin = $datasetProperties->{'origin'};
-
-        $dataset =~ s/\Q$sourcefs\E//;
-        chomp $dataset;
-        my $childsourcefs = $sourcefs . $dataset;
-        my $childtargetfs = $targetfs . $dataset;
-        syncdataset($sourcehost, $childsourcefs, $targethost, $childtargetfs, $origin);
+        $datasetsByName{$dataset} = $datasetProperties;
+
+        # Clean the 'origin' property
+        # (we set 'origin' to undef whenever we don't want to handle it during sync)
+        if ($origin eq "-" || defined $args{'no-clone-handling'}) {
+            $datasetProperties->{'origin'} = undef;
+        }
+    }
+
+    my %synced;
+
+    foreach my $dataset1Properties (@datasets) {
+        my $dataset1 = $dataset1Properties->{'name'};
+
+        # Collect all transitive dependencies of this dataset.
+        # A dataset can have two dependencies:
+        # - the parent dataset
+        # - the origin (if it is a clone)
+        my @todo = ($dataset1); # the datasets whose dependencies we still have to collect
+        my @tosync; # the datasets we have to sync (in the correct order)
+        my %tosyncSet; # set of synced datasets to check for dependency cycles
+        while (@todo) {
+            my $dataset = shift(@todo);
+
+            if (exists $synced{$dataset}) {
+                # We already synced this dataset, thus also all its dependencies => skip
+                next;
+            }
+
+            if (exists $tosyncSet{$dataset}) {
+                # We already processed this dataset once during this loop,
+                # so we do not need to do it again.
+                # This check is also necessary to break dependency cycles.
+                #
+                # NOTE:
+                # If there is a cycle, multiple syncoid runs might be necessary to replicate all datasets,
+                # and not all clone relationships will be preserved
+                # (it seems like huge effort to handle this case properly, and it should be quite rare in practice)
+                next;
+            }
+
+            unshift @tosync, $dataset;
+            $tosyncSet{$dataset} = 1;
+
+            my ($parent) = $dataset =~ /(.*)\/[^\/]+/;
+            if (defined $parent) {
+                # If parent is replicated too, sync it first
+                if (exists $datasetsByName{$parent}) {
+                    push @todo, $parent;
+                }
+            }
+
+            my $origin = $datasetsByName{$dataset}->{'origin'};
+            if (defined $origin) {
+                # If clone source is replicated too, sync it first
+                my @values = split(/@/, $origin, 2);
+                my $srcdataset = $values[0];
+
+                if (exists $datasetsByName{$srcdataset}) {
+                    push @todo, $srcdataset;
+                } else {
+                    $datasetsByName{$dataset}->{'origin'} = undef;
+                }
+            }
+        }
+
+        foreach my $dataset (@tosync) {
+            my $origin = $datasetsByName{$dataset}->{'origin'};
+            my $datasetPath = $dataset;
+            $datasetPath =~ s/\Q$sourcefs\E//;
+            chomp $datasetPath;
+            my $childsourcefs = $sourcefs . $datasetPath;
+            my $childtargetfs = $targetfs . $datasetPath;
+            syncdataset($sourcehost, $childsourcefs, $targethost, $childtargetfs, $origin);
+            $synced{$dataset} = 1;
+        }
     }
 }
```
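The net effect of the rewrite: instead of syncing non-clones in one pass and deferring clones to a second, syncoid now walks each dataset's dependencies (parent, then clone origin) and syncs in that order, with `%synced` preventing duplicate work. A hypothetical invocation that exercises this code path; the pool, dataset, and host names are invented for illustration:

```bash
# Recursive replication: children and clones under tank/vm are synced
# in dependency order, each clone after its origin dataset.
syncoid --recursive tank/vm root@backup:backup/vm

# Opt out of clone handling: every dataset is replicated in full,
# ignoring origins (the 'no-clone-handling' branch in the code above).
syncoid --recursive --no-clone-handling tank/vm root@backup:backup/vm
```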