Merge branch 'master' into patch-1

commit 2fceef65a3
Jim Salter, 2019-05-22 10:32:12 -04:00 (committed by GitHub)
12 changed files with 238 additions and 86 deletions


@@ -20,7 +20,9 @@
Install prerequisite software:
```bash
apt install debhelper libconfig-inifiles-perl pv lzop mbuffer
apt install debhelper libcapture-tiny-perl libconfig-inifiles-perl pv lzop mbuffer
```
Clone this repo, build the Debian package and install it (alternatively, you can skip the package and do it manually as described below for CentOS):
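For a rough illustration only (the exact commands are in the README below this hunk), the Debian build typically looks something like this; the `packages/debian` symlink location is an assumption:
```bash
# Illustrative sketch, not the canonical steps from this README
git clone https://github.com/jimsalterjrs/sanoid.git
cd sanoid
ln -s packages/debian .        # assumption: Debian packaging files ship under packages/debian
dpkg-buildpackage -uc -us      # build an unsigned .deb
sudo apt install ../sanoid_*_all.deb
```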
@@ -49,7 +51,7 @@ Install prerequisite software:
# Install and enable epel if we don't already have it, and git too
sudo yum install -y epel-release git
# Install the packages that Sanoid depends on:
sudo yum install -y perl-Config-IniFiles perl-Data-Dumper lzop mbuffer mhash pv
sudo yum install -y perl-Config-IniFiles perl-Data-Dumper perl-Capture-Tiny lzop mbuffer mhash pv
```
Clone this repo, then put the executables and config files into the appropriate directories:
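Purely as an illustration (the real commands follow in the README outside this hunk), a manual install could look like the sketch below; the destination paths and file names are assumptions:
```bash
# Illustrative sketch of a manual install; adjust paths to your layout
sudo mkdir -p /etc/sanoid
sudo cp sanoid syncoid findoid sleepymutex /usr/local/sbin/
sudo cp sanoid.defaults.conf /etc/sanoid/
sudo cp sanoid.conf /etc/sanoid/sanoid.conf   # starting point; edit retention policies to taste
```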
@@ -137,7 +139,7 @@ Now, proceed to configure [**Sanoid**](#configuration)
Install prerequisite software:
```bash
pkg install p5-Config-Inifiles pv mbuffer lzop
pkg install p5-Config-Inifiles p5-Capture-Tiny pv mbuffer lzop
```
**Additional notes:**


@@ -9,7 +9,7 @@ More prosaically, you can use Sanoid to create, automatically thin, and monitor
* * * * * TZ=UTC /usr/local/bin/sanoid --cron
```
`Note`: Using UTC as the timezone is recommended to prevent problems with daylight saving time
**`IMPORTANT NOTE`**: using a local timezone will cause a single hourly snapshot to be **skipped** during the `daylight->nodaylight` transition. To avoid that, use UTC as the timezone whenever possible.
And its /etc/sanoid/sanoid.conf might look something like this:
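The example configuration itself is outside this hunk; purely as an illustration, a minimal sanoid.conf might look like this (the dataset name and retention values are invented for the example):
```
[data/home]
	use_template = production

[template_production]
	frequently = 0
	hourly = 36
	daily = 30
	monthly = 3
	yearly = 0
	autosnap = yes
	autoprune = yes
```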


@@ -10,5 +10,5 @@ Vcs-Browser: https://github.com/jimsalterjrs/sanoid
Package: sanoid
Architecture: all
Depends: ${misc:Depends}, ${perl:Depends}, zfsutils-linux | zfs, libconfig-inifiles-perl
Depends: ${misc:Depends}, ${perl:Depends}, zfsutils-linux | zfs, libconfig-inifiles-perl, libcapture-tiny-perl
Description: Policy-driven snapshot management and replication tools


@@ -1,4 +1,4 @@
AUX sanoid.cron 45 BLAKE2B 3f6294bbbf485dc21a565cd2c8da05a42fb21cdaabdf872a21500f1a7338786c60d4a1fd188bbf81ce85f06a376db16998740996f47c049707a5109bdf02c052 SHA512 7676b32f21e517e8c84a097c7934b54097cf2122852098ea756093ece242125da3f6ca756a6fbb82fc348f84b94bfd61639e86e0bfa4bbe7abf94a8a4c551419
DIST sanoid-2.0.1.tar.gz 106981 BLAKE2B 824b7271266ac9f9bf1fef5374a442215c20a4f139081f77d5d8db2ec7db9b8b349d9d0394c76f9d421a957853af64ff069097243f69e7e4b83a804f5ba992a6 SHA512 9d999b0f071bc3c3ca956df11e1501fd72a842f7d3315ede3ab3b5e0a36351100b6edbab8448bba65a2e187e4e8f77ff24671ed33b28f2fca9bb6ad0801aba9d
EBUILD sanoid-2.0.1.ebuild 772 BLAKE2B befbc479b5c79faa88ae21649ed31d1af70dbecb60416e8c879fffd9a3cdf9f3f508e12d8edc9f4e0afbf0e6ab0491a36fdae2af995a1984072dc5bffd63fe1d SHA512 d90a8b8ae40634e2f2e1fa11ba787cfcb461b75fa65b19c0d9a34eb458f07f510bbb1992f4a0e7a0e4aa5f55a5acdc064779c9a4f993b30eb5cbf39037f97858
EBUILD sanoid-9999.ebuild 752 BLAKE2B 073533436c6f5c47b9e8410c898bf86b605d61c9b16a08b57253f5a87ad583e00d935ae9ea90f98b42c20dc1fbda0b9f1a8a7bf5be1cf3daf20afc640f1428ca SHA512 40ad34230fdb538bbdcda2d8149f37eac2a0e2accce5f79f7ba77d8e62e3fd78e997d8143baa0e050f548f90ce1cb6827e50b536b5e3acc444c6032f170251be
EBUILD sanoid-2.0.1.ebuild 796 BLAKE2B f3d633289d66c60fd26cb7731bc6b63533019f527aaec9ca8e5c0e748542d391153dbb55b17b8c981ca4fa4ae1fc8dc202b5480c13736fca250940b3b5ebb793 SHA512 d0143680c029ffe4ac37d97a979ed51527b4b8dd263d0c57e43a4650bf8a9bb8
EBUILD sanoid-9999.ebuild 776 BLAKE2B 416b8d04a9e5a84bce46d2a6f88eaefe03804944c03bc7f49b7a5b284b844212a6204402db3de3afa5d9c0545125d2631e7231c8cb2a3537bdcb10ea1be46b6a SHA512 98d8a30a13e75d7847ae9d60797d54078465bf75c6c6d9b6fd86075e342c0374


@@ -14,6 +14,7 @@ IUSE=""
DEPEND="app-arch/lzop
dev-perl/Config-IniFiles
dev-perl/Capture-Tiny
sys-apps/pv
sys-block/mbuffer
virtual/perl-Data-Dumper"


@@ -16,6 +16,7 @@ IUSE=""
DEPEND="app-arch/lzop
dev-perl/Config-IniFiles
dev-perl/Capture-Tiny
sys-apps/pv
sys-block/mbuffer
virtual/perl-Data-Dumper"


@@ -14,7 +14,7 @@ License: GPLv3
URL: https://github.com/jimsalterjrs/sanoid
Source0: https://github.com/jimsalterjrs/%{name}/archive/%{git_tag}/%{name}-%{version}.tar.gz
Requires: perl, mbuffer, lzop, pv, perl-Config-IniFiles
Requires: perl, mbuffer, lzop, pv, perl-Config-IniFiles, perl-Capture-Tiny
%if 0%{?_with_systemd}
Requires: systemd >= 212

sanoid

@@ -357,19 +357,6 @@ sub take_snapshots {
my @newsnaps;
# get utc timestamp of the current day for DST check
my $daystartUtc = timelocal(0, 0, 0, $datestamp{'mday'}, ($datestamp{'mon'}-1), $datestamp{'year'});
my ($isdst) = (localtime($daystartUtc))[8];
my $dstOffset = 0;
if ($isdst ne $datestamp{'isdst'}) {
# current dst is different than at the beginning of the day
if ($isdst) {
# DST ended in the current day
$dstOffset = 60*60;
}
}
if ($args{'verbose'}) { print "INFO: taking snapshots...\n"; }
foreach my $section (keys %config) {
if ($section =~ /^template/) { next; }
@@ -393,9 +380,6 @@ sub take_snapshots {
my @preferredtime;
my $lastpreferred;
# to avoid duplicates with DST
my $dateSuffix = "";
if ($type eq 'frequently') {
my $frequentslice = int($datestamp{'min'} / $config{$section}{'frequent_period'});
@@ -415,13 +399,6 @@ sub take_snapshots {
push @preferredtime,($datestamp{'mon'}-1); # january is month 0
push @preferredtime,$datestamp{'year'};
$lastpreferred = timelocal(@preferredtime);
if ($dstOffset ne 0) {
# timelocal doesn't take DST into account
$lastpreferred += $dstOffset;
# DST ended, avoid duplicates
$dateSuffix = "_y";
}
if ($lastpreferred > time()) { $lastpreferred -= 60*60; } # preferred time is later this hour - so look at last hour's
} elsif ($type eq 'daily') {
push @preferredtime,0; # try to hit 0 seconds
@@ -431,29 +408,10 @@ sub take_snapshots {
push @preferredtime,($datestamp{'mon'}-1); # january is month 0
push @preferredtime,$datestamp{'year'};
$lastpreferred = timelocal(@preferredtime);
# timelocal doesn't take DST into account
$lastpreferred += $dstOffset;
# check if the planned time has different DST flag than the current
my ($isdst) = (localtime($lastpreferred))[8];
if ($isdst ne $datestamp{'isdst'}) {
if (!$isdst) {
# correct DST difference
$lastpreferred -= 60*60;
}
}
if ($lastpreferred > time()) {
$lastpreferred -= 60*60*24;
if ($dstOffset ne 0) {
# because we are going back one day
# the DST difference has to be accounted
# for in reverse now
$lastpreferred -= 2*$dstOffset;
}
} # preferred time is later today - so look at yesterday's
$preferredtime[3] -= 1; # preferred time is later today - so look at yesterday's
$lastpreferred = timelocal(@preferredtime);
}
} elsif ($type eq 'weekly') {
# calculate offset in seconds for the desired weekday
my $offset = 0;
@@ -505,9 +463,9 @@ sub take_snapshots {
# use zfs (atomic) recursion if specified in config
if ($config{$section}{'zfs_recursion'}) {
push(@newsnaps, "$path\@autosnap_$datestamp{'sortable'}${dateSuffix}_$type\@");
push(@newsnaps, "$path\@autosnap_$datestamp{'sortable'}_$type\@");
} else {
push(@newsnaps, "$path\@autosnap_$datestamp{'sortable'}${dateSuffix}_$type");
push(@newsnaps, "$path\@autosnap_$datestamp{'sortable'}_$type");
}
}
}

syncoid

@@ -13,6 +13,7 @@ use Getopt::Long qw(:config auto_version auto_help);
use Pod::Usage;
use Time::Local;
use Sys::Hostname;
use Capture::Tiny ':all';
my $mbuffer_size = "16M";
@@ -270,6 +271,10 @@ sub syncdataset {
my ($sourcehost, $sourcefs, $targethost, $targetfs, $origin, $skipsnapshot) = @_;
my $stdout;
my $stderr;
my $exit;
my $sourcefsescaped = escapeshellparam($sourcefs);
my $targetfsescaped = escapeshellparam($targetfs);
@@ -511,25 +516,37 @@ sub syncdataset {
# and because this will only resume the receive to the next
# snapshot, do a normal sync after that
if (defined($receivetoken)) {
$sendoptions = getoptionsline(\@sendoptions, ('P','e','v','w'));
my $sendcmd = "$sourcesudocmd $zfscmd send $sendoptions -t $receivetoken";
my $recvcmd = "$targetsudocmd $zfscmd receive $recvoptions $receiveextraargs $forcedrecv $targetfsescaped";
my $pvsize = getsendsize($sourcehost,"","",$sourceisroot,$receivetoken);
my $disp_pvsize = readablebytes($pvsize);
if ($pvsize == 0) { $disp_pvsize = "UNKNOWN"; }
my $synccmd = buildsynccmd($sendcmd,$recvcmd,$pvsize,$sourceisroot,$targetisroot);
$sendoptions = getoptionsline(\@sendoptions, ('P','e','v','w'));
my $sendcmd = "$sourcesudocmd $zfscmd send $sendoptions -t $receivetoken";
my $recvcmd = "$targetsudocmd $zfscmd receive $recvoptions $receiveextraargs $forcedrecv $targetfsescaped";
my $pvsize = getsendsize($sourcehost,"","",$sourceisroot,$receivetoken);
my $disp_pvsize = readablebytes($pvsize);
if ($pvsize == 0) { $disp_pvsize = "UNKNOWN"; }
my $synccmd = buildsynccmd($sendcmd,$recvcmd,$pvsize,$sourceisroot,$targetisroot);
if (!$quiet) { print "Resuming interrupted zfs send/receive from $sourcefs to $targetfs (~ $disp_pvsize remaining):\n"; }
if ($debug) { print "DEBUG: $synccmd\n"; }
system("$synccmd") == 0 or do {
warn "CRITICAL ERROR: $synccmd failed: $?";
if ($exitcode < 2) { $exitcode = 2; }
return 0;
};
if (!$quiet) { print "Resuming interrupted zfs send/receive from $sourcefs to $targetfs (~ $disp_pvsize remaining):\n"; }
if ($debug) { print "DEBUG: $synccmd\n"; }
# a resumed transfer will only be done to the next snapshot,
# so do a normal sync cycle
return syncdataset($sourcehost, $sourcefs, $targethost, $targetfs, undef);
($stdout, $stderr, $exit) = tee {
system("$synccmd")
};
$exit == 0 or do {
if ($stderr =~ /\Qused in the initial send no longer exists\E/) {
if (!$quiet) { print "WARN: resetting partially receive state\n"; }
resetreceivestate($targethost,$targetfs,$targetisroot);
# do a normal sync cycle
return syncdataset($sourcehost, $sourcefs, $targethost, $targetfs, $origin);
} else {
warn "CRITICAL ERROR: $synccmd failed: $?";
if ($exitcode < 2) { $exitcode = 2; }
return 0;
}
};
# a resumed transfer will only be done to the next snapshot,
# so do a normal sync cycle
return syncdataset($sourcehost, $sourcefs, $targethost, $targetfs, undef);
}
# find most recent matching snapshot and do an -I
@@ -671,10 +688,25 @@ sub syncdataset {
if (!$quiet) { print "Sending incremental $sourcefs#$bookmarkescaped ... $nextsnapshot (~ $disp_pvsize):\n"; }
if ($debug) { print "DEBUG: $synccmd\n"; }
system("$synccmd") == 0 or do {
warn "CRITICAL ERROR: $synccmd failed: $?";
if ($exitcode < 2) { $exitcode = 2; }
return 0;
($stdout, $stderr, $exit) = tee {
system("$synccmd")
};
$exit == 0 or do {
if (!$resume && $stderr =~ /\Qcontains partially-complete state\E/) {
if (!$quiet) { print "WARN: resetting partially receive state\n"; }
resetreceivestate($targethost,$targetfs,$targetisroot);
system("$synccmd") == 0 or do {
warn "CRITICAL ERROR: $synccmd failed: $?";
if ($exitcode < 2) { $exitcode = 2; }
return 0;
}
} else {
warn "CRITICAL ERROR: $synccmd failed: $?";
if ($exitcode < 2) { $exitcode = 2; }
return 0;
}
};
$matchingsnap = $nextsnapshot;
@@ -686,10 +718,25 @@ sub syncdataset {
if (!$quiet) { print "Sending incremental $sourcefs#$bookmarkescaped ... $newsyncsnap (~ $disp_pvsize):\n"; }
if ($debug) { print "DEBUG: $synccmd\n"; }
system("$synccmd") == 0 or do {
warn "CRITICAL ERROR: $synccmd failed: $?";
if ($exitcode < 2) { $exitcode = 2; }
return 0;
($stdout, $stderr, $exit) = tee {
system("$synccmd")
};
$exit == 0 or do {
if (!$resume && $stderr =~ /\Qcontains partially-complete state\E/) {
if (!$quiet) { print "WARN: resetting partially receive state\n"; }
resetreceivestate($targethost,$targetfs,$targetisroot);
system("$synccmd") == 0 or do {
warn "CRITICAL ERROR: $synccmd failed: $?";
if ($exitcode < 2) { $exitcode = 2; }
return 0;
}
} else {
warn "CRITICAL ERROR: $synccmd failed: $?";
if ($exitcode < 2) { $exitcode = 2; }
return 0;
}
};
}
}
@@ -712,10 +759,25 @@ sub syncdataset {
if (!$quiet) { print "Sending incremental $sourcefs\@$matchingsnap ... $newsyncsnap (~ $disp_pvsize):\n"; }
if ($debug) { print "DEBUG: $synccmd\n"; }
system("$synccmd") == 0 or do {
warn "CRITICAL ERROR: $synccmd failed: $?";
if ($exitcode < 2) { $exitcode = 2; }
return 0;
($stdout, $stderr, $exit) = tee {
system("$synccmd")
};
$exit == 0 or do {
if (!$resume && $stderr =~ /\Qcontains partially-complete state\E/) {
if (!$quiet) { print "WARN: resetting partially receive state\n"; }
resetreceivestate($targethost,$targetfs,$targetisroot);
system("$synccmd") == 0 or do {
warn "CRITICAL ERROR: $synccmd failed: $?";
if ($exitcode < 2) { $exitcode = 2; }
return 0;
}
} else {
warn "CRITICAL ERROR: $synccmd failed: $?";
if ($exitcode < 2) { $exitcode = 2; }
return 0;
}
};
}
@@ -1676,6 +1738,26 @@ sub getoptionsline {
return $line;
}
sub resetreceivestate {
my ($rhost,$fs,$isroot) = @_;
my $fsescaped = escapeshellparam($fs);
if ($rhost ne '') {
$rhost = "$sshcmd $rhost";
# double escaping needed
$fsescaped = escapeshellparam($fsescaped);
}
if ($debug) { print "DEBUG: reset partial receive state of $fs...\n"; }
my $mysudocmd;
if ($isroot) { $mysudocmd = ''; } else { $mysudocmd = $sudocmd; }
my $resetcmd = "$rhost $mysudocmd $zfscmd receive -A $fsescaped";
if ($debug) { print "$resetcmd\n"; }
system("$resetcmd") == 0
or die "CRITICAL ERROR: $resetcmd failed: $?";
}
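For context, the command this helper assembles is ZFS's abort-resumable-receive invocation; run by hand against a hypothetical dataset it would look roughly like this:
```bash
# Discard the saved partially received (resumable) state on the target dataset;
# "tank/backup/data" is a placeholder name.
sudo zfs receive -A tank/backup/data
```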
__END__
=head1 NAME


@@ -13,7 +13,7 @@ set -x
POOL_NAME="sanoid-test-2"
POOL_TARGET="" # root
RESULT="/tmp/sanoid_test_result"
RESULT_CHECKSUM="a916d9cd46f4b80f285d069f3497d02671bbb1bfd12b43ef93531cbdaf89d55c"
RESULT_CHECKSUM="0a6336ccdc948c69563cb56994d190aebbc9b21588aef17bb97e51ae074f879a"
# UTC timestamp of start and end
START="1509141600"
@@ -49,6 +49,6 @@ done
saveSnapshotList "${POOL_NAME}" "${RESULT}"
# hourly daily monthly
verifySnapshotList "${RESULT}" 73 3 1 "${RESULT_CHECKSUM}"
verifySnapshotList "${RESULT}" 72 3 1 "${RESULT_CHECKSUM}"
# one more hour because of DST


@@ -0,0 +1,53 @@
#!/bin/bash
# test no-resume replication with a target containing a partially received replication stream
set -x
set -e
. ../../common/lib.sh
POOL_IMAGE="/tmp/syncoid-test-5.zpool"
MOUNT_TARGET="/tmp/syncoid-test-5.mount"
POOL_SIZE="1000M"
POOL_NAME="syncoid-test-5"
truncate -s "${POOL_SIZE}" "${POOL_IMAGE}"
zpool create -m none -f "${POOL_NAME}" "${POOL_IMAGE}"
function cleanUp {
zpool export "${POOL_NAME}"
}
# export pool in any case
trap cleanUp EXIT
zfs create "${POOL_NAME}"/src -o mountpoint="${MOUNT_TARGET}"
../../../syncoid --debug --compress=none "${POOL_NAME}"/src "${POOL_NAME}"/dst
dd if=/dev/urandom of="${MOUNT_TARGET}"/big_file bs=1M count=200
../../../syncoid --debug --compress=none --source-bwlimit=2m "${POOL_NAME}"/src "${POOL_NAME}"/dst &
syncoid_pid=$!
sleep 5
list_descendants ()
{
local children=$(ps -o pid= --ppid "$1")
for pid in $children
do
list_descendants "$pid"
done
echo "$children"
}
kill $(list_descendants $$) || true
wait
sleep 1
../../../syncoid --debug --compress=none --no-resume "${POOL_NAME}"/src "${POOL_NAME}"/dst | grep "reset partial receive state of syncoid"
../../../syncoid --debug --compress=none "${POOL_NAME}"/src "${POOL_NAME}"/dst
exit $?


@@ -0,0 +1,55 @@
#!/bin/bash
# test resumable replication where the original snapshot doesn't exist anymore
set -x
set -e
. ../../common/lib.sh
POOL_IMAGE="/tmp/syncoid-test-6.zpool"
MOUNT_TARGET="/tmp/syncoid-test-6.mount"
POOL_SIZE="1000M"
POOL_NAME="syncoid-test-6"
truncate -s "${POOL_SIZE}" "${POOL_IMAGE}"
zpool create -m none -f "${POOL_NAME}" "${POOL_IMAGE}"
function cleanUp {
zpool export "${POOL_NAME}"
}
# export pool in any case
trap cleanUp EXIT
zfs create "${POOL_NAME}"/src -o mountpoint="${MOUNT_TARGET}"
../../../syncoid --debug --compress=none "${POOL_NAME}"/src "${POOL_NAME}"/dst
dd if=/dev/urandom of="${MOUNT_TARGET}"/big_file bs=1M count=200
zfs snapshot "${POOL_NAME}"/src@big
../../../syncoid --debug --no-sync-snap --compress=none --source-bwlimit=2m "${POOL_NAME}"/src "${POOL_NAME}"/dst &
syncoid_pid=$!
sleep 5
list_descendants ()
{
local children=$(ps -o pid= --ppid "$1")
for pid in $children
do
list_descendants "$pid"
done
echo "$children"
}
kill $(list_descendants $$) || true
wait
sleep 1
zfs destroy "${POOL_NAME}"/src@big
../../../syncoid --debug --compress=none "${POOL_NAME}"/src "${POOL_NAME}"/dst # | grep "reset partial receive state of syncoid"
../../../syncoid --debug --compress=none "${POOL_NAME}"/src "${POOL_NAME}"/dst
exit $?