diff --git a/INSTALL b/INSTALL deleted file mode 100644 index 15c4896..0000000 --- a/INSTALL +++ /dev/null @@ -1,33 +0,0 @@ -SYNCOID -------- -Syncoid depends on ssh, pv, gzip, lzop, and mbuffer. It can run with reduced -functionality in the absence of any or all of the above. SSH is only required -for remote synchronization. On newer FreeBSD and Ubuntu Xenial -chacha20-poly1305@openssh.com, on other distributions arcfour crypto is the -default for SSH transport since v1.4.6. Syncoid runs will fail if one of them -is not available on either end of the transport. - -On Ubuntu: apt install pv lzop mbuffer -On CentOS: yum install lzo pv mbuffer lzop perl-Data-Dumper -On FreeBSD: pkg install pv mbuffer lzop - -FreeBSD notes: FreeBSD may place pv and lzop in somewhere other than - /usr/bin ; syncoid currently does not check path. - - Simplest path workaround is symlinks, eg: - root@bsd:~# ln -s /usr/local/bin/lzop /usr/bin/lzop - or similar, as appropriate, to create links in /usr/bin - to wherever the utilities actually are on your system. - - See note about mbuffer in FREEBSD.readme - - -SANOID ------- -Sanoid depends on the Perl module Config::IniFiles and will not operate -without it. Config::IniFiles may be installed from CPAN, though the project -strongly recommends using your distribution's repositories instead. - -On Ubuntu: apt install libconfig-inifiles-perl -On CentOS: yum install perl-Config-IniFiles -On FreeBSD: pkg install p5-Config-IniFiles diff --git a/INSTALL.md b/INSTALL.md new file mode 100644 index 0000000..88435d0 --- /dev/null +++ b/INSTALL.md @@ -0,0 +1,172 @@ +# Installation + +**Sanoid** and **Syncoid** are complementary but separate pieces of software. To install and configure them, follow the guide below for your operating system. Everything in `code blocks` should be copy-pasteable. If your OS isn't listed, a set of general instructions is at the end of the list and you can perform the process manually. 
+ + + +- [Installation](#installation) + - [Debian/Ubuntu](#debianubuntu) + - [CentOS](#centos) + - [FreeBSD](#freebsd) + - [Other OSes](#other-oses) +- [Configuration](#configuration) + - [Sanoid](#sanoid) + + + + +## Debian/Ubuntu + +Install prerequisite software: + +```bash +apt install libconfig-inifiles-perl pv lzop mbuffer +``` + +Clone this repo, build the debian package and install it (alternatively you can skip the package and do it manually like described below for CentOS): + +```bash +# Download the repo as root to avoid changing permissions later +sudo git clone https://github.com/jimsalterjrs/sanoid.git +cd sanoid +ln -s packages/debian . +dpkg-buildpackage -uc -us +apt install ../sanoid_*_all.deb +``` + +Enable sanoid timer: +```bash +# enable and start the sanoid timer +sudo systemctl enable sanoid.timer +sudo systemctl start sanoid.timer +``` + +## CentOS + +Install prerequisite software: + +```bash +# Install and enable epel if we don't already have it, and git too +sudo yum install -y epel-release git +# Install the packages that Sanoid depends on: +sudo yum install -y perl-Config-IniFiles perl-Data-Dumper lzop mbuffer mhash pv +``` + +Clone this repo, then put the executables and config files into the appropriate directories: + +```bash +# Download the repo as root to avoid changing permissions later +sudo git clone https://github.com/jimsalterjrs/sanoid.git +cd sanoid +# Install the executables +sudo cp sanoid syncoid findoid sleepymutex /usr/local/sbin +# Create the config directory +sudo mkdir /etc/sanoid +# Install default config +sudo cp sanoid.defaults.conf /etc/sanoid +# Create a blank config file +sudo touch /etc/sanoid/sanoid.conf +# Place the sample config in the conf directory for reference +sudo cp sanoid.conf /etc/sanoid/sanoid.example.conf +``` + +Create a systemd service: + +```bash +cat << "EOF" | sudo tee /etc/systemd/system/sanoid.service +[Unit] +Description=Snapshot ZFS Pool +Requires=zfs.target +After=zfs.target 
+ConditionFileNotEmpty=/etc/sanoid/sanoid.conf + +[Service] +Environment=TZ=UTC +Type=oneshot +ExecStart=/usr/local/sbin/sanoid --take-snapshots --verbose +EOF + +cat << "EOF" | sudo tee /etc/systemd/system/sanoid-prune.service +[Unit] +Description=Cleanup ZFS Pool +Requires=zfs.target +After=zfs.target sanoid.service +ConditionFileNotEmpty=/etc/sanoid/sanoid.conf + +[Service] +Environment=TZ=UTC +Type=oneshot +ExecStart=/usr/local/sbin/sanoid --prune-snapshots --verbose + +[Install] +WantedBy=sanoid.service +EOF +``` + +And a systemd timer that will execute **Sanoid** once per quarter hour +(Decrease the interval as suitable for configuration): + +```bash +cat << "EOF" | sudo tee /etc/systemd/system/sanoid.timer +[Unit] +Description=Run Sanoid Every 15 Minutes +Requires=sanoid.service + +[Timer] +OnCalendar=*:0/15 +Persistent=true + +[Install] +WantedBy=timers.target +EOF +``` + +Reload systemd and start our timer: +```bash +# Tell systemd about our new service definitions +sudo systemctl daemon-reload +# Enable and start the Sanoid timer +sudo systemctl enable sanoid.timer +sudo systemctl start sanoid.timer +``` + +Now, proceed to configure [**Sanoid**](#configuration) + +## FreeBSD + +Install prerequisite software: + +```bash +pkg install p5-Config-Inifiles pv mbuffer lzop +``` + +**Additional notes:** + +* FreeBSD may place pv and lzop in somewhere other than /usr/bin — syncoid currently does not check path. + +* Simplest path workaround is symlinks, eg `ln -s /usr/local/bin/lzop /usr/bin/lzop` or similar, as appropriate to create links in **/usr/bin** to wherever the utilities actually are on your system. + +* See note about mbuffer and other things in FREEBSD.readme + +## Other OSes + +**Sanoid** depends on the Perl module Config::IniFiles and will not operate without it. Config::IniFiles may be installed from CPAN, though the project strongly recommends using your distribution's repositories instead. 
+ +**Syncoid** depends on ssh, pv, gzip, lzop, and mbuffer. It can run with reduced functionality in the absence of any or all of the above. SSH is only required for remote synchronization. On newer FreeBSD and Ubuntu Xenial chacha20-poly1305@openssh.com, on other distributions arcfour crypto is the default for SSH transport since v1.4.6. Syncoid runs will fail if one of them is not available on either end of the transport. + +### General outline for installation + +1. Install prerequisites: Perl module Config::IniFiles, ssh, pv, gzip, lzop, and mbuffer +2. Download the **Sanoid** repo +3. Create the config directory `/etc/sanoid` and put `sanoid.defaults.conf` in there, and create `sanoid.conf` in it too +4. Create a cron job or a systemd timer that runs `sanoid --cron` once per minute + +# Configuration + +**Sanoid** won't do anything useful unless you tell it how to handle your ZFS datasets in `/etc/sanoid/sanoid.conf`. + +**Syncoid** is a command line utility that doesn't require any configuration, with all of its switches set at runtime. + +## Sanoid + +Take a look at the files `sanoid.defaults.conf` and` sanoid.conf.example` for all possible configuration options. Also have a look at the README.md diff --git a/README.md b/README.md index a8cdfc7..bc8ed83 100644 --- a/README.md +++ b/README.md @@ -184,11 +184,11 @@ As of 1.4.18, syncoid also automatically supports and enables resume of interrup + --source-bwlimit - This is the bandwidth limit imposed upon the source. This is mainly used if the target does not have mbuffer installed, but bandwidth limites are desired. + This is the bandwidth limit in bytes (kbytes, mbytes, etc) per second imposed upon the source. This is mainly used if the target does not have mbuffer installed, but bandwidth limits are desired. + --target-bw-limit - This is the bandwidth limit imposed upon the target. This is mainly used if the source does not have mbuffer installed, but bandwidth limites are desired. 
+ This is the bandwidth limit in bytes (kbytes, mbytes, etc) per second imposed upon the target. This is mainly used if the source does not have mbuffer installed, but bandwidth limits are desired. + --no-command-checks @@ -202,6 +202,10 @@ As of 1.4.18, syncoid also automatically supports and enables resume of interrup This argument tells syncoid to restrict itself to existing snapshots, instead of creating a semi-ephemeral syncoid snapshot at execution time. Especially useful in multi-target (A->B, A->C) replication schemes, where you might otherwise accumulate a large number of foreign syncoid snapshots. ++ --create-bookmark + + This argument tells syncoid to create a zfs bookmark for the newest snapshot after it got replicated successfully. The bookmark name will be equal to the snapshot name. Only works in combination with the --no-sync-snap option. This can be very useful for irregular replication where the last matching snapshot on the source was already deleted but the bookmark remains so a replication is still possible. 
+ + --no-clone-rollback Do not rollback clones on target diff --git a/packages/gentoo/sys-fs/sanoid/Manifest b/packages/gentoo/sys-fs/sanoid/Manifest new file mode 100644 index 0000000..4c629f9 --- /dev/null +++ b/packages/gentoo/sys-fs/sanoid/Manifest @@ -0,0 +1,4 @@ +AUX sanoid.cron 45 BLAKE2B 3f6294bbbf485dc21a565cd2c8da05a42fb21cdaabdf872a21500f1a7338786c60d4a1fd188bbf81ce85f06a376db16998740996f47c049707a5109bdf02c052 SHA512 7676b32f21e517e8c84a097c7934b54097cf2122852098ea756093ece242125da3f6ca756a6fbb82fc348f84b94bfd61639e86e0bfa4bbe7abf94a8a4c551419 +DIST sanoid-2.0.1.tar.gz 106981 BLAKE2B 824b7271266ac9f9bf1fef5374a442215c20a4f139081f77d5d8db2ec7db9b8b349d9d0394c76f9d421a957853af64ff069097243f69e7e4b83a804f5ba992a6 SHA512 9d999b0f071bc3c3ca956df11e1501fd72a842f7d3315ede3ab3b5e0a36351100b6edbab8448bba65a2e187e4e8f77ff24671ed33b28f2fca9bb6ad0801aba9d +EBUILD sanoid-2.0.1.ebuild 772 BLAKE2B befbc479b5c79faa88ae21649ed31d1af70dbecb60416e8c879fffd9a3cdf9f3f508e12d8edc9f4e0afbf0e6ab0491a36fdae2af995a1984072dc5bffd63fe1d SHA512 d90a8b8ae40634e2f2e1fa11ba787cfcb461b75fa65b19c0d9a34eb458f07f510bbb1992f4a0e7a0e4aa5f55a5acdc064779c9a4f993b30eb5cbf39037f97858 +EBUILD sanoid-9999.ebuild 752 BLAKE2B 073533436c6f5c47b9e8410c898bf86b605d61c9b16a08b57253f5a87ad583e00d935ae9ea90f98b42c20dc1fbda0b9f1a8a7bf5be1cf3daf20afc640f1428ca SHA512 40ad34230fdb538bbdcda2d8149f37eac2a0e2accce5f79f7ba77d8e62e3fd78e997d8143baa0e050f548f90ce1cb6827e50b536b5e3acc444c6032f170251be diff --git a/packages/gentoo/sys-fs/sanoid/files/sanoid.cron b/packages/gentoo/sys-fs/sanoid/files/sanoid.cron new file mode 100644 index 0000000..09169ad --- /dev/null +++ b/packages/gentoo/sys-fs/sanoid/files/sanoid.cron @@ -0,0 +1 @@ +* * * * * root TZ=UTC /usr/bin/sanoid --cron diff --git a/packages/gentoo/sys-fs/sanoid/sanoid-2.0.1.ebuild b/packages/gentoo/sys-fs/sanoid/sanoid-2.0.1.ebuild new file mode 100644 index 0000000..5a8d67e --- /dev/null +++ b/packages/gentoo/sys-fs/sanoid/sanoid-2.0.1.ebuild @@ -0,0 
+1,36 @@ +# Copyright 2019 Gentoo Authors +# Distributed under the terms of the GNU General Public License v2 + +EAPI=7 + +DESCRIPTION="Policy-driven snapshot management and replication tools for ZFS" +HOMEPAGE="https://github.com/jimsalterjrs/sanoid" +SRC_URI="https://github.com/jimsalterjrs/${PN}/archive/v${PV}.tar.gz -> ${P}.tar.gz" + +LICENSE="GPL-3.0" +SLOT="0" +KEYWORDS="~x86 ~amd64" +IUSE="" + +DEPEND="app-arch/lzop + dev-perl/Config-IniFiles + sys-apps/pv + sys-block/mbuffer + virtual/perl-Data-Dumper" +RDEPEND="${DEPEND}" +BDEPEND="" + +DOCS=( README.md ) + +src_install() { + dobin findoid + dobin sanoid + dobin sleepymutex + dobin syncoid + keepdir /etc/${PN} + insinto /etc/${PN} + doins sanoid.conf + doins sanoid.defaults.conf + insinto /etc/cron.d + newins "${FILESDIR}/${PN}.cron" ${PN} +} diff --git a/packages/gentoo/sys-fs/sanoid/sanoid-9999.ebuild b/packages/gentoo/sys-fs/sanoid/sanoid-9999.ebuild new file mode 100644 index 0000000..7eaf509 --- /dev/null +++ b/packages/gentoo/sys-fs/sanoid/sanoid-9999.ebuild @@ -0,0 +1,38 @@ +# Copyright 2019 Gentoo Authors +# Distributed under the terms of the GNU General Public License v2 + +EAPI=7 + +EGIT_REPO_URI="https://github.com/jimsalterjrs/${PN}.git" +inherit git-r3 + +DESCRIPTION="Policy-driven snapshot management and replication tools for ZFS" +HOMEPAGE="https://github.com/jimsalterjrs/sanoid" + +LICENSE="GPL-3.0" +SLOT="0" +KEYWORDS="**" +IUSE="" + +DEPEND="app-arch/lzop + dev-perl/Config-IniFiles + sys-apps/pv + sys-block/mbuffer + virtual/perl-Data-Dumper" +RDEPEND="${DEPEND}" +BDEPEND="" + +DOCS=( README.md ) + +src_install() { + dobin findoid + dobin sanoid + dobin sleepymutex + dobin syncoid + keepdir /etc/${PN} + insinto /etc/${PN} + doins sanoid.conf + doins sanoid.defaults.conf + insinto /etc/cron.d + newins "${FILESDIR}/${PN}.cron" ${PN} +} diff --git a/sanoid b/sanoid index 82cfce9..daa81a1 100755 --- a/sanoid +++ b/sanoid @@ -502,7 +502,13 @@ sub take_snapshots { # update to most current 
possible datestamp %datestamp = get_date(); # print "we should have had a $type snapshot of $path $maxage seconds ago; most recent is $newestage seconds old.\n"; - push(@newsnaps, "$path\@autosnap_$datestamp{'sortable'}${dateSuffix}_$type"); + + # use zfs (atomic) recursion if specified in config + if ($config{$section}{'zfs_recursion'}) { + push(@newsnaps, "$path\@autosnap_$datestamp{'sortable'}${dateSuffix}_$type\@"); + } else { + push(@newsnaps, "$path\@autosnap_$datestamp{'sortable'}${dateSuffix}_$type"); + } } } } @@ -510,8 +516,16 @@ sub take_snapshots { if ( (scalar(@newsnaps)) > 0) { foreach my $snap ( @newsnaps ) { - my $dataset = (split '@', $snap)[0]; - my $snapname = (split '@', $snap)[1]; + my $extraMessage = ""; + my @split = split '@', $snap, -1; + my $recursiveFlag = 0; + if (scalar(@split) == 3) { + $recursiveFlag = 1; + $extraMessage = " (zfs recursive)"; + chop $snap; + } + my $dataset = $split[0]; + my $snapname = $split[1]; my $presnapshotfailure = 0; my $ret = 0; if ($config{$dataset}{'pre_snapshot_script'}) { @@ -532,17 +546,22 @@ sub take_snapshots { $presnapshotfailure = 1; } } - if ($args{'verbose'}) { print "taking snapshot $snap\n"; } + if ($args{'verbose'}) { print "taking snapshot $snap$extraMessage\n"; } if (!$args{'readonly'}) { - system($zfs, "snapshot", "$snap") == 0 - or warn "CRITICAL ERROR: $zfs snapshot $snap failed, $?"; + if ($recursiveFlag) { + system($zfs, "snapshot", "-r", "$snap") == 0 + or warn "CRITICAL ERROR: $zfs snapshot -r $snap failed, $?"; + } else { + system($zfs, "snapshot", "$snap") == 0 + or warn "CRITICAL ERROR: $zfs snapshot $snap failed, $?"; + } } if ($config{$dataset}{'post_snapshot_script'}) { if (!$presnapshotfailure or $config{$dataset}{'force_post_snapshot_script'}) { $ENV{'SANOID_TARGET'} = $dataset; $ENV{'SANOID_SNAPNAME'} = $snapname; if ($args{'verbose'}) { print "executing post_snapshot_script '".$config{$dataset}{'post_snapshot_script'}."' on dataset '$dataset'\n"; } - + if (!$args{'readonly'}) 
{ runscript('post_snapshot_script',$dataset); } @@ -755,6 +774,8 @@ sub init { # we'll use these later to normalize potentially true and false values on any toggle keys my @toggles = ('autosnap','autoprune','monitor_dont_warn','monitor_dont_crit','monitor','recursive','process_children_only','skip_children','no_inconsistent_snapshot','force_post_snapshot_script'); + # recursive is defined as toggle but can also have the special value "zfs", it is kept to be backward compatible + my @istrue=(1,"true","True","TRUE","yes","Yes","YES","on","On","ON"); my @isfalse=(0,"false","False","FALSE","no","No","NO","off","Off","OFF"); @@ -850,26 +871,49 @@ sub init { } # how 'bout some recursion? =) + if ($config{$section}{'zfs_recursion'} && $config{$section}{'zfs_recursion'} == 1 && $config{$section}{'autosnap'} == 1) { + warn "ignored autosnap configuration for '$section' because it's part of a zfs recursion.\n"; + $config{$section}{'autosnap'} = 0; + } + my $recursive = $ini{$section}{'recursive'} && grep( /^$ini{$section}{'recursive'}$/, @istrue ); + my $zfsRecursive = $ini{$section}{'recursive'} && $ini{$section}{'recursive'} =~ /zfs/i; my $skipChildren = $ini{$section}{'skip_children'} && grep( /^$ini{$section}{'skip_children'}$/, @istrue ); my @datasets; - if ($recursive || $skipChildren) { + if ($zfsRecursive || $recursive || $skipChildren) { + if ($zfsRecursive) { + $config{$section}{'zfs_recursion'} = 1; + } + @datasets = getchilddatasets($config{$section}{'path'}); DATASETS: foreach my $dataset(@datasets) { chomp $dataset; - if ($skipChildren) { - if ($args{'debug'}) { print "DEBUG: ignoring $dataset.\n"; } - delete $config{$dataset}; - next DATASETS; - } + if ($zfsRecursive) { + # don't try to take the snapshot ourself, recursive zfs snapshot will take care of that + $config{$dataset}{'autosnap'} = 0; - foreach my $key (keys %{$config{$section}} ) { - if (! 
($key =~ /template|recursive|children_only/)) { - if ($args{'debug'}) { print "DEBUG: recursively setting $key from $section to $dataset.\n"; } - $config{$dataset}{$key} = $config{$section}{$key}; + foreach my $key (keys %{$config{$section}} ) { + if (! ($key =~ /template|recursive|children_only|autosnap/)) { + if ($args{'debug'}) { print "DEBUG: recursively setting $key from $section to $dataset.\n"; } + $config{$dataset}{$key} = $config{$section}{$key}; + } + } + } else { + if ($skipChildren) { + if ($args{'debug'}) { print "DEBUG: ignoring $dataset.\n"; } + delete $config{$dataset}; + next DATASETS; + } + + foreach my $key (keys %{$config{$section}} ) { + if (! ($key =~ /template|recursive|children_only/)) { + if ($args{'debug'}) { print "DEBUG: recursively setting $key from $section to $dataset.\n"; } + $config{$dataset}{$key} = $config{$section}{$key}; + } } } + $config{$dataset}{'path'} = $dataset; $config{$dataset}{'initialized'} = 1; } @@ -1422,7 +1466,7 @@ sub getchilddatasets { my $fs = shift; my $mysudocmd = ''; - my $getchildrencmd = "$mysudocmd $zfs list -o name -Hr $fs |"; + my $getchildrencmd = "$mysudocmd $zfs list -o name -t filesystem,volume -Hr $fs |"; if ($args{'debug'}) { print "DEBUG: getting list of child datasets on $fs using $getchildrencmd...\n"; } open FH, $getchildrencmd; my @children = ; diff --git a/sanoid.conf b/sanoid.conf index 9f13105..6bd5c62 100644 --- a/sanoid.conf +++ b/sanoid.conf @@ -69,6 +69,27 @@ daily_warn = 48 daily_crit = 60 +[template_hotspare] + autoprune = yes + frequently = 0 + hourly = 30 + daily = 90 + monthly = 3 + yearly = 0 + + ### don't take new snapshots - snapshots on backup + ### datasets are replicated in from source, not + ### generated locally + autosnap = no + + ### monitor hourlies and dailies, but don't warn or + ### crit until they're over 4h old, since replication + ### is typically hourly only + hourly_warn = 4h + hourly_crit = 6h + daily_warn = 2d + daily_crit = 4d + [template_scripts] ### dataset 
and snapshot name will be supplied as environment variables ### for all pre/post/prune scripts ($SANOID_TARGET, $SANOID_SNAPNAME) diff --git a/syncoid b/syncoid index bd558c6..c2a0649 100755 --- a/syncoid +++ b/syncoid @@ -23,18 +23,29 @@ GetOptions(\%args, "no-command-checks", "monitor-version", "compress=s", "dumpsn "source-bwlimit=s", "target-bwlimit=s", "sshkey=s", "sshport=i", "sshcipher|c=s", "sshoption|o=s@", "debug", "quiet", "no-stream", "no-sync-snap", "no-resume", "exclude=s@", "skip-parent", "identifier=s", "no-clone-handling", "no-privilege-elevation", "force-delete", "no-clone-rollback", "no-rollback", + "create-bookmark", "mbuffer-size=s" => \$mbuffer_size) or pod2usage(2); my %compressargs = %{compressargset($args{'compress'} || 'default')}; # Can't be done with GetOptions arg, as default still needs to be set -my $sendoptions = ''; +my @sendoptions = (); if (length $args{'sendoptions'}) { - $sendoptions = $args{'sendoptions'} + @sendoptions = parsespecialoptions($args{'sendoptions'}); + if (! defined($sendoptions[0])) { + warn "invalid send options!"; + pod2usage(2); + exit 127; + } } -my $recvoptions = ''; +my @recvoptions = (); if (length $args{'recvoptions'}) { - $recvoptions = $args{'recvoptions'} + @recvoptions = parsespecialoptions($args{'recvoptions'}); + if (! defined($recvoptions[0])) { + warn "invalid receive options!"; + pod2usage(2); + exit 127; + } } @@ -67,13 +78,14 @@ my $resume = !$args{'no-resume'}; $ENV{'PATH'} = $ENV{'PATH'} . ":/bin:/usr/bin:/sbin"; my $zfscmd = 'zfs'; +my $zpoolcmd = 'zpool'; my $sshcmd = 'ssh'; my $pscmd = 'ps'; my $pvcmd = 'pv'; my $mbuffercmd = 'mbuffer'; my $sudocmd = 'sudo'; -my $mbufferoptions = '-q -s 128k -m $mbuffer_size 2>/dev/null'; +my $mbufferoptions = "-q -s 128k -m $mbuffer_size 2>/dev/null"; # currently using POSIX compatible command to check for program existence because we aren't depending on perl # being present on remote machines. 
my $checkcmd = 'command -v'; @@ -127,6 +139,12 @@ if (!defined $args{'recursive'}) { if ($debug) { print "DEBUG: recursive sync of $sourcefs.\n"; } my @datasets = getchilddatasets($sourcehost, $sourcefs, $sourceisroot); + if (!@datasets) { + warn "CRITICAL ERROR: no datasets found"; + @datasets = (); + $exitcode = 2; + } + my @deferred; foreach my $datasetProperties(@datasets) { @@ -230,7 +248,6 @@ sub getchilddatasets { if (defined $args{'exclude'}) { my $excludes = $args{'exclude'}; foreach (@$excludes) { - print("$dataset\n"); if ($dataset =~ /$_/) { if ($debug) { print "DEBUG: excluded $dataset because of $_\n"; } next DATASETS; @@ -266,6 +283,12 @@ sub syncdataset { my $sync = getzfsvalue($sourcehost,$sourcefs,$sourceisroot,'syncoid:sync'); + if (!defined $sync) { + # zfs already printed the corresponding error + if ($exitcode < 2) { $exitcode = 2; } + return 0; + } + if ($sync eq 'true' || $sync eq '-' || $sync eq '') { # empty is handled the same as unset (aka: '-') # definitely sync this dataset - if a host is called 'true' or '-', then you're special @@ -355,6 +378,9 @@ sub syncdataset { # with ZFS on Linux (possibly OpenZFS in general) when setting/unsetting readonly. #my $originaltargetreadonly; + my $sendoptions = getoptionsline(\@sendoptions, ('D','L','P','R','c','e','h','p','v','w')); + my $recvoptions = getoptionsline(\@recvoptions, ('h','o','x','u','v')); + # sync 'em up. if (! $targetexists) { # do an initial sync from the oldest source snapshot @@ -379,7 +405,13 @@ sub syncdataset { } # if --no-stream is specified, our full needs to be the newest snapshot, not the oldest. 
- if (defined $args{'no-stream'}) { $oldestsnap = getnewestsnapshot(\%snaps); } + if (defined $args{'no-stream'}) { + if (defined ($args{'no-sync-snap'}) ) { + $oldestsnap = getnewestsnapshot(\%snaps); + } else { + $oldestsnap = $newsyncsnap; + } + } my $oldestsnapescaped = escapeshellparam($oldestsnap); my $sendcmd = "$sourcesudocmd $zfscmd send $sendoptions $sourcefsescaped\@$oldestsnapescaped"; @@ -419,6 +451,12 @@ sub syncdataset { return 0; } system($synccmd) == 0 or do { + if (defined $origin) { + print "INFO: clone creation failed, trying ordinary replication as fallback\n"; + syncdataset($sourcehost, $sourcefs, $targethost, $targetfs, undef, 1); + return 0; + } + warn "CRITICAL ERROR: $synccmd failed: $?"; if ($exitcode < 2) { $exitcode = 2; } return 0; @@ -473,6 +511,7 @@ sub syncdataset { # and because this will ony resume the receive to the next # snapshot, do a normal sync after that if (defined($receivetoken)) { + $sendoptions = getoptionsline(\@sendoptions, ('P','e','v','w')); my $sendcmd = "$sourcesudocmd $zfscmd send $sendoptions -t $receivetoken"; my $recvcmd = "$targetsudocmd $zfscmd receive $recvoptions $receiveextraargs $forcedrecv $targetfsescaped"; my $pvsize = getsendsize($sourcehost,"","",$sourceisroot,$receivetoken); @@ -583,6 +622,7 @@ sub syncdataset { if ($matchingsnap eq $newsyncsnap) { # barf some text but don't touch the filesystem if (!$quiet) { print "INFO: no snapshots on source newer than $newsyncsnap on target. 
Nothing to do, not syncing.\n"; } + return 0; } else { my $matchingsnapescaped = escapeshellparam($matchingsnap); # rollback target to matchingsnap @@ -622,9 +662,9 @@ sub syncdataset { my $pvsize = 0; my $disp_pvsize = "UNKNOWN"; + $sendoptions = getoptionsline(\@sendoptions, ('L','c','e','w')); if ($nextsnapshot) { my $nextsnapshotescaped = escapeshellparam($nextsnapshot); - my $sendcmd = "$sourcesudocmd $zfscmd send $sendoptions -i $sourcefsescaped#$bookmarkescaped $sourcefsescaped\@$nextsnapshotescaped"; my $recvcmd = "$targetsudocmd $zfscmd receive $recvoptions $receiveextraargs $forcedrecv $targetfsescaped"; my $synccmd = buildsynccmd($sendcmd,$recvcmd,$pvsize,$sourceisroot,$targetisroot); @@ -657,6 +697,12 @@ sub syncdataset { # do a normal replication if bookmarks aren't used or if previous # bookmark replication was only done to the next oldest snapshot if (!$bookmark || $nextsnapshot) { + if ($matchingsnap eq $newsyncsnap) { + # edge case: bookmark replication used the latest snapshot + return 0; + } + + $sendoptions = getoptionsline(\@sendoptions, ('D','L','P','R','c','e','h','p','v','w')); my $sendcmd = "$sourcesudocmd $zfscmd send $sendoptions $args{'streamarg'} $sourcefsescaped\@$matchingsnapescaped $sourcefsescaped\@$newsyncsnapescaped"; my $recvcmd = "$targetsudocmd $zfscmd receive $recvoptions $receiveextraargs $forcedrecv $targetfsescaped"; my $pvsize = getsendsize($sourcehost,"$sourcefs\@$matchingsnap","$sourcefs\@$newsyncsnap",$sourceisroot); @@ -680,7 +726,36 @@ sub syncdataset { } } - if (!defined $args{'no-sync-snap'}) { + if (defined $args{'no-sync-snap'}) { + if (defined $args{'create-bookmark'}) { + my $bookmarkcmd; + if ($sourcehost ne '') { + $bookmarkcmd = "$sshcmd $sourcehost " . 
escapeshellparam("$sourcesudocmd $zfscmd bookmark $sourcefsescaped\@$newsyncsnapescaped $sourcefsescaped\#$newsyncsnapescaped"); + } else { + $bookmarkcmd = "$sourcesudocmd $zfscmd bookmark $sourcefsescaped\@$newsyncsnapescaped $sourcefsescaped\#$newsyncsnapescaped"; + } + if ($debug) { print "DEBUG: $bookmarkcmd\n"; } + system($bookmarkcmd) == 0 or do { + # fallback: assume naming conflict and try again with guid based suffix + my $guid = $snaps{'source'}{$newsyncsnap}{'guid'}; + $guid = substr($guid, 0, 6); + + if (!$quiet) { print "INFO: bookmark creation failed, retrying with guid based suffix ($guid)...\n"; } + + if ($sourcehost ne '') { + $bookmarkcmd = "$sshcmd $sourcehost " . escapeshellparam("$sourcesudocmd $zfscmd bookmark $sourcefsescaped\@$newsyncsnapescaped $sourcefsescaped\#$newsyncsnapescaped$guid"); + } else { + $bookmarkcmd = "$sourcesudocmd $zfscmd bookmark $sourcefsescaped\@$newsyncsnapescaped $sourcefsescaped\#$newsyncsnapescaped$guid"; + } + if ($debug) { print "DEBUG: $bookmarkcmd\n"; } + system($bookmarkcmd) == 0 or do { + warn "CRITICAL ERROR: $bookmarkcmd failed: $?"; + if ($exitcode < 2) { $exitcode = 2; } + return 0; + } + }; + } + } else { + # prune obsolete sync snaps on source and target (only if this run created ones). 
pruneoldsyncsnaps($sourcehost,$sourcefs,$newsyncsnap,$sourceisroot,keys %{ $snaps{'source'}}); pruneoldsyncsnaps($targethost,$targetfs,$newsyncsnap,$targetisroot,keys %{ $snaps{'target'}}); @@ -820,6 +895,7 @@ sub checkcommands { if (!defined $avail{'sourcecompress'}) { $avail{'sourcecompress'} = ''; } if (!defined $avail{'targetcompress'}) { $avail{'targetcompress'} = ''; } + if (!defined $avail{'localcompress'}) { $avail{'localcompress'} = ''; } if (!defined $avail{'sourcembuffer'}) { $avail{'sourcembuffer'} = ''; } if (!defined $avail{'targetmbuffer'}) { $avail{'targetmbuffer'} = ''; } @@ -889,14 +965,32 @@ sub checkcommands { # check for ZFS resume feature support if ($resume) { - my $resumechkcmd = "$zfscmd get -d 0 receive_resume_token"; + my @parts = split ('/', $sourcefs); + my $srcpool = $parts[0]; + @parts = split ('/', $targetfs); + my $dstpool = $parts[0]; + + $srcpool = escapeshellparam($srcpool); + $dstpool = escapeshellparam($dstpool); + + if ($sourcehost ne '') { + # double escaping needed + $srcpool = escapeshellparam($srcpool); + } + + if ($targethost ne '') { + # double escaping needed + $dstpool = escapeshellparam($dstpool); + } + + my $resumechkcmd = "$zpoolcmd get -o value -H feature\@extensible_dataset"; if ($debug) { print "DEBUG: checking availability of zfs resume feature on source...\n"; } - $avail{'sourceresume'} = system("$sourcessh $resumechkcmd >/dev/null 2>&1"); + $avail{'sourceresume'} = system("$sourcessh $resumechkcmd $srcpool 2>/dev/null | grep '\\(active\\|enabled\\)' >/dev/null 2>&1"); $avail{'sourceresume'} = $avail{'sourceresume'} == 0 ? 1 : 0; if ($debug) { print "DEBUG: checking availability of zfs resume feature on target...\n"; } - $avail{'targetresume'} = system("$targetssh $resumechkcmd >/dev/null 2>&1"); + $avail{'targetresume'} = system("$targetssh $resumechkcmd $dstpool 2>/dev/null | grep '\\(active\\|enabled\\)' >/dev/null 2>&1"); $avail{'targetresume'} = $avail{'targetresume'} == 0 ? 
1 : 0; if ($avail{'sourceresume'} == 0 || $avail{'targetresume'} == 0) { @@ -981,6 +1075,11 @@ sub getzfsvalue { open FH, "$rhost $mysudocmd $zfscmd get -H $property $fsescaped |"; my $value = ; close FH; + + if (!defined $value) { + return undef; + } + my @values = split(/\t/,$value); $value = $values[2]; return $value; @@ -1202,6 +1301,7 @@ sub newsyncsnap { my %date = getdate(); my $snapname = "syncoid\_$identifier$hostid\_$date{'stamp'}"; my $snapcmd = "$rhost $mysudocmd $zfscmd snapshot $fsescaped\@$snapname\n"; + if ($debug) { print "DEBUG: creating sync snapshot using \"$snapcmd\"...\n"; } system($snapcmd) == 0 or do { warn "CRITICAL ERROR: $snapcmd failed: $?"; if ($exitcode < 2) { $exitcode = 2; } @@ -1250,6 +1350,13 @@ sub getssh { $socket = "/tmp/syncoid-$remoteuser-$rhost-" . time(); open FH, "$sshcmd -M -S $socket -o ControlPersist=1m $args{'sshport'} $rhost exit |"; close FH; + + system("$sshcmd -S $socket $rhost echo -n") == 0 or do { + my $code = $? >> 8; + warn "CRITICAL ERROR: ssh connection echo test failed for $rhost with exit code $code"; + exit(2); + }; + $rhost = "-S $socket $rhost"; } else { my $localuid = $<; @@ -1401,6 +1508,12 @@ sub getsendsize { $snaps = "-t $receivetoken"; } + my $sendoptions; + if (defined($receivetoken)) { + $sendoptions = getoptionsline(\@sendoptions, ('e')); + } else { + $sendoptions = getoptionsline(\@sendoptions, ('D','L','R','c','e','h','p','v','w')); + } my $getsendsizecmd = "$sourcessh $mysudocmd $zfscmd send $sendoptions -nP $snaps"; if ($debug) { print "DEBUG: getting estimated transfer size from source $sourcehost using \"$getsendsizecmd 2>&1 |\"...\n"; } @@ -1471,7 +1584,7 @@ sub getreceivetoken() { my ($rhost,$fs,$isroot) = @_; my $token = getzfsvalue($rhost,$fs,$isroot,"receive_resume_token"); - if ($token ne '-' && $token ne '') { + if (defined $token && $token ne '-' && $token ne '') { return $token; } @@ -1482,6 +1595,68 @@ sub getreceivetoken() { return } +sub parsespecialoptions { + my ($line) = @_; 
+ + my @options = (); + + my @values = split(/ /, $line); + + my $optionValue = 0; + my $lastOption; + + foreach my $value (@values) { + if ($optionValue ne 0) { + my %item = ( + "option" => $lastOption, + "line" => "-$lastOption $value", + ); + + push @options, \%item; + $optionValue = 0; + next; + } + + for my $char (split //, $value) { + if ($optionValue ne 0) { + return undef; + } + + if ($char eq 'o' || $char eq 'x') { + $lastOption = $char; + $optionValue = 1; + } else { + my %item = ( + "option" => $char, + "line" => "-$char", + ); + + push @options, \%item; + } + } + } + + return @options; +} + +sub getoptionsline { + my ($options_ref, @allowed) = @_; + + my $line = ''; + + foreach my $value (@{ $options_ref }) { + if (@allowed) { + if (!grep( /^$$value{'option'}$/, @allowed) ) { + next; + } + } + + $line = "$line$$value{'line'} "; + } + + return $line; +} + __END__ =head1 NAME @@ -1504,16 +1679,17 @@ Options: --identifier=EXTRA Extra identifier which is included in the snapshot name. Can be used for replicating to multiple targets. --recursive|r Also transfers child datasets --skip-parent Skips syncing of the parent dataset. Does nothing without '--recursive' option. - --source-bwlimit= Bandwidth limit on the source transfer - --target-bwlimit= Bandwidth limit on the target transfer + --source-bwlimit= Bandwidth limit in bytes/kbytes/etc per second on the source transfer + --target-bwlimit= Bandwidth limit in bytes/kbytes/etc per second on the target transfer --mbuffer-size=VALUE Specify the mbuffer size (default: 16M), please refer to mbuffer(1) manual page. 
 --no-stream Replicates using newest snapshot instead of intermediates --no-sync-snap Does not create new snapshot, only transfers existing + --create-bookmark Creates a zfs bookmark for the newest snapshot on the source after replication succeeds (only works with --no-sync-snap) --no-clone-rollback Does not rollback clones on target --no-rollback Does not rollback clones or snapshots on target (it probably requires a readonly target) --exclude=REGEX Exclude specific datasets which match the given regular expression. Can be specified multiple times - --sendoptions=OPTIONS DANGER: Inject OPTIONS into zfs send, e.g. syncoid --sendoptions="-Lce" sets zfs send -Lce ... - --recvoptions=OPTIONS DANGER: Inject OPTIONS into zfs received, e.g. syncoid --recvoptions="-x property" sets zfs receive -x property ... + --sendoptions=OPTIONS Use advanced options for zfs send (the arguments are filtered as needed), e.g. syncoid --sendoptions="Lc e" sets zfs send -L -c -e ... + --recvoptions=OPTIONS Use advanced options for zfs receive (the arguments are filtered as needed), e.g. syncoid --recvoptions="ux recordsize o compression=lz4" sets zfs receive -u -x recordsize -o compression=lz4 ... --sshkey=FILE Specifies a ssh public key to use to connect --sshport=PORT Connects to remote on a particular port --sshcipher|c=CIPHER Passes CIPHER to ssh to use a particular cipher set diff --git a/tests/syncoid/4_bookmark_replication_edge_case/run.sh b/tests/syncoid/4_bookmark_replication_edge_case/run.sh new file mode 100755 index 0000000..2a93ce4 --- /dev/null +++ b/tests/syncoid/4_bookmark_replication_edge_case/run.sh @@ -0,0 +1,46 @@ +#!/bin/bash + +# test replication edge cases with bookmarks + +set -x +set -e + +. 
../../common/lib.sh + +POOL_IMAGE="/tmp/syncoid-test-4.zpool" +POOL_SIZE="200M" +POOL_NAME="syncoid-test-4" +TARGET_CHECKSUM="ad383b157b01635ddcf13612ac55577ad9c8dcf3fbfc9eb91792e27ec8db739b -" + +truncate -s "${POOL_SIZE}" "${POOL_IMAGE}" + +zpool create -m none -f "${POOL_NAME}" "${POOL_IMAGE}" + +function cleanUp { + zpool export "${POOL_NAME}" +} + +# export pool in any case +trap cleanUp EXIT + +zfs create "${POOL_NAME}"/src +zfs snapshot "${POOL_NAME}"/src@snap1 +zfs bookmark "${POOL_NAME}"/src@snap1 "${POOL_NAME}"/src#snap1 +# initial replication +../../../syncoid --no-sync-snap --debug --compress=none "${POOL_NAME}"/src "${POOL_NAME}"/dst +# destroy last common snapshot on source +zfs destroy "${POOL_NAME}"/src@snap1 +zfs snapshot "${POOL_NAME}"/src@snap2 + +# replicate which should fallback to bookmarks and stop because it's already on the latest snapshot +../../../syncoid --no-sync-snap --debug --compress=none "${POOL_NAME}"/src "${POOL_NAME}"/dst || exit 1 + +# verify +output=$(zfs list -t snapshot -r -H -o name "${POOL_NAME}") +checksum=$(echo "${output}" | grep -v syncoid_ | shasum -a 256) + +if [ "${checksum}" != "${TARGET_CHECKSUM}" ]; then + exit 1 +fi + +exit 0