Merge branch 'master' into path2

commit 987dcc4885
Christoph Klaffl, 2020-01-17 10:34:23 +01:00
GPG Key ID: 8FC1D76EED4970D2 (no known key found for this signature in database)
23 changed files with 444 additions and 118 deletions


@ -1,3 +1,32 @@
2.0.3 [sanoid] reverted DST handling and improved it as quickfix (@phreaker0)
2.0.2 [overall] documentation updates, new dependencies, small fixes, more warnings (@benyanke, @matveevandrey, @RulerOf, @klemens-u, @johnramsden, @danielewood, @g-a-c, @hartzell, @fryfrog, @phreaker0)
[sanoid] changed and simplified DST handling (@shodanshok)
[syncoid] reset partially resume state automatically (@phreaker0)
    [syncoid] handle some zfs errors automatically by parsing the stderr outputs (@phreaker0)
[syncoid] fixed ordering of snapshots with the same creation timestamp (@phreaker0)
[syncoid] don't use hardcoded paths (@phreaker0)
[syncoid] fix for special setup with listsnapshots=on (@phreaker0)
[syncoid] check ssh connection on startup (@phreaker0)
[syncoid] fix edge case with initial send and no-stream option (@phreaker0)
[syncoid] fallback to normal replication if clone recreation fails (@phreaker0)
[packaging] ebuild for gentoo (@thehaven)
[syncoid] support for zfs bookmark creation (@phreaker0)
[syncoid] fixed bookmark edge cases (@phreaker0)
[syncoid] handle invalid dataset paths nicely (@phreaker0)
[syncoid] fixed resume support check to be zpool based (@phreaker0)
[sanoid] added hotspare template (@jimsalterjrs)
[syncoid] support for advanced zfs send/recv options (@clinta, @phreaker0)
[syncoid] option to change mbuffer size (@TerraTech)
[tests] fixes for FreeBSD (@phreaker0)
[sanoid] support for zfs recursion (@jMichaelA, @phreaker0)
    [syncoid] fixed bookmark handling for volumes (@ppcontrib)
[sanoid] allow time units for monitoring warn/crit values (@phreaker0)
2.0.1 [sanoid] fixed broken monthly warn/critical monitoring values in default template (@jimsalterjrs)
[sanoid] flag to force pruning while filesystem is in an active zfs send/recv (@shodanshok)
[syncoid] flags to disable rollbacks (@shodanshok)
2.0.0 [overall] documentation updates, small fixes, more warnings (@sparky3387, @ljwobker, @phreaker0)
[syncoid] added force delete flag (@phreaker0)
[sanoid] removed sleeping between snapshot taking (@phreaker0)


@ -51,7 +51,7 @@ Install prerequisite software:
# Install and enable epel if we don't already have it, and git too
sudo yum install -y epel-release git
# Install the packages that Sanoid depends on:
sudo yum install -y perl-Config-IniFiles perl-Data-Dumper perl-capture-tiny lzop mbuffer mhash pv
sudo yum install -y perl-Config-IniFiles perl-Data-Dumper perl-Capture-Tiny lzop mbuffer mhash pv
```
Clone this repo, then put the executables and config files into the appropriate directories:
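The exact commands that follow this line in INSTALL.md fall outside the hunk shown here. A minimal sketch of that step, assuming the same destinations the Debian rules file further below uses (`/usr/sbin` for the executables, `/etc/sanoid` for the configuration), might look like:
```
# Sketch only: clone the repository and copy the tools into place
git clone https://github.com/jimsalterjrs/sanoid.git
cd sanoid
sudo install -m 755 sanoid syncoid findoid sleepymutex /usr/sbin/
sudo install -d /etc/sanoid
sudo install -m 644 sanoid.defaults.conf /etc/sanoid/
sudo install -m 644 sanoid.conf /etc/sanoid/sanoid.conf   # edit before enabling the cron job
```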
@ -175,4 +175,6 @@ pkg install p5-Config-Inifiles p5-Capture-Tiny pv mbuffer lzop
## Sanoid
Take a look at the files `sanoid.defaults.conf` and `sanoid.conf.example` for all possible configuration options. Also have a look at the README.md
Take a look at the files `sanoid.defaults.conf` and `sanoid.conf` for all possible configuration options.
Also have a look at the README.md for a simpler suggestion for `sanoid.conf`.
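For orientation, a minimal configuration sketch is shown below; the dataset name, template name, and retention counts are illustrative only and are not taken from this commit:
```
# Sketch only: write a minimal /etc/sanoid/sanoid.conf (values are examples)
sudo tee /etc/sanoid/sanoid.conf > /dev/null <<'EOF'
[data/home]
        use_template = production

[template_production]
        hourly = 36
        daily = 30
        monthly = 3
        yearly = 0
        autosnap = yes
        autoprune = yes
EOF
```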


@ -9,7 +9,7 @@ More prosaically, you can use Sanoid to create, automatically thin, and monitor
* * * * * TZ=UTC /usr/local/bin/sanoid --cron
```
**`IMPORTANT NOTE`**: using a local timezone will result in a single hourly snapshot being **skipped** during the `daylight->nodaylight` transition. To avoid that, using UTC as the timezone is recommended whenever possible.
`Note`: Using UTC as the timezone is recommended to prevent problems with daylight saving time.
And its /etc/sanoid/sanoid.conf might look something like this:


@ -1 +1 @@
2.0.1
2.0.3

findoid (23 changed lines)

@ -8,7 +8,7 @@
use strict;
use warnings;
my $zfs = '/sbin/zfs';
my $zfs = 'zfs';
my %args = getargs(@ARGV);
my $progversion = '1.4.7';
@ -64,6 +64,10 @@ sub getversions {
my $filename = "$dataset/$snappath/$snap/$relpath";
my ($dev,$ino,$mode,$nlink,$uid,$gid,$rdev,$size,$atime,$mtime,$ctime,$blksize,$blocks) = stat($filename);
if (!defined $size) {
next;
}
# only push to the $versions hash if this size and mtime aren't already present (simple dedupe)
my $duplicate = 0;
foreach my $version (keys %versions) {
@ -77,6 +81,14 @@ sub getversions {
}
}
my $filename = "$dataset/$relpath";
my ($dev,$ino,$mode,$nlink,$uid,$gid,$rdev,$size,$atime,$mtime,$ctime,$blksize,$blocks) = stat($filename);
if (defined $size) {
$versions{$filename}{'size'} = $size;
$versions{$filename}{'mtime'} = $mtime;
}
return %versions;
}
@ -102,14 +114,19 @@ sub getdataset {
my ($path) = @_;
open FH, "$zfs list -Ho mountpoint |";
open FH, "$zfs list -H -t filesystem -o mountpoint,mounted |";
my @datasets = <FH>;
close FH;
my @matchingdatasets;
foreach my $dataset (@datasets) {
chomp $dataset;
if ( $path =~ /^$dataset/ ) { push @matchingdatasets, $dataset; }
my ($mountpoint, $mounted) = ($dataset =~ m/([^\t]*)\t*(.*)/);
if ($mounted ne "yes") {
next;
}
if ( $path =~ /^$mountpoint/ ) { push @matchingdatasets, $mountpoint; }
}
my $bestmatch = '';

packages/debian/.gitignore (new file, 7 lines)

@ -0,0 +1,7 @@
*.debhelper
*.debhelper.log
*.substvars
debhelper-build-stamp
files
sanoid
tmp

packages/debian/TODO (new file, 12 lines)

@ -0,0 +1,12 @@
- This package needs to be a 3.0 (quilt) format, not 3.0 (native).
- Fix the changelog
- Move the packaging out to a separate repository, or at a minimum,
a separate branch.
- Provide an extended description in debian/control
- Figure out a plan for sanoid.defaults.conf. It is not supposed to be
edited, so it shouldn't be installed in /etc. At a minimum, install
it under /usr and make a symlink, but preferably patch sanoid to look
there directly.
- Man pages are necessary for all the utilities installed.
- With these, there is probably no need to ship README.md.
- Break out syncoid into a separate package?


@ -1,3 +1,44 @@
sanoid (2.0.3) unstable; urgency=medium
[sanoid] reverted DST handling and improved it as quickfix (@phreaker0)
-- Jim Salter <github@jrs-s.net> Wed, 02 Oct 2019 17:00:00 +0100
sanoid (2.0.2) unstable; urgency=medium
[overall] documentation updates, new dependencies, small fixes, more warnings (@benyanke, @matveevandrey, @RulerOf, @klemens-u, @johnramsden, @danielewood, @g-a-c, @hartzell, @fryfrog, @phreaker0)
  [sanoid] changed and simplified DST handling (@shodanshok)
[syncoid] reset partially resume state automatically (@phreaker0)
  [syncoid] handle some zfs errors automatically by parsing the stderr outputs (@phreaker0)
[syncoid] fixed ordering of snapshots with the same creation timestamp (@phreaker0)
[syncoid] don't use hardcoded paths (@phreaker0)
[syncoid] fix for special setup with listsnapshots=on (@phreaker0)
[syncoid] check ssh connection on startup (@phreaker0)
[syncoid] fix edge case with initial send and no-stream option (@phreaker0)
[syncoid] fallback to normal replication if clone recreation fails (@phreaker0)
[packaging] ebuild for gentoo (@thehaven)
[syncoid] support for zfs bookmark creation (@phreaker0)
[syncoid] fixed bookmark edge cases (@phreaker0)
[syncoid] handle invalid dataset paths nicely (@phreaker0)
[syncoid] fixed resume support check to be zpool based (@phreaker0)
[sanoid] added hotspare template (@jimsalterjrs)
[syncoid] support for advanced zfs send/recv options (@clinta, @phreaker0)
[syncoid] option to change mbuffer size (@TerraTech)
[tests] fixes for FreeBSD (@phreaker0)
[sanoid] support for zfs recursion (@jMichaelA, @phreaker0)
  [syncoid] fixed bookmark handling for volumes (@ppcontrib)
[sanoid] allow time units for monitoring warn/crit values (@phreaker0)
-- Jim Salter <github@jrs-s.net> Fri, 20 Sep 2019 23:01:00 +0100
sanoid (2.0.1) unstable; urgency=medium
[sanoid] fixed broken monthly warn/critical monitoring values in default template (@jimsalterjrs)
[sanoid] flag to force pruning while filesystem is in an active zfs send/recv (@shodanshok)
[syncoid] flags to disable rollbacks (@shodanshok)
-- Jim Salter <github@jrs-s.net> Fri, 14 Dec 2018 16:48:00 +0100
sanoid (2.0.0) unstable; urgency=medium
[overall] documentation updates, small fixes, more warnings (@sparky3387, @ljwobker, @phreaker0)


@ -1 +1 @@
9
10


@ -1,14 +1,23 @@
Source: sanoid
Section: unknown
Section: utils
Priority: optional
Maintainer: Jim Salter <jim@openoid.net>
Build-Depends: debhelper (>= 9)
Standards-Version: 3.9.8
Build-Depends: debhelper (>= 10)
Standards-Version: 4.1.2
Homepage: https://github.com/jimsalterjrs/sanoid
Vcs-Git: https://github.com/jimsalterjrs/sanoid.git
Vcs-Browser: https://github.com/jimsalterjrs/sanoid
Package: sanoid
Architecture: all
Depends: ${misc:Depends}, ${perl:Depends}, zfsutils-linux | zfs, libconfig-inifiles-perl, libcapture-tiny-perl
Depends: libcapture-tiny-perl,
libconfig-inifiles-perl,
zfsutils-linux | zfs,
${misc:Depends},
${perl:Depends}
Recommends: gzip,
lzop,
mbuffer,
openssh-client | ssh-client,
pv
Description: Policy-driven snapshot management and replication tools


@ -1,6 +1,6 @@
Format: https://www.debian.org/doc/packaging-manuals/copyright-format/1.0/
Upstream-Name: sanoid
Source: <https://github.com/jimsalterjrs/sanoid>
Source: https://github.com/jimsalterjrs/sanoid
Files: *
Copyright: 2017 Jim Salter <github@jrs-s.net>
@ -8,6 +8,7 @@ License: GPL-3.0+
Files: debian/*
Copyright: 2017 Jim Salter <github@jrs-s.net>
2017 Richard Laager <rlaager@wiktel.com>
License: GPL-3.0+
License: GPL-3.0+
@ -26,8 +27,3 @@ License: GPL-3.0+
.
On Debian systems, the complete text of the GNU General
Public License version 3 can be found in "/usr/share/common-licenses/GPL-3".
# Please also look if there are files or directories which have a
# different copyright/license attached and list them here.
# Please avoid picking licenses with terms that are more restrictive than the
# packaged work, as it may make Debian's contributions unacceptable upstream.


@ -5,18 +5,25 @@
#export DH_VERBOSE = 1
%:
dh $@ --with systemd
dh $@
DESTDIR = $(CURDIR)/debian/sanoid
override_dh_auto_install:
@mkdir -p $(DESTDIR)/usr/sbin; \
cp sanoid syncoid findoid sleepymutex $(DESTDIR)/usr/sbin;
@mkdir -p $(DESTDIR)/etc/sanoid; \
cp sanoid.defaults.conf $(DESTDIR)/etc/sanoid;
@mkdir -p $(DESTDIR)/usr/share/doc/sanoid; \
cp sanoid.conf $(DESTDIR)/usr/share/doc/sanoid/sanoid.conf.example;
@mkdir -p $(DESTDIR)/lib/systemd/system; \
cp debian/sanoid-prune.service debian/sanoid.timer $(DESTDIR)/lib/systemd/system;
install -d $(DESTDIR)/etc/sanoid
install -m 664 sanoid.defaults.conf $(DESTDIR)/etc/sanoid
install -d $(DESTDIR)/lib/systemd/system
install -m 664 debian/sanoid-prune.service debian/sanoid.timer \
$(DESTDIR)/lib/systemd/system
install -d $(DESTDIR)/usr/sbin
install -m 775 \
findoid sanoid sleepymutex syncoid \
$(DESTDIR)/usr/sbin
install -d $(DESTDIR)/usr/share/doc/sanoid
install -m 664 sanoid.conf \
$(DESTDIR)/usr/share/doc/sanoid/sanoid.conf.example
override_dh_installinit:
dh_installinit --noscripts


@ -1 +1,2 @@
To start, copy the example config file in /usr/share/doc/sanoid to /etc/sanoid/sanoid.conf.
To start, copy the example config file in /usr/share/doc/sanoid to
/etc/sanoid/sanoid.conf.
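Given the install rule in debian/rules earlier in this commit (the example is shipped as `sanoid.conf.example` under `/usr/share/doc/sanoid`), that step would look roughly like:
```
# Sketch only: copy the packaged example into place, then adapt it to the local datasets
sudo cp /usr/share/doc/sanoid/sanoid.conf.example /etc/sanoid/sanoid.conf
sudoedit /etc/sanoid/sanoid.conf
```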


@ -1,4 +1,4 @@
AUX sanoid.cron 45 BLAKE2B 3f6294bbbf485dc21a565cd2c8da05a42fb21cdaabdf872a21500f1a7338786c60d4a1fd188bbf81ce85f06a376db16998740996f47c049707a5109bdf02c052 SHA512 7676b32f21e517e8c84a097c7934b54097cf2122852098ea756093ece242125da3f6ca756a6fbb82fc348f84b94bfd61639e86e0bfa4bbe7abf94a8a4c551419
DIST sanoid-2.0.1.tar.gz 106981 BLAKE2B 824b7271266ac9f9bf1fef5374a442215c20a4f139081f77d5d8db2ec7db9b8b349d9d0394c76f9d421a957853af64ff069097243f69e7e4b83a804f5ba992a6 SHA512 9d999b0f071bc3c3ca956df11e1501fd72a842f7d3315ede3ab3b5e0a36351100b6edbab8448bba65a2e187e4e8f77ff24671ed33b28f2fca9bb6ad0801aba9d
EBUILD sanoid-2.0.1.ebuild 796 BLAKE2B f3d633289d66c60fd26cb7731bc6b63533019f527aaec9ca8e5c0e748542d391153dbb55b17b8c981ca4fa4ae1fc8dc202b5480c13736fca250940b3b5ebb793 SHA512 d0143680c029ffe4ac37d97a979ed51527b4b8dd263d0c57e43a4650bf8a9bb8
DIST sanoid-2.0.2.tar.gz 115797 BLAKE2B d00a038062df3dd8e77d3758c7b80ed6da0bac4931fb6df6adb72eeddb839c63d5129e0a281948a483d02165dad5a8505e1a55dc851360d3b366371038908142 SHA512 73e3d25dbdd58a78ffc4384584304e7230c5f31a660ce6d2a9b9d52a92a3796f1bc25ae865dbc74ce586cbd6169dbb038340f4a28e097e77ab3eb192b15773db
EBUILD sanoid-2.0.2.ebuild 796 BLAKE2B f3d633289d66c60fd26cb7731bc6b63533019f527aaec9ca8e5c0e748542d391153dbb55b17b8c981ca4fa4ae1fc8dc202b5480c13736fca250940b3b5ebb793 SHA512 d0143680c029ffe4ac37d97a979ed51527b4b8dd263d0c57e43a4650bf8a9bb8
EBUILD sanoid-9999.ebuild 776 BLAKE2B 416b8d04a9e5a84bce46d2a6f88eaefe03804944c03bc7f49b7a5b284b844212a6204402db3de3afa5d9c0545125d2631e7231c8cb2a3537bdcb10ea1be46b6a SHA512 98d8a30a13e75d7847ae9d60797d54078465bf75c6c6d9b6fd86075e342c0374


@ -1,4 +1,4 @@
%global version 2.0.0
%global version 2.0.3
%global git_tag v%{version}
# Enable with systemctl "enable sanoid.timer"
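As the comment above notes, the packaged systemd units are opt-in; a sketch of enabling the timer (unit name taken from the comment and from debian/rules) would be:
```
# Sketch only: enable and start the packaged snapshot timer
sudo systemctl enable --now sanoid.timer
```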
@ -14,7 +14,7 @@ License: GPLv3
URL: https://github.com/jimsalterjrs/sanoid
Source0: https://github.com/jimsalterjrs/%{name}/archive/%{git_tag}/%{name}-%{version}.tar.gz
Requires: perl, mbuffer, lzop, pv, perl-Config-IniFiles, perl-capture-tiny
Requires: perl, mbuffer, lzop, pv, perl-Config-IniFiles, perl-Capture-Tiny
%if 0%{?_with_systemd}
Requires: systemd >= 212
@ -111,6 +111,10 @@ echo "* * * * * root %{_sbindir}/sanoid --cron" > %{buildroot}%{_docdir}/%{name}
%endif
%changelog
* Wed Oct 02 2019 Christoph Klaffl <christoph@phreaker.eu> - 2.0.3
- Bump to 2.0.3
* Wed Sep 25 2019 Christoph Klaffl <christoph@phreaker.eu> - 2.0.2
- Bump to 2.0.2
* Wed Dec 04 2018 Christoph Klaffl <christoph@phreaker.eu> - 2.0.0
- Bump to 2.0.0
* Sat Apr 28 2018 Dominic Robinson <github@dcrdev.com> - 1.4.18-1

sanoid (129 changed lines)

@ -4,7 +4,7 @@
# from http://www.gnu.org/licenses/gpl-3.0.html on 2014-11-17. A copy should also be available in this
# project's Git repository at https://github.com/jimsalterjrs/sanoid/blob/master/LICENSE.
$::VERSION = '2.0.1';
$::VERSION = '2.0.3';
my $MINIMUM_DEFAULTS_VERSION = 2;
use strict;
@ -15,6 +15,7 @@ use File::Path; # for rmtree command in use_prune
use Getopt::Long qw(:config auto_version auto_help);
use Pod::Usage; # pod2usage
use Time::Local; # to parse dates in reverse
use Capture::Tiny ':all';
my %args = ("configdir" => "/etc/sanoid");
GetOptions(\%args, "verbose", "debug", "cron", "readonly", "quiet",
@ -161,16 +162,16 @@ sub monitor_snapshots {
if ($elapsed == -1) {
push @msgs, "CRIT: $path has no $type snapshots at all!";
} else {
push @msgs, "CRIT: $path\'s newest $type snapshot is $dispelapsed old (should be < $dispcrit)";
push @msgs, "CRIT: $path newest $type snapshot is $dispelapsed old (should be < $dispcrit)";
}
}
} elsif ($elapsed > $warn) {
if ($warn > 0) {
if (! $config{$section}{'monitor_dont_warn'} && ($errorlevel < 2) ) { $errorlevel = 1; }
push @msgs, "WARN: $path\'s newest $type snapshot is $dispelapsed old (should be < $dispwarn)";
push @msgs, "WARN: $path newest $type snapshot is $dispelapsed old (should be < $dispwarn)";
}
} else {
# push @msgs .= "OK: $path\'s newest $type snapshot is $dispelapsed old \n";
# push @msgs .= "OK: $path newest $type snapshot is $dispelapsed old \n";
}
}
@ -360,6 +361,19 @@ sub take_snapshots {
my @newsnaps;
# get utc timestamp of the current day for DST check
my $daystartUtc = timelocal(0, 0, 0, $datestamp{'mday'}, ($datestamp{'mon'}-1), $datestamp{'year'});
my ($isdst) = (localtime($daystartUtc))[8];
my $dstOffset = 0;
if ($isdst ne $datestamp{'isdst'}) {
# current DST is different than at the beginning of the day
if ($isdst) {
# DST ended in the current day
$dstOffset = 60*60;
}
}
if ($args{'verbose'}) { print "INFO: taking snapshots...\n"; }
foreach my $section (keys %config) {
if ($section =~ /^template/) { next; }
@ -383,6 +397,9 @@ sub take_snapshots {
my @preferredtime;
my $lastpreferred;
# to avoid duplicates with DST
my $handleDst = 0;
if ($type eq 'frequently') {
my $frequentslice = int($datestamp{'min'} / $config{$section}{'frequent_period'});
@ -402,6 +419,13 @@ sub take_snapshots {
push @preferredtime,($datestamp{'mon'}-1); # january is month 0
push @preferredtime,$datestamp{'year'};
$lastpreferred = timelocal(@preferredtime);
if ($dstOffset ne 0) {
# timelocal doesn't take DST into account
$lastpreferred += $dstOffset;
# DST ended, avoid duplicates
$handleDst = 1;
}
if ($lastpreferred > time()) { $lastpreferred -= 60*60; } # preferred time is later this hour - so look at last hour's
} elsif ($type eq 'daily') {
push @preferredtime,0; # try to hit 0 seconds
@ -411,10 +435,29 @@ sub take_snapshots {
push @preferredtime,($datestamp{'mon'}-1); # january is month 0
push @preferredtime,$datestamp{'year'};
$lastpreferred = timelocal(@preferredtime);
if ($lastpreferred > time()) {
$preferredtime[3] -= 1; # preferred time is later today - so look at yesterday's
$lastpreferred = timelocal(@preferredtime);
# timelocal doesn't take DST into account
$lastpreferred += $dstOffset;
# check if the planned time has different DST flag than the current
my ($isdst) = (localtime($lastpreferred))[8];
if ($isdst ne $datestamp{'isdst'}) {
if (!$isdst) {
# correct DST difference
$lastpreferred -= 60*60;
}
}
if ($lastpreferred > time()) {
$lastpreferred -= 60*60*24;
if ($dstOffset ne 0) {
# because we are going back one day
# the DST difference has to be accounted
# for in reverse now
$lastpreferred -= 2*$dstOffset;
}
} # preferred time is later today - so look at yesterday's
} elsif ($type eq 'weekly') {
# calculate offset in seconds for the desired weekday
my $offset = 0;
@ -464,9 +507,17 @@ sub take_snapshots {
%datestamp = get_date();
# print "we should have had a $type snapshot of $path $maxage seconds ago; most recent is $newestage seconds old.\n";
my $flags = "";
# use zfs (atomic) recursion if specified in config
if ($config{$section}{'zfs_recursion'}) {
push(@newsnaps, "$path\@autosnap_$datestamp{'sortable'}_$type\@");
$flags .= "r";
}
if ($handleDst) {
$flags .= "d";
}
if ($flags ne "") {
push(@newsnaps, "$path\@autosnap_$datestamp{'sortable'}_$type\@$flags");
} else {
push(@newsnaps, "$path\@autosnap_$datestamp{'sortable'}_$type");
}
@ -480,9 +531,18 @@ sub take_snapshots {
my $extraMessage = "";
my @split = split '@', $snap, -1;
my $recursiveFlag = 0;
my $dstHandling = 0;
if (scalar(@split) == 3) {
$recursiveFlag = 1;
$extraMessage = " (zfs recursive)";
my $flags = $split[2];
if (index($flags, "r") != -1) {
$recursiveFlag = 1;
$extraMessage = " (zfs recursive)";
chop $snap;
}
if (index($flags, "d") != -1) {
$dstHandling = 1;
chop $snap;
}
chop $snap;
}
my $dataset = $split[0];
@ -509,13 +569,40 @@ sub take_snapshots {
}
if ($args{'verbose'}) { print "taking snapshot $snap$extraMessage\n"; }
if (!$args{'readonly'}) {
if ($recursiveFlag) {
system($zfs, "snapshot", "-r", "$snap") == 0
or warn "CRITICAL ERROR: $zfs snapshot -r $snap failed, $?";
} else {
system($zfs, "snapshot", "$snap") == 0
or warn "CRITICAL ERROR: $zfs snapshot $snap failed, $?";
}
my $stderr;
my $exit;
($stderr, $exit) = tee_stderr {
if ($recursiveFlag) {
system($zfs, "snapshot", "-r", "$snap");
} else {
system($zfs, "snapshot", "$snap");
}
};
$exit == 0 or do {
if ($dstHandling) {
if ($stderr =~ /already exists/) {
$exit = 0;
$snap =~ s/_([a-z]+)$/dst_$1/g;
if ($args{'verbose'}) { print "taking dst snapshot $snap$extraMessage\n"; }
if ($recursiveFlag) {
system($zfs, "snapshot", "-r", "$snap") == 0
or warn "CRITICAL ERROR: $zfs snapshot -r $snap failed, $?";
} else {
system($zfs, "snapshot", "$snap") == 0
or warn "CRITICAL ERROR: $zfs snapshot $snap failed, $?";
}
}
}
};
$exit == 0 or do {
if ($recursiveFlag) {
warn "CRITICAL ERROR: $zfs snapshot -r $snap failed, $?";
} else {
warn "CRITICAL ERROR: $zfs snapshot $snap failed, $?";
}
};
}
if ($config{$dataset}{'post_snapshot_script'}) {
if (!$presnapshotfailure or $config{$dataset}{'force_post_snapshot_script'}) {
@ -1305,7 +1392,9 @@ sub checklock {
# make sure lockfile contains something
if ( -z $lockfile) {
# zero size lockfile, something is wrong
die "ERROR: something is wrong! $lockfile is empty\n";
warn "WARN: deleting invalid/empty $lockfile\n";
unlink $lockfile;
return 1;
}
# lockfile exists. read pid and mutex from it. see if it's our pid. if not, see if
@ -1316,7 +1405,9 @@ sub checklock {
close FH;
# if we didn't get exactly 2 items from the lock file there is a problem
if (scalar(@lock) != 2) {
die "ERROR: $lockfile is invalid.\n"
warn "WARN: deleting invalid $lockfile\n"
unlink $lockfile;
return 1
}
my $lockmutex = pop(@lock);

syncoid (192 changed lines)

@ -4,7 +4,7 @@
# from http://www.gnu.org/licenses/gpl-3.0.html on 2014-11-17. A copy should also be available in this
# project's Git repository at https://github.com/jimsalterjrs/sanoid/blob/master/LICENSE.
$::VERSION = '2.0.0';
$::VERSION = '2.0.3';
use strict;
use warnings;
@ -16,6 +16,7 @@ use Sys::Hostname;
use Capture::Tiny ':all';
my $mbuffer_size = "16M";
my $pvoptions = "-p -t -e -r -b";
# Blank defaults to use ssh client's default
# TODO: Merge into a single "sshflags" option?
@ -24,7 +25,7 @@ GetOptions(\%args, "no-command-checks", "monitor-version", "compress=s", "dumpsn
"source-bwlimit=s", "target-bwlimit=s", "sshkey=s", "sshport=i", "sshcipher|c=s", "sshoption|o=s@",
"debug", "quiet", "no-stream", "no-sync-snap", "no-resume", "exclude=s@", "skip-parent", "identifier=s",
"no-clone-handling", "no-privilege-elevation", "force-delete", "no-clone-rollback", "no-rollback",
"create-bookmark",
"create-bookmark", "pv-options=s" => \$pvoptions,
"mbuffer-size=s" => \$mbuffer_size) or pod2usage(2);
my %compressargs = %{compressargset($args{'compress'} || 'default')}; # Can't be done with GetOptions arg, as default still needs to be set
@ -272,7 +273,6 @@ sub syncdataset {
my ($sourcehost, $sourcefs, $targethost, $targetfs, $origin, $skipsnapshot) = @_;
my $stdout;
my $stderr;
my $exit;
my $sourcefsescaped = escapeshellparam($sourcefs);
@ -286,11 +286,20 @@ sub syncdataset {
if ($debug) { print "DEBUG: syncing source $sourcefs to target $targetfs.\n"; }
my $sync = getzfsvalue($sourcehost,$sourcefs,$sourceisroot,'syncoid:sync');
my ($sync, $error) = getzfsvalue($sourcehost,$sourcefs,$sourceisroot,'syncoid:sync');
if (!defined $sync) {
# zfs already printed the corresponding error
if ($exitcode < 2) { $exitcode = 2; }
if ($error =~ /\bdataset does not exist\b/) {
if (!$quiet) { print "WARN Skipping dataset (dataset no longer exists): $sourcefs...\n"; }
return 0;
}
else {
# print the error out and set exit code
print "ERROR: $error\n";
if ($exitcode < 2) { $exitcode = 2 }
}
return 0;
}
@ -518,7 +527,7 @@ sub syncdataset {
if (defined($receivetoken)) {
$sendoptions = getoptionsline(\@sendoptions, ('P','e','v','w'));
my $sendcmd = "$sourcesudocmd $zfscmd send $sendoptions -t $receivetoken";
my $recvcmd = "$targetsudocmd $zfscmd receive $recvoptions $receiveextraargs $forcedrecv $targetfsescaped";
my $recvcmd = "$targetsudocmd $zfscmd receive $recvoptions $receiveextraargs $forcedrecv $targetfsescaped 2>&1";
my $pvsize = getsendsize($sourcehost,"","",$sourceisroot,$receivetoken);
my $disp_pvsize = readablebytes($pvsize);
if ($pvsize == 0) { $disp_pvsize = "UNKNOWN"; }
@ -527,13 +536,22 @@ sub syncdataset {
if (!$quiet) { print "Resuming interrupted zfs send/receive from $sourcefs to $targetfs (~ $disp_pvsize remaining):\n"; }
if ($debug) { print "DEBUG: $synccmd\n"; }
($stdout, $stderr, $exit) = tee {
system("$synccmd")
};
if ($pvsize == 0) {
# we need to capture the error of zfs send, this will render pv useless but in this case
# it doesn't matter because we don't know the estimated send size (probably because
# the initial snapshot used for resumed send doesn't exist anymore)
($stdout, $exit) = tee_stderr {
system("$synccmd")
};
} else {
($stdout, $exit) = tee_stdout {
system("$synccmd")
};
}
$exit == 0 or do {
if ($stderr =~ /\Qused in the initial send no longer exists\E/) {
if (!$quiet) { print "WARN: resetting partially receive state\n"; }
if ($stdout =~ /\Qused in the initial send no longer exists\E/) {
if (!$quiet) { print "WARN: resetting partially receive state because the snapshot source no longer exists\n"; }
resetreceivestate($targethost,$targetfs,$targetisroot);
# do a normal sync cycle
return syncdataset($sourcehost, $sourcefs, $targethost, $targetfs, $origin);
@ -683,18 +701,18 @@ sub syncdataset {
if ($nextsnapshot) {
my $nextsnapshotescaped = escapeshellparam($nextsnapshot);
my $sendcmd = "$sourcesudocmd $zfscmd send $sendoptions -i $sourcefsescaped#$bookmarkescaped $sourcefsescaped\@$nextsnapshotescaped";
my $recvcmd = "$targetsudocmd $zfscmd receive $recvoptions $receiveextraargs $forcedrecv $targetfsescaped";
my $recvcmd = "$targetsudocmd $zfscmd receive $recvoptions $receiveextraargs $forcedrecv $targetfsescaped 2>&1";
my $synccmd = buildsynccmd($sendcmd,$recvcmd,$pvsize,$sourceisroot,$targetisroot);
if (!$quiet) { print "Sending incremental $sourcefs#$bookmarkescaped ... $nextsnapshot (~ $disp_pvsize):\n"; }
if ($debug) { print "DEBUG: $synccmd\n"; }
($stdout, $stderr, $exit) = tee {
($stdout, $exit) = tee_stdout {
system("$synccmd")
};
$exit == 0 or do {
if (!$resume && $stderr =~ /\Qcontains partially-complete state\E/) {
if (!$resume && $stdout =~ /\Qcontains partially-complete state\E/) {
if (!$quiet) { print "WARN: resetting partially receive state\n"; }
resetreceivestate($targethost,$targetfs,$targetisroot);
system("$synccmd") == 0 or do {
@ -713,18 +731,18 @@ sub syncdataset {
$matchingsnapescaped = escapeshellparam($matchingsnap);
} else {
my $sendcmd = "$sourcesudocmd $zfscmd send $sendoptions -i $sourcefsescaped#$bookmarkescaped $sourcefsescaped\@$newsyncsnapescaped";
my $recvcmd = "$targetsudocmd $zfscmd receive $recvoptions $receiveextraargs $forcedrecv $targetfsescaped";
my $recvcmd = "$targetsudocmd $zfscmd receive $recvoptions $receiveextraargs $forcedrecv $targetfsescaped 2>&1";
my $synccmd = buildsynccmd($sendcmd,$recvcmd,$pvsize,$sourceisroot,$targetisroot);
if (!$quiet) { print "Sending incremental $sourcefs#$bookmarkescaped ... $newsyncsnap (~ $disp_pvsize):\n"; }
if ($debug) { print "DEBUG: $synccmd\n"; }
($stdout, $stderr, $exit) = tee {
($stdout, $exit) = tee_stdout {
system("$synccmd")
};
$exit == 0 or do {
if (!$resume && $stderr =~ /\Qcontains partially-complete state\E/) {
if (!$resume && $stdout =~ /\Qcontains partially-complete state\E/) {
if (!$quiet) { print "WARN: resetting partially receive state\n"; }
resetreceivestate($targethost,$targetfs,$targetisroot);
system("$synccmd") == 0 or do {
@ -751,7 +769,7 @@ sub syncdataset {
$sendoptions = getoptionsline(\@sendoptions, ('D','L','P','R','c','e','h','p','v','w'));
my $sendcmd = "$sourcesudocmd $zfscmd send $sendoptions $args{'streamarg'} $sourcefsescaped\@$matchingsnapescaped $sourcefsescaped\@$newsyncsnapescaped";
my $recvcmd = "$targetsudocmd $zfscmd receive $recvoptions $receiveextraargs $forcedrecv $targetfsescaped";
my $recvcmd = "$targetsudocmd $zfscmd receive $recvoptions $receiveextraargs $forcedrecv $targetfsescaped 2>&1";
my $pvsize = getsendsize($sourcehost,"$sourcefs\@$matchingsnap","$sourcefs\@$newsyncsnap",$sourceisroot);
my $disp_pvsize = readablebytes($pvsize);
if ($pvsize == 0) { $disp_pvsize = "UNKNOWN"; }
@ -760,12 +778,13 @@ sub syncdataset {
if (!$quiet) { print "Sending incremental $sourcefs\@$matchingsnap ... $newsyncsnap (~ $disp_pvsize):\n"; }
if ($debug) { print "DEBUG: $synccmd\n"; }
($stdout, $stderr, $exit) = tee {
($stdout, $exit) = tee_stdout {
system("$synccmd")
};
$exit == 0 or do {
if (!$resume && $stderr =~ /\Qcontains partially-complete state\E/) {
# FreeBSD reports "dataset is busy" instead of "contains partially-complete state"
if (!$resume && ($stdout =~ /\Qcontains partially-complete state\E/ || $stdout =~ /\Qdataset is busy\E/)) {
if (!$quiet) { print "WARN: resetting partially receive state\n"; }
resetreceivestate($targethost,$targetfs,$targetisroot);
system("$synccmd") == 0 or do {
@ -1134,17 +1153,22 @@ sub getzfsvalue {
my $mysudocmd;
if ($isroot) { $mysudocmd = ''; } else { $mysudocmd = $sudocmd; }
if ($debug) { print "$rhost $mysudocmd $zfscmd get -H $property $fsescaped\n"; }
open FH, "$rhost $mysudocmd $zfscmd get -H $property $fsescaped |";
my $value = <FH>;
close FH;
if (!defined $value) {
return undef;
}
my ($value, $error, $exit) = capture {
system("$rhost $mysudocmd $zfscmd get -H $property $fsescaped");
};
my @values = split(/\t/,$value);
$value = $values[2];
return $value;
my $wantarray = wantarray || 0;
# If we are in scalar context and there is an error, print it out.
# Otherwise we assume the caller will deal with it.
if (!$wantarray and $error) {
print "ERROR getzfsvalue $fs $property: $error\n";
}
return $wantarray ? ($value, $error) : $value;
}
sub readablebytes {
@ -1217,13 +1241,13 @@ sub buildsynccmd {
}
if ($avail{'sourcembuffer'}) { $synccmd .= " $mbuffercmd $bwlimit $mbufferoptions |"; }
if ($avail{'localpv'} && !$quiet) { $synccmd .= " $pvcmd -s $pvsize |"; }
if ($avail{'localpv'} && !$quiet) { $synccmd .= " $pvcmd $pvoptions -s $pvsize |"; }
$synccmd .= " $recvcmd";
} elsif ($sourcehost eq '') {
# local source, remote target.
#$synccmd = "$sendcmd | $pvcmd | $compressargs{'cmd'} | $mbuffercmd | $sshcmd $targethost '$compressargs{'decomcmd'} | $mbuffercmd | $recvcmd'";
$synccmd = "$sendcmd |";
if ($avail{'localpv'} && !$quiet) { $synccmd .= " $pvcmd -s $pvsize |"; }
if ($avail{'localpv'} && !$quiet) { $synccmd .= " $pvcmd $pvoptions -s $pvsize |"; }
if ($avail{'compress'}) { $synccmd .= " $compressargs{'cmd'} |"; }
if ($avail{'sourcembuffer'}) { $synccmd .= " $mbuffercmd $args{'source-bwlimit'} $mbufferoptions |"; }
$synccmd .= " $sshcmd $targethost ";
@ -1246,7 +1270,7 @@ sub buildsynccmd {
$synccmd .= " | ";
if ($avail{'targetmbuffer'}) { $synccmd .= "$mbuffercmd $args{'target-bwlimit'} $mbufferoptions | "; }
if ($avail{'compress'}) { $synccmd .= "$compressargs{'decomcmd'} | "; }
if ($avail{'localpv'} && !$quiet) { $synccmd .= "$pvcmd -s $pvsize | "; }
if ($avail{'localpv'} && !$quiet) { $synccmd .= "$pvcmd $pvoptions -s $pvsize | "; }
$synccmd .= "$recvcmd";
} else {
#remote source, remote target... weird, but whatever, I'm not here to judge you.
@ -1260,7 +1284,7 @@ sub buildsynccmd {
$synccmd .= " | ";
if ($avail{'compress'}) { $synccmd .= "$compressargs{'decomcmd'} | "; }
if ($avail{'localpv'} && !$quiet) { $synccmd .= "$pvcmd -s $pvsize | "; }
if ($avail{'localpv'} && !$quiet) { $synccmd .= "$pvcmd $pvoptions -s $pvsize | "; }
if ($avail{'compress'}) { $synccmd .= "$compressargs{'cmd'} | "; }
if ($avail{'localmbuffer'}) { $synccmd .= "$mbuffercmd $mbufferoptions | "; }
$synccmd .= "$sshcmd $targethost ";
@ -1446,11 +1470,19 @@ sub getsnaps() {
$fsescaped = escapeshellparam($fsescaped);
}
my $getsnapcmd = "$rhost $mysudocmd $zfscmd get -Hpd 1 -t snapshot guid,creation $fsescaped |";
if ($debug) { print "DEBUG: getting list of snapshots on $fs using $getsnapcmd...\n"; }
my $getsnapcmd = "$rhost $mysudocmd $zfscmd get A-Hpd 1 -t snapshot guid,creation $fsescaped";
if ($debug) {
$getsnapcmd = "$getsnapcmd |";
print "DEBUG: getting list of snapshots on $fs using $getsnapcmd...\n";
} else {
$getsnapcmd = "$getsnapcmd 2>/dev/null |";
}
open FH, $getsnapcmd;
my @rawsnaps = <FH>;
close FH or die "CRITICAL ERROR: snapshots couldn't be listed for $fs (exit code $?)";
close FH or do {
# fallback (solaris for example doesn't support the -t option)
return getsnapsfallback($type,$rhost,$fs,$isroot,%snaps);
};
# this is a little obnoxious. get guid,creation returns guid,creation on two separate lines
# as though each were an entirely separate get command.
@ -1501,6 +1533,89 @@ sub getsnaps() {
return %snaps;
}
sub getsnapsfallback() {
# fallback (solaris for example doesn't support the -t option)
my ($type,$rhost,$fs,$isroot,%snaps) = @_;
my $mysudocmd;
my $fsescaped = escapeshellparam($fs);
if ($isroot) { $mysudocmd = ''; } else { $mysudocmd = $sudocmd; }
if ($rhost ne '') {
$rhost = "$sshcmd $rhost";
# double escaping needed
$fsescaped = escapeshellparam($fsescaped);
}
my $getsnapcmd = "$rhost $mysudocmd $zfscmd get -Hpd 1 type,guid,creation $fsescaped |";
warn "snapshot listing failed, trying fallback command";
if ($debug) { print "DEBUG: FALLBACK, getting list of snapshots on $fs using $getsnapcmd...\n"; }
open FH, $getsnapcmd;
my @rawsnaps = <FH>;
close FH or die "CRITICAL ERROR: snapshots couldn't be listed for $fs (exit code $?)";
my %creationtimes=();
my $state = 0;
foreach my $line (@rawsnaps) {
if ($state < 0) {
$state++;
next;
}
if ($state eq 0) {
if ($line !~ /\Q$fs\E\@.*type\s*snapshot/) {
# skip non snapshot type object
$state = -2;
next;
}
} elsif ($state eq 1) {
if ($line !~ /\Q$fs\E\@.*guid/) {
die "CRITICAL ERROR: snapshots couldn't be listed for $fs (guid parser error)";
}
chomp $line;
my $guid = $line;
$guid =~ s/^.*\tguid\t*(\d*).*/$1/;
my $snap = $line;
$snap =~ s/^.*\@(.*)\tguid.*$/$1/;
$snaps{$type}{$snap}{'guid'}=$guid;
} elsif ($state eq 2) {
if ($line !~ /\Q$fs\E\@.*creation/) {
die "CRITICAL ERROR: snapshots couldn't be listed for $fs (creation parser error)";
}
chomp $line;
my $creation = $line;
$creation =~ s/^.*\tcreation\t*(\d*).*/$1/;
my $snap = $line;
$snap =~ s/^.*\@(.*)\tcreation.*$/$1/;
# the creation timestamp only has one-second resolution, but multiple
# snapshots within the same second are quite likely. The list command
# output is ordered, so we append a three-digit running number to the
# creation timestamp and make sure snapshots sharing the same creation
# timestamp stay in the correct order
my $counter = 0;
my $creationsuffix;
while ($counter < 999) {
$creationsuffix = sprintf("%s%03d", $creation, $counter);
if (!defined $creationtimes{$creationsuffix}) {
$creationtimes{$creationsuffix} = 1;
last;
}
$counter += 1;
}
$snaps{$type}{$snap}{'creation'}=$creationsuffix;
$state = -1;
}
$state++;
}
return %snaps;
}
sub getbookmarks() {
my ($rhost,$fs,$isroot,%bookmarks) = @_;
my $mysudocmd;
@ -1593,9 +1708,9 @@ sub getsendsize {
if (defined($receivetoken)) {
$sendoptions = getoptionsline(\@sendoptions, ('e'));
} else {
$sendoptions = getoptionsline(\@sendoptions, ('D','L','R','c','e','h','p','v','w'));
$sendoptions = getoptionsline(\@sendoptions, ('D','L','R','c','e','h','p','w'));
}
my $getsendsizecmd = "$sourcessh $mysudocmd $zfscmd send $sendoptions -nP $snaps";
my $getsendsizecmd = "$sourcessh $mysudocmd $zfscmd send $sendoptions -nvP $snaps";
if ($debug) { print "DEBUG: getting estimated transfer size from source $sourcehost using \"$getsendsizecmd 2>&1 |\"...\n"; }
open FH, "$getsendsizecmd 2>&1 |";
@ -1783,6 +1898,7 @@ Options:
--source-bwlimit=<limit k|m|g|t> Bandwidth limit in bytes/kbytes/etc per second on the source transfer
--target-bwlimit=<limit k|m|g|t> Bandwidth limit in bytes/kbytes/etc per second on the target transfer
--mbuffer-size=VALUE Specify the mbuffer size (default: 16M), please refer to mbuffer(1) manual page.
--pv-options=OPTIONS Configure how pv displays the progress bar, default '-p -t -e -r -b'
--no-stream Replicates using newest snapshot instead of intermediates
--no-sync-snap Does not create new snapshot, only transfers existing
--create-bookmark Creates a zfs bookmark for the newest snapshot on the source after replication succeeds (only works with --no-sync-snap)
@ -1791,7 +1907,7 @@ Options:
--exclude=REGEX Exclude specific datasets which match the given regular expression. Can be specified multiple times
--sendoptions=OPTIONS Use advanced options for zfs send (the arguments are filtered as needed), e.g. syncoid --sendoptions="Lc e" sets zfs send -L -c -e ...
--recvoptions=OPTIONS Use advanced options for zfs receive (the arguments are filtered as needed), e.g. syncoid --recvoptions="ux recordsize o compression=lz4" sets zfs receive -u -x recordsize -o compression=lz4 ...
--sshkey=FILE Specifies a ssh public key to use to connect
--sshkey=FILE Specifies a ssh key to use to connect
--sshport=PORT Connects to remote on a particular port
--sshcipher|c=CIPHER Passes CIPHER to ssh to use a particular cipher set
--sshoption|o=OPTION Passes OPTION to ssh for remote usage. Can be specified multiple times


@ -10,7 +10,7 @@ set -x
POOL_NAME="sanoid-test-1"
POOL_TARGET="" # root
RESULT="/tmp/sanoid_test_result"
RESULT_CHECKSUM="68c67161a59d0e248094a66061972f53613067c9db52ad981030f36bc081fed7"
RESULT_CHECKSUM="92f2c7afba94b59e8a6f6681705f0aa3f1c61e4aededaa38281e0b7653856935"
# UTC timestamp of start and end
START="1483225200"


@ -13,7 +13,7 @@ set -x
POOL_NAME="sanoid-test-2"
POOL_TARGET="" # root
RESULT="/tmp/sanoid_test_result"
RESULT_CHECKSUM="0a6336ccdc948c69563cb56994d190aebbc9b21588aef17bb97e51ae074f879a"
RESULT_CHECKSUM="846372ef238f2182b382c77a73ecddf99aa82f28cc9995bcc95592cc78305463"
# UTC timestamp of start and end
START="1509141600"
@ -49,6 +49,6 @@ done
saveSnapshotList "${POOL_NAME}" "${RESULT}"
# hourly daily monthly
verifySnapshotList "${RESULT}" 72 3 1 "${RESULT_CHECKSUM}"
verifySnapshotList "${RESULT}" 73 3 1 "${RESULT_CHECKSUM}"
# one more hour because of DST


@ -61,9 +61,9 @@ function saveSnapshotList {
# clear the seconds for comparing
if [ "$unamestr" == 'FreeBSD' ]; then
sed -i '' 's/\(autosnap_[0-9][0-9][0-9][0-9]-[0-9][0-9]-[0-9][0-9]_[0-9][0-9]:[0-9][0-9]:\)[0-9][0-9]_/\100_/g' "${RESULT}"
sed -i '' 's/\(autosnap_[0-9][0-9][0-9][0-9]-[0-9][0-9]-[0-9][0-9]_[0-9][0-9]:[0-9][0-9]:\)[0-9][0-9]/\100/g' "${RESULT}"
else
sed -i 's/\(autosnap_[0-9][0-9][0-9][0-9]-[0-9][0-9]-[0-9][0-9]_[0-9][0-9]:[0-9][0-9]:\)[0-9][0-9]_/\100_/g' "${RESULT}"
sed -i 's/\(autosnap_[0-9][0-9][0-9][0-9]-[0-9][0-9]-[0-9][0-9]_[0-9][0-9]:[0-9][0-9]:\)[0-9][0-9]/\100/g' "${RESULT}"
fi
}


@ -23,7 +23,7 @@ function cleanUp {
# export pool in any case
trap cleanUp EXIT
zfs create "${POOL_NAME}"/src -o mountpoint="${MOUNT_TARGET}"
zfs create -o mountpoint="${MOUNT_TARGET}" "${POOL_NAME}"/src
../../../syncoid --debug --compress=none "${POOL_NAME}"/src "${POOL_NAME}"/dst
dd if=/dev/urandom of="${MOUNT_TARGET}"/big_file bs=1M count=200
@ -31,19 +31,16 @@ dd if=/dev/urandom of="${MOUNT_TARGET}"/big_file bs=1M count=200
../../../syncoid --debug --compress=none --source-bwlimit=2m "${POOL_NAME}"/src "${POOL_NAME}"/dst &
syncoid_pid=$!
sleep 5
list_descendants ()
{
local children=$(ps -o pid= --ppid "$1")
for pid in $children
do
list_descendants "$pid"
done
echo "$children"
function getcpid() {
cpids=$(pgrep -P "$1"|xargs)
for cpid in $cpids;
do
echo "$cpid"
getcpid "$cpid"
done
}
kill $(list_descendants $$) || true
kill $(getcpid $$) || true
wait
sleep 1


@ -23,7 +23,7 @@ function cleanUp {
# export pool in any case
trap cleanUp EXIT
zfs create "${POOL_NAME}"/src -o mountpoint="${MOUNT_TARGET}"
zfs create -o mountpoint="${MOUNT_TARGET}" "${POOL_NAME}"/src
../../../syncoid --debug --compress=none "${POOL_NAME}"/src "${POOL_NAME}"/dst
dd if=/dev/urandom of="${MOUNT_TARGET}"/big_file bs=1M count=200
@ -32,19 +32,16 @@ zfs snapshot "${POOL_NAME}"/src@big
../../../syncoid --debug --no-sync-snap --compress=none --source-bwlimit=2m "${POOL_NAME}"/src "${POOL_NAME}"/dst &
syncoid_pid=$!
sleep 5
list_descendants ()
{
local children=$(ps -o pid= --ppid "$1")
for pid in $children
do
list_descendants "$pid"
done
echo "$children"
function getcpid() {
cpids=$(pgrep -P "$1"|xargs)
for cpid in $cpids;
do
echo "$cpid"
getcpid "$cpid"
done
}
kill $(list_descendants $$) || true
kill $(getcpid $$) || true
wait
sleep 1