Removed unused plugins; plugins are now pulled from https://git.mgrote.net/mg/mirror-munin-contrib instead.
parent 1413693d91
commit 6a479c4ef1
17 changed files with 0 additions and 3811 deletions
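
The commit message states that the plugins are now pulled from the mirror repository rather than vendored here. A minimal sketch of such a checkout (the destination path is illustrative, not taken from this commit):

    # illustrative only: fetch the plugins from the mirror instead of vendoring them;
    # the destination path is an assumption, not taken from this commit
    git clone https://git.mgrote.net/mg/mirror-munin-contrib /opt/mirror-munin-contrib
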
extern/acng (vendored): 198 lines deleted
@@ -1,198 +0,0 @@

#!/usr/bin/perl

=head1 NAME

acng - Graph activity for Apt-Cacher NG, request count and bytes

=head1 APPLICABLE SYSTEMS

Systems with "Apt-Cacher NG" installed and running.

=head1 DESCRIPTION

This plugin will add graphs for "bytes in and out" and "requests in
and out" for systems with "Apt-Cacher NG" installed.

=head1 CONFIGURATION

The plugin must have permission to read the log of Apt-Cacher NG. (On
Debian 8, this file is world readable by default).

The path to the logfile can be set with the "logfile" environment
variable.

=head2 DEFAULT CONFIGURATION

  [acng]
  env.logfile /var/log/apt-cacher-ng/apt-cacher.log

=head1 USAGE

Link this plugin to /etc/munin/plugins/ and restart the munin-node.

=head1 MAGIC MARKERS

  #%# family=contrib
  #%# capabilities=autoconf

=head1 AUTHOR

Stig Sandbeck Mathisen

=head1 LICENSE

GPLv3

=cut

use strict;
use warnings;
use Munin::Plugin;

use Storable qw(nfreeze thaw);
use MIME::Base64;

my $logfile = $ENV{'logfile'} ||= '/var/log/apt-cacher-ng/apt-cacher.log';

need_multigraph;

# Read or initialize state used by the log tailer, and the plugin.
sub read_state {

    my ($pos, $statsin) = restore_state;
    my $stats = thaw(decode_base64 $statsin) if $statsin;

    $pos = 0 unless defined $pos;
    $stats = {} unless defined $stats;

    return ($pos, $stats);
}

# Write state.
#
# "pos" is logfile position, and "stats" is a data structure with
# counters used by the plugin.
#
# Note: Munin::Plugin::save_state has limited functionality, so the
# data structure is serialized and converted to plain text.
sub write_state {
    my ($pos, $stats) = @_;

    my $statsout = encode_base64 nfreeze($stats);
    save_state($pos, $statsout);
}

sub parse_logfile {
    my $logfile = shift;
    my ($pos, $stats) = read_state;

    my @keys = ( 'time', 'direction', 'size', 'client', 'file' );

    # Open log
    my ( $fh, $reset ) = tail_open( $logfile, $pos );

    die "Unable to open logfile\n" unless ($fh);

    while (<$fh>) {
        chomp;
        my @values = split( /\|/, $_ );

        my %logentry;
        @logentry{@keys} = @values;

        $stats->{'bytes'}{ $logentry{'direction'} } += $logentry{'size'};
        $stats->{'requests'}{ $logentry{'direction'} }++;
    }

    # Close log
    $pos = tail_close($fh);

    write_state($pos, $stats);

    return $stats;
}

sub print_autoconf{
    my $logfile = shift;
    if ( open(my $fh, '<', $logfile) ) {
        print "yes\n";
    }
    else {
        printf "no (could not open %s)\n", $logfile;
    }
}

sub print_config{
    my $stats = shift;

    print << 'EOC';
multigraph acng_bytes
graph_category acng
graph_title Apt-Cacher NG bytes
graph_order origin client
graph_vlabel bytes per ${graph_period}
graph_info Bytes transferred between origin, apt-cacher-ng and clients
origin.info bytes transferred between origin and apt-cacher-ng
origin.label origin
origin.type DERIVE
origin.min 0
client.info bytes transferred between apt-cacher-ng and clients
client.label client
client.type DERIVE
client.min 0
EOC
    print << "EOV" if $ENV{'MUNIN_CAP_DIRTYCONFIG'};
origin.value $stats->{bytes}{I}
client.value $stats->{bytes}{O}
EOV

    print << 'EOC';

multigraph acng_requests
graph_category acng
graph_title Apt-Cacher NG requests
graph_order origin client
graph_vlabel requests per ${graph_period}
graph_info Requests from clients to apt-cacher-ng, and from apt-cacher-ng to origin
origin.info requests from apt-cacher-ng to origin
origin.label origin
origin.type DERIVE
origin.min 0
client.info requests from clients to apt-cacher-ng
client.label client
client.type DERIVE
client.min 0
EOC

    print << "EOV" if $ENV{'MUNIN_CAP_DIRTYCONFIG'};
origin.value $stats->{requests}{I}
client.value $stats->{requests}{O}
EOV

}

sub print_values{
    my $stats = shift;

    print << "EOV";
multigraph acng_bytes
origin.value $stats->{bytes}{I}
client.value $stats->{bytes}{O}

multigraph acng_requests
origin.value $stats->{requests}{I}
client.value $stats->{requests}{O}
EOV
}

if ($ARGV[0] and $ARGV[0] eq 'autoconf') {
    print_autoconf($logfile);
}
elsif ($ARGV[0] and $ARGV[0] eq 'config') {
    my $stats = parse_logfile($logfile);
    print_config($stats);
}
else {
    my $stats = parse_logfile($logfile);
    print_values($stats);
}
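
The USAGE section above boils down to a symlink plus a node restart; the plugin's output can then be checked by hand with munin-run, standard munin-node tooling. A minimal sketch, assuming the plugin file itself was installed to /usr/share/munin/plugins:

    ln -s /usr/share/munin/plugins/acng /etc/munin/plugins/acng
    systemctl restart munin-node
    # both multigraphs (acng_bytes, acng_requests) should emit values:
    munin-run acng
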
extern/chrony (vendored): 124 lines deleted
@@ -1,124 +0,0 @@

#!/bin/sh

: <<=cut

=head1 NAME

parse Chrony Tracking output for timeserver status information

=head1 APPLICABLE SYSTEMS

Any system with a local chronyd service.

=head1 CONFIGURATION

No configuration.


=head1 VERSION

Revision 0.1 2008/08/23 13:06:00 joti

First version only chronyc tracking, autodetection included.

Revision 0.2 2008/10/11 16:09:00 joti

Added scaling of other values to match with frequency, added more description to fields

Revision 0.3 2014/02/16 zjttoefs

reduce forking by using awk
do not limit output precision
add stratum monitoring
detect slow/fast time or frequency and adjust sign of value accordingly
remove commented out code

Revision 0.4 2016/11/10 Lars Kruse

rewrite field handling
use "which" for "chronyc" location
switch from "bash" to "sh"
fix exit code of failing "autoconf"


=head1 AUTHOR

Copyright (C) 2008 joti

Copyright (C) 2014 zjttoefs

Copyright (C) 2016 Lars Kruse <devel@sumpfralle.de>


=head1 MAGIC MARKERS

 #%# family=auto
 #%# capabilities=autoconf

=cut

CHRONYC="$(which chronyc | head -1)"

# Frequency has extremely higher values than other. Therefore they are fitted by scaling via suitable factors.
# field definitions:
#  - munin fieldname
#  - factor for graph visualization (all values are supposed to reach a similar dimension)
#  - regular expression of the chrony output line (may not contain whitespace, case insensitive)
#  - label (may include "%d" for including the factor; may contain whitespace)
fields="stratum 1 ^Stratum Stratum
        systime 1000 ^System.time System Time (x%d)
        frequency 1 ^Frequency Frequency (ppm)
        residualfreq 100 ^Residual.freq Residual Freq (ppm, x%d)
        skew 100 ^Skew Skew (ppm, x%d)
        rootdelay 1000 ^Root.delay Root delay (seconds, x%d)
        rootdispersion 1000 ^Root.dispersion Root dispersion (seconds, x%d)"

# chrony example output (v2.4.1):
# Reference ID    : 131.188.3.221 (ntp1.rrze.uni-erlangen.de)
# Stratum         : 2
# Ref time (UTC)  : Thu Nov 10 22:39:50 2016
# System time     : 0.000503798 seconds slow of NTP time
# Last offset     : +0.000254355 seconds
# RMS offset      : 0.002186779 seconds
# Frequency       : 17.716 ppm slow
# Residual freq   : +0.066 ppm
# Skew            : 4.035 ppm
# Root delay      : 0.042980 seconds
# Root dispersion : 0.005391 seconds
# Update interval : 258.4 seconds
# Leap status     : Normal


if [ "$1" = "autoconf" ]; then
    if [ -n "$CHRONYC" ] && [ -x "$CHRONYC" ]; then
        echo yes
    else
        echo "no (missing 'chronyc' executable)"
    fi
    exit 0
fi

if [ "$1" = "config" ]; then
    echo 'graph_title Chrony Tracking Stats'
    echo 'graph_args --base 1000 -l 0'
    echo 'graph_vlabel (seconds,ppm)'
    echo 'graph_category time'
    echo "$fields" | while read fieldname factor regex label; do
        # insert the factor, if "%d" is part of the label
        printf "${fieldname}.label $label\n" "$factor"
        echo "${fieldname}.type GAUGE"
    done
    exit 0
fi

chrony_status="$("$CHRONYC" tracking)"
echo "$fields" | while read fieldname factor regex label; do
    status_line="$(echo "$chrony_status" | grep -i -- "$regex " | cut -d ":" -f 2-)"
    if [ -z "$status_line" ]; then
        value="U"
    else
        # the keyword "slow" indicates negative values
        value="$(echo "$status_line" | awk '{ /slow/ ? SIGN=-1 : SIGN=1; print $1 * SIGN * '"$factor"' }')"
    fi
    echo "${fieldname}.value $value"
done
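
The sign handling can be checked in isolation: feeding one line of the sample "chronyc tracking" output above through the plugin's own awk expression shows how "slow" flips the sign and the factor scales the value. A standalone sketch using the file's own example data:

    factor=1000
    echo " 0.000503798 seconds slow of NTP time" \
        | awk '{ /slow/ ? SIGN=-1 : SIGN=1; print $1 * SIGN * '"$factor"' }'
    # should print -0.503798
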
extern/docker_ (vendored): 552 lines deleted
@@ -1,552 +0,0 @@

#!/usr/bin/env python3
"""
=head1 NAME

docker_ - Docker wildcard-plugin to monitor a L<Docker|https://www.docker.com> host.

This wildcard plugin provides series C<containers>, C<images>, C<status>,
C<volumes>, C<cpu>, C<memory> and C<network> as separate graphs. It also
supports a C<multi> suffix that provides all of those as a multigraph.

=head1 INSTALLATION

- Copy this plugin in your munin plugins directory
- Install Python3 "docker" package

=over 2

If you want all the graphs as a multigraph, create a single multi symlink.

  ln -s /usr/share/munin/plugins/docker_ /etc/munin/plugins/docker_multi

Or choose a subset of those you want.

  ln -s /usr/share/munin/plugins/docker_ /etc/munin/plugins/docker_containers
  ln -s /usr/share/munin/plugins/docker_ /etc/munin/plugins/docker_cpu
  ln -s /usr/share/munin/plugins/docker_ /etc/munin/plugins/docker_images
  ln -s /usr/share/munin/plugins/docker_ /etc/munin/plugins/docker_memory
  ln -s /usr/share/munin/plugins/docker_ /etc/munin/plugins/docker_network
  ln -s /usr/share/munin/plugins/docker_ /etc/munin/plugins/docker_status
  ln -s /usr/share/munin/plugins/docker_ /etc/munin/plugins/docker_volumes

=back

After the installation you need to restart your munin-node:

=over 2

  systemctl restart munin-node

=back

=head1 CONFIGURATION

This plugin needs to run as root. You need to create a file named "docker" placed in the
directory /etc/munin/plugin-conf.d/ with the following config (you can also use
Docker environment variables here as described in
https://docs.docker.com/compose/reference/envvars/):

You can use the EXCLUDE_CONTAINER_NAME environment variable to specify a regular expression
which, if matched, will exclude the matching containers from the memory and cpu graphs.

For example

  env.EXCLUDE_CONTAINER_NAME runner

would exclude all containers with the word "runner" in the name.

=over 2

  [docker_*]
  group docker
  env.DOCKER_HOST unix://run/docker.sock
  env.EXCLUDE_CONTAINER_NAME regexp

=back

You may need to pick a different group depending on the name schema of your
distribution. Or maybe use "user root", if nothing else works.

=head1 AUTHORS

This section has been reverse-engineered from git logs

Codimp <contact@lithio.fr>: original rewrite

Rowan Wookey <admin@rwky.net>: performance improvement

Olivier Mehani <shtrom@ssji.net>: Network support, ClientWrapper, general cleanup, multigraph

=head1 MAGIC MARKERS

 #%# family=auto
 #%# capabilities=autoconf suggest multigraph

=cut
"""

import os
import sys
import re
try:
    from functools import cached_property
except ImportError:
    # If cached_property is not available,
    # just use the property decorator, without caching.
    # This is for backward compatibility with Python<3.8.
    cached_property = property
from multiprocessing import Process, Queue


def sorted_by_creation_date(func):
    def sorted_func(*args, **kwargs):
        return sorted(
            func(*args, **kwargs),
            key=(
                lambda x: x.attrs['CreatedAt']
                if 'CreatedAt' in x.attrs
                else x.attrs['Created']
            )
        )
    return sorted_func


def clean_fieldname(text):
    if text == "root":
        # "root" is a magic (forbidden) word
        return "_root"
    else:
        return re.sub(r"(^[^A-Za-z_]|[^A-Za-z0-9_])", "_", text)


class ClientWrapper:
    """
    A small wrapper for the docker client, to centralise some parsing logic,
    and support caching.

    In addition, when the exclude_re parameter is not None,
    any container whose name is matched by the RE will be excluded from reports.
    """
    client = None
    exclude = None

    def __init__(self, client, exclude_re=None):
        self.client = client
        if exclude_re:
            self.exclude = re.compile(exclude_re)

    @property
    def api(self):
        return self.client.api

    @cached_property
    @sorted_by_creation_date
    def all_containers(self):
        return [
            c for c in self.client.containers.list(all=True)
            if (c.status == 'running') and (not self.exclude or not self.exclude.search(c.name))
        ]

    @cached_property
    @sorted_by_creation_date
    def intermediate_images(self):
        return list(
            set(self.all_images)
            .difference(
                set(self.images)
                .difference(
                    set(self.dangling_images)
                )
            )
        )

    @cached_property
    @sorted_by_creation_date
    def all_images(self):
        return self.client.images.list(all=True)

    @cached_property
    @sorted_by_creation_date
    def images(self):
        images = self.client.images.list()
        return list(
            set(images)
            .difference(
                set(self.dangling_images))
        )

    @cached_property
    @sorted_by_creation_date
    def dangling_images(self):
        return self.client.images.list(filters={'dangling': True})

    @cached_property
    @sorted_by_creation_date
    def volumes(self):
        return self.client.volumes.list()


def container_summary(container, *args):
    summary = container.name
    attributes = container_attributes(container, *args)
    if attributes:
        summary += f' ({attributes})'
    return summary


def container_attributes(container, *args):
    attributes = container.image.tags
    attributes.append(container.attrs['Created'])
    return ', '.join(attributes + list(args))


def print_containers_status(client):
    running = []
    unhealthy = []
    paused = []
    created = []
    restarting = []
    removing = []
    exited = []
    dead = []
    for container in client.all_containers:
        if container.status == 'running':
            state = client.api.inspect_container(container.name)['State']
            if state.get('Health', {}).get('Status') == 'unhealthy':
                unhealthy.append(container)
            else:
                running.append(container)
        elif container.status == 'paused':
            paused.append(container)
        elif container.status == 'created':
            created.append(container)
        elif container.status == 'restarting':
            restarting.append(container)
        elif container.status == 'removing':
            removing.append(container)
        elif container.status == 'exited':
            exited.append(container)
        elif container.status == 'dead':
            dead.append(container)
    print('running.value', len(running))
    print('running.extinfo', ', '.join(container_summary(c) for c in running))
    print('unhealthy.value', len(unhealthy))
    print('unhealthy.extinfo', ', '.join(container_summary(c) for c in unhealthy))
    print('paused.value', len(paused))
    print('paused.extinfo', ', '.join(container_summary(c) for c in paused))
    print('created.value', len(created))
    print('created.extinfo', ', '.join(container_summary(c) for c in created))
    print('restarting.value', len(restarting))
    print('restarting.extinfo', ', '.join(container_summary(c) for c in restarting))
    print('removing.value', len(removing))
    print('removing.extinfo', ', '.join(container_summary(c) for c in removing))
    print('exited.value', len(exited))
    print('exited.extinfo', ', '.join(container_summary(c) for c in exited))
    print('dead.value', len(dead))
    print('dead.extinfo', ', '.join(container_summary(c) for c in dead))


def image_summary(image):
    attributes = image.tags
    attributes.append(image.attrs['Created'])
    attributes.append(f"{round(image.attrs['Size']/1024**2, 2)} MiB")
    return f"{image.short_id} ({', '.join(attributes)})"


def print_images_count(client):
    images = client.images
    intermediate = client.intermediate_images
    dangling = client.dangling_images

    print('intermediate_quantity.value', len(intermediate))
    print('intermediate_quantity.extinfo', ', '.join(image_summary(i) for i in intermediate))
    print('images_quantity.value', len(images))
    print('images_quantity.extinfo', ', '.join(image_summary(i) for i in images))
    print('dangling_quantity.value', len(dangling))
    print('dangling_quantity.extinfo', ', '.join(image_summary(i) for i in dangling))


def get_container_stats(container, q):
    q.put(container.stats(stream=False))


def parallel_container_stats(client):
    proc_list = []
    stats = {}
    for container in client.all_containers:
        q = Queue()
        p = Process(target=get_container_stats, args=(container, q))
        proc_list.append({'proc': p, 'queue': q, 'container': container})
        p.start()
    for proc in proc_list:
        proc['proc'].join()
        stats[proc['container']] = proc['queue'].get()
    return stats.items()


def print_containers_cpu(client):
    for container, stats in parallel_container_stats(client):
        cpu_percent = 0.0
        cpu_delta = (float(stats["cpu_stats"]["cpu_usage"]["total_usage"])
                     - float(stats["precpu_stats"]["cpu_usage"]["total_usage"]))
        system_delta = (float(stats["cpu_stats"]["system_cpu_usage"])
                        - float(stats["precpu_stats"]["system_cpu_usage"]))
        if system_delta > 0.0:
            cpu_percent = cpu_delta / system_delta * 100.0 * os.cpu_count()
        clean_container_name = clean_fieldname(container.name)
        print(clean_container_name + '.value', cpu_percent)
        print(clean_container_name + '.extinfo', container_attributes(container))


def print_containers_memory(client):
    for container, stats in parallel_container_stats(client):
        if 'total_rss' in stats['memory_stats']['stats']:  # cgroupv1 only?
            memory_usage = stats['memory_stats']['stats']['total_rss']
            extinfo = 'Resident Set Size'
        else:
            memory_usage = stats['memory_stats']['usage']
            extinfo = 'Total memory usage'
        clean_container_name = clean_fieldname(container.name)
        print(clean_container_name + '.value', memory_usage)
        print(clean_container_name + '.extinfo', container_attributes(container, extinfo))


def print_containers_network(client):
    for container, stats in parallel_container_stats(client):
        tx_bytes = 0
        rx_bytes = 0
        if "networks" in stats:
            for data in stats['networks'].values():
                tx_bytes += data['tx_bytes']
                rx_bytes += data['rx_bytes']
        clean_container_name = clean_fieldname(container.name)
        print(clean_container_name + '_up.value', tx_bytes)
        print(clean_container_name + '_down.value', rx_bytes)
        print(clean_container_name + '_up.extinfo', container_attributes(container))


def volume_summary(volume):
    summary = f"{volume.short_id}"
    if volume.attrs['Labels']:
        summary += f" ({', '.join(volume.attrs['Labels'])})"
    return summary


def status(client, mode):
    if mode == "config":
        print("graph_title Docker status")
        print("graph_vlabel containers")
        print("graph_category container")
        print("graph_total All containers")
        print("running.label RUNNING")
        print("running.draw AREASTACK")
        print("running.info Running containers can be manipulated with "
              "`docker container [attach|kill|logs|pause|restart|stop] <NAME>` or "
              "commands run in them with `docker container exec "
              "[--detach|--interactive,--privileged,--tty] <NAME> <COMMAND>`"
              )
        print("unhealthy.label UNHEALTHY")
        print("unhealthy.draw AREASTACK")
        print("unhealthy.warning 1")
        print("unhealthy.info Unhealthy containers can be restarted with "
              "`docker container restart <NAME>`")
        print("paused.label PAUSED")
        print("paused.draw AREASTACK")
        print("paused.info Paused containers can be resumed with "
              "`docker container unpause <NAME>`")
        print("created.label CREATED")
        print("created.draw AREASTACK")
        print("created.info New containers can be created with "
              "`docker container create --name <NAME> <IMAGE_ID >` or "
              "`docker container run --name <NAME> <IMAGE_ID> <COMMAND>`")
        print("restarting.label RESTARTING")
        print("restarting.draw AREASTACK")
        print("restarting.info Containers can be restarted with "
              "`docker container restart <NAME>`")
        print("removing.label REMOVING")
        print("removing.draw AREASTACK")
        print("removing.info Containers can be removed with "
              "`docker container rm <NAME>`")
        print("exited.label EXITED")
        print("exited.draw AREASTACK")
        print("exited.info Exited containers can be started with "
              "`docker container start [--attach] <NAME>`")
        print("dead.label DEAD")
        print("dead.draw AREASTACK")
        print("dead.warning 1")
        print("dead.info Dead containers can be started with "
              "`docker container start <NAME>`")
    else:
        print_containers_status(client)


def containers(client, mode):
    if mode == "config":
        print("graph_title Docker containers")
        print("graph_vlabel containers")
        print("graph_category container")
        print("containers_quantity.label Containers")
    else:
        print('containers_quantity.value', len(client.all_containers))


def images(client, mode):
    if mode == "config":
        print("graph_title Docker images")
        print("graph_vlabel images")
        print("graph_category container")
        print("graph_total All images")
        print("intermediate_quantity.label Intermediate images")
        print("intermediate_quantity.draw AREASTACK")
        print("intermediate_quantity.info All unused images can be deleted with "
              "`docker image prune --all`")
        print("images_quantity.label Images")
        print("images_quantity.draw AREASTACK")
        print("images_quantity.info Images can be used in containers with "
              "`docker container create --name <NAME> <IMAGE_ID >` or "
              "`docker container run --name <NAME> <IMAGE_ID> <COMMAND>`")
        print("dangling_quantity.label Dangling images")
        print("dangling_quantity.draw AREASTACK")
        print("dangling_quantity.info Dangling images can be deleted with "
              "`docker image prune` "
              "or tagged with `docker image tag <IMAGE_ID> <NAME>`")
        print("dangling_quantity.warning 10")
    else:
        print_images_count(client)


def volumes(client, mode):
    if mode == "config":
        print("graph_title Docker volumes")
        print("graph_vlabel volumes")
        print("graph_category container")
        print("volumes_quantity.label Volumes")
        print("volumes_quantity.draw AREASTACK")
        print("volumes_quantity.info Unused volumes can be deleted with "
              "`docker volume prune`")
    else:
        print('volumes_quantity.value', len(client.volumes))
        print('volumes_quantity.extinfo', ', '.join(volume_summary(v) for v in client.volumes))


def cpu(client, mode):
    if mode == "config":
        graphlimit = str(os.cpu_count() * 100)
        print("graph_title Docker containers CPU usage")
        print("graph_args --base 1000 -r --lower-limit 0 --upper-limit " + graphlimit)
        print("graph_scale no")
        print("graph_period second")
        print("graph_vlabel CPU usage (%)")
        print("graph_category container")
        print("graph_info This graph shows docker container CPU usage.")
        print("graph_total Total CPU usage")
        for container in client.all_containers:
            fieldname = clean_fieldname(container.name)
            print("{}.label {}".format(fieldname, container.name))
            print("{}.draw AREASTACK".format(fieldname))
            print("{}.info {}".format(fieldname, container_attributes(container)))
    else:
        print_containers_cpu(client)


def network(client, mode):
    if mode == "config":
        print("graph_title Docker containers network usage")
        print("graph_args --base 1024 -l 0")
        print("graph_vlabel bits in (-) / out (+) per ${graph_period}")
        print("graph_category container")
        print("graph_info This graph shows docker container network usage.")
        print("graph_total Total network usage")
        for container in client.all_containers:
            fieldname = clean_fieldname(container.name)
            print("{}_down.label {}_received".format(fieldname, container.name))
            print("{}_down.type DERIVE".format(fieldname))
            print("{}_down.min 0".format(fieldname))
            print("{}_down.graph no".format(fieldname))
            print("{}_down.cdef {}_down,8,*".format(fieldname, fieldname))
            print("{}_up.label {}".format(fieldname, container.name))
            print("{}_up.draw LINESTACK1".format(fieldname))
            print("{}_up.type DERIVE".format(fieldname))
            print("{}_up.min 0".format(fieldname))
            print("{}_up.negative {}_down".format(fieldname, fieldname))
            print("{}_up.cdef {}_up,8,*".format(fieldname, fieldname))
            print("{}_up.info {}".format(fieldname, container_attributes(container)))
    else:
        print_containers_network(client)


def memory(client, mode):
    if mode == "config":
        print("graph_title Docker containers memory usage")
        print("graph_args --base 1024 -l 0")
        print("graph_vlabel Bytes")
        print("graph_category container")
        print("graph_info This graph shows docker container memory usage.")
        print("graph_total Total memory usage")
        for container in client.all_containers:
            fieldname = clean_fieldname(container.name)
            print("{}.label {}".format(fieldname, container.name))
            print("{}.draw AREASTACK".format(fieldname))
            print("{}.info {}".format(fieldname, container_attributes(container)))
    else:
        print_containers_memory(client)


def main():
    series = [
        'containers',
        'cpu',
        'images',
        'memory',
        'network',
        'status',
        'volumes',
    ]

    try:
        mode = sys.argv[1]
    except IndexError:
        mode = ""
    wildcard = sys.argv[0].split("docker_")[1].split("_")[0]

    try:
        import docker
        client = docker.from_env()
        if mode == "autoconf":
            client.ping()
            print('yes')
            sys.exit(0)
    except Exception as e:
        print(f'no ({e})')
        if mode == "autoconf":
            sys.exit(0)
        sys.exit(1)

    if mode == "suggest":
        # The multigraph covers all other graphs,
        # so we only need to suggest one
        print("multi")
        sys.exit(0)

    client = ClientWrapper(client,
                           exclude_re=os.getenv('EXCLUDE_CONTAINER_NAME'))

    if wildcard in series:
        # dereference the function name by looking in the globals()
        # this assumes that the function name matches the series name exactly;
        # if this were to change, a different approach would be needed,
        # most likely using a Dict of series name string to callable
        globals()[wildcard](client, mode)
    elif wildcard == 'multi':
        for s in series:
            print(f'multigraph docker_{s}')
            # ditto
            globals()[s](client, mode)
    else:
        print(f'unknown series ({wildcard})', file=sys.stderr)
        sys.exit(1)


if __name__ == '__main__':
    main()
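
Following the plugin's own CONFIGURATION and INSTALLATION sections, a complete minimal setup might look like this (paths and group taken from the POD above; munin-run is the standard test tool):

    cat > /etc/munin/plugin-conf.d/docker <<'EOF'
    [docker_*]
    group docker
    env.DOCKER_HOST unix://run/docker.sock
    EOF
    ln -s /usr/share/munin/plugins/docker_ /etc/munin/plugins/docker_multi
    systemctl restart munin-node
    munin-run docker_multi config   # then: munin-run docker_multi
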
extern/ksm_ (vendored): 115 lines deleted
@@ -1,115 +0,0 @@

#!/usr/bin/env python3
#
# ksm
#
# Plugin to monitor ksm - Kernel Samepage Merging.
#
# Author: Markus Heberling <markus@tisoft.de>
#
# v1.0 2011-04-05 - First version
#
# Usage: place in /etc/munin/plugins/ (or link it there using ln -s)
#
# Parameters understood:
#
#   config (required)
#   autoconf (optional - used by munin-config)
#
# Magic markers - optional - used by installation scripts and
# munin-config:
#
#  #%# capabilities=autoconf suggest
#  #%# family=auto

import os
import sys
import warnings  # noqa

#################################
title = 'Kernel Samepage Merging'
#################################


def autoconf():
    if os.path.exists('/sys/kernel/mm/ksm/run'):
        for line in open('/sys/kernel/mm/ksm/run'):
            if line.strip() == '1':
                print('yes')
                break
        else:
            print('no (/sys/kernel/mm/ksm/run does not contain "1")')
    else:
        print('no (/sys/kernel/mm/ksm/run not found)')
    sys.exit(0)


def suggest():
    print('pages_absolute')
    print('pages_relative')
    print('full_scans')
    sys.exit(0)


def config():
    if('ksm_pages_absolute' in sys.argv[0]):
        print('graph_category system')
        print('graph_title %s Pages Absolute' % (title))
        print('graph_order pages_unshared pages_volatile pages_shared pages_sharing')
        print('pages_shared.info how many shared pages are being used')
        print('pages_sharing.info how many more sites are sharing them i.e. how much saved')
        print('pages_unshared.info how many pages unique but repeatedly checked for merging')
        print('pages_volatile.info how many pages changing too fast to be placed in a tree')
        print('pages_shared.label pages_shared')
        print('pages_sharing.label pages_sharing')
        print('pages_unshared.label pages_unshared')
        print('pages_volatile.label pages_volatile')
        print('pages_shared.draw AREASTACK')
        print('pages_sharing.draw AREASTACK')
        print('pages_unshared.draw AREASTACK')
        print('pages_volatile.draw AREASTACK')
    elif('ksm_pages_relative' in sys.argv[0]):
        print('graph_category system')
        print('graph_title %s Pages Relative' % (title))
        print('pages_sharing_shared.info ratio of sharing to shared pages')
        print('pages_unshared_sharing.info ratio of unshared to sharing pages')
        print('pages_sharing_shared.label pages_sharing_shared')
        print('pages_unshared_sharing.label pages_unshared_sharing')
        print('pages_sharing_shared.cdef pages_sharing_shared,100,*')
        print('pages_unshared_sharing.cdef pages_unshared_sharing,100,*')
    elif('ksm_full_scans' in sys.argv[0]):
        print('graph_category system')
        print('graph_title %s Full Scans' % (title))
        print('full_scans.info how many times all mergeable areas have been scanned')
        print('full_scans.label full_scans')
    sys.exit(0)


if len(sys.argv) > 1:
    if sys.argv[1] == 'autoconf':
        autoconf()
    elif sys.argv[1] == 'config':
        config()
    elif sys.argv[1] == 'suggest':
        suggest()
    elif sys.argv[1]:
        print('unknown argument "' + sys.argv[1] + '"')
        sys.exit(1)

pages_shared = int(open('/sys/kernel/mm/ksm/pages_shared').read())
pages_sharing = int(open('/sys/kernel/mm/ksm/pages_sharing').read())
pages_unshared = int(open('/sys/kernel/mm/ksm/pages_unshared').read())
pages_volatile = int(open('/sys/kernel/mm/ksm/pages_volatile').read())
full_scans = int(open('/sys/kernel/mm/ksm/full_scans').read())

if('ksm_pages_absolute' in sys.argv[0]):
    print('pages_shared.value %i' % pages_shared)
    print('pages_sharing.value %i' % pages_sharing)
    print('pages_unshared.value %i' % pages_unshared)
    print('pages_volatile.value %i' % pages_volatile)
elif('ksm_pages_relative' in sys.argv[0]):
    print('pages_sharing_shared.value %f'
          % (float(pages_sharing) / float(pages_shared) if pages_shared > 0 else 0))
    print('pages_unshared_sharing.value %f'
          % (float(pages_unshared) / float(pages_sharing) if pages_sharing > 0 else 0))
elif('ksm_full_scans' in sys.argv[0]):
    print('full_scans.value %i' % full_scans)
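
The plugin only ever reads counters from /sys/kernel/mm/ksm; the same raw values can be inspected directly, which is handy when checking the ratios plotted by ksm_pages_relative. A minimal sketch:

    grep . /sys/kernel/mm/ksm/pages_shared \
           /sys/kernel/mm/ksm/pages_sharing \
           /sys/kernel/mm/ksm/pages_unshared \
           /sys/kernel/mm/ksm/pages_volatile \
           /sys/kernel/mm/ksm/full_scans
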
extern/kvm_cpu (vendored): 137 lines deleted
@@ -1,137 +0,0 @@

#!/usr/bin/env python3
"""
=encoding utf8

=head1 NAME

kvm_cpu - show CPU usage of VM


=head1 CONFIGURATION

Parsed environment variables:

 vmsuffix: part of VM name to be removed


=head1 LICENSE

GPLv3

SPDX-License-Identifier: GPL-3.0-only


=head1 AUTHORS

Maxence Dunnewind

Rodolphe Quiédeville


=head1 MAGIC MARKERS

 #%# capabilities=autoconf
 #%# family=contrib

=cut
"""

import os
import re
import sys
from subprocess import Popen, PIPE


def config(vm_names):
    ''' Print the plugin's config
    @param vm_names : a list of "cleaned" vms' name
    '''
    percent = 100 * len(
        list(
            filter(
                lambda x: x[0:3] == 'cpu' and x[3] != ' ', open('/proc/stat', 'r').readlines())))

    base_config = """graph_title KVM Virtual Machine CPU usage
graph_vlabel %%
graph_category virtualization
graph_scale no
graph_period second
graph_info This graph shows the current CPU used by virtual machines
graph_args --base 1000 -r --lower-limit 0 --upper-limit %d""" % percent
    print(base_config)
    for vm in vm_names:
        print("%s_cpu.label %s" % (vm, vm))
        print("%s_cpu.min 0" % vm)
        print("%s_cpu.type DERIVE" % vm)
        print("%s_cpu.draw AREASTACK" % vm)
        print("%s_cpu.info percent of cpu time used by virtual machine" % vm)


def clean_vm_name(vm_name):
    ''' Replace all special chars
    @param vm_name : a vm's name
    @return cleaned vm's name
    '''
    # suffix part defined in conf
    suffix = os.getenv('vmsuffix')
    if suffix:
        vm_name = re.sub(suffix, '', vm_name)
    # proxmox uses kvm with -name parameter
    parts = vm_name.split('\x00')
    if parts[0].endswith('kvm'):
        try:
            return parts[parts.index('-name') + 1]
        except ValueError:
            pass
    return re.sub(r"[^a-zA-Z0-9_]", "_", vm_name)


def detect_kvm():
    ''' Check if kvm is installed '''
    kvm = Popen("which kvm", shell=True, stdout=PIPE)
    kvm.communicate()
    return not bool(kvm.returncode)


def find_vm_names(pids):
    '''Find and clean vm names from pids
    @return a dictionary of {pids : cleaned vm name}
    '''
    result = {}
    for pid in pids:
        cmdline = open("/proc/%s/cmdline" % pid, "r")
        result[pid] = clean_vm_name(
            re.sub(r"^.*guest=([a-zA-Z0-9.-_-]*).*$", r"\1", cmdline.readline()))
    return result


def list_pids():
    ''' Find the pid of kvm processes
    @return a list of pids from running kvm
    '''
    pid = Popen("pidof qemu-kvm qemu-system-x86_64 kvm", shell=True, stdout=PIPE)
    return pid.communicate()[0].decode().split()


def fetch(vms):
    ''' Fetch values for a list of pids
    @param dictionary {kvm_pid: cleaned vm name}
    '''
    for pid, name in vms.items():
        user, system = open("/proc/%s/stat" % pid, 'r').readline().split(' ')[13:15]
        print('%s_cpu.value %d' % (name, int(user) + int(system)))


if __name__ == "__main__":
    if len(sys.argv) > 1:
        if sys.argv[1] in ['autoconf', 'detect']:
            if detect_kvm():
                print("yes")
            else:
                print("no")
        elif sys.argv[1] == "config":
            config(find_vm_names(list_pids()).values())
        else:
            fetch(find_vm_names(list_pids()))
    else:
        fetch(find_vm_names(list_pids()))
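
The fetch routine reports fields 14 and 15 of /proc/PID/stat (utime and stime, in jiffies) as a DERIVE value, so munin turns the monotonically growing counters into a per-second rate. The same two fields can be read by hand for one KVM pid. A sketch, assuming at least one qemu-system-x86_64 process is running:

    pid="$(pidof qemu-system-x86_64 | awk '{print $1}')"
    awk '{print "utime:", $14, "stime:", $15}' "/proc/$pid/stat"
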
extern/kvm_io (vendored): 115 lines deleted
@@ -1,115 +0,0 @@

#!/usr/bin/env python
# -*- coding: utf-8 -*-
# vim: set fileencoding=utf-8
#
# Munin plugin to show io by vm
#
# Copyright Maxence Dunnewind, Rodolphe Quiédeville
#
# License : GPLv3
#
# parsed environment variables:
# vmsuffix: part of vm name to be removed
#
#%# capabilities=autoconf
#%# family=contrib

import re, os, sys
from subprocess import Popen, PIPE

def config(vm_names):
    ''' Print the plugin's config
    @param vm_names : a list of "cleaned" vms' name
    '''
    base_config = """graph_title KVM Virtual Machine IO usage
graph_vlabel Bytes read(-)/written(+) per second
graph_category virtualization
graph_info This graph shows the block device I/O used of virtual machines
graph_args --base 1024
"""
    print base_config

    for vm in vm_names:
        print "%s_read.label %s" % (vm, vm)
        print "%s_read.type COUNTER" % vm
        print "%s_read.min 0" % vm
        print "%s_read.info I/O used by virtual machine %s" % (vm, vm)
        print "%s_read.graph no" % vm
        print "%s_write.label %s" % (vm, vm)
        print "%s_write.type COUNTER" % vm
        print "%s_write.min 0" % vm
        print "%s_write.negative %s_read" % (vm, vm)
        print "%s_write.info I/O used by virtual machine %s" % (vm, vm)

def clean_vm_name(vm_name):
    ''' Replace all special chars
    @param vm_name : a vm's name
    @return cleaned vm's name
    '''
    # suffix part defined in conf
    suffix = os.getenv('vmsuffix')
    if suffix:
        vm_name = re.sub(suffix,'',vm_name)
    # proxmox uses kvm with -name parameter
    parts = vm_name.split('\x00')
    if (parts[0].endswith('kvm')):
        try:
            return parts[parts.index('-name')+1]
        except ValueError:
            pass
    return re.sub(r"[^a-zA-Z0-9_]", "_", vm_name)

def fetch(vms):
    ''' Fetch values for a list of pids
    @param dictionary {kvm_pid: cleaned vm name}
    '''
    res = {}
    for pid in vms:
        f = open("/proc/%s/io" % pid, "r")
        for line in f.readlines():
            if "read_bytes" in line:
                read = line.split()[1]
                print "%s_read.value %s" % (vms[pid], read)
            if "write_bytes" in line:
                write = line.split()[1]
                print "%s_write.value %s" % (vms[pid], write)
                break
        f.close()

def detect_kvm():
    ''' Check if kvm is installed
    '''
    kvm = Popen("which kvm", shell=True, stdout=PIPE)
    kvm.communicate()
    return not bool(kvm.returncode)

def find_vm_names(pids):
    '''Find and clean vm names from pids
    @return a dictionary of {pids : cleaned vm name}
    '''
    result = {}
    for pid in pids:
        cmdline = open("/proc/%s/cmdline" % pid, "r")
        result[pid] = clean_vm_name(re.sub(r"^.*guest=([a-zA-Z0-9.-_-]*).*$",r"\1", cmdline.readline()))
    return result

def list_pids():
    ''' Find the pid of kvm processes
    @return a list of pids from running kvm
    '''
    pid = Popen("pidof qemu-kvm qemu-system-x86_64 kvm", shell=True, stdout=PIPE)
    return pid.communicate()[0].split()

if __name__ == "__main__":
    if len(sys.argv) > 1:
        if sys.argv[1] in ['autoconf', 'detect']:
            if detect_kvm():
                print "yes"
            else:
                print "no"
        elif sys.argv[1] == "config":
            config(find_vm_names(list_pids()).values())
        else:
            fetch(find_vm_names(list_pids()))
    else:
        fetch(find_vm_names(list_pids()))
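
The plugin's values come straight from /proc/PID/io, reported as COUNTER fields so munin graphs the per-second rate. The counters can be confirmed manually. A sketch, assuming a running KVM process and root privileges (since /proc/PID/io is only readable by the process owner):

    pid="$(pidof qemu-system-x86_64 | awk '{print $1}')"
    grep -E '^(read|write)_bytes' "/proc/$pid/io"
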
extern/kvm_mem (vendored): 110 lines deleted
@@ -1,110 +0,0 @@

#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# vim: set fileencoding=utf-8
#
# Munin plugin to show amount of memory used by vm
#
# Copyright Maxence Dunnewind, Rodolphe Quiédeville, Adrien Pujol
#
# License : GPLv3
#
# parsed environment variables:
# vmsuffix: part of vm name to be removed
#
#%# capabilities=autoconf
#%# family=contrib

import re, os, sys
from subprocess import Popen, PIPE

def config(vm_names):
    ''' Print the plugin's config
    @param vm_names : a list of "cleaned" vms' name
    '''
    base_config = """graph_title KVM Virtual Machine Memory usage
graph_vlabel Bytes
graph_category virtualization
graph_info This graph shows the current amount of memory used by virtual machines
graph_args --base 1024 -l 0"""
    print(base_config)
    for vm in vm_names:
        print("%s_mem.label %s" % (vm, vm))
        print("%s_mem.type GAUGE" % vm)
        print("%s_mem.draw %s" % (vm, "AREASTACK"))
        print("%s_mem.info memory used by virtual machine %s" % (vm, vm))


def clean_vm_name(vm_name):
    ''' Replace all special chars
    @param vm_name : a vm's name
    @return cleaned vm's name
    '''
    # suffix part defined in conf
    suffix = os.getenv('vmsuffix')
    if suffix:
        vm_name = re.sub(suffix,'',vm_name)

    # proxmox uses kvm with -name parameter
    parts = vm_name.split('\x00')
    if (parts[0].endswith('kvm')):
        try:
            return parts[parts.index('-name')+1]
        except ValueError:
            pass

    return re.sub(r"[^a-zA-Z0-9_]", "_", vm_name)

def fetch(vms):
    ''' Fetch values for a list of pids
    @param dictionary {kvm_pid: cleaned vm name}
    '''
    res = {}
    for pid in vms:
        try:
            cmdline = open("/proc/%s/cmdline" % pid, "r")
            amount = re.sub(r"^.*-m\x00(.*)\x00-smp.*$",r"\1", cmdline.readline())
            amount = int(amount) * 1024 * 1024
            print("%s_mem.value %s" % (vms[pid], amount))
        except:
            cmdline = open("/proc/%s/cmdline" % pid, "r")
            amount = re.sub(r"^.*-m\x00(\d+).*$",r"\1", cmdline.readline())
            amount = int(amount) * 1024 * 1024
            print("%s_mem.value %s" % (vms[pid], amount))

def detect_kvm():
    ''' Check if kvm is installed
    '''
    kvm = Popen("which kvm", shell=True, stdout=PIPE)
    kvm.communicate()
    return not bool(kvm.returncode)

def find_vm_names(pids):
    '''Find and clean vm names from pids
    @return a dictionary of {pids : cleaned vm name}
    '''
    result = {}
    for pid in pids:
        cmdline = open("/proc/%s/cmdline" % pid, "r")
        result[pid] = clean_vm_name(re.sub(r"^.*guest=([a-zA-Z0-9.-_-]*).*$",r"\1", cmdline.readline()))
    return result

def list_pids():
    ''' Find the pid of kvm processes
    @return a list of pids from running kvm
    '''
    pid = Popen("pidof qemu-kvm qemu-system-x86_64 kvm", shell=True, stdout=PIPE, text=True)
    return pid.communicate()[0].split()

if __name__ == "__main__":
    if len(sys.argv) > 1:
        if sys.argv[1] in ['autoconf', 'detect']:
            if detect_kvm():
                print("yes")
            else:
                print("no")
        elif sys.argv[1] == "config":
            config(find_vm_names(list_pids()).values())
        else:
            fetch(find_vm_names(list_pids()))
    else:
        fetch(find_vm_names(list_pids()))
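
Note that the memory figure is not measured at runtime; it is parsed out of the -m argument in the VM's command line and multiplied up to bytes. The raw, null-separated argv can be inspected the same way. A sketch (tr makes the null-separated argv readable):

    pid="$(pidof qemu-system-x86_64 | awk '{print $1}')"
    tr '\0' ' ' < "/proc/$pid/cmdline"; echo
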
240
extern/kvm_net
vendored
240
extern/kvm_net
vendored
|
@ -1,240 +0,0 @@
|
||||||
#!/usr/bin/env python3
|
|
||||||
"""
|
|
||||||
|
|
||||||
=head1 NAME
|
|
||||||
|
|
||||||
kvm_net - Munin plugin to show the network I/O per VM
|
|
||||||
|
|
||||||
|
|
||||||
=head1 APPLICABLE SYSTEMS
|
|
||||||
|
|
||||||
Virtualization server with VMs based on KVM may be able to track the network
|
|
||||||
traffic of their VMs, if the KVM processes are started in a specific way.
|
|
||||||
|
|
||||||
Probably proxmox-based virtualization hosts fit into this category.
|
|
||||||
|
|
||||||
You can easily check if your KVM processes are started in the expected way, by
|
|
||||||
running the following command:
|
|
||||||
|
|
||||||
ps -ef | grep "netdev.*ifname="
|
|
||||||
|
|
||||||
The plugin can be used, if the above command outputs one line for every
|
|
||||||
currently running VM.
|
|
||||||
|
|
||||||
In all other cases you need to use other munin plugins instead, e.g. "libvirt".
|
|
||||||
|
|
||||||
|
|
||||||
=head1 CONFIGURATION
|
|
||||||
|
|
||||||
parsed environment variables:
|
|
||||||
|
|
||||||
* vmsuffix: part of vm name to be removed
|
|
||||||
|
|
||||||
|
|
||||||
=head1 AUTHOR
|
|
||||||
|
|
||||||
Copyright (C) 2012 - Igor Borodikhin
|
|
||||||
Copyright (C) 2018 - Lars Kruse <devel@sumpfralle.de>
|
|
||||||
|
|
||||||
|
|
||||||
=head1 LICENSE
|
|
||||||
|
|
||||||
GPLv3
|
|
||||||
|
|
||||||
|
|
||||||
=head1 MAGIC MARKERS
|
|
||||||
|
|
||||||
#%# capabilities=autoconf
|
|
||||||
#%# family=contrib
|
|
||||||
|
|
||||||
=cut
|
|
||||||
"""
|
|
||||||
|
|
||||||
import os
|
|
||||||
import re
|
|
||||||
from subprocess import Popen, PIPE
|
|
||||||
import sys
|
|
||||||
|
|
||||||
|
|
||||||
VM_NAME_REGEX = re.compile("^.*\x00-{arg_name}\x00(.+)\x00.*$")
|
|
||||||
KVM_INTERFACE_NAME_REGEX = re.compile("(?:^|,)ifname=([^,]+)(?:,|$)")
|
|
||||||
|
|
||||||
|
|
||||||
def config(vm_names):
|
|
||||||
""" Print the plugin's config
|
|
||||||
|
|
||||||
@param vm_names : a list of "cleaned" vms' name
|
|
||||||
"""
|
|
||||||
print("graph_title KVM Network I/O")
|
|
||||||
print("graph_vlabel Bytes rx(-)/tx(+) per second")
|
|
||||||
print("graph_category virtualization")
|
|
||||||
print("graph_args --base 1024")
|
|
||||||
print("graph_info This graph shows the network I/O of the virtual "
|
|
||||||
"machines. It is only usable for VMs that were started in a very "
|
|
||||||
"specific way. If you see no values in the diagrams, then you "
|
|
||||||
"should check, if the command \"ps -ef | grep 'netdev.*ifname='\" "
|
|
||||||
"returns one line of output for every running VM. If there is no "
|
|
||||||
"output, then you need to change the setup of your VMs or you need "
|
|
||||||
"to use a different munin plugin for monitoring the network traffic "
|
|
||||||
"(e.g. 'libvirt').")
|
|
||||||
print()
|
|
||||||
for vm in vm_names:
|
|
||||||
print("%s_in.label %s" % (vm, vm))
|
|
||||||
print("%s_in.type COUNTER" % vm)
|
|
||||||
print("%s_in.min 0" % vm)
|
|
||||||
print("%s_in.graph no" % vm)
|
|
||||||
print("%s_out.negative %s_in" % (vm, vm))
|
|
||||||
print("%s_out.label %s" % (vm, vm))
|
|
||||||
print("%s_out.type COUNTER" % vm)
|
|
||||||
print("%s_out.min 0" % vm)
|
|
||||||
|
|
||||||
|
|
||||||
def clean_vm_name(vm_name):
|
|
||||||
""" Replace all special chars
|
|
||||||
|
|
||||||
@param vm_name : a vm's name
|
|
||||||
@return cleaned vm's name
|
|
||||||
"""
|
|
||||||
# suffix part defined in conf
|
|
||||||
suffix = os.getenv("vmsuffix")
|
|
||||||
if suffix:
|
|
||||||
vm_name = re.sub(suffix, "", vm_name)
|
|
||||||
# proxmox uses kvm with -name parameter
|
|
||||||
parts = vm_name.split('\x00')
|
|
||||||
if (parts[0].endswith('kvm')):
|
|
||||||
try:
|
|
||||||
return parts[parts.index('-name')+1]
|
|
||||||
except ValueError:
|
|
||||||
pass
|
|
||||||
return re.sub(r"[^a-zA-Z0-9_]", "_", vm_name)
|
|
||||||
|
|
||||||
|
|
||||||
def fetch(vms):
|
|
||||||
""" Fetch values for a list of pids
|
|
||||||
|
|
||||||
@param dictionary {kvm_pid: cleaned vm name}
|
|
||||||
"""
|
|
||||||
for pid, vm_data in vms.items():
|
|
||||||
vm_interface_names = get_vm_network_interface_names(pid)
|
|
||||||
sum_incoming = 0
|
|
||||||
sum_outgoing = 0
|
|
||||||
interface_found = False
|
|
||||||
with open("/proc/net/dev", "r") as net_file:
|
|
||||||
for line in net_file.readlines():
|
|
||||||
tokens = line.split()
|
|
||||||
current_interface_name = tokens[0].rstrip(":").strip()
|
|
||||||
if current_interface_name in vm_interface_names:
|
|
||||||
sum_incoming += int(tokens[1])
|
|
||||||
sum_outgoing += int(tokens[9])
|
|
||||||
interface_found = True
|
|
||||||
if not interface_found:
|
|
||||||
# we want to distinguish "no traffic" from "not found"
|
|
||||||
sum_incoming = "U"
|
|
||||||
sum_outgoing = "U"
|
|
||||||
print("%s_in.value %s" % (vm_data, sum_incoming))
|
|
||||||
print("%s_out.value %s" % (vm_data, sum_outgoing))
|
|
||||||
|
|
||||||
|
|
||||||
def get_vm_network_interface_names(pid):
    """ Return the names of the network interfaces configured for a PID """
    result = set()
    for netdev_description in _get_kvm_process_arguments(pid, "netdev"):
        match = KVM_INTERFACE_NAME_REGEX.search(netdev_description)
        if match:
            result.add(match.groups()[0])
    return result


def detect_kvm():
    """ Check if kvm is installed """
    kvm = Popen(["which", "kvm"], stdout=PIPE)
    kvm.communicate()
    return kvm.returncode == 0


def find_vm_names(pids):
    """Find and clean vm names from pids

    @return a dictionary of {pids : cleaned vm name}
    """
    result = {}
    for pid in pids:
        name = None
        name_arg_values = _get_kvm_process_arguments(pid, "name")
        if name_arg_values:
            name_arg_value = name_arg_values[0]
            if "," in name_arg_value:
                # the modern parameter format may look like this:
                #   guest=foo,debug-threads=on
                for index, token in enumerate(name_arg_value.split(",")):
                    if (index == 0) and ("=" not in token):
                        # the first item may be the plain name
                        name = token
                    elif "=" in token:
                        key, value = token.split("=", 1)
                        if key == "guest":
                            name = value
                    else:
                        # unknown format (no "mapping")
                        pass
            else:
                name = name_arg_value
        if name is None:
            print("Failed to parse VM name from commandline of process: {}"
                  .format(name_arg_values), file=sys.stderr)
        else:
            result[pid] = clean_vm_name(name)
    return result


def _get_kvm_process_arguments(pid, arg_name):
    """ Parse all values with the given name from the process identified by PID.

    The result is a list of the tokens that follow this argument name. The
    result is empty in case of problems.
    """
    # the "cmdline" (e.g. /proc/self/cmdline) is a null-separated token list
    try:
        with open("/proc/%s/cmdline" % pid, "r") as cmdline_file:
            cmdline = cmdline_file.read()
    except IOError:
        # the process seems to have died meanwhile
        return []
    is_value = False
    result = []
    for arg_token in cmdline.split("\0"):
        if is_value:
            # the previous token was our argument name
            result.append(arg_token)
            is_value = False
        elif arg_token == "-{}".format(arg_name):
            # this is our argument name - we want to store the next value
            is_value = True
        else:
            # any other irrelevant value
            pass
    return result


def list_pids():
    """ Find the pids of kvm processes

    @return a list of pids from running kvm
    """
    pid = Popen(["pidof", "qemu-kvm", "qemu-system-x86_64", "kvm"], stdout=PIPE)
    return pid.communicate()[0].decode().split()


if __name__ == "__main__":
    action = sys.argv[1] if len(sys.argv) > 1 else None
    if action == "autoconf":
        if detect_kvm():
            print("yes")
        else:
            print("no")
    elif action == "config":
        vm_data = find_vm_names(list_pids())
        config(vm_data.values())
    else:
        vm_data = find_vm_names(list_pids())
        fetch(vm_data)
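The two building blocks of the plugin above are the null-separated /proc/<pid>/cmdline format and the ifname= regex. A self-contained sketch of the same parsing technique, with a made-up qemu command line instead of a live PID:

    import re

    # /proc/<pid>/cmdline is a null-separated token list; this sample is invented
    sample_cmdline = ("qemu-system-x86_64\x00-name\x00guest=vm1,debug-threads=on"
                      "\x00-netdev\x00type=tap,id=net0,ifname=tap100i0,script=no\x00")

    def args_after(cmdline, arg_name):
        """Collect the token following every occurrence of -<arg_name>."""
        tokens = cmdline.split("\x00")
        return [tokens[i + 1] for i, token in enumerate(tokens)
                if token == "-{}".format(arg_name) and i + 1 < len(tokens)]

    print(args_after(sample_cmdline, "name"))  # ['guest=vm1,debug-threads=on']
    match = re.search(r"(?:^|,)ifname=([^,]+)(?:,|$)",
                      args_after(sample_cmdline, "netdev")[0])
    print(match.group(1))  # tap100i0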
99 extern/lvm_ vendored
@@ -1,99 +0,0 @@
#!/bin/sh
# -*- sh -*-

: << EOF
=head1 NAME

lvm_ - Wildcard plugin for monitoring disk usage on LVM. Each Volume Group is graphed separately.

=head1 CONFIGURATION

This plugin needs to run as the root user in order to have permission to run sudo lvs and vgs

  [lvm_*]
  user root

=head1 AUTHOR

=over 4

=item * PatrickDK (Original Author)

=item * Niall Donegan

=back

=head1 LICENSE

Unknown license

=head1 MAGIC MARKERS

=begin comment

These magic markers are used by munin-node-configure when installing
munin-node.

=end comment

 #%# family=auto
 #%# capabilities=autoconf suggest

=cut

EOF

. $MUNIN_LIBDIR/plugins/plugin.sh


if [ "$1" = "autoconf" ]; then
    if ! command -v lvs >/dev/null; then
        echo "no (lvs not found)"
    elif ! command -v vgs >/dev/null; then
        echo "no (vgs not found)"
    else
        echo "yes"
    fi
    exit 0
fi

if [ "$1" = "suggest" ]; then
    sudo vgs -o vg_name --noheadings | sed -e 's/\ *//'
    exit 0
fi


vg=`echo $0 | awk '{ sub(".*lvm_","",\$1); print \$1; }'`

clean_name() {
    echo "$(clean_fieldname "$1")"
}


if [ "$1" = "config" ]; then

    echo "graph_title Logical Volume Usage ($vg)"
    echo 'graph_args --base 1024 -l 0'
    echo 'graph_category disk'
    echo 'graph_info This graph shows disk usage on the machine.'
    echo "free.label free"
    echo "free.draw AREA"
    sudo lvs --units b --nosuffix --noheadings | grep "$vg" | while read i; do
        name=`clean_name $i`
        echo -n "$name.label "
        echo $i | awk '{ print $1 }'
        echo "$name.draw STACK"
    done
    exit 0
fi

i=`sudo vgs --units b --nosuffix --noheadings | grep "$vg"`
echo -n "free.value "
echo $i | awk '{ print $7 }'

sudo lvs --units b --nosuffix --noheadings | grep "$vg" | while read i; do
    name=`clean_name $i`
    echo -n "$name.value "
    echo $i | awk '{ print $4 }'
done
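The fetch part above relies on fixed column positions: field 1 of `lvs` output is the LV name and field 4 its size in bytes, while field 7 of `vgs` is the free space. A rough Python illustration of that column mapping, using invented sample output rather than a live system:

    # sample output of: lvs --units b --nosuffix --noheadings (invented values)
    sample_lvs = """  root vg0 -wi-ao---- 21474836480
      swap vg0 -wi-ao---- 2147483648"""

    for line in sample_lvs.splitlines():
        fields = line.split()
        name, size = fields[0], int(fields[3])  # awk's $1 and $4 above
        print("%s.value %d" % (name, size))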
245 extern/nextcloud_ vendored
@@ -1,245 +0,0 @@
#!/bin/sh
# shellcheck shell=dash

set -e

: << =cut

=head1 NAME

nextcloud_ - Monitor usage of nextcloud instances

=head1 APPLICABLE SYSTEMS

Nextcloud instances

=head1 CONFIGURATION

Requires installed curl and jq, a command-line json processor.

This is a wildcard plugin. To monitor a nextcloud instance, link
nextcloud_<nextcloud-domain> to this file. You can even append a port
(:8443) to the file if needed. For example,

  ln -s /usr/share/munin/plugins/nextcloud_ \
        /etc/munin/plugins/nextcloud_cloud.domain.tld

Set username and password in your munin-node configuration

  [nextcloud_cloud.domain.tld]
  env.username <nextcloud_user>
  env.password <nextcloud_password>
  env.api_path <default: /ocs/v2.php/apps/serverinfo/api/v1/info>
  env.scheme <default: https>
  env.timeout <default: 2 (seconds)>
  env.updates_warning <default: 1>

It's advised to set an app password (for this plugin) in your nextcloud
instance and not to use the "real" password of your nextcloud user.

=head1 AUTHOR

Copyright (C) 2020 Sebastian L. (https://momou.ch),
Olivier Mehani <shtrom+munin@ssji.net>

=head1 LICENSE

GPLv2

=head1 MAGIC MARKERS

 #%# family=manual
 #%# capabilities=autoconf

=cut

# shellcheck disable=SC1090
. "$MUNIN_LIBDIR/plugins/plugin.sh"

if [ "${MUNIN_DEBUG:-0}" = 1 ]; then
    set -x
fi

API_PATH="${api_path:-/ocs/v2.php/apps/serverinfo/api/v1/info}?format=json"
DOMAIN="${0##*nextcloud_}"
SCHEME="${scheme:-https}://"
TIMEOUT="${timeout:-2}"
UPDATES_WARNING="${updates_warning:-1}"
CLEANDOMAIN="$(clean_fieldname "${DOMAIN}")"
USERNAME="${username:-}"
PASSWORD="${password:-}"

fetch_url () {
    curl -s -f -m "${TIMEOUT}" "$@"
}

case $1 in

    autoconf)
        if [ ! -x "$(command -v curl)" ]; then
            echo "no (curl not found)"
        elif [ ! -x "$(command -v jq)" ]; then
            echo "no (jq not found)"
        else
            fetch_url -I -u "${USERNAME}:${PASSWORD}" "${SCHEME}${DOMAIN}${API_PATH}" \
                | grep -iq "Content-Type: application/json" \
                && echo "yes" \
                || echo "no (invalid or empty response from nextcloud serverinfo api)"
        fi
        exit 0
        ;;
    config)
        cat << EOM
multigraph nextcloud_users_${CLEANDOMAIN}
graph_title Nextcloud users on ${DOMAIN}
graph_args --base 1000 -l 0
graph_printf %.0lf
graph_vlabel connected users
graph_info number of connected users
graph_category nextcloud
last5minutes.label last 5 minutes
last5minutes.info users connected in the last 5 minutes
last5minutes.min 0
last1hour.label last hour
last1hour.info users connected in the last hour
last1hour.min 0
last24hours.label last 24 hours
last24hours.info users connected in the last 24 hours
last24hours.min 0
num_users.label number of users
num_users.info total number of users
num_users.min 0
multigraph nextcloud_files_${CLEANDOMAIN}
graph_title Nextcloud files on ${DOMAIN}
graph_args --base 1000 -l 0
graph_printf %.0lf
graph_vlabel number of files
graph_info number of files
graph_category nextcloud
num_files.label number of files
num_files.info current number of files
num_files.min 0
multigraph nextcloud_shares_${CLEANDOMAIN}
graph_title Nextcloud shares on ${DOMAIN}
graph_args --base 1000 -l 0
graph_printf %.0lf
graph_vlabel number of shares
graph_info number of shares
graph_category nextcloud
num_shares.label total number of shares
num_shares.info current overall total of shares
num_shares.min 0
num_shares_user.label user shares
num_shares_user.info current total of user shares
num_shares_user.min 0
num_shares_groups.label group shares
num_shares_groups.info current total of group shares
num_shares_groups.min 0
num_shares_link.label link shares
num_shares_link.info current total of link shares
num_shares_link.min 0
num_shares_mail.label mail shares
num_shares_mail.info current total of mail shares
num_shares_mail.min 0
num_shares_room.label room shares
num_shares_room.info current total of room shares
num_shares_room.min 0
num_shares_link_no_password.label link shares without password protection
num_shares_link_no_password.info current total of link shares without password protection
num_shares_link_no_password.min 0
num_fed_shares_sent.label federated shares sent
num_fed_shares_sent.info current total of federated shares sent
num_fed_shares_sent.min 0
num_fed_shares_received.label federated shares received
num_fed_shares_received.info current total of federated shares received
num_fed_shares_received.min 0
multigraph nextcloud_dbsize_${CLEANDOMAIN}
graph_title Nextcloud database size on ${DOMAIN}
graph_args --base 1024 -l 0
graph_vlabel size in bytes
graph_info database size in bytes
graph_category nextcloud
db_size.label database size in bytes
db_size.info database size in bytes
db_size.draw AREA
db_size.min 0
multigraph nextcloud_storages_${CLEANDOMAIN}
graph_title Nextcloud storages on ${DOMAIN}
graph_args --base 1000 -l 0
graph_printf %.0lf
graph_vlabel number
graph_info number of storages
graph_category nextcloud
num_storages.label total number of storages
num_storages.info current total of storages
num_storages.min 0
num_storages_local.label number of local storages
num_storages_local.info current number of local storages
num_storages_local.min 0
num_storages_home.label number of home storages
num_storages_home.info current number of home storages
num_storages_home.min 0
num_storages_other.label number of other storages
num_storages_other.info current number of other storages
num_storages_other.min 0
multigraph nextcloud_apps_${CLEANDOMAIN}
graph_title Nextcloud apps on ${DOMAIN}
graph_args --base 1000 -l 0
graph_printf %.0lf
graph_vlabel apps
graph_info number of installed and updatable apps
graph_category nextcloud
num_updates_available.label available app updates
num_updates_available.info number of available app updates
num_updates_available.min 0
num_updates_available.warning ${UPDATES_WARNING}
num_installed.label installed apps
num_installed.info number of installed apps
num_installed.min 0
EOM
        exit 0
        ;;

esac


# fetch all values from the serverinfo API and print them as multigraph output
fetch_url -u "${USERNAME}:${PASSWORD}" "${SCHEME}${DOMAIN}${API_PATH}" \
    | sed 's/\\/\\\\/g' \
    | jq -r '.ocs.data
        | @text "
multigraph nextcloud_users_'"${CLEANDOMAIN}"'
last5minutes.value \(.activeUsers.last5minutes)
last1hour.value \(.activeUsers.last1hour)
last24hours.value \(.activeUsers.last24hours)
num_users.value \(.nextcloud.storage.num_users)

multigraph nextcloud_files_'"${CLEANDOMAIN}"'
num_files.value \(.nextcloud.storage.num_files)

multigraph nextcloud_storages_'"${CLEANDOMAIN}"'
num_storages.value \(.nextcloud.storage.num_storages)
num_storages_local.value \(.nextcloud.storage.num_storages_local)
num_storages_home.value \(.nextcloud.storage.num_storages_home)
num_storages_other.value \(.nextcloud.storage.num_storages_other)

multigraph nextcloud_shares_'"${CLEANDOMAIN}"'
num_shares.value \(.nextcloud.shares.num_shares)
num_shares_user.value \(.nextcloud.shares.num_shares_user)
num_shares_groups.value \(.nextcloud.shares.num_shares_groups)
num_shares_link.value \(.nextcloud.shares.num_shares_link)
num_shares_mail.value \(.nextcloud.shares.num_shares_mail)
num_shares_room.value \(.nextcloud.shares.num_shares_room)
num_shares_link_no_password.value \(.nextcloud.shares.num_shares_link_no_password)
num_fed_shares_sent.value \(.nextcloud.shares.num_fed_shares_sent)
num_fed_shares_received.value \(.nextcloud.shares.num_fed_shares_received)

multigraph nextcloud_dbsize_'"${CLEANDOMAIN}"'
db_size.value \(.server.database.size)

multigraph nextcloud_apps_'"${CLEANDOMAIN}"'
num_installed.value \(.nextcloud.system.apps.num_installed)
num_updates_available.value \(.nextcloud.system.apps.num_updates_available)
"' \
    | sed 's/ null$/ U/'
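The jq program above interpolates JSON fields straight into Munin's multigraph format, and the trailing sed turns missing values into Munin's "U" (unknown). A Python sketch of the same transformation, for readers who find the shell quoting hard to follow; the URL and credentials are placeholders:

    import base64
    import json
    import urllib.request

    url = "https://cloud.example.org/ocs/v2.php/apps/serverinfo/api/v1/info?format=json"
    request = urllib.request.Request(url)
    token = base64.b64encode(b"munin_user:app-password").decode()
    request.add_header("Authorization", "Basic " + token)
    data = json.load(urllib.request.urlopen(request, timeout=2))["ocs"]["data"]

    print("multigraph nextcloud_users_example")
    for key in ("last5minutes", "last1hour", "last24hours"):
        value = data["activeUsers"].get(key)
        # missing values become "U", mirroring the sed 's/ null$/ U/' fallback
        print("%s.value %s" % (key, "U" if value is None else value))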
111 extern/systemd_status vendored
@@ -1,111 +0,0 @@
#!/usr/bin/env python3
# pylint: disable=invalid-name
# pylint: enable=invalid-name

"""Munin plugin to monitor systemd service status.

=head1 NAME

systemd_status - monitor systemd service status, including normal services,
mounts, hotplugs and socket activations

=head1 APPLICABLE SYSTEMS

Linux systems with systemd installed.

=head1 CONFIGURATION

No configuration is required for this plugin.

The warning level for the systemd "failed" state is set to 0:0. If any
service enters the "failed" state, Munin will emit a warning.

=head1 AUTHOR

Kim B. Heino <b@bbbs.net>

=head1 LICENSE

GPLv2

=head1 MAGIC MARKERS

 #%# family=auto
 #%# capabilities=autoconf

=cut

"""

import os
import re
import subprocess
import sys


STATES = (
    'failed',
    'dead',
    'running',
    'exited',
    'active',
    'listening',
    'waiting',
    'plugged',
    'mounted',
)


def config():
    """Print plugin config."""
    print('graph_title systemd services')
    print('graph_vlabel Services')
    print('graph_category processes')
    print('graph_args --base 1000 --lower-limit 0')
    print('graph_scale no')
    print('graph_info Number of services in given activation state.')
    for state in STATES:
        print('{state}.label Services in {state} state'.format(state=state))
    print('failed.warning 0:0')
    if os.environ.get('MUNIN_CAP_DIRTYCONFIG') == '1':
        fetch()


def fetch():
    """Print runtime values."""
    # Get data
    try:
        # deb9/py3.5 doesn't have encoding parameter in subprocess
        output = subprocess.check_output(['/bin/systemctl', 'list-units'])
    except (OSError, subprocess.CalledProcessError):
        return
    output = output.decode('utf-8', 'ignore')

    # Parse data
    states = {state: 0 for state in STATES}
    for line in output.splitlines():
        token = line.split()
        if len(token) < 4:
            continue
        if len(token[0]) < 3:  # Skip failed-bullet
            token = token[1:]
        if token[0].endswith('.scope'):
            continue  # Ignore scopes
        if re.match(r'user.*@\d+\.service', token[0]):
            continue  # These fail randomly in older systemd
        if token[3] in states:
            states[token[3]] += 1

    # Output
    for state in STATES:
        print('{}.value {}'.format(state, states[state]))


|
|
||||||
if len(sys.argv) > 1 and sys.argv[1] == 'autoconf':
|
|
||||||
print('yes' if os.path.exists('/run/systemd/system') else
|
|
||||||
'no (systemd is not running)')
|
|
||||||
elif len(sys.argv) > 1 and sys.argv[1] == 'config':
|
|
||||||
config()
|
|
||||||
else:
|
|
||||||
fetch()
|
|
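The token juggling in fetch() exists mainly to cope with the bullet glyph that systemctl prints in front of failed units. On systemd versions whose systemctl supports --plain and --no-legend, the same counting can be sketched without that workaround:

    import subprocess
    from collections import Counter

    # --plain drops the bullet column, --no-legend drops header/footer lines
    output = subprocess.check_output(
        ['/bin/systemctl', 'list-units', '--plain', '--no-legend'])
    counts = Counter()
    for line in output.decode('utf-8', 'ignore').splitlines():
        token = line.split()
        if len(token) >= 4 and not token[0].endswith('.scope'):
            counts[token[3]] += 1  # 4th column is the unit's SUB state
    print(counts.get('running', 0))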
556 extern/tor_ vendored
@@ -1,556 +0,0 @@
#!/usr/bin/env python3
'''
=head1 NAME

tor_

=head1 DESCRIPTION

Wildcard plugin that gathers some metrics from the Tor daemon
(https://github.com/daftaupe/munin-tor).

Derived from https://github.com/mweinelt/munin-tor

This plugin requires the stem library (https://stem.torproject.org/).

This plugin requires the GeoIP library (https://www.maxmind.com) for the countries plugin.

Available plugins:

=over 4

=item tor_bandwidth - graph the global bandwidth

=item tor_connections - graph the number of connections

=item tor_countries - graph the countries represented in our connections

=item tor_dormant - graph if tor is dormant or not

=item tor_flags - graph the different flags of the relay

=item tor_routers - graph the number of routers seen by the relay

=item tor_traffic - graph the read/written traffic

=back

=head2 CONFIGURATION

The default configuration is:

  [tor_*]
  user toranon  # or any other user/group that is running tor
  group toranon
  env.torcachefile munin_tor_country_stats.json
  env.torconnectmethod port
  env.torgeoippath /usr/share/GeoIP/GeoIP.dat
  env.tormaxcountries 15
  env.torport 9051
  env.torsocket /var/run/tor/control

To make it connect through a socket, you simply need to change C<torconnectmethod>:

  env.torconnectmethod socket

=head1 COPYRIGHT

MIT License

SPDX-License-Identifier: MIT

=head1 AUTHOR

Pierre-Alain TORET <pierre-alain.toret@protonmail.com>

=head1 MAGIC MARKERS

 #%# family=auto
 #%# capabilities=autoconf suggest

=cut
'''

import collections
import json
import os
import sys

try:
    import GeoIP
    import stem
    import stem.control
    import stem.connection
    missing_dependency_error = None
except ImportError as exc:
    # missing dependencies are reported via "autoconf"
    # thus failure is acceptable here
    missing_dependency_error = str(exc)

default_torcachefile = 'munin_tor_country_stats.json'
default_torconnectmethod = 'port'
default_torgeoippath = '/usr/share/GeoIP/GeoIP.dat'
default_tormaxcountries = 15
default_torport = 9051
default_torsocket = '/var/run/tor/control'


class ConnectionError(Exception):
    """Error connecting to the controller"""


class AuthError(Exception):
    """Error authenticating to the controller"""


def authenticate(controller):
    try:
        controller.authenticate()
        return
    except stem.connection.MissingPassword:
        pass

    try:
        password = os.environ['torpassword']
    except KeyError:
        raise AuthError("Please configure the 'torpassword' "
                        "environment variable")

    try:
        controller.authenticate(password=password)
    except stem.connection.PasswordAuthFailed:
        print("Authentication failed (incorrect password)", file=sys.stderr)


def gen_controller():
    connect_method = os.environ.get('torconnectmethod', default_torconnectmethod)
    if connect_method == 'port':
        return stem.control.Controller.from_port(port=int(os.environ.get('torport',
                                                                         default_torport)))
    elif connect_method == 'socket':
        return stem.control.Controller.from_socket_file(path=os.environ.get('torsocket',
                                                                            default_torsocket))
    else:
        print("env.torconnectmethod contains an invalid value. "
              "Please specify either 'port' or 'socket'.", file=sys.stderr)
        sys.exit(1)


#########################
# Base Class
#########################


class TorPlugin(object):
    def __init__(self):
        raise NotImplementedError

    def conf(self):
        raise NotImplementedError

    @staticmethod
    def conf_from_dict(graph, labels):
        # header
        for key, val in graph.items():
            print('graph_{} {}'.format(key, val))
        # values
        for label, attributes in labels.items():
            for key, val in attributes.items():
                print('{}.{} {}'.format(label, key, val))

    @staticmethod
    def get_autoconf_status():
        try:
            import stem
        except ImportError as e:
            return 'no (failed to import the required python module "stem": {})'.format(e)
        try:
            import GeoIP  # noqa: F401
        except ImportError as e:
            return 'no (failed to import the required python module "GeoIP": {})'.format(e)
        try:
            with gen_controller() as controller:
                try:
                    authenticate(controller)
                    return 'yes'
                except stem.connection.AuthenticationFailure as e:
                    return 'no (Authentication failed: {})'.format(e)
        except stem.SocketError:
            return 'no (Connection failed)'

    @staticmethod
    def suggest():
        options = ['bandwidth', 'connections', 'countries', 'dormant', 'flags', 'routers',
                   'traffic']

        for option in options:
            print(option)

    def fetch(self):
        raise NotImplementedError


##########################
# Child Classes
##########################


class TorBandwidth(TorPlugin):
    def __init__(self):
        pass

    def conf(self):
        graph = {'title': 'Tor observed bandwidth',
                 'args': '-l 0 --base 1000',
                 'vlabel': 'bytes/s',
                 'category': 'tor',
                 'info': 'estimated capacity based on usage in bytes/s'}
        labels = {'bandwidth': {'label': 'bandwidth', 'min': 0, 'type': 'GAUGE'}}

        TorPlugin.conf_from_dict(graph, labels)

    def fetch(self):
        with gen_controller() as controller:
            try:
                authenticate(controller)
            except stem.connection.AuthenticationFailure as e:
                print('Authentication failed ({})'.format(e))
                return

            # Get fingerprint of our own relay to look up the descriptor for.
            # In Stem 1.3.0 and later, get_server_descriptor() will fetch the
            # relay's own descriptor if no argument is provided, so this will
            # no longer be needed.
            fingerprint = controller.get_info('fingerprint', None)
            if fingerprint is None:
                print("Error while reading fingerprint from Tor daemon", file=sys.stderr)
                sys.exit(1)

            response = controller.get_server_descriptor(fingerprint, None)
            if response is None:
                print("Error while getting server descriptor from Tor daemon", file=sys.stderr)
                sys.exit(1)
            print('bandwidth.value {}'.format(response.observed_bandwidth))


class TorConnections(TorPlugin):
    def __init__(self):
        pass

    def conf(self):
        graph = {'title': 'Tor connections',
                 'args': '-l 0 --base 1000',
                 'vlabel': 'connections',
                 'category': 'tor',
                 'info': 'OR connections by state'}
        labels = {'new': {'label': 'new', 'min': 0, 'max': 25000, 'type': 'GAUGE'},
                  'launched': {'label': 'launched', 'min': 0, 'max': 25000, 'type': 'GAUGE'},
                  'connected': {'label': 'connected', 'min': 0, 'max': 25000, 'type': 'GAUGE'},
                  'failed': {'label': 'failed', 'min': 0, 'max': 25000, 'type': 'GAUGE'},
                  'closed': {'label': 'closed', 'min': 0, 'max': 25000, 'type': 'GAUGE'}}

        TorPlugin.conf_from_dict(graph, labels)

    def fetch(self):
        with gen_controller() as controller:
            try:
                authenticate(controller)

                response = controller.get_info('orconn-status', None)
                if response is None:
                    print("No response from Tor daemon in TorConnection.fetch()", file=sys.stderr)
                    sys.exit(1)
                else:
                    connections = response.split('\n')
                    states = dict((state, 0) for state in stem.ORStatus)
                    for connection in connections:
                        states[connection.rsplit(None, 1)[-1]] += 1
                    for state, count in states.items():
                        print('{}.value {}'.format(state.lower(), count))
            except stem.connection.AuthenticationFailure as e:
                print('Authentication failed ({})'.format(e))


class TorCountries(TorPlugin):
    def __init__(self):
        # Configure plugin
        self.cache_dir_name = os.environ.get('torcachedir', None)
        if self.cache_dir_name is not None:
            self.cache_dir_name = os.path.join(
                self.cache_dir_name, os.environ.get('torcachefile', default_torcachefile))

        max_countries = os.environ.get('tormaxcountries', default_tormaxcountries)
        self.max_countries = int(max_countries)

        geoip_path = os.environ.get('torgeoippath', default_torgeoippath)
        self.geodb = GeoIP.open(geoip_path, GeoIP.GEOIP_MEMORY_CACHE)

    def conf(self):
        """Configure plugin"""

        graph = {'title': 'Tor countries',
                 'args': '-l 0 --base 1000',
                 'vlabel': 'countries',
                 'category': 'tor',
                 'info': 'OR connections by country'}
        labels = {}

        countries_num = self.top_countries()

        for c, v in countries_num:
            labels[c] = {'label': c, 'min': 0, 'max': 25000, 'type': 'GAUGE'}

        TorPlugin.conf_from_dict(graph, labels)

        # If needed, create cache file at config time
        if self.cache_dir_name:
            with open(self.cache_dir_name, 'w') as f:
                json.dump(countries_num, f)

    def fetch(self):
        """Generate metrics"""
        # Fallback if cache_dir_name is not set, unreadable or any other error
        countries_num = self.top_countries()
        # If possible, read cached data instead of doing the processing twice
        if self.cache_dir_name:
            try:
                with open(self.cache_dir_name) as f:
                    countries_num = json.load(f)
            except (IOError, ValueError):
                # use the fallback value above
                pass

        for c, v in countries_num:
            print("%s.value %d" % (c, v))

    @staticmethod
    def _gen_ipaddrs_from_statuses(controller):
        """Generate a sequence of ipaddrs for every network status"""
        for desc in controller.get_network_statuses():
            ipaddr = desc.address
            yield ipaddr

    @staticmethod
    def simplify(cn):
        """Simplify country name"""
        cn = cn.replace(' ', '_')
        cn = cn.replace("'", '_')
        cn = cn.split(',', 1)[0]
        return cn

    def _gen_countries(self, controller):
        """Generate a sequence of countries for every built circuit"""
        for ipaddr in self._gen_ipaddrs_from_statuses(controller):
            country = self.geodb.country_name_by_addr(ipaddr)
            if country is None:
                yield 'Unknown'
                continue

            yield self.simplify(country)

    def top_countries(self):
        """Build a list of top countries by number of circuits"""
        with gen_controller() as controller:
            try:
                authenticate(controller)
                c = collections.Counter(self._gen_countries(controller))
                return sorted(c.most_common(self.max_countries))
            except stem.connection.AuthenticationFailure as e:
                print('Authentication failed ({})'.format(e))
                return []


class TorDormant(TorPlugin):
    def __init__(self):
        pass

    def conf(self):
        graph = {'title': 'Tor dormant',
                 'args': '-l 0 --base 1000',
                 'vlabel': 'dormant',
                 'category': 'tor',
                 'info': 'Is Tor not building circuits because it is idle?'}
        labels = {'dormant': {'label': 'dormant', 'min': 0, 'max': 1, 'type': 'GAUGE'}}

        TorPlugin.conf_from_dict(graph, labels)

    def fetch(self):
        with gen_controller() as controller:
            try:
                authenticate(controller)

                response = controller.get_info('dormant', None)
                if response is None:
                    print("Error while reading dormant state from Tor daemon", file=sys.stderr)
                    sys.exit(1)
                print('dormant.value {}'.format(response))
            except stem.connection.AuthenticationFailure as e:
                print('Authentication failed ({})'.format(e))


class TorFlags(TorPlugin):
    def __init__(self):
        pass

    def conf(self):
        graph = {'title': 'Tor relay flags',
                 'args': '-l 0 --base 1000',
                 'vlabel': 'flags',
                 'category': 'tor',
                 'info': 'Flags active for relay'}
        labels = {flag: {'label': flag, 'min': 0, 'max': 1, 'type': 'GAUGE'} for flag in stem.Flag}

        TorPlugin.conf_from_dict(graph, labels)

    def fetch(self):
        with gen_controller() as controller:
            try:
                authenticate(controller)
            except stem.connection.AuthenticationFailure as e:
                print('Authentication failed ({})'.format(e))
                return

            # Get fingerprint of our own relay to look up the status entry for.
            # In Stem 1.3.0 and later, get_network_status() will fetch the
            # relay's own status entry if no argument is provided, so this will
            # no longer be needed.
            fingerprint = controller.get_info('fingerprint', None)
            if fingerprint is None:
                print("Error while reading fingerprint from Tor daemon", file=sys.stderr)
                sys.exit(1)

            response = controller.get_network_status(fingerprint, None)
            if response is None:
                print("Error while getting network status from Tor daemon", file=sys.stderr)
                sys.exit(1)
            for flag in stem.Flag:
                if flag in response.flags:
                    print('{}.value 1'.format(flag))
                else:
                    print('{}.value 0'.format(flag))


class TorRouters(TorPlugin):
    def __init__(self):
        pass

    def conf(self):
        graph = {'title': 'Tor routers',
                 'args': '-l 0',
                 'vlabel': 'routers',
                 'category': 'tor',
                 'info': 'known Tor onion routers'}
        labels = {'routers': {'label': 'routers', 'min': 0, 'type': 'GAUGE'}}
        TorPlugin.conf_from_dict(graph, labels)

    def fetch(self):
        with gen_controller() as controller:
            try:
                authenticate(controller)
            except stem.connection.AuthenticationFailure as e:
                print('Authentication failed ({})'.format(e))
                return
            response = controller.get_info('ns/all', None)
            if response is None:
                print("Error while reading ns/all from Tor daemon", file=sys.stderr)
                sys.exit(1)
            else:
                routers = response.split('\n')
                onr = 0
                for router in routers:
                    # router status entries start with "r"; startswith also
                    # copes safely with empty lines
                    if router.startswith("r"):
                        onr += 1

                print('routers.value {}'.format(onr))


class TorTraffic(TorPlugin):
    def __init__(self):
        pass

    def conf(self):
        graph = {'title': 'Tor traffic',
                 'args': '-l 0 --base 1024',
                 'vlabel': 'bytes/s',
                 'category': 'tor',
                 'info': 'bytes read/written'}
        labels = {'read': {'label': 'read', 'min': 0, 'type': 'DERIVE'},
                  'written': {'label': 'written', 'min': 0, 'type': 'DERIVE'}}

        TorPlugin.conf_from_dict(graph, labels)

    def fetch(self):
        with gen_controller() as controller:
            try:
                authenticate(controller)
            except stem.connection.AuthenticationFailure as e:
                print('Authentication failed ({})'.format(e))
                return

            response = controller.get_info('traffic/read', None)
            if response is None:
                print("Error while reading traffic/read from Tor daemon", file=sys.stderr)
                sys.exit(1)

            print('read.value {}'.format(response))

            response = controller.get_info('traffic/written', None)
            if response is None:
                print("Error while reading traffic/written from Tor daemon", file=sys.stderr)
                sys.exit(1)
            print('written.value {}'.format(response))


##########################
# Main
##########################


def main():
    if len(sys.argv) > 1:
        param = sys.argv[1].lower()
    else:
        param = 'fetch'

    if param == 'autoconf':
        print(TorPlugin.get_autoconf_status())
        sys.exit()
    elif param == 'suggest':
        TorPlugin.suggest()
        sys.exit()
    else:
        if missing_dependency_error is not None:
            print("Failed to run tor_ due to missing dependency: {}"
                  .format(missing_dependency_error), file=sys.stderr)
            sys.exit(1)
        # detect data provider
        if __file__.endswith('_bandwidth'):
            provider = TorBandwidth()
        elif __file__.endswith('_connections'):
            provider = TorConnections()
        elif __file__.endswith('_countries'):
            provider = TorCountries()
        elif __file__.endswith('_dormant'):
            provider = TorDormant()
        elif __file__.endswith('_flags'):
            provider = TorFlags()
        elif __file__.endswith('_routers'):
            provider = TorRouters()
        elif __file__.endswith('_traffic'):
            provider = TorTraffic()
        else:
            print('Unknown plugin name, try "suggest" for a list of possible ones.',
                  file=sys.stderr)
            sys.exit(1)

        if param == 'config':
            provider.conf()
        elif param == 'fetch':
            provider.fetch()
        else:
            print('Unknown parameter "{}"'.format(param), file=sys.stderr)
            sys.exit(1)


if __name__ == '__main__':
    main()
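All fetch() methods above follow the same stem pattern: open a controller, authenticate, GETINFO. Stripped of the Munin plumbing, the core is only a few lines; control port 9051 and password-less authentication are assumptions here:

    import stem.control

    # connect to the Tor control port and read the counters tor_traffic graphs
    with stem.control.Controller.from_port(port=9051) as controller:
        controller.authenticate()  # may need authenticate(password=...) instead
        print("read.value {}".format(controller.get_info("traffic/read")))
        print("written.value {}".format(controller.get_info("traffic/written")))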
361 extern/zfs_arcstats vendored
@@ -1,361 +0,0 @@
#!/bin/bash

: << =cut

=head1 NAME

zfs_arcstats - Munin multi-graph plugin to monitor ZFS ARC statistics

These functions are implemented:
  size     : to monitor ARC size
  activity : to monitor ARC activities
  actlist  : to monitor ARC activities by cache list (MFU/MRU)
  actdata  : to monitor ARC activities by data type (Demand/Prefetch)
  hitratio : to monitor ARC hit ratio

Tested with Solaris 10 and 11, OpenIndiana Hipster, FreeBSD 11, CentOS 7
This plugin is inspired by arcstat.pl [https://github.com/mharsch/arcstat]

=head1 CONFIGURATION

Make symlink:
  cd /path/to/munin/etc/plugins
  ln -s /path/to/munin/lib/plugins/zfs_arcstats .

For FreeBSD, it may be necessary to change the shebang /bin/bash -> /usr/local/bin/bash

=head1 ENVIRONMENT VARIABLES

None

=head1 AUTHOR

K.Cima https://github.com/shakemid

=head1 LICENSE

GPLv2

=head1 Magic markers

 #%# family=contrib
 #%# capabilities=autoconf

=cut

# Include plugin.sh
. "${MUNIN_LIBDIR:-}/plugins/plugin.sh"
is_multigraph "$@"

# Shell options
set -o nounset

# Set global variables
plugin_name=zfs_arcstats
functions='size activity actlist actdata hitratio'

# Functions

get_osname() {
    local osname osver

    osname=$( uname -s )
    osver=$( uname -v )

    case $osname in
        SunOS)
            case $osver in
                illumos*)
                    osname=illumos
                    ;;
            esac
            ;;
    esac

    echo "$osname"
}

preconfig() {
    local func=$1

    # data_attr format: field type draw label
    # label can contain white-spaces.

    case $func in
        size)
            global_attr="
                graph_title ZFS ARC - Size
                graph_category fs
                graph_args --base 1024 --lower-limit 0
                graph_vlabel Bytes
                graph_info ZFS ARC - Size
            "
            case $osname in
                SunOS)
                    # For Solaris 10,11
                    data_attr="
                        data_size          GAUGE AREASTACK Data size
                        prefetch_meta_size GAUGE AREASTACK Prefetch meta size
                        buf_size           GAUGE AREASTACK Buf size
                        other_size         GAUGE AREASTACK Other size
                    "
                    ;;
                *)
                    # For illumos, FreeBSD, Linux (OpenZFS)
                    data_attr="
                        data_size     GAUGE AREASTACK Data size
                        metadata_size GAUGE AREASTACK Metadata size
                        hdr_size      GAUGE AREASTACK Hdr size
                        other_size    GAUGE AREASTACK Other size
                        mru_size      GAUGE LINE      MRU size
                        mfu_size      GAUGE LINE      MFU size
                    "
                    ;;
            esac
            data_attr="
                $data_attr
                size GAUGE LINE ARC size
                c    GAUGE LINE Target size
                p    GAUGE LINE Target MRU size
            "
            ;;
        activity)
            global_attr="
                graph_title ZFS ARC - Activities
                graph_category fs
                graph_args --base 1000 --lower-limit 0
                graph_vlabel misses (-) / hits (+) per second
                graph_info ZFS ARC - Activities

                hits.negative misses
                l2_hits.negative l2_misses
            "
            data_attr="
                misses    DERIVE LINE dummy
                hits      DERIVE LINE ARC
                l2_misses DERIVE LINE dummy
                l2_hits   DERIVE LINE L2ARC
            "
            ;;
        actlist)
            global_attr="
                graph_title ZFS ARC - Activities by cache list
                graph_category fs
                graph_args --base 1000 --lower-limit 0
                graph_vlabel ghost hits (-) / hits (+) per second
                graph_info ZFS ARC - Activities by cache list

                mfu_hits.negative mfu_ghost_hits
                mru_hits.negative mru_ghost_hits
            "
            data_attr="
                mfu_ghost_hits DERIVE LINE dummy
                mfu_hits       DERIVE LINE MFU
                mru_ghost_hits DERIVE LINE dummy
                mru_hits       DERIVE LINE MRU
            "
            ;;
        actdata)
            global_attr="
                graph_title ZFS ARC - Activities by data type
                graph_category fs
                graph_args --base 1000 --lower-limit 0
                graph_vlabel misses (-) / hits (+) per second
                graph_info ZFS ARC - Activities by data type

                demand_data_hits.negative demand_data_misses
                demand_metadata_hits.negative demand_metadata_misses
                prefetch_data_hits.negative prefetch_data_misses
                prefetch_metadata_hits.negative prefetch_metadata_misses
            "
            data_attr="
                demand_data_misses       DERIVE LINE dummy
                demand_data_hits         DERIVE LINE D data
                demand_metadata_misses   DERIVE LINE dummy
                demand_metadata_hits     DERIVE LINE D meta
                prefetch_data_misses     DERIVE LINE dummy
                prefetch_data_hits       DERIVE LINE P data
                prefetch_metadata_misses DERIVE LINE dummy
                prefetch_metadata_hits   DERIVE LINE P meta
            "
            ;;
        hitratio)
            global_attr="
                graph_title ZFS ARC - Hit ratio
                graph_category fs
                graph_args --base 1000 --lower-limit 0 --upper-limit 100 --rigid
                graph_vlabel % hits
                graph_info ZFS ARC - Hit ratio - The graph shows cache hit ratio between munin-update intervals (usually 5 minutes).

                hitratio.cdef hits,DUP,misses,+,/,100,*
                l2_hitratio.cdef l2_hits,DUP,l2_misses,+,/,100,*
                demand_data_hitratio.cdef demand_data_hits,DUP,demand_data_misses,+,/,100,*
                demand_metadata_hitratio.cdef demand_metadata_hits,DUP,demand_metadata_misses,+,/,100,*
                prefetch_data_hitratio.cdef prefetch_data_hits,DUP,prefetch_data_misses,+,/,100,*
                prefetch_metadata_hitratio.cdef prefetch_metadata_hits,DUP,prefetch_metadata_misses,+,/,100,*
            "
            data_attr="
                hits                       DERIVE LINE  dummy
                misses                     DERIVE LINE  dummy
                l2_hits                    DERIVE LINE  dummy
                l2_misses                  DERIVE LINE  dummy
                demand_data_hits           DERIVE LINE  dummy
                demand_data_misses         DERIVE LINE  dummy
                demand_metadata_hits       DERIVE LINE  dummy
                demand_metadata_misses     DERIVE LINE  dummy
                prefetch_data_hits         DERIVE LINE  dummy
                prefetch_data_misses       DERIVE LINE  dummy
                prefetch_metadata_hits     DERIVE LINE  dummy
                prefetch_metadata_misses   DERIVE LINE  dummy
                hitratio                   GAUGE  LINE2 ARC hits
                l2_hitratio                GAUGE  LINE  L2ARC hits
                demand_data_hitratio       GAUGE  LINE  Demand data hits
                demand_metadata_hitratio   GAUGE  LINE  Demand metadata hits
                prefetch_data_hitratio     GAUGE  LINE  Prefetch data hits
                prefetch_metadata_hitratio GAUGE  LINE  Prefetch metadata hits
            "
            ;;
        *)
            echo "Unknown function: $func"
            exit 1
            ;;
    esac
}

do_config() {
    local func=$1
    local label_max_length=45
    local field type draw label

    preconfig "$func"
    echo "multigraph ${plugin_name}_${func}"

    # print global attributes
    echo "$global_attr" | sed -e 's/^ *//' -e '/^$/d'

    # print data source attributes
    echo "$data_attr" | while read -r field type draw label
    do
        [ -z "$field" ] && continue

        echo "${field}.type ${type}"
        echo "${field}.draw ${draw}"
        echo "${field}.label ${label:0:${label_max_length}}"
        if [ "$type" = 'DERIVE' ]; then
            echo "${field}.min 0"
        fi
        if [ "$label" = 'dummy' ]; then
            echo "${field}.graph no"
        fi
    done

    echo
}

get_stats() {
    local arcstats stat value

    case $osname in
        SunOS|illumos)
            arcstats=$( kstat -p 'zfs:0:arcstats' | sed -e 's/:/ /g' | awk '{ print $4,$5 }' )
            # kstat output example:
            #   $ kstat -p zfs:0:arcstats
            #   zfs:0:arcstats:c        4135233544
            #   ...
            ;;
        *BSD)
            arcstats=$( /sbin/sysctl -a | sed -n -e 's/^kstat\.zfs\.misc\.arcstats\.//p' | awk -F: '{ print $1,$2 }' )
            # sysctl output example:
            #   $ sysctl -a
            #   ...
            #   kstat.zfs.misc.arcstats.c: 632540160
            #   ...
            ;;
        Linux)
            arcstats=$( sed '1,2d' /proc/spl/kstat/zfs/arcstats | awk '{ print $1,$3 }' )
            # proc file output example:
            #   $ cat /proc/spl/kstat/zfs/arcstats
            #   ...
            #   name  type  data
            #   hits  4     62
            #   ...
            ;;
        *)
            echo "Unsupported OS: $osname"
            exit 1
    esac

    while read -r stat value
    do
        printf -v "arcstats_${stat}" "%s" "$value"
        # printf -v means indirect variable assignment (similar to eval)
    done <<< "$arcstats"
}

do_fetch() {
    local func=$1
    local field type draw label value ref

    preconfig "$func"
    echo "multigraph ${plugin_name}_${func}"

    echo "$data_attr" | while read -r field type draw label
    do
        [ -z "$field" ] && continue

        ref="arcstats_${field}"
        value=${!ref:-0}
        # ${!varname} means indirect evaluation (similar to eval)

        echo "${field}.value ${value}"
    done

    echo
}

autoconf() {
    if [ -x /sbin/zfs ]; then
        echo yes
    else
        echo "no (ZFS looks unavailable)"
    fi
}

config() {
    local func

    for func in $functions
    do
        do_config "$func"
    done
}

fetch() {
    local func

    get_stats

    for func in $functions
    do
        do_fetch "$func"
    done
}

# Main

osname=$( get_osname )

case ${1:-} in
    autoconf)
        autoconf
        ;;
    config)
        config
        if [ "${MUNIN_CAP_DIRTYCONFIG:-0}" = "1" ]; then fetch; fi
        ;;
    *)
        fetch
        ;;
esac

exit 0
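The Linux branch of get_stats() drops the two kstat header lines and keeps columns 1 and 3 of /proc/spl/kstat/zfs/arcstats. The same parsing in Python, for anyone adapting the plugin (file layout as in the comment above):

    # parse /proc/spl/kstat/zfs/arcstats into a {name: value} mapping
    stats = {}
    with open("/proc/spl/kstat/zfs/arcstats") as kstat_file:
        for line in list(kstat_file)[2:]:  # skip the two kstat header lines
            name, _kstat_type, data = line.split()
            stats[name] = int(data)
    print(stats["hits"], stats["misses"], stats["c"])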
99 extern/zfs_list vendored
@@ -1,99 +0,0 @@
#!/bin/bash
#
# Plugin to monitor ZFS Filesystems
# Author: Adam Michel (elfurbe@furbism.com)
# Description:
#   This is an extension of the zfs_fs plugin
#   modified as a multigraph to graph all zfs
#   filesystems it can find
#
# Tested on Ubuntu-14.04
#
# Parameters understood:
#
#   config   (required)
#   autoconf (optional - used by munin-config)
#
#%# family=auto

. "$MUNIN_LIBDIR/plugins/plugin.sh"

is_multigraph "$@"

if [ "$1" = "autoconf" ]; then
|
|
||||||
# Makes little sense to autoconf if you can't suggest
|
|
||||||
echo no
|
|
||||||
exit 0
|
|
||||||
fi
|
|
||||||
|
|
||||||
if [ "$1" = "suggest" ]; then
|
|
||||||
exit 0
|
|
||||||
fi
|
|
||||||
|
|
||||||
if [ "$1" = "config" ]; then
|
|
||||||
for i in `zfs list -Hp | awk '{print $1}'`; do
|
|
||||||
values=( $(zfs get -p usedbydataset,usedbychildren,usedbysnapshots,usedbyrefreservation,available,quota $i | awk 'BEGIN {total=0;} { if( NR==1 ) next; } !/quota/ {total=total+$3;} {print $3} END{print total;}') )
|
|
||||||
fsname=$(clean_fieldname $(echo "$i" | sed 's/\//__/g'))
|
|
||||||
|
|
||||||
echo <<EOF "multigraph zfs_list_$fsname
|
|
||||||
graph_title $fsname usage
|
|
||||||
graph_order usedbydataset usedbychildren usedbysnapshots usedbyrefreservation available total quota
|
|
||||||
graph_args --base 1024 -r -l 0 --vertical-label Bytes --upper-limit ${values[6]}
|
|
||||||
graph_info This graph shows how is used a zfs filesystems.
|
|
||||||
graph_category fs
|
|
||||||
graph_period second
|
|
||||||
usedbydataset.label UsedByDataset
|
|
||||||
usedbydataset.draw AREA
|
|
||||||
usedbydataset.info Used space by Dataset
|
|
||||||
usedbydataset.colour FF0000
|
|
||||||
usedbychildren.label UsedByChildren
|
|
||||||
usedbychildren.draw STACK
|
|
||||||
usedbychildren.info Used space by children
|
|
||||||
usedbychildren.colour FFCC33
|
|
||||||
usedbysnapshots.label UsedBySnapshots
|
|
||||||
usedbysnapshots.draw STACK
|
|
||||||
usedbysnapshots.info Used space by snapshot
|
|
||||||
usedbysnapshots.colour 0000FF
|
|
||||||
usedbyrefreservation.label Usedbyrefreservation
|
|
||||||
usedbyrefreservation.draw STACK
|
|
||||||
usedbyrefreservation.info Used space by Ref Reservation
|
|
||||||
usedbyrefreservation.colour 33CCFF
|
|
||||||
available.label Available
|
|
||||||
available.draw STACK
|
|
||||||
available.info Free space
|
|
||||||
available.colour 00FF00
|
|
||||||
total.label Total
|
|
||||||
total.draw LINE1
|
|
||||||
total.info Total
|
|
||||||
total.colour 000000
|
|
||||||
quota.label Quota
|
|
||||||
quota.draw LINE1
|
|
||||||
quota.info Quota
|
|
||||||
quota.colour 555555"
|
|
||||||
EOF
|
|
||||||
done
|
|
||||||
exit 0
|
|
||||||
fi
|
|
||||||

for i in `zfs list -Hp | awk '{print $1}'`; do
    values=( $(zfs get -p usedbydataset,usedbychildren,usedbysnapshots,usedbyrefreservation,available,quota $i | awk 'BEGIN {total=0;} { if( NR==1 ) next; } !/quota/ {total=total+$3;} {print $3} END{print total;}') )
    fsname=$(clean_fieldname $(echo "$i" | sed 's/\//__/g'))

    if [ "${values[5]}" = "-" ]; then
        quota=0
    else
        quota=${values[5]}
    fi

    cat <<EOF
multigraph zfs_list_$fsname
usedbydataset.value ${values[0]}
usedbysnapshots.value ${values[2]}
usedbychildren.value ${values[1]}
usedbyrefreservation.value ${values[3]}
available.value ${values[4]}
total.value ${values[6]}
quota.value $quota
EOF
done

exit 0
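The awk program above prints the six requested properties in order plus a computed total that sums everything except quota. A Python rendering of the same aggregation over `zfs get -Hp` output; the dataset name is a placeholder:

    import subprocess

    props = ("usedbydataset,usedbychildren,usedbysnapshots,"
             "usedbyrefreservation,available,quota")
    # -H: tab-separated script output, -p: exact numeric values
    out = subprocess.check_output(
        ["zfs", "get", "-Hp", "-o", "property,value", props, "tank/data"])
    values = dict(line.split("\t") for line in out.decode().splitlines())
    total = sum(int(values[p]) for p in props.split(",") if p != "quota")
    print("total.value %d" % total)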
355 extern/zfsonlinux_stats_ vendored
@@ -1,355 +0,0 @@
#!/usr/bin/env bash
# ZFS statistics for ZFSonLinux
# Author: Adam Michel (elfurbe@furbism.com)
#
# Description:
#   This is a modification of the zfs_stats
#   plugin by David Bjornsson (which was a
#   rewrite of zfs-stats-for-freebsd scripts
#   by patpro) modified to work with ZFSonLinux.
#
# Tested on Ubuntu-14.04
#
# Usage: zfs_stats_FUNCTION
#
# Available functions:
#   efficiency    - ARC efficiency
#   cachehitlist  - Cache hit by cache list
#   cachehitdtype - Cache hit by data type
#   dmuprefetch   - DMU prefetch
#   utilization   - ARC size breakdown
#   l2utilization - L2ARC size breakdown
#   l2efficiency  - L2ARC efficiency
#
#%# family=auto

FUNCTION=$(basename $0 | cut -d_ -f3)
MEMMAX=`cat /proc/meminfo | grep MemTotal | awk '{print $2}'`
BC='/usr/bin/bc -q'
ARCSTATS="/proc/spl/kstat/zfs/arcstats"
ZFETCHSTATS="/proc/spl/kstat/zfs/zfetchstats"

#
# Pull all values from arcstats
#

while read name type data
do
	[[ $name =~ ^[0-9].* ]] && continue
	[[ $name == "name" ]] && continue
	[[ $name == "" ]] && continue
	case $name in
		"hits" )
			export ARC_HITS=$data
			;;
		"misses" )
			export ARC_MISSES=$data
			;;
		"p" )
			export MRU_SIZE=$data
			;;
		"c_max" )
			export MAX_SIZE=$data
			;;
		"c_min" )
			export MIN_SIZE=$data
			;;
		"c" )
			export TARGET_SIZE=$data
			;;
		* )
			VARNAME=`echo $name | tr '[:lower:]' '[:upper:]'`
			export $VARNAME=$data
			;;
	esac
done < $ARCSTATS
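For reference, a hypothetical excerpt of what this loop reads: kstat files start with a numeric header line and a column-name line (skipped by the `^[0-9]` and `name` guards), followed by name/type/data triples:

    13 1 0x01 96 ...
    name                type  data
    hits                4     123456789
    misses              4     2345678
    c                   4     8589934592

Every counter therefore lands in a same-named upper-case shell variable (e.g. MFU_HITS), with a handful renamed explicitly by the case statement.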

#
# Pull all values from zfetchstats
#

while read name type data
do
	[[ $name =~ ^[0-9].* ]] && continue
	[[ $name == "name" ]] && continue
	case $name in
		"hits" )
			export DMU_HITS=$data
			;;
		"misses" )
			export DMU_MISSES=$data
			;;
		* )
			VARNAME=`echo $name | tr '[:lower:]' '[:upper:]'`
			export $VARNAME=$data
			;;
	esac
done < $ZFETCHSTATS
#
# Calculation macros
#

ANON_HITS=`echo "$ARC_HITS-($MFU_HITS+$MRU_HITS+$MFU_GHOST_HITS+$MRU_GHOST_HITS)" | $BC`
ARC_ACCESSES_TOTAL=`echo "$ARC_HITS+$ARC_MISSES" | $BC`
DEMAND_DATA_TOTAL=`echo "$DEMAND_DATA_HITS+$DEMAND_DATA_MISSES" | $BC`
PREFETCH_DATA_TOTAL=`echo "$PREFETCH_DATA_HITS+$PREFETCH_DATA_MISSES" | $BC`
REAL_HITS=`echo "$MFU_HITS+$MRU_HITS" | $BC`

if [ $ARC_ACCESSES_TOTAL != 0 ]; then
	CACHE_HIT_RATIO_PERC=`echo "scale=2 ; (100*$ARC_HITS/$ARC_ACCESSES_TOTAL)" | $BC`
	CACHE_MISS_RATIO_PERC=`echo "scale=2 ; (100*$ARC_MISSES/$ARC_ACCESSES_TOTAL)" | $BC`
	ACTUAL_HIT_RATIO_PERC=`echo "scale=2 ; (100*$REAL_HITS/$ARC_ACCESSES_TOTAL)" | $BC`
else
	CACHE_HIT_RATIO_PERC=0
	CACHE_MISS_RATIO_PERC=0
	ACTUAL_HIT_RATIO_PERC=0
fi

if [ $DEMAND_DATA_TOTAL != 0 ]; then DATA_DEMAND_EFFICIENCY_PERC=`echo "scale=2 ; (100*$DEMAND_DATA_HITS/$DEMAND_DATA_TOTAL)" | $BC`; else DATA_DEMAND_EFFICIENCY_PERC=0; fi
if [ $PREFETCH_DATA_TOTAL != 0 ]; then DATA_PREFETCH_EFFICIENCY_PERC=`echo "scale=2 ; (100*$PREFETCH_DATA_HITS/$PREFETCH_DATA_TOTAL)" | $BC`; else DATA_PREFETCH_EFFICIENCY_PERC=0; fi

if [ $ARC_HITS != 0 ]; then
	ANONYMOUSLY_USED_PERC=`echo "scale=2 ; (100*$ANON_HITS/$ARC_HITS)" | $BC`
	MOST_RECENTLY_USED_PERC=`echo "scale=2 ; (100*$MRU_HITS/$ARC_HITS)" | $BC`
	MOST_FREQUENTLY_USED_PERC=`echo "scale=2 ; (100*$MFU_HITS/$ARC_HITS)" | $BC`
	MOST_RECENTLY_USED_GHOST_PERC=`echo "scale=2 ; (100*$MRU_GHOST_HITS/$ARC_HITS)" | $BC`
	MOST_FREQUENTLY_USED_GHOST_PERC=`echo "scale=2 ; (100*$MFU_GHOST_HITS/$ARC_HITS)" | $BC`

	DEMAND_DATA_HIT_PERC=`echo "scale=2 ; (100*$DEMAND_DATA_HITS/$ARC_HITS)" | $BC`
	PREFETCH_DATA_HIT_PERC=`echo "scale=2 ; (100*$PREFETCH_DATA_HITS/$ARC_HITS)" | $BC`
	DEMAND_METADATA_HIT_PERC=`echo "scale=2 ; (100*$DEMAND_METADATA_HITS/$ARC_HITS)" | $BC`
	PREFETCH_METADATA_HIT_PERC=`echo "scale=2 ; (100*$PREFETCH_METADATA_HITS/$ARC_HITS)" | $BC`
else
	ANONYMOUSLY_USED_PERC=0
	MOST_RECENTLY_USED_PERC=0
	MOST_FREQUENTLY_USED_PERC=0
	MOST_RECENTLY_USED_GHOST_PERC=0
	MOST_FREQUENTLY_USED_GHOST_PERC=0

	DEMAND_DATA_HIT_PERC=0
	PREFETCH_DATA_HIT_PERC=0
	DEMAND_METADATA_HIT_PERC=0
	PREFETCH_METADATA_HIT_PERC=0
fi

if [ $ARC_MISSES != 0 ]; then
	PREFETCH_METADATA_MISSES_PERC=`echo "scale=2 ; (100*$PREFETCH_METADATA_MISSES/$ARC_MISSES)" | $BC`
	DEMAND_DATA_MISS_PERC=`echo "scale=2 ; (100*$DEMAND_DATA_MISSES/$ARC_MISSES)" | $BC`
	PREFETCH_DATA_MISS_PERC=`echo "scale=2 ; (100*$PREFETCH_DATA_MISSES/$ARC_MISSES)" | $BC`
	DEMAND_METADATA_MISS_PERC=`echo "scale=2 ; (100*$DEMAND_METADATA_MISSES/$ARC_MISSES)" | $BC`
else
	PREFETCH_METADATA_MISSES_PERC=0
	DEMAND_DATA_MISS_PERC=0
	PREFETCH_DATA_MISS_PERC=0
	DEMAND_METADATA_MISS_PERC=0
fi

DMU_TOTAL=`echo "$DMU_HITS+$DMU_MISSES" | $BC`
if [ $DMU_TOTAL != 0 ]; then
	DMU_HITS_PERC=`echo "scale=2 ; (100*$DMU_HITS/$DMU_TOTAL)" | $BC`
	DMU_MISSES_PERC=`echo "scale=2 ; (100*$DMU_MISSES/$DMU_TOTAL)" | $BC`
else
	DMU_HITS_PERC=0
	DMU_MISSES_PERC=0
fi

if [ $SIZE -gt $TARGET_SIZE ]; then
	MFU_SIZE=`echo "$SIZE-$MRU_SIZE" | $BC`
else
	MFU_SIZE=`echo "$TARGET_SIZE-$MRU_SIZE" | $BC`
fi

L2_ACCESSES_TOTAL=`echo "$L2_HITS+$L2_MISSES" | $BC`
if [ $L2_ACCESSES_TOTAL -gt 0 ]; then
	L2_HIT_RATIO_PERC=`echo "scale=2 ; (100*$L2_HITS/$L2_ACCESSES_TOTAL)" | $BC`
	L2_MISS_RATIO_PERC=`echo "scale=2 ; (100*$L2_MISSES/$L2_ACCESSES_TOTAL)" | $BC`
else
	L2_HIT_RATIO_PERC=0
	L2_MISS_RATIO_PERC=0
fi
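All percentages above come from bc with scale=2, which truncates rather than rounds. A quick sanity check of the pattern used throughout (hypothetical counts):

    $ echo "scale=2 ; (100*3/7)" | /usr/bin/bc -q
    42.85

i.e. a ratio of 3 hits out of 7 accesses is reported as 42.85%.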
efficiency() {
	if [ "$1" = "config" ]; then
		echo 'graph_title ZFS ARC Efficiency'
		echo 'graph_args -u 100'
		echo 'graph_vlabel %'
		echo 'graph_info This graph shows the ARC efficiency'

		echo 'hits.label Hit Ratio'
		echo 'misses.label Miss Ratio'
		echo 'actual_hits.label Actual Hit Ratio'
		echo 'data_demand_efficiency.label Data Demand Efficiency'
		echo 'data_prefetch_efficiency.label Data Prefetch Efficiency'

		exit 0
	else
		echo 'hits.value ' $CACHE_HIT_RATIO_PERC
		echo 'misses.value ' $CACHE_MISS_RATIO_PERC
		echo 'actual_hits.value ' $ACTUAL_HIT_RATIO_PERC
		echo 'data_demand_efficiency.value ' $DATA_DEMAND_EFFICIENCY_PERC
		echo 'data_prefetch_efficiency.value ' $DATA_PREFETCH_EFFICIENCY_PERC
	fi
}
cachehitlist() {
	if [ "$1" = "config" ]; then
		echo 'graph_title ZFS ARC Efficiency: Cache hits by cache list'
		echo 'graph_args -u 100'
		echo 'graph_vlabel %'
		echo 'graph_info This graph shows the ARC cache hits by cache list'

		echo 'cache_list_anon.label Anonymously Used'
		echo 'cache_list_most_rec.label Most Recently Used'
		echo 'cache_list_most_freq.label Most Frequently Used'
		echo 'cache_list_most_rec_ghost.label Most Recently Used Ghost'
		echo 'cache_list_most_freq_ghost.label Most Frequently Used Ghost'

		exit 0
	else
		echo 'cache_list_anon.value ' $ANONYMOUSLY_USED_PERC
		echo 'cache_list_most_rec.value ' $MOST_RECENTLY_USED_PERC
		echo 'cache_list_most_freq.value ' $MOST_FREQUENTLY_USED_PERC
		echo 'cache_list_most_rec_ghost.value ' $MOST_RECENTLY_USED_GHOST_PERC
		echo 'cache_list_most_freq_ghost.value ' $MOST_FREQUENTLY_USED_GHOST_PERC
	fi
}
cachehitdtype() {
	if [ "$1" = "config" ]; then
		echo 'graph_title ZFS ARC Efficiency: Cache hits by data type'
		echo 'graph_args -u 100'
		echo 'graph_vlabel %'
		echo 'graph_info This graph shows the ARC cache hits by data type'

		echo 'data_type_demand_hits.label Demand Data Hit Ratio'
		echo 'data_type_demand_misses.label Demand Data Miss Ratio'
		echo 'data_type_prefetch_hits.label Prefetch Data Hit Ratio'
		echo 'data_type_prefetch_misses.label Prefetch Data Miss Ratio'
		echo 'data_type_demand_metadata_hits.label Demand Metadata Hit Ratio'
		echo 'data_type_demand_metadata_misses.label Demand Metadata Miss Ratio'
		echo 'data_type_prefetch_metadata_hits.label Prefetch Metadata Hit Ratio'
		echo 'data_type_prefetch_metadata_misses.label Prefetch Metadata Miss Ratio'

		exit 0
	else
		echo 'data_type_demand_hits.value ' $DEMAND_DATA_HIT_PERC
		echo 'data_type_demand_misses.value ' $DEMAND_DATA_MISS_PERC
		echo 'data_type_prefetch_hits.value ' $PREFETCH_DATA_HIT_PERC
		echo 'data_type_prefetch_misses.value ' $PREFETCH_DATA_MISS_PERC
		echo 'data_type_demand_metadata_hits.value ' $DEMAND_METADATA_HIT_PERC
		echo 'data_type_demand_metadata_misses.value ' $DEMAND_METADATA_MISS_PERC
		echo 'data_type_prefetch_metadata_hits.value ' $PREFETCH_METADATA_HIT_PERC
		echo 'data_type_prefetch_metadata_misses.value ' $PREFETCH_METADATA_MISSES_PERC
	fi
}
dmuprefetch() {
	if [ "$1" = "config" ]; then
		echo 'graph_title ZFS DMU prefetch stats'
		echo 'graph_args -u 100'
		echo 'graph_vlabel %'
		echo 'graph_info This graph shows the DMU prefetch stats'

		echo 'hits.label Hit Ratio'
		echo 'misses.label Miss Ratio'

		exit 0
	else
		echo 'hits.value ' $DMU_HITS_PERC
		echo 'misses.value ' $DMU_MISSES_PERC
	fi
}
utilization() {
	if [ "$1" = "config" ]; then
		echo 'graph_title ZFS ARC Size'
		echo 'graph_args --base 1024 -l 0 --vertical-label Bytes --upper-limit '$MEMMAX
		echo 'graph_vlabel Bytes'
		echo 'graph_info This graph shows the ARC size utilization'

		echo 'max_size.label Maximum Size'
		echo 'max_size.draw AREA'
		echo 'size.label Size'
		echo 'size.draw AREA'
		echo 'min_size.label Minimum Size'
		echo 'min_size.draw AREA'
		echo 'target_size.label Target Size'
		echo 'target_size.draw LINE1'
		echo 'recently_size.label Recently Used Cache Size'
		echo 'recently_size.draw LINE1'
		echo 'frequently_size.label Frequently Used Cache Size'
		echo 'frequently_size.draw LINE1'

		exit 0
	else
		echo 'max_size.value ' $MAX_SIZE
		echo 'size.value ' $SIZE
		echo 'min_size.value ' $MIN_SIZE
		echo 'target_size.value ' $TARGET_SIZE
		echo 'recently_size.value ' $MRU_SIZE
		echo 'frequently_size.value ' $MFU_SIZE
	fi
}
l2utilization() {
	if [ "$1" = "config" ]; then
		echo 'graph_title ZFS L2ARC Size'
		echo 'graph_args --base 1024 -r -l 0 --vertical-label Bytes'
		echo 'graph_vlabel Bytes'
		echo 'graph_info This graph shows the L2ARC size utilization'

		echo 'size.label Size'
		echo 'size.draw AREA'
		echo 'hdr_size.label Header Size'
		echo 'hdr_size.draw AREA'

		exit 0
	else
		echo 'size.value ' $L2_SIZE
		echo 'hdr_size.value ' $L2_HDR_SIZE
	fi
}
l2efficiency() {
	if [ "$1" = "config" ]; then
		echo 'graph_title ZFS L2ARC Efficiency'
		echo 'graph_args -u 100'
		echo 'graph_vlabel %'
		echo 'graph_info This graph shows the L2ARC efficiency'

		echo 'l2_hits.label Hit Ratio'
		echo 'l2_misses.label Miss Ratio'
	else
		echo 'l2_hits.value ' $L2_HIT_RATIO_PERC
		echo 'l2_misses.value ' $L2_MISS_RATIO_PERC
	fi
}
[ "$1" = "config" ] && echo "graph_category fs"
|
|
||||||
|
|
||||||
case "$FUNCTION" in
|
|
||||||
efficiency)
|
|
||||||
efficiency $1
|
|
||||||
;;
|
|
||||||
cachehitlist)
|
|
||||||
cachehitlist $1
|
|
||||||
;;
|
|
||||||
cachehitdtype)
|
|
||||||
cachehitdtype $1
|
|
||||||
;;
|
|
||||||
dmuprefetch)
|
|
||||||
dmuprefetch $1
|
|
||||||
;;
|
|
||||||
utilization)
|
|
||||||
utilization $1
|
|
||||||
;;
|
|
||||||
l2utilization)
|
|
||||||
l2utilization $1
|
|
||||||
;;
|
|
||||||
l2efficiency)
|
|
||||||
l2efficiency $1
|
|
||||||
;;
|
|
||||||
esac
|
|
267
extern/zpool_capacity
vendored
@@ -1,267 +0,0 @@
#!/bin/bash

: << =cut

=head1 NAME

zpool_capacity - Munin plugin to monitor ZFS capacity

These functions are implemented:
  capacity  : to monitor zpool capacity %
  allocated : to monitor zpool allocated bytes
  dedup     : to monitor zpool dedup and compress ratio

Tested with Solaris 10 and 11, OpenIndiana Hipster, FreeBSD 11, CentOS 7

=head1 CONFIGURATION

Make symlink:
  cd /path/to/munin/etc/plugins
  ln -s /path/to/munin/lib/plugins/zpool_capacity .

For FreeBSD, it may be necessary to change the shebang from /bin/bash to
/usr/local/bin/bash.

For Linux, root privilege is necessary to run the zpool command:
  [zpool_capacity]
    user root

=head1 ENVIRONMENT VARIABLES

critical : default 90
warning  : default 80

=head1 AUTHOR

K.Cima https://github.com/shakemid

=head1 LICENSE

GPLv2

=head1 Magic markers

 #%# family=contrib
 #%# capabilities=autoconf

=cut
# Include plugin.sh
. "${MUNIN_LIBDIR:-}/plugins/plugin.sh"
is_multigraph "$@"

# Shell options
set -o nounset

# Global variables
plugin_name=zpool_capacity
functions='capacity allocated dedup'
zpool_cmd=/sbin/zpool
zfs_cmd=/sbin/zfs

# Environment variables
: "${warning:=80}"
: "${critical:=90}"

# Note: The performance of ZFS may degrade significantly when zpool capacity exceeds 90%.
# See also: https://docs.oracle.com/cd/E53394_01/html/E54801/zfspools-4.html

# Functions
preconfig() {
	local func="$1"
	local p c

	# data_attr format: field type draw label
	# (a label may contain white-space)
	data_attr=

	case $func in
		capacity)
			global_attr="
				graph_title ZFS storage pool - Capacity
				graph_category fs
				graph_args --base 1000 --lower-limit 0 --upper-limit 100
				graph_vlabel % allocated
				graph_info ZFS storage pool - Capacity
				warning ${warning}
				critical ${critical}
			"
			for p in $pool_list
			do
				data_attr="${data_attr}
					${p} GAUGE LINE2 ${p}"
			done
			;;
		allocated)
			global_attr="
				graph_title ZFS storage pool - Allocated bytes
				graph_category fs
				graph_args --base 1024 --lower-limit 0
				graph_vlabel Bytes
				graph_info ZFS storage pool - Allocated bytes
			"
			c=0
			for p in $pool_list
			do
				data_attr="${data_attr}
					${p}_size GAUGE LINE ${p} size
					${p}_allocated GAUGE LINE2 ${p} allocated"
				global_attr="${global_attr}
					${p}_size.colour COLOUR${c}
					${p}_allocated.colour COLOUR${c}"
				c=$(( c + 1 ))
			done
			;;
		dedup)
			global_attr="
				graph_title ZFS storage pool - Dedup and compress ratio
				graph_category fs
				graph_args --base 1000 --lower-limit 1
				graph_vlabel Ratio
				graph_info ZFS storage pool - Dedup and compress ratio
			"
			for p in $pool_list
			do
				data_attr="${data_attr}
					${p}_dedup GAUGE LINE ${p} dedup
					${p}_compress GAUGE LINE ${p} compress"
			done
			;;
	esac
}
do_config() {
	local func="$1"
	local label_max_length=45
	local field type draw label

	preconfig "$func"
	echo "multigraph ${plugin_name}_${func}"

	# print global attributes
	echo "$global_attr" | sed -e 's/^ *//' -e '/^$/d'

	# print data source attributes
	echo "$data_attr" | while read -r field type draw label
	do
		[ -z "$field" ] && continue

		field=$( clean_fieldname "$field" )
		echo "${field}.type ${type}"
		echo "${field}.draw ${draw}"
		echo "${field}.label ${label:0:${label_max_length}}"
		if [ "$type" = 'DERIVE' ]; then
			echo "${field}.min 0"
		fi
		if [ "$label" = 'dummy' ]; then
			echo "${field}.graph no"
		fi
	done

	echo
}
get_stats() {
	local func="$1"

	case $func in
		capacity)
			"$zpool_cmd" list -H -o name,capacity | sed 's/%$//'
			;;
		allocated)
			( "$zpool_cmd" list -H -o name,allocated \
				| awk '{ print $1"_allocated", $2 }'
			  "$zpool_cmd" list -H -o name,size \
				| awk '{ print $1"_size", $2 }'
			) \
			| perl -ane '
				@unit{ qw/ K M G T P E / } = ( 1 .. 6 );
				$name  = $F[0];
				$byteu = $F[1];
				( $n, $u ) = $byteu =~ /^([\d.]+)([KMGTPE]?)$/;
				$byte = int( $n * 1024 ** ( $u ? $unit{ $u } : 0 ) );
				print "$name $byte\n";
			'
			# Note: ZFS supports up to 16EB.
			;;
		dedup)
			"$zpool_cmd" list -H -o name,dedup \
				| sed 's/x$//' \
				| awk '{ print $1"_dedup", $2 }'
			# example output:
			#   $ zpool list -H -o name,dedup
			#   rpool 1.00x
			#   ...

			"$zpool_cmd" list -H -o name \
				| xargs "$zfs_cmd" get -H -o name,value compressratio \
				| sed 's/x$//' \
				| awk '{ print $1"_compress", $2 }'
			# example output:
			#   $ zfs get -H -o name,value compressratio rpool
			#   rpool 1.00x
			;;
	esac
}
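The embedded perl converter maps the K..E suffixes emitted by `zpool list` to powers of 1024; unsuffixed values fall through with exponent 0. A worked example for a hypothetical pool size of 49.5G:

    49.5G  ->  int(49.5 * 1024^3)  =  53150220288 bytes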

do_fetch() {
	local func="$1"
	local zpool_stats field value

	# zpool_stats contains 'key value\n'
	zpool_stats=$( get_stats "$func" )

	echo "multigraph ${plugin_name}_${func}"

	echo "$zpool_stats" | while read -r field value
	do
		field=$( clean_fieldname "$field" )
		echo "${field}.value ${value}"
	done

	echo
}
autoconf() {
	if [ -x "$zpool_cmd" ]; then
		echo yes
	else
		echo "no (failed to find executable 'zpool')"
	fi
}

config() {
	local func

	pool_list=$( "$zpool_cmd" list -H -o name )

	for func in $functions
	do
		do_config "$func"
	done
}

fetch() {
	local func

	for func in $functions
	do
		do_fetch "$func"
	done
}

# Main
case ${1:-} in
	autoconf)
		autoconf
		;;
	config)
		config
		if [ "${MUNIN_CAP_DIRTYCONFIG:-0}" = "1" ]; then fetch; fi
		;;
	*)
		fetch
		;;
esac

exit 0
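Being a multigraph plugin, it emits one `multigraph zpool_capacity_<function>` section per function. A quick way to inspect it by hand, with hypothetical pool names:

    $ munin-run zpool_capacity config    # graph and field definitions
    $ munin-run zpool_capacity           # values, e.g.:
    multigraph zpool_capacity_capacity
    tank.value 42
    rpool.value 17
    ...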
127
extern/zpool_iostat
vendored
@@ -1,127 +0,0 @@
#!/bin/sh
# -*- sh -*-

set -eu

: <<=cut

=head1 NAME

zpool_iostat - Plugin to monitor transfer statistics of ZFS pools

=head1 APPLICABLE SYSTEMS

All systems with "zpool" installed.

=head1 CONFIGURATION

No configuration is required.

=head1 INTERPRETATION

This plugin shows a graph with read (positive) and write (negative) values
for the IO transfer of each pool.

=head1 MAGIC MARKERS

 #%# family=auto
 #%# capabilities=autoconf

=head1 AUTHOR

tsaavik <github@hellspark.com>
Peter Doherty <peterd@acranox.org>
Lars Kruse <devel@sumpfralle.de>

=head1 LICENSE

GPLv2

=cut


# shellcheck source=/usr/share/munin/plugins/plugin.sh
. "$MUNIN_LIBDIR/plugins/plugin.sh"


ZPOOL_BIN=/sbin/zpool
ACTION="${1:-}"


if [ "$ACTION" = "autoconf" ]; then
	if [ -x "$ZPOOL_BIN" ]; then
		echo yes
	else
		echo "no (missing executable '$ZPOOL_BIN')"
	fi
	exit 0
fi

zlines=$("$ZPOOL_BIN" iostat -v | wc -l | sed 's/ //g')
iostats=$("$ZPOOL_BIN" iostat -v 1 1 | tail "-$zlines")
zlist=$(echo "$iostats" \
	| awk '/alloc/ {next}; /avail/ {next}; /raid/ {next}; /mirror/ {next};
		{ if ( $4 >=0 ) print $1}' \
	| tr ' ' '\n')
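To make the filtering concrete, a hypothetical `zpool iostat -v` report looks roughly like this; the awk program drops the header rows (matched via alloc/avail) and the raidz/mirror group rows, keeping pool and leaf device names:

                  capacity     operations    bandwidth
    pool        alloc   free   read  write   read  write
    tank        1.10T   720G      5     12   210K   1.2M
      mirror    1.10T   720G      5     12   210K   1.2M
        sda         -      -      2      6   100K   600K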

# Parse the n'th column of the iostat output for a given pool or disk as a
# number (interpreting K and M suffixes).
get_device_iostat_column() {
	local device_label="$1"
	local stat_column="$2"
	# convert all numeric values into kB
	echo "$iostats" \
		| awk '{ if ($1 == "'"$device_label"'") print $'"$stat_column"'; }' \
		| awk '/M/ {print int($1)*1000};
			/K/ {print int($1)};
			/[0-9]$/ {print int($1)/1000}'
}
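The second awk stage normalizes the human-readable figures to kB, using 1000-based factors to match the graph's --base 1000. Note that int() is applied before scaling, so fractional megabyte values lose their fraction (hypothetical inputs):

    1.2M  ->  1000     (int(1.2) * 1000)
    210K  ->  210
    512   ->  0.512    (plain values are divided by 1000)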

get_device_fieldname() {
	local device_id="$1"
	# Backwards compatibility (until 2016): keep the unprefixed pool name
	# for the fieldname, except for pool names starting with digits.
	if echo "$device_id" | grep -q "^[0-9]"; then
		clean_fieldname "_$device_id"
	else
		clean_fieldname "$device_id"
	fi
}
if [ "$ACTION" = "config" ]; then
|
|
||||||
echo 'graph_title zpool iostat'
|
|
||||||
echo 'graph_args --base 1000 -l 0'
|
|
||||||
echo 'graph_vlabel write (-) / read (+) KBytes/s'
|
|
||||||
echo 'graph_category disk'
|
|
||||||
echo 'graph_scale no'
|
|
||||||
echo 'graph_info This graph shows zpool iostat'
|
|
||||||
# Assemble the "graph_order" as a sorted list of read/write pairs for
|
|
||||||
# each device.
|
|
||||||
printf "graph_order"
|
|
||||||
echo "$zlist" | while read -r device_id; do
|
|
||||||
fieldname="$(get_device_fieldname "$device_id")"
|
|
||||||
printf " %s_read %s_write" "$fieldname" "$fieldname"
|
|
||||||
done
|
|
||||||
# finalize the 'graph_order' with a newline
|
|
||||||
echo
|
|
||||||
# output all fields: write as negative numbers and read as positive
|
|
||||||
echo "$zlist" | while read -r device_id; do
|
|
||||||
fieldname="$(get_device_fieldname "$device_id")"
|
|
||||||
echo "${fieldname}_read.label $device_id"
|
|
||||||
echo "${fieldname}_read.type GAUGE"
|
|
||||||
echo "${fieldname}_read.graph no"
|
|
||||||
echo "${fieldname}_write.label $device_id"
|
|
||||||
echo "${fieldname}_write.type GAUGE"
|
|
||||||
echo "${fieldname}_write.negative ${fieldname}_read"
|
|
||||||
done
|
|
||||||
exit 0
|
|
||||||
fi
|
|
||||||
|
|
||||||
|
|
||||||
echo "$zlist" | while read -r device_id; do
|
|
||||||
fieldname="$(get_device_fieldname "$device_id")"
|
|
||||||
echo "${fieldname}_read.value $(get_device_iostat_column "$device_id" 6)"
|
|
||||||
echo "${fieldname}_write.value $(get_device_iostat_column "$device_id" 7)"
|
|
||||||
done
|
|