Compare commits
37 commits
Author | SHA1 | Date | |
---|---|---|---|
6f7b7d6779 | |||
87bdb9c71f | |||
3fdd147d1b | |||
ca99c62002 | |||
c1e6db8955 | |||
4d3f1d8214 | |||
4948ce1544 | |||
8baa885bb1 | |||
6a479c4ef1 | |||
1413693d91 | |||
5e7b1f3e94 | |||
f323ee239a | |||
ab16e162ee | |||
6aafbf4a37 | |||
9c8c31988d | |||
fd3674bd28 | |||
c15ba5885c | |||
3fa33e32c2 | |||
e6759640eb | |||
4d5ca7c30a | |||
010d3101e3 | |||
5b4fff96c7 | |||
d9c8a28716 | |||
2880cc98fd | |||
a77b262ef3 | |||
e17311744a | |||
82f143380e | |||
274b83c401 | |||
6fc99676f7 | |||
bdb91b782d | |||
67cad49c97 | |||
abf6ed8ce5 | |||
f2208bd01b | |||
e79510f163 | |||
eeaaedadf0 | |||
4fe11fdbf0 | |||
9b85111755 |
31 changed files with 523 additions and 3980 deletions
198
extern/acng
vendored
198
extern/acng
vendored
|
@ -1,198 +0,0 @@
|
|||
#!/usr/bin/perl
|
||||
|
||||
=head1 NAME
|
||||
|
||||
acng - Graph activity for Apt-Cacher NG, request count and bytes
|
||||
|
||||
=head1 APPLICABLE SYSTEMS
|
||||
|
||||
Systems with "Apt-Cacher NG" installed and running.
|
||||
|
||||
=head1 DESCRIPTION
|
||||
|
||||
This plugin will add graphs for "bytes in and out" and "requests in
|
||||
and out" for systems with "Apt-Cacher NG" installed.
|
||||
|
||||
=head1 CONFIGURATION
|
||||
|
||||
The plugin must have permission to read the log of Apt-Cacher NG. (On
|
||||
Debian 8, this file is world readable by default).
|
||||
|
||||
The path to the logfile can be set with the "logfile" environment
|
||||
variable.
|
||||
|
||||
=head2 DEFAULT CONFIGURATION
|
||||
|
||||
[acng]
|
||||
env.logfile /var/log/apt-cacher-ng/apt-cacher.log
|
||||
|
||||
=head1 USAGE
|
||||
|
||||
Link this plugin to /etc/munin/plugins/ and restart the munin-node.
|
||||
|
||||
=head1 MAGIC MARKERS
|
||||
|
||||
#%# family=contrib
|
||||
#%# capabilities=autoconf
|
||||
|
||||
=head1 AUTHOR
|
||||
|
||||
Stig Sandbeck Mathisen
|
||||
|
||||
=head1 LICENSE
|
||||
|
||||
GPLv3
|
||||
|
||||
=cut
|
||||
|
||||
use strict;
|
||||
use warnings;
|
||||
use Munin::Plugin;
|
||||
|
||||
use Storable qw(nfreeze thaw);
|
||||
use MIME::Base64;
|
||||
|
||||
my $logfile = $ENV{'logfile'} ||= '/var/log/apt-cacher-ng/apt-cacher.log';
|
||||
|
||||
need_multigraph;
|
||||
|
||||
# Read or initialize state used by the log tailer, and the plugin.
|
||||
sub read_state {
|
||||
|
||||
my ($pos, $statsin) = restore_state;
|
||||
my $stats = thaw(decode_base64 $statsin) if $statsin;
|
||||
|
||||
$pos = 0 unless defined $pos;
|
||||
$stats = {} unless defined $stats;
|
||||
|
||||
return ($pos, $stats);
|
||||
}
|
||||
|
||||
# Write state.
|
||||
#
|
||||
# "pos" is logfile position, and "stats" is a data structure with
|
||||
# counters used by the plugin.
|
||||
#
|
||||
# Note: Munin::Plugin::save_state has limited functionality, so the
|
||||
# data structure is serialized and converted to plain text.
|
||||
sub write_state {
|
||||
my ($pos, $stats) = @_;
|
||||
|
||||
my $statsout = encode_base64 nfreeze($stats);
|
||||
save_state($pos, $statsout);
|
||||
}
|
||||
|
||||
sub parse_logfile {
|
||||
my $logfile = shift;
|
||||
my ($pos, $stats) = read_state;
|
||||
|
||||
my @keys = ( 'time', 'direction', 'size', 'client', 'file' );
|
||||
|
||||
# Open log
|
||||
my ( $fh, $reset ) = tail_open( $logfile, $pos );
|
||||
|
||||
die "Unable to open logfile\n" unless ($fh);
|
||||
|
||||
while (<$fh>) {
|
||||
chomp;
|
||||
my @values = split( /\|/, $_ );
|
||||
|
||||
my %logentry;
|
||||
@logentry{@keys} = @values;
|
||||
|
||||
$stats->{'bytes'}{ $logentry{'direction'} } += $logentry{'size'};
|
||||
$stats->{'requests'}{ $logentry{'direction'} }++;
|
||||
}
|
||||
|
||||
# Close log
|
||||
$pos = tail_close($fh);
|
||||
|
||||
write_state($pos, $stats);
|
||||
|
||||
return $stats;
|
||||
}
|
||||
|
||||
sub print_autoconf{
|
||||
my $logfile = shift;
|
||||
if ( open(my $fh, '<', $logfile) ) {
|
||||
print "yes\n";
|
||||
}
|
||||
else {
|
||||
printf "no (could not open %s)\n", $logfile;
|
||||
}
|
||||
}
|
||||
|
||||
sub print_config{
|
||||
my $stats = shift;
|
||||
|
||||
print << 'EOC';
|
||||
multigraph acng_bytes
|
||||
graph_category acng
|
||||
graph_title Apt-Cacher NG bytes
|
||||
graph_order origin client
|
||||
graph_vlabel bytes per ${graph_period}
|
||||
graph_info Bytes transferred between origin, apt-cacher-ng and clients
|
||||
origin.info bytes transferred between origin and apt-cacher-ng
|
||||
origin.label origin
|
||||
origin.type DERIVE
|
||||
origin.min 0
|
||||
client.info bytes transferred between apt-cacher-ng and clients
|
||||
client.label client
|
||||
client.type DERIVE
|
||||
client.min 0
|
||||
EOC
|
||||
print << "EOV" if $ENV{'MUNIN_CAP_DIRTYCONFIG'};
|
||||
origin.value $stats->{bytes}{I}
|
||||
client.value $stats->{bytes}{O}
|
||||
EOV
|
||||
|
||||
print << 'EOC';
|
||||
|
||||
multigraph acng_requests
|
||||
graph_category acng
|
||||
graph_title Apt-Cacher NG requests
|
||||
graph_order origin client
|
||||
graph_vlabel requests per ${graph_period}
|
||||
graph_info Requests from clients to apt-cacher-ng, and from apt-cacher-ng to origin
|
||||
origin.info requests from apt-cacher-ng to origin
|
||||
origin.label origin
|
||||
origin.type DERIVE
|
||||
origin.min 0
|
||||
client.info requests from clients to apt-cacher-ng
|
||||
client.label client
|
||||
client.type DERIVE
|
||||
client.min 0
|
||||
EOC
|
||||
|
||||
print << "EOV" if $ENV{'MUNIN_CAP_DIRTYCONFIG'};
|
||||
origin.value $stats->{requests}{I}
|
||||
client.value $stats->{requests}{O}
|
||||
EOV
|
||||
|
||||
}
|
||||
|
||||
sub print_values{
|
||||
my $stats = shift;
|
||||
|
||||
print << "EOV";
|
||||
multigraph acng_bytes
|
||||
origin.value $stats->{bytes}{I}
|
||||
client.value $stats->{bytes}{O}
|
||||
|
||||
multigraph acng_requests
|
||||
origin.value $stats->{requests}{I}
|
||||
client.value $stats->{requests}{O}
|
||||
EOV
|
||||
}
|
||||
|
||||
if ($ARGV[0] and $ARGV[0] eq 'autoconf') {
|
||||
print_autoconf($logfile);
|
||||
}
|
||||
elsif ($ARGV[0] and $ARGV[0] eq 'config') {
|
||||
my $stats = parse_logfile($logfile);
|
||||
print_config($stats);
|
||||
}
|
||||
else {
|
||||
my $stats = parse_logfile($logfile);
|
||||
print_values($stats);
|
||||
}
|
124
extern/chrony
vendored
124
extern/chrony
vendored
|
@ -1,124 +0,0 @@
|
|||
#!/bin/sh
|
||||
|
||||
: <<=cut
|
||||
|
||||
=head1 NAME
|
||||
|
||||
parse Chrony Tracking output for timeserver status information
|
||||
|
||||
=head1 APPLICABLE SYSTEMS
|
||||
|
||||
Any system with a local chronyd service.
|
||||
|
||||
=head1 CONFIGURATION
|
||||
|
||||
No configuration.
|
||||
|
||||
|
||||
=head1 VERSION
|
||||
|
||||
Revision 0.1 2008/08/23 13:06:00 joti
|
||||
|
||||
First version only chronyc tracking, autodetection included.
|
||||
|
||||
Revision 0.2 2008/10/11 16:09:00 joti
|
||||
|
||||
Added scaling of other values to match with frequency, added more description to fields
|
||||
|
||||
Revision 0.3 2014/02/16 zjttoefs
|
||||
|
||||
reduce forking by using awk
|
||||
do not limit output precision
|
||||
add stratum monitoring
|
||||
detect slow/fast time or frequency and adjust sign of value accordingly
|
||||
remove commented out code
|
||||
|
||||
Revision 0.4 2016/11/10 Lars Kruse
|
||||
|
||||
rewrite field handling
|
||||
use "which" for "chronyc" location
|
||||
switch from "bash" to "sh"
|
||||
fix exit code of failing "autoconf"
|
||||
|
||||
|
||||
=head1 AUTHOR
|
||||
|
||||
Copyright (C) 2008 joti
|
||||
|
||||
Copyright (C) 2014 zjttoefs
|
||||
|
||||
Copyright (C) 2016 Lars Kruse <devel@sumpfralle>
|
||||
|
||||
|
||||
=head1 MAGIC MARKERS
|
||||
|
||||
#%# family=auto
|
||||
#%# capabilities=autoconf
|
||||
|
||||
=cut
|
||||
|
||||
CHRONYC="$(which chronyc | head -1)"
|
||||
|
||||
# Frequency has extremely higher values than other. Therefore they are fitted by scaling via suitable factors.
|
||||
# field definitions:
|
||||
# - munin fieldname
|
||||
# - factor for graph visualization (all values are supposed to reach a similar dimension)
|
||||
# - regular expression of the chrony output line (may not contain whitespace, case insensitive)
|
||||
# - label (may include "%d" for including the factor; may contain whitespace)
|
||||
fields="stratum 1 ^Stratum Stratum
|
||||
systime 1000 ^System.time System Time (x%d)
|
||||
frequency 1 ^Frequency Frequency (ppm)
|
||||
residualfreq 100 ^Residual.freq Residual Freq (ppm, x%d)
|
||||
skew 100 ^Skew Skew (ppm, x%d)
|
||||
rootdelay 1000 ^Root.delay Root delay (seconds, x%d)
|
||||
rootdispersion 1000 ^Root.dispersion Root dispersion (seconds, x%d)"
|
||||
|
||||
# chrony example output (v2.4.1):
|
||||
# Reference ID : 131.188.3.221 (ntp1.rrze.uni-erlangen.de)
|
||||
# Stratum : 2
|
||||
# Ref time (UTC) : Thu Nov 10 22:39:50 2016
|
||||
# System time : 0.000503798 seconds slow of NTP time
|
||||
# Last offset : +0.000254355 seconds
|
||||
# RMS offset : 0.002186779 seconds
|
||||
# Frequency : 17.716 ppm slow
|
||||
# Residual freq : +0.066 ppm
|
||||
# Skew : 4.035 ppm
|
||||
# Root delay : 0.042980 seconds
|
||||
# Root dispersion : 0.005391 seconds
|
||||
# Update interval : 258.4 seconds
|
||||
# Leap status : Normal
|
||||
|
||||
|
||||
if [ "$1" = "autoconf" ]; then
|
||||
if [ -n "$CHRONYC" ] && [ -x "$CHRONYC" ]; then
|
||||
echo yes
|
||||
else
|
||||
echo "no (missing 'chronyc' executable)"
|
||||
fi
|
||||
exit 0
|
||||
fi
|
||||
|
||||
if [ "$1" = "config" ]; then
|
||||
echo 'graph_title Chrony Tracking Stats'
|
||||
echo 'graph_args --base 1000 -l 0'
|
||||
echo 'graph_vlabel (seconds,ppm)'
|
||||
echo 'graph_category time'
|
||||
echo "$fields" | while read fieldname factor regex label; do
|
||||
# insert the factor, if "%d" is part of the label
|
||||
printf "${fieldname}.label $label\n" "$factor"
|
||||
echo "${fieldname}.type GAUGE"
|
||||
done
|
||||
exit 0
|
||||
fi
|
||||
|
||||
chrony_status="$("$CHRONYC" tracking)"
|
||||
echo "$fields" | while read fieldname factor regex label; do
|
||||
status_line="$(echo "$chrony_status" | grep -i -- "$regex " | cut -d ":" -f 2-)"
|
||||
if [ -z "$status_line" ]; then
|
||||
value="U"
|
||||
else
|
||||
# the keyword "slow" indicates negative values
|
||||
value="$(echo "$status_line" | awk '{ /slow/ ? SIGN=-1 : SIGN=1; print $1 * SIGN * '"$factor"' }')"
|
||||
fi
|
||||
echo "${fieldname}.value $value"
|
||||
done
|
552
extern/docker_
vendored
552
extern/docker_
vendored
|
@ -1,552 +0,0 @@
|
|||
#!/usr/bin/env python3
|
||||
"""
|
||||
=head1 NAME
|
||||
|
||||
docker_ - Docker wildcard-plugin to monitor a L<Docker|https://www.docker.com> host.
|
||||
|
||||
This wildcard plugin provides series C<containers>, C<images>, C<status>,
|
||||
C<volumes>, C<cpu>, C<memory> and C<network> as separate graphs. It also
|
||||
supports a C<multi> suffix that provides all of those as a multigraph.
|
||||
|
||||
=head1 INSTALLATION
|
||||
|
||||
- Copy this plugin in your munin plugins directory
|
||||
- Install Python3 "docker" package
|
||||
|
||||
=over 2
|
||||
|
||||
If you want all the graphs as a multigraph, create a single multi symlink.
|
||||
|
||||
ln -s /usr/share/munin/plugins/docker_ /etc/munin/plugins/docker_multi
|
||||
|
||||
Or choose a subset of those you want.
|
||||
|
||||
ln -s /usr/share/munin/plugins/docker_ /etc/munin/plugins/docker_containers
|
||||
ln -s /usr/share/munin/plugins/docker_ /etc/munin/plugins/docker_cpu
|
||||
ln -s /usr/share/munin/plugins/docker_ /etc/munin/plugins/docker_images
|
||||
ln -s /usr/share/munin/plugins/docker_ /etc/munin/plugins/docker_memory
|
||||
ln -s /usr/share/munin/plugins/docker_ /etc/munin/plugins/docker_network
|
||||
ln -s /usr/share/munin/plugins/docker_ /etc/munin/plugins/docker_status
|
||||
ln -s /usr/share/munin/plugins/docker_ /etc/munin/plugins/docker_volumes
|
||||
|
||||
=back
|
||||
|
||||
After the installation you need to restart your munin-node:
|
||||
|
||||
=over 2
|
||||
|
||||
systemctl restart munin-node
|
||||
|
||||
=back
|
||||
|
||||
=head1 CONFIGURATION
|
||||
|
||||
This plugin need to run as root, you need to create a file named docker placed in the
|
||||
directory /etc/munin/plugin-conf.d/ with the following config (you can also use
|
||||
Docker environment variables here as described in
|
||||
https://docs.docker.com/compose/reference/envvars/):
|
||||
|
||||
You can use the EXCLUDE_CONTAINER_NAME environment variable to specify a regular expression
|
||||
which if matched will exclude the matching containers from the memory and cpu graphs.
|
||||
|
||||
For example
|
||||
|
||||
env.EXCLUDE_CONTAINER_NAME runner
|
||||
|
||||
Would exclude all containers with the word "runner" in the name.
|
||||
|
||||
|
||||
=over 2
|
||||
|
||||
[docker_*]
|
||||
group docker
|
||||
env.DOCKER_HOST unix://run/docker.sock
|
||||
env.EXCLUDE_CONTAINER_NAME regexp
|
||||
|
||||
=back
|
||||
|
||||
You may need to pick a different group depending on the name schema of your
|
||||
distribution. Or maybe use "user root", if nothing else works.
|
||||
|
||||
=head1 AUTHORS
|
||||
|
||||
This section has been reverse-engineered from git logs
|
||||
|
||||
Codimp <contact@lithio.fr>: original rewrite
|
||||
|
||||
Rowan Wookey <admin@rwky.net>: performance improvement
|
||||
|
||||
Olivier Mehani <shtrom@ssji.net>: Network support, ClientWrapper, general cleanup, multigraph
|
||||
|
||||
=head1 MAGIC MARKERS
|
||||
|
||||
#%# family=auto
|
||||
#%# capabilities=autoconf suggest multigraph
|
||||
|
||||
=cut
|
||||
"""
|
||||
|
||||
import os
|
||||
import sys
|
||||
import re
|
||||
try:
|
||||
from functools import cached_property
|
||||
except ImportError:
|
||||
# If cached_property is not available,
|
||||
# just use the property decorator, without caching
|
||||
# This is for backward compatibility with Python<3.8
|
||||
cached_property = property
|
||||
from multiprocessing import Process, Queue
|
||||
|
||||
|
||||
def sorted_by_creation_date(func):
|
||||
def sorted_func(*args, **kwargs):
|
||||
return sorted(
|
||||
func(*args, **kwargs),
|
||||
key=(
|
||||
lambda x: x.attrs['CreatedAt']
|
||||
if 'CreatedAt' in x.attrs
|
||||
else x.attrs['Created']
|
||||
)
|
||||
)
|
||||
return sorted_func
|
||||
|
||||
|
||||
def clean_fieldname(text):
|
||||
if text == "root":
|
||||
# "root" is a magic (forbidden) word
|
||||
return "_root"
|
||||
else:
|
||||
return re.sub(r"(^[^A-Za-z_]|[^A-Za-z0-9_])", "_", text)
|
||||
|
||||
|
||||
class ClientWrapper:
|
||||
"""
|
||||
A small wrapper for the docker client, to centralise some parsing logic,
|
||||
and support caching.
|
||||
|
||||
In addition, when the exclude_re parameter is not None,
|
||||
any container which name is matched by the RE will not be excluded from reports.
|
||||
"""
|
||||
client = None
|
||||
exclude = None
|
||||
|
||||
def __init__(self, client, exclude_re=None):
|
||||
self.client = client
|
||||
if exclude_re:
|
||||
self.exclude = re.compile(exclude_re)
|
||||
|
||||
@property
|
||||
def api(self):
|
||||
return self.client.api
|
||||
|
||||
@cached_property
|
||||
@sorted_by_creation_date
|
||||
def all_containers(self):
|
||||
return [
|
||||
c for c in self.client.containers.list(all=True)
|
||||
if (c.status == 'running') and (not self.exclude or not self.exclude.search(c.name))
|
||||
]
|
||||
|
||||
@cached_property
|
||||
@sorted_by_creation_date
|
||||
def intermediate_images(self):
|
||||
return list(
|
||||
set(self.all_images)
|
||||
.difference(
|
||||
set(self.images)
|
||||
.difference(
|
||||
set(self.dangling_images)
|
||||
)
|
||||
)
|
||||
)
|
||||
|
||||
@cached_property
|
||||
@sorted_by_creation_date
|
||||
def all_images(self):
|
||||
return self.client.images.list(all=True)
|
||||
|
||||
@cached_property
|
||||
@sorted_by_creation_date
|
||||
def images(self):
|
||||
images = self.client.images.list()
|
||||
return list(
|
||||
set(images)
|
||||
.difference(
|
||||
set(self.dangling_images))
|
||||
)
|
||||
|
||||
@cached_property
|
||||
@sorted_by_creation_date
|
||||
def dangling_images(self):
|
||||
return self.client.images.list(filters={'dangling': True})
|
||||
|
||||
@cached_property
|
||||
@sorted_by_creation_date
|
||||
def volumes(self):
|
||||
return self.client.volumes.list()
|
||||
|
||||
|
||||
def container_summary(container, *args):
|
||||
summary = container.name
|
||||
attributes = container_attributes(container, *args)
|
||||
if attributes:
|
||||
summary += f' ({attributes})'
|
||||
return summary
|
||||
|
||||
|
||||
def container_attributes(container, *args):
|
||||
attributes = container.image.tags
|
||||
attributes.append(container.attrs['Created'])
|
||||
return ', '.join(attributes + list(args))
|
||||
|
||||
|
||||
def print_containers_status(client):
|
||||
running = []
|
||||
unhealthy = []
|
||||
paused = []
|
||||
created = []
|
||||
restarting = []
|
||||
removing = []
|
||||
exited = []
|
||||
dead = []
|
||||
for container in client.all_containers:
|
||||
if container.status == 'running':
|
||||
state = client.api.inspect_container(container.name)['State']
|
||||
if state.get('Health', {}).get('Status') == 'unhealthy':
|
||||
unhealthy.append(container)
|
||||
else:
|
||||
running.append(container)
|
||||
elif container.status == 'paused':
|
||||
paused.append(container)
|
||||
elif container.status == 'created':
|
||||
created.append(container)
|
||||
elif container.status == 'restarting':
|
||||
restarting.append(container)
|
||||
elif container.status == 'removing':
|
||||
removing.append(container)
|
||||
elif container.status == 'exited':
|
||||
exited.append(container)
|
||||
elif container.status == 'dead':
|
||||
dead.append(container)
|
||||
print('running.value', len(running))
|
||||
print('running.extinfo', ', '.join(container_summary(c) for c in running))
|
||||
print('unhealthy.value', len(unhealthy))
|
||||
print('unhealthy.extinfo', ', '.join(container_summary(c) for c in unhealthy))
|
||||
print('paused.value', len(paused))
|
||||
print('paused.extinfo', ', '.join(container_summary(c) for c in paused))
|
||||
print('created.value', len(created))
|
||||
print('created.extinfo', ', '.join(container_summary(c) for c in created))
|
||||
print('restarting.value', len(restarting))
|
||||
print('restarting.extinfo', ', '.join(container_summary(c) for c in restarting))
|
||||
print('removing.value', len(removing))
|
||||
print('removing.extinfo', ', '.join(container_summary(c) for c in removing))
|
||||
print('exited.value', len(exited))
|
||||
print('exited.extinfo', ', '.join(container_summary(c) for c in exited))
|
||||
print('dead.value', len(dead))
|
||||
print('dead.extinfo', ', '.join(container_summary(c) for c in dead))
|
||||
|
||||
|
||||
def image_summary(image):
|
||||
attributes = image.tags
|
||||
attributes.append(image.attrs['Created'])
|
||||
attributes.append(f"{round(image.attrs['Size']/1024**2, 2)} MiB")
|
||||
return f"{image.short_id} ({', '.join(attributes)})"
|
||||
|
||||
|
||||
def print_images_count(client):
|
||||
images = client.images
|
||||
intermediate = client.intermediate_images
|
||||
dangling = client.dangling_images
|
||||
|
||||
print('intermediate_quantity.value', len(intermediate))
|
||||
print('intermediate_quantity.extinfo', ', '.join(image_summary(i) for i in intermediate))
|
||||
print('images_quantity.value', len(images))
|
||||
print('images_quantity.extinfo', ', '.join(image_summary(i) for i in images))
|
||||
print('dangling_quantity.value', len(dangling))
|
||||
print('dangling_quantity.extinfo', ', '.join(image_summary(i) for i in dangling))
|
||||
|
||||
|
||||
def get_container_stats(container, q):
|
||||
q.put(container.stats(stream=False))
|
||||
|
||||
|
||||
def parallel_container_stats(client):
|
||||
proc_list = []
|
||||
stats = {}
|
||||
for container in client.all_containers:
|
||||
q = Queue()
|
||||
p = Process(target=get_container_stats, args=(container, q))
|
||||
proc_list.append({'proc': p, 'queue': q, 'container': container})
|
||||
p.start()
|
||||
for proc in proc_list:
|
||||
proc['proc'].join()
|
||||
stats[proc['container']] = proc['queue'].get()
|
||||
return stats.items()
|
||||
|
||||
|
||||
def print_containers_cpu(client):
|
||||
for container, stats in parallel_container_stats(client):
|
||||
cpu_percent = 0.0
|
||||
cpu_delta = (float(stats["cpu_stats"]["cpu_usage"]["total_usage"])
|
||||
- float(stats["precpu_stats"]["cpu_usage"]["total_usage"]))
|
||||
system_delta = (float(stats["cpu_stats"]["system_cpu_usage"])
|
||||
- float(stats["precpu_stats"]["system_cpu_usage"]))
|
||||
if system_delta > 0.0:
|
||||
cpu_percent = cpu_delta / system_delta * 100.0 * os.cpu_count()
|
||||
clean_container_name = clean_fieldname(container.name)
|
||||
print(clean_container_name + '.value', cpu_percent)
|
||||
print(clean_container_name + '.extinfo', container_attributes(container))
|
||||
|
||||
|
||||
def print_containers_memory(client):
|
||||
for container, stats in parallel_container_stats(client):
|
||||
if 'total_rss' in stats['memory_stats']['stats']: # cgroupv1 only?
|
||||
memory_usage = stats['memory_stats']['stats']['total_rss']
|
||||
extinfo = 'Resident Set Size'
|
||||
else:
|
||||
memory_usage = stats['memory_stats']['usage']
|
||||
extinfo = 'Total memory usage'
|
||||
clean_container_name = clean_fieldname(container.name)
|
||||
print(clean_container_name + '.value', memory_usage)
|
||||
print(clean_container_name + '.extinfo', container_attributes(container, extinfo))
|
||||
|
||||
|
||||
def print_containers_network(client):
|
||||
for container, stats in parallel_container_stats(client):
|
||||
tx_bytes = 0
|
||||
rx_bytes = 0
|
||||
if "networks" in stats:
|
||||
for data in stats['networks'].values():
|
||||
tx_bytes += data['tx_bytes']
|
||||
rx_bytes += data['rx_bytes']
|
||||
clean_container_name = clean_fieldname(container.name)
|
||||
print(clean_container_name + '_up.value', tx_bytes)
|
||||
print(clean_container_name + '_down.value', rx_bytes)
|
||||
print(clean_container_name + '_up.extinfo', container_attributes(container))
|
||||
|
||||
|
||||
def volume_summary(volume):
|
||||
summary = f"{volume.short_id}"
|
||||
if volume.attrs['Labels']:
|
||||
summary += f" ({', '.join(volume.attrs['Labels'])})"
|
||||
return summary
|
||||
|
||||
|
||||
def status(client, mode):
|
||||
if mode == "config":
|
||||
print("graph_title Docker status")
|
||||
print("graph_vlabel containers")
|
||||
print("graph_category virtualization")
|
||||
print("graph_total All containers")
|
||||
print("running.label RUNNING")
|
||||
print("running.draw AREASTACK")
|
||||
print("running.info Running containers can be manipulated with "
|
||||
"`docker container [attach|kill|logs|pause|restart|stop] <NAME>` or "
|
||||
"commands run in them with `docker container exec "
|
||||
"[--detach|--interactive,--privileged,--tty] <NAME> <COMMAND>`"
|
||||
)
|
||||
print("unhealthy.label UNHEALTHY")
|
||||
print("unhealthy.draw AREASTACK")
|
||||
print("unhealthy.warning 1")
|
||||
print("unhealthy.info Unhealthy containers can be restarted with "
|
||||
"`docker container restart <NAME>`")
|
||||
print("paused.label PAUSED")
|
||||
print("paused.draw AREASTACK")
|
||||
print("paused.info Paused containers can be resumed with "
|
||||
"`docker container unpause <NAME>`")
|
||||
print("created.label CREATED")
|
||||
print("created.draw AREASTACK")
|
||||
print("created.info New containers can be created with "
|
||||
"`docker container create --name <NAME> <IMAGE_ID >` or "
|
||||
"`docker container run --name <NAME> <IMAGE_ID> <COMMAND>`")
|
||||
print("restarting.label RESTARTING")
|
||||
print("restarting.draw AREASTACK")
|
||||
print("restarting.info Containers can be restarted with "
|
||||
"`docker container restart <NAME>`")
|
||||
print("removing.label REMOVING")
|
||||
print("removing.draw AREASTACK")
|
||||
print("removing.info Containers can be removed with "
|
||||
"`docker container rm <NAME>`")
|
||||
print("exited.label EXITED")
|
||||
print("exited.draw AREASTACK")
|
||||
print("exited.info Exited containers can be started with "
|
||||
"`docker container start [--attach] <NAME>`")
|
||||
print("dead.label DEAD")
|
||||
print("dead.draw AREASTACK")
|
||||
print("dead.warning 1")
|
||||
print("dead.info Dead containers can be started with "
|
||||
"`docker container start <NAME>`")
|
||||
else:
|
||||
print_containers_status(client)
|
||||
|
||||
|
||||
def containers(client, mode):
|
||||
if mode == "config":
|
||||
print("graph_title Docker containers")
|
||||
print("graph_vlabel containers")
|
||||
print("graph_category virtualization")
|
||||
print("containers_quantity.label Containers")
|
||||
else:
|
||||
print('containers_quantity.value', len(client.all_containers))
|
||||
|
||||
|
||||
def images(client, mode):
|
||||
if mode == "config":
|
||||
print("graph_title Docker images")
|
||||
print("graph_vlabel images")
|
||||
print("graph_category virtualization")
|
||||
print("graph_total All images")
|
||||
print("intermediate_quantity.label Intermediate images")
|
||||
print("intermediate_quantity.draw AREASTACK")
|
||||
print("intermediate_quantity.info All unused images can be deleted with "
|
||||
"`docker image prune --all`")
|
||||
print("images_quantity.label Images")
|
||||
print("images_quantity.draw AREASTACK")
|
||||
print("images_quantity.info Images can be used in containers with "
|
||||
"`docker container create --name <NAME> <IMAGE_ID >` or "
|
||||
"`docker container run --name <NAME> <IMAGE_ID> <COMMAND>`")
|
||||
print("dangling_quantity.label Dangling images")
|
||||
print("dangling_quantity.draw AREASTACK")
|
||||
print("dangling_quantity.info Dangling images can be deleted with "
|
||||
"`docker image prune`"
|
||||
"or tagged with `docker image tag <IMAGE_ID> <NAME>`")
|
||||
print("dangling_quantity.warning 10")
|
||||
else:
|
||||
print_images_count(client)
|
||||
|
||||
|
||||
def volumes(client, mode):
|
||||
if mode == "config":
|
||||
print("graph_title Docker volumes")
|
||||
print("graph_vlabel volumes")
|
||||
print("graph_category virtualization")
|
||||
print("volumes_quantity.label Volumes")
|
||||
print("volumes_quantity.draw AREASTACK")
|
||||
print("volumes_quantity.info Unused volumes can be deleted with "
|
||||
"`docker volume prune`")
|
||||
else:
|
||||
print('volumes_quantity.value', len(client.volumes))
|
||||
print('volumes_quantity.extinfo', ', '.join(volume_summary(v) for v in client.volumes))
|
||||
|
||||
|
||||
def cpu(client, mode):
|
||||
if mode == "config":
|
||||
graphlimit = str(os.cpu_count() * 100)
|
||||
print("graph_title Docker containers CPU usage")
|
||||
print("graph_args --base 1000 -r --lower-limit 0 --upper-limit " + graphlimit)
|
||||
print("graph_scale no")
|
||||
print("graph_period second")
|
||||
print("graph_vlabel CPU usage (%)")
|
||||
print("graph_category virtualization")
|
||||
print("graph_info This graph shows docker container CPU usage.")
|
||||
print("graph_total Total CPU usage")
|
||||
for container in client.all_containers:
|
||||
fieldname = clean_fieldname(container.name)
|
||||
print("{}.label {}".format(fieldname, container.name))
|
||||
print("{}.draw AREASTACK".format(fieldname))
|
||||
print("{}.info {}".format(fieldname, container_attributes(container)))
|
||||
else:
|
||||
print_containers_cpu(client)
|
||||
|
||||
|
||||
def network(client, mode):
|
||||
if mode == "config":
|
||||
print("graph_title Docker containers network usage")
|
||||
print("graph_args --base 1024 -l 0")
|
||||
print("graph_vlabel bits in (-) / out (+) per ${graph_period}")
|
||||
print("graph_category virtualization")
|
||||
print("graph_info This graph shows docker container network usage.")
|
||||
print("graph_total Total network usage")
|
||||
for container in client.all_containers:
|
||||
fieldname = clean_fieldname(container.name)
|
||||
print("{}_down.label {}_received".format(fieldname, container.name))
|
||||
print("{}_down.type DERIVE".format(fieldname))
|
||||
print("{}_down.min 0".format(fieldname))
|
||||
print("{}_down.graph no".format(fieldname))
|
||||
print("{}_down.cdef {}_down,8,*".format(fieldname, fieldname))
|
||||
print("{}_up.label {}".format(fieldname, container.name))
|
||||
print("{}_up.draw LINESTACK1".format(fieldname))
|
||||
print("{}_up.type DERIVE".format(fieldname))
|
||||
print("{}_up.min 0".format(fieldname))
|
||||
print("{}_up.negative {}_down".format(fieldname, fieldname))
|
||||
print("{}_up.cdef {}_up,8,*".format(fieldname, fieldname))
|
||||
print("{}_up.info {}".format(fieldname, container_attributes(container)))
|
||||
else:
|
||||
print_containers_network(client)
|
||||
|
||||
|
||||
def memory(client, mode):
|
||||
if mode == "config":
|
||||
print("graph_title Docker containers memory usage")
|
||||
print("graph_args --base 1024 -l 0")
|
||||
print("graph_vlabel Bytes")
|
||||
print("graph_category virtualization")
|
||||
print("graph_info This graph shows docker container memory usage.")
|
||||
print("graph_total Total memory usage")
|
||||
for container in client.all_containers:
|
||||
fieldname = clean_fieldname(container.name)
|
||||
print("{}.label {}".format(fieldname, container.name))
|
||||
print("{}.draw AREASTACK".format(fieldname))
|
||||
print("{}.info {}".format(fieldname, container_attributes(container)))
|
||||
else:
|
||||
print_containers_memory(client)
|
||||
|
||||
|
||||
def main():
|
||||
series = [
|
||||
'containers',
|
||||
'cpu',
|
||||
'images',
|
||||
'memory',
|
||||
'network',
|
||||
'status',
|
||||
'volumes',
|
||||
]
|
||||
|
||||
try:
|
||||
mode = sys.argv[1]
|
||||
except IndexError:
|
||||
mode = ""
|
||||
wildcard = sys.argv[0].split("docker_")[1].split("_")[0]
|
||||
|
||||
try:
|
||||
import docker
|
||||
client = docker.from_env()
|
||||
if mode == "autoconf":
|
||||
client.ping()
|
||||
print('yes')
|
||||
sys.exit(0)
|
||||
except Exception as e:
|
||||
print(f'no ({e})')
|
||||
if mode == "autoconf":
|
||||
sys.exit(0)
|
||||
sys.exit(1)
|
||||
|
||||
if mode == "suggest":
|
||||
# The multigraph covers all other graphs,
|
||||
# so we only need to suggest one
|
||||
print("multi")
|
||||
sys.exit(0)
|
||||
|
||||
client = ClientWrapper(client,
|
||||
exclude_re=os.getenv('EXCLUDE_CONTAINER_NAME'))
|
||||
|
||||
if wildcard in series:
|
||||
# dereference the function name by looking in the globals()
|
||||
# this assumes that the function name matches the series name exactly
|
||||
# if this were to change, a different approach would be needed,
|
||||
# most likely using a Dict of series name string to callable
|
||||
globals()[wildcard](client, mode)
|
||||
elif wildcard == 'multi':
|
||||
for s in series:
|
||||
print(f'multigraph docker_{s}')
|
||||
# ditto
|
||||
globals()[s](client, mode)
|
||||
else:
|
||||
print(f'unknown series ({wildcard})', file=sys.stderr)
|
||||
sys.exit(1)
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
main()
|
145
extern/fail2ban
vendored
Normal file
145
extern/fail2ban
vendored
Normal file
|
@ -0,0 +1,145 @@
|
|||
#!/bin/bash
|
||||
: <<=cut
|
||||
|
||||
=head1 NAME
|
||||
|
||||
fail2ban - Plugin to monitor fail2ban blacklists
|
||||
|
||||
=head1 APPLICABLE SYSTEMS
|
||||
|
||||
All systems with "bash" and "fail2ban"
|
||||
|
||||
=head1 CONFIGURATION
|
||||
|
||||
The following is the default configuration
|
||||
|
||||
[fail2ban]
|
||||
env.client /usr/bin/fail2ban-client
|
||||
env.config_dir /etc/fail2ban
|
||||
|
||||
The user running this plugin needs read and write access to the
|
||||
fail2ban communications socket. You will need to add this:
|
||||
|
||||
[fail2ban]
|
||||
user root
|
||||
|
||||
=head1 INTERPRETATION
|
||||
|
||||
This plugin shows a graph with one line per active fail2ban jail, each
|
||||
showing the number of blacklisted addresses for that jail.
|
||||
|
||||
In addition, a line with the total number of blacklisted addresses is
|
||||
displayed.
|
||||
|
||||
=head1 MAGIC MARKERS
|
||||
|
||||
#%# family=auto
|
||||
#%# capabilities=autoconf
|
||||
|
||||
=head1 VERSION
|
||||
|
||||
1.0.20090423
|
||||
|
||||
=head1 BUGS
|
||||
|
||||
Needs bash, due zo using bashisms to avoid running external programs.
|
||||
|
||||
=head1 AUTHOR
|
||||
|
||||
Stig Sandbeck Mathisen <ssm@fnord.no>
|
||||
|
||||
=head1 LICENSE
|
||||
|
||||
GPLv2
|
||||
|
||||
=cut
|
||||
|
||||
|
||||
##############################
|
||||
# Configurable variables
|
||||
client=${client:-/usr/bin/fail2ban-client}
|
||||
config_dir=${config_dir:-/etc/fail2ban}
|
||||
|
||||
##############################
|
||||
# Functions
|
||||
|
||||
# Run fail2ban
|
||||
run_fail2ban() {
|
||||
"$client" -c "$config_dir" "$@"
|
||||
}
|
||||
|
||||
# List jails, one on each line
|
||||
list_jails() {
|
||||
run_fail2ban status | while read -r line; do
|
||||
case $line in
|
||||
*'Jail list:'*)
|
||||
line="${line##*Jail list*:}"
|
||||
line="${line//[ $'\t']/}"
|
||||
if [ -n "$line" ]; then echo "${line//,/$'\n'}"; fi
|
||||
;;
|
||||
esac
|
||||
done
|
||||
}
|
||||
|
||||
# Print the munin values
|
||||
values() {
|
||||
list_jails | while read -r jail; do
|
||||
run_fail2ban status "$jail" | while read -r line; do
|
||||
case $line in
|
||||
*'Currently banned'*)
|
||||
line="${line##*Currently banned:}"
|
||||
num="${line//[ $'\t']/}"
|
||||
echo "${jail//[^0-9A-Za-z]/_}.value $num"
|
||||
;;
|
||||
esac
|
||||
done
|
||||
done
|
||||
}
|
||||
|
||||
# Print the munin config
|
||||
config() {
|
||||
echo 'graph_title Hosts blacklisted by fail2ban'
|
||||
echo 'graph_info This graph shows the number of host blacklisted by fail2ban'
|
||||
echo 'graph_category network'
|
||||
echo 'graph_vlabel Number of hosts'
|
||||
|
||||
echo 'graph_args --base 1000 -l 0'
|
||||
echo 'graph_total total'
|
||||
|
||||
list_jails | while read -r jail; do
|
||||
echo "${jail//[^0-9A-Za-z]/_}.label $jail"
|
||||
done
|
||||
}
|
||||
|
||||
# Print autoconfiguration hint
|
||||
autoconf() {
|
||||
if [ -e "$client" ]; then
|
||||
if [ -x "$client" ]; then
|
||||
if run_fail2ban ping >/dev/null; then
|
||||
echo "yes"
|
||||
else
|
||||
echo "no (fail2ban-server does not respond to ping)"
|
||||
fi
|
||||
else
|
||||
echo "no (${client} is not executable)"
|
||||
fi
|
||||
else
|
||||
echo "no (${client} not found)"
|
||||
fi
|
||||
exit
|
||||
}
|
||||
|
||||
##############################
|
||||
# Main
|
||||
|
||||
case $1 in
|
||||
config)
|
||||
config
|
||||
;;
|
||||
autoconf)
|
||||
autoconf
|
||||
;;
|
||||
*)
|
||||
values
|
||||
;;
|
||||
esac
|
297
extern/fail2ban_
vendored
297
extern/fail2ban_
vendored
|
@ -1,297 +0,0 @@
|
|||
#!/usr/bin/env python3
|
||||
|
||||
"""
|
||||
=head1 NAME
|
||||
|
||||
fail2ban_ - Wildcard plugin to monitor fail2ban blacklists
|
||||
|
||||
=head1 ABOUT
|
||||
|
||||
Requires Python 2.7
|
||||
Requires fail2ban 0.9.2
|
||||
|
||||
=head1 AUTHOR
|
||||
|
||||
Copyright (c) 2015 Lee Clemens
|
||||
|
||||
Inspired by fail2ban plugin written by Stig Sandbeck Mathisen
|
||||
|
||||
=head1 CONFIGURATION
|
||||
|
||||
fail2ban-client needs to be run as root.
|
||||
|
||||
Add the following to your @@CONFDIR@@/munin-node:
|
||||
|
||||
[fail2ban_*]
|
||||
user root
|
||||
|
||||
=head1 LICENSE
|
||||
|
||||
GNU GPLv2 or any later version
|
||||
|
||||
=begin comment
|
||||
|
||||
This program is free software; you can redistribute it and/or modify
|
||||
it under the terms of the GNU General Public License as published by
|
||||
the Free Software Foundation; either version 2 of the License, or (at
|
||||
your option) any later version.
|
||||
|
||||
This program is distributed in the hope that it will be useful, but
|
||||
WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
|
||||
General Public License for more details
|
||||
|
||||
You should have received a copy of the GNU General Public License along
|
||||
with this program; if not, write to the Free Software Foundation, Inc.,
|
||||
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
|
||||
|
||||
=end comment
|
||||
|
||||
=head1 BUGS
|
||||
|
||||
Transient values (particularly ASNs) come and go...
|
||||
Better error handling (Popen), logging
|
||||
Optimize loops and parsing in __get_jail_status() and parse_fail2ban_status()
|
||||
Cymru ASNs aren't displayed in numerical order (internal name has alpha-prefix)
|
||||
Use JSON status once fail2ban exposes JSON status data
|
||||
|
||||
=head1 MAGIC MARKERS
|
||||
|
||||
#%# family=auto
|
||||
#%# capabilities=autoconf suggest
|
||||
|
||||
=cut
|
||||
"""
|
||||
|
||||
from collections import Counter
|
||||
from os import path, stat, access, X_OK, environ
|
||||
from subprocess import Popen, PIPE
|
||||
from time import time
|
||||
import re
|
||||
import sys
|
||||
|
||||
|
||||
PLUGIN_BASE = "fail2ban_"
|
||||
|
||||
CACHE_DIR = environ['MUNIN_PLUGSTATE']
|
||||
CACHE_MAX_AGE = 120
|
||||
|
||||
STATUS_FLAVORS_FIELDS = {
|
||||
"basic": ["jail"],
|
||||
"cymru": ["asn", "country", "rir"]
|
||||
}
|
||||
|
||||
|
||||
def __parse_plugin_name():
|
||||
if path.basename(__file__).count("_") == 1:
|
||||
return path.basename(__file__)[len(PLUGIN_BASE):], ""
|
||||
else:
|
||||
return (path.basename(__file__)[len(PLUGIN_BASE):].split("_")[0],
|
||||
path.basename(__file__)[len(PLUGIN_BASE):].split("_")[1])
|
||||
|
||||
|
||||
def __get_jails_cache_file():
|
||||
return "%s/%s.state" % (CACHE_DIR, path.basename(__file__))
|
||||
|
||||
|
||||
def __get_jail_status_cache_file(jail_name):
|
||||
return "%s/%s__%s.state" % (CACHE_DIR, path.basename(__file__), jail_name)
|
||||
|
||||
|
||||
def __parse_jail_names(jails_data):
|
||||
"""
|
||||
Parse the jails returned by `fail2ban-client status`:
|
||||
|
||||
Status
|
||||
|- Number of jail: 3
|
||||
`- Jail list: apache-badbots, dovecot, sshd
|
||||
"""
|
||||
jails = []
|
||||
for line in jails_data.splitlines()[1:]:
|
||||
if line.startswith("`- Jail list:"):
|
||||
return [jail.strip(" ,\t") for jail in
|
||||
line.split(":", 1)[1].split(" ")]
|
||||
return jails
|
||||
|
||||
|
||||
def __get_jail_names():
|
||||
"""
|
||||
Read jails from cache or execute `fail2ban-client status`
|
||||
and pass stdout to __parse_jail_names
|
||||
"""
|
||||
cache_filename = __get_jails_cache_file()
|
||||
try:
|
||||
mtime = stat(cache_filename).st_mtime
|
||||
except OSError:
|
||||
mtime = 0
|
||||
if time() - mtime > CACHE_MAX_AGE:
|
||||
p = Popen(["fail2ban-client", "status"], shell=False, stdout=PIPE)
|
||||
jails_data = p.communicate()[0]
|
||||
with open(cache_filename, 'w') as f:
|
||||
f.write(jails_data)
|
||||
else:
|
||||
with open(cache_filename, 'r') as f:
|
||||
jails_data = f.read()
|
||||
return __parse_jail_names(jails_data)
|
||||
|
||||
|
||||
def autoconf():
|
||||
"""
|
||||
Attempt to find fail2ban-client in path (using `which`) and ping the client
|
||||
"""
|
||||
p_which = Popen(["which", "fail2ban-client"], shell=False, stdout=PIPE,
|
||||
stderr=PIPE)
|
||||
stdout, stderr = p_which.communicate()
|
||||
if len(stdout) > 0:
|
||||
client_path = stdout.strip()
|
||||
if access(client_path, X_OK):
|
||||
p_ping = Popen([client_path, "ping"], shell=False)
|
||||
p_ping.communicate()
|
||||
if p_ping.returncode == 0:
|
||||
print("yes")
|
||||
else:
|
||||
print("no (fail2ban-server does not respond to ping)")
|
||||
else:
|
||||
print("no (fail2ban-client is not executable)")
|
||||
else:
|
||||
import os
|
||||
|
||||
print("no (fail2ban-client not found in path: %s)" %
|
||||
os.environ["PATH"])
|
||||
|
||||
|
||||
def suggest():
|
||||
"""
|
||||
Iterate all defined flavors (source of data) and fields (graph to display)
|
||||
"""
|
||||
# Just use basic for autoconf/suggest
|
||||
flavor = "basic"
|
||||
for field in STATUS_FLAVORS_FIELDS[flavor]:
|
||||
print("%s_%s" % (flavor, field if len(flavor) > 0 else flavor))
|
||||
|
||||
|
||||
def __get_jail_status(jail, flavor):
|
||||
"""
|
||||
Return cache or execute `fail2ban-client status <jail> <flavor>`
|
||||
and save to cache and return
|
||||
"""
|
||||
cache_filename = __get_jail_status_cache_file(jail)
|
||||
try:
|
||||
mtime = stat(cache_filename).st_mtime
|
||||
except OSError:
|
||||
mtime = 0
|
||||
if time() - mtime > CACHE_MAX_AGE:
|
||||
p = Popen(["fail2ban-client", "status", jail, flavor], shell=False,
|
||||
stdout=PIPE)
|
||||
jail_status_data = p.communicate()[0]
|
||||
with open(cache_filename, 'w') as f:
|
||||
f.write(jail_status_data)
|
||||
else:
|
||||
with open(cache_filename, 'r') as f:
|
||||
jail_status_data = f.read()
|
||||
return jail_status_data
|
||||
|
||||
|
||||
def __normalize(name):
|
||||
name = re.sub("[^a-z0-9A-Z]", "_", name)
|
||||
return name
|
||||
|
||||
|
||||
def __count_groups(value_str):
|
||||
"""
|
||||
Helper method to count unique values in the space-delimited value_str
|
||||
"""
|
||||
return Counter([key for key in value_str.split(" ") if key])
|
||||
|
||||
|
||||
def config(flavor, field):
|
||||
"""
|
||||
Print config data (e.g. munin-run config), including possible labels
|
||||
by parsing real status data
|
||||
"""
|
||||
print("graph_title fail2ban %s %s" % (flavor, field))
|
||||
print("graph_args --base 1000 -l 0")
|
||||
print("graph_vlabel Hosts banned")
|
||||
print("graph_category security")
|
||||
print("graph_info"
|
||||
" Number of hosts banned using status flavor %s and field %s" %
|
||||
(flavor, field))
|
||||
print("graph_total total")
|
||||
munin_fields, field_labels, values = parse_fail2ban_status(flavor, field)
|
||||
for munin_field in munin_fields:
|
||||
print("%s.label %s" % (munin_field, field_labels[munin_field]))
|
||||
|
||||
|
||||
def run(flavor, field):
|
||||
"""
|
||||
Parse the status data and print all values for a given flavor and field
|
||||
"""
|
||||
munin_fields, field_labels, values = parse_fail2ban_status(flavor, field)
|
||||
for munin_field in munin_fields:
|
||||
print("%s.value %s" % (munin_field, values[munin_field]))
|
||||
|
||||
|
||||
def parse_fail2ban_status(flavor, field):
|
||||
"""
|
||||
Shared method to parse jail status output and determine field names
|
||||
and aggregate counts
|
||||
"""
|
||||
field_labels = dict()
|
||||
values = dict()
|
||||
for jail in __get_jail_names():
|
||||
jail_status = __get_jail_status(jail, flavor)
|
||||
for line in jail_status.splitlines()[1:]:
|
||||
if flavor == "basic":
|
||||
if field == "jail":
|
||||
if line.startswith(" |- Currently banned:"):
|
||||
internal_name = __normalize(jail)
|
||||
field_labels[internal_name] = jail
|
||||
values[internal_name] = line.split(":", 1)[1].strip()
|
||||
else:
|
||||
raise Exception(
|
||||
"Undefined field %s for flavor %s for jail %s" %
|
||||
(field, flavor, jail))
|
||||
elif flavor == "cymru":
|
||||
# Determine which line of output we care about
|
||||
if field == "asn":
|
||||
search_string = " |- Banned ASN list:"
|
||||
elif field == "country":
|
||||
search_string = " |- Banned Country list:"
|
||||
elif field == "rir":
|
||||
search_string = " `- Banned RIR list:"
|
||||
else:
|
||||
raise Exception(
|
||||
"Undefined field %s for flavor %s for jail %s" %
|
||||
(field, flavor, jail))
|
||||
if line.startswith(search_string):
|
||||
prefix = "%s_%s" % (flavor, field)
|
||||
# Now process/aggregate the counts
|
||||
counts_dict = __count_groups(line.split(":", 1)[1].strip())
|
||||
for key in counts_dict:
|
||||
internal_name = "%s_%s" % (prefix, __normalize(key))
|
||||
if internal_name in field_labels:
|
||||
values[internal_name] += counts_dict[key]
|
||||
else:
|
||||
field_labels[internal_name] = key
|
||||
values[internal_name] = counts_dict[key]
|
||||
else:
|
||||
raise Exception("Undefined flavor: %s for jail %s" %
|
||||
(flavor, jail))
|
||||
return sorted(field_labels.keys()), field_labels, values
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
if len(sys.argv) > 1:
|
||||
command = sys.argv[1]
|
||||
else:
|
||||
command = ""
|
||||
if command == "autoconf":
|
||||
autoconf()
|
||||
elif command == "suggest":
|
||||
suggest()
|
||||
elif command == 'config':
|
||||
flavor_, field_ = __parse_plugin_name()
|
||||
config(flavor_, field_)
|
||||
else:
|
||||
flavor_, field_ = __parse_plugin_name()
|
||||
run(flavor_, field_)
|
115
extern/ksm_
vendored
115
extern/ksm_
vendored
|
@ -1,115 +0,0 @@
|
|||
#!/usr/bin/env python3
|
||||
#
|
||||
# ksm
|
||||
#
|
||||
# Plugin to monitor ksm - Kernel Samepage Merging.
|
||||
#
|
||||
# Author: Markus Heberling <markus@tisoft.de>
|
||||
#
|
||||
# v1.0 2011-04-05 - First version
|
||||
#
|
||||
# Usage: place in /etc/munin/plugins/ (or link it there using ln -s)
|
||||
#
|
||||
# Parameters understood:
|
||||
#
|
||||
# config (required)
|
||||
# autoconf (optional - used by munin-config)
|
||||
#
|
||||
# Magic markers - optional - used by installation scripts and
|
||||
# munin-config:
|
||||
#
|
||||
# #%# capabilities=autoconf suggest
|
||||
# #%# family=auto
|
||||
|
||||
import os
|
||||
import sys
|
||||
import warnings # noqa
|
||||
|
||||
#################################
|
||||
title = 'Kernel Samepage Merging'
|
||||
#################################
|
||||
|
||||
|
||||
def autoconf():
|
||||
if os.path.exists('/sys/kernel/mm/ksm/run'):
|
||||
for line in open('/sys/kernel/mm/ksm/run'):
|
||||
if line.strip() == '1':
|
||||
print('yes')
|
||||
break
|
||||
else:
|
||||
print('no (/sys/kernel/mm/ksm/run does not contain "1")')
|
||||
else:
|
||||
print('no (/sys/kernel/mm/ksm/run not found)')
|
||||
sys.exit(0)
|
||||
|
||||
|
||||
def suggest():
|
||||
print('pages_absolute')
|
||||
print('pages_relative')
|
||||
print('full_scans')
|
||||
sys.exit(0)
|
||||
|
||||
|
||||
def config():
|
||||
if('ksm_pages_absolute' in sys.argv[0]):
|
||||
print('graph_category system')
|
||||
print('graph_title %s Pages Absolute' % (title))
|
||||
print('graph_order pages_unshared pages_volatile pages_shared pages_sharing')
|
||||
print('pages_shared.info how many shared pages are being used')
|
||||
print('pages_sharing.info how many more sites are sharing them i.e. how much saved')
|
||||
print('pages_unshared.info how many pages unique but repeatedly checked for merging')
|
||||
print('pages_volatile.info how many pages changing too fast to be placed in a tree')
|
||||
print('pages_shared.label pages_shared')
|
||||
print('pages_sharing.label pages_sharing')
|
||||
print('pages_unshared.label pages_unshared')
|
||||
print('pages_volatile.label pages_volatile')
|
||||
print('pages_shared.draw AREASTACK')
|
||||
print('pages_sharing.draw AREASTACK')
|
||||
print('pages_unshared.draw AREASTACK')
|
||||
print('pages_volatile.draw AREASTACK')
|
||||
elif('ksm_pages_relative' in sys.argv[0]):
|
||||
print('graph_category system')
|
||||
print('graph_title %s Pages Relative' % (title))
|
||||
print('pages_sharing_shared.info ratio of sharing to shared pages')
|
||||
print('pages_unshared_sharing.info ratio of unshared to sharing pages')
|
||||
print('pages_sharing_shared.label pages_sharing_shared')
|
||||
print('pages_unshared_sharing.label pages_unshared_sharing')
|
||||
print('pages_sharing_shared.cdef pages_sharing_shared,100,*')
|
||||
print('pages_unshared_sharing.cdef pages_unshared_sharing,100,*')
|
||||
elif('ksm_full_scans' in sys.argv[0]):
|
||||
print('graph_category system')
|
||||
print('graph_title %s Full Scans' % (title))
|
||||
print('full_scans.info how many times all mergeable areas have been scanned')
|
||||
print('full_scans.label full_scans')
|
||||
sys.exit(0)
|
||||
|
||||
|
||||
if len(sys.argv) > 1:
|
||||
if sys.argv[1] == 'autoconf':
|
||||
autoconf()
|
||||
elif sys.argv[1] == 'config':
|
||||
config()
|
||||
elif sys.argv[1] == 'suggest':
|
||||
suggest()
|
||||
elif sys.argv[1]:
|
||||
print('unknown argument "' + sys.argv[1] + '"')
|
||||
sys.exit(1)
|
||||
|
||||
pages_shared = int(open('/sys/kernel/mm/ksm/pages_shared').read())
|
||||
pages_sharing = int(open('/sys/kernel/mm/ksm/pages_sharing').read())
|
||||
pages_unshared = int(open('/sys/kernel/mm/ksm/pages_unshared').read())
|
||||
pages_volatile = int(open('/sys/kernel/mm/ksm/pages_volatile').read())
|
||||
full_scans = int(open('/sys/kernel/mm/ksm/full_scans').read())
|
||||
|
||||
if('ksm_pages_absolute' in sys.argv[0]):
|
||||
print('pages_shared.value %i' % pages_shared)
|
||||
print('pages_sharing.value %i' % pages_sharing)
|
||||
print('pages_unshared.value %i' % pages_unshared)
|
||||
print('pages_volatile.value %i' % pages_volatile)
|
||||
elif('ksm_pages_relative' in sys.argv[0]):
|
||||
print('pages_sharing_shared.value %f'
|
||||
% (float(pages_sharing) / float(pages_shared) if pages_shared > 0 else 0))
|
||||
print('pages_unshared_sharing.value %f'
|
||||
% (float(pages_unshared) / float(pages_sharing) if pages_sharing > 0 else 0))
|
||||
elif('ksm_full_scans' in sys.argv[0]):
|
||||
print('full_scans.value %i' % full_scans)
|
137
extern/kvm_cpu
vendored
137
extern/kvm_cpu
vendored
|
@ -1,137 +0,0 @@
|
|||
#!/usr/bin/env python3
|
||||
"""
|
||||
=encoding utf8
|
||||
|
||||
=head1 NAME
|
||||
|
||||
kvm_cpu - show CPU usage of VM
|
||||
|
||||
|
||||
=head1 CONFIGURATION
|
||||
|
||||
Parsed environment variables:
|
||||
|
||||
vmsuffix: part of VM name to be removed
|
||||
|
||||
|
||||
=head1 LICENSE
|
||||
|
||||
GPLv3
|
||||
|
||||
SPDX-License-Identifier: GPL-3.0-only
|
||||
|
||||
|
||||
=head1 AUTHORS
|
||||
|
||||
Maxence Dunnewind
|
||||
|
||||
Rodolphe Quiédeville
|
||||
|
||||
|
||||
=head1 MAGIC MARKERS
|
||||
|
||||
#%# capabilities=autoconf
|
||||
#%# family=contrib
|
||||
|
||||
=cut
|
||||
"""
|
||||
|
||||
import os
|
||||
import re
|
||||
import sys
|
||||
from subprocess import Popen, PIPE
|
||||
|
||||
|
||||
def config(vm_names):
|
||||
''' Print the plugin's config
|
||||
@param vm_names : a list of "cleaned" vms' name
|
||||
'''
|
||||
percent = 100 * len(
|
||||
list(
|
||||
filter(
|
||||
lambda x: x[0:3] == 'cpu' and x[3] != ' ', open('/proc/stat', 'r').readlines())))
|
||||
|
||||
base_config = """graph_title KVM Virtual Machine CPU usage
|
||||
graph_vlabel %%
|
||||
graph_category virtualization
|
||||
graph_scale no
|
||||
graph_period second
|
||||
graph_info This graph shows the current CPU used by virtual machines
|
||||
graph_args --base 1000 -r --lower-limit 0 --upper-limit %d""" % percent
|
||||
print(base_config)
|
||||
for vm in vm_names:
|
||||
print("%s_cpu.label %s" % (vm, vm))
|
||||
print("%s_cpu.min 0" % vm)
|
||||
print("%s_cpu.type DERIVE" % vm)
|
||||
print("%s_cpu.draw AREASTACK" % vm)
|
||||
print("%s_cpu.info percent of cpu time used by virtual machine" % vm)
|
||||
|
||||
|
||||
def clean_vm_name(vm_name):
|
||||
''' Replace all special chars
|
||||
@param vm_name : a vm's name
|
||||
@return cleaned vm's name
|
||||
'''
|
||||
# suffix part defined in conf
|
||||
suffix = os.getenv('vmsuffix')
|
||||
if suffix:
|
||||
vm_name = re.sub(suffix, '', vm_name)
|
||||
# proxmox uses kvm with -name parameter
|
||||
parts = vm_name.split('\x00')
|
||||
if parts[0].endswith('kvm'):
|
||||
try:
|
||||
return parts[parts.index('-name') + 1]
|
||||
except ValueError:
|
||||
pass
|
||||
return re.sub(r"[^a-zA-Z0-9_]", "_", vm_name)
|
||||
|
||||
|
||||
def detect_kvm():
|
||||
''' Check if kvm is installed '''
|
||||
kvm = Popen("which kvm", shell=True, stdout=PIPE)
|
||||
kvm.communicate()
|
||||
return not bool(kvm.returncode)
|
||||
|
||||
|
||||
def find_vm_names(pids):
|
||||
'''Find and clean vm names from pids
|
||||
@return a dictionary of {pids : cleaned vm name}
|
||||
'''
|
||||
result = {}
|
||||
for pid in pids:
|
||||
cmdline = open("/proc/%s/cmdline" % pid, "r")
|
||||
result[pid] = clean_vm_name(
|
||||
re.sub(r"^.*guest=([a-zA-Z0-9.-_-]*).*$", r"\1", cmdline.readline()))
|
||||
return result
|
||||
|
||||
|
||||
def list_pids():
|
||||
''' Find the pid of kvm processes
|
||||
@return a list of pids from running kvm
|
||||
'''
|
||||
pid = Popen("pidof qemu-kvm qemu-system-x86_64 kvm", shell=True, stdout=PIPE)
|
||||
return pid.communicate()[0].decode().split()
|
||||
|
||||
|
||||
def fetch(vms):
|
||||
''' Fetch values for a list of pids
|
||||
@param dictionary {kvm_pid: cleaned vm name}
|
||||
'''
|
||||
for pid, name in vms.items():
|
||||
user, system = open("/proc/%s/stat" % pid, 'r').readline().split(' ')[13:15]
|
||||
print('%s_cpu.value %d' % (name, int(user) + int(system)))
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
if len(sys.argv) > 1:
|
||||
if sys.argv[1] in ['autoconf', 'detect']:
|
||||
if detect_kvm():
|
||||
print("yes")
|
||||
else:
|
||||
print("no")
|
||||
elif sys.argv[1] == "config":
|
||||
config(find_vm_names(list_pids()).values())
|
||||
else:
|
||||
fetch(find_vm_names(list_pids()))
|
||||
else:
|
||||
fetch(find_vm_names(list_pids()))
|
115
extern/kvm_io
vendored
115
extern/kvm_io
vendored
|
@ -1,115 +0,0 @@
|
|||
#!/usr/bin/env python3
|
||||
# -*- coding: utf-8 -*-
|
||||
# vim: set fileencoding=utf-8
|
||||
#
|
||||
# Munin plugin to show io by vm
|
||||
#
|
||||
# Copyright Maxence Dunnewind, Rodolphe Quiédeville
|
||||
#
|
||||
# License : GPLv3
|
||||
#
|
||||
# parsed environment variables:
|
||||
# vmsuffix: part of vm name to be removed
|
||||
#
|
||||
#%# capabilities=autoconf
|
||||
#%# family=contrib
|
||||
|
||||
import re, os, sys
|
||||
from subprocess import Popen, PIPE
|
||||
|
||||
def config(vm_names):
|
||||
''' Print the plugin's config
|
||||
@param vm_names : a list of "cleaned" vms' name
|
||||
'''
|
||||
base_config = """graph_title KVM Virtual Machine IO usage
|
||||
graph_vlabel Bytes read(-)/written(+) per second
|
||||
graph_category virtualization
|
||||
graph_info This graph shows the block device I/O used of virtual machines
|
||||
graph_args --base 1024
|
||||
"""
|
||||
print (base_config)
|
||||
|
||||
for vm in vm_names:
|
||||
print "%s_read.label %s" % (vm, vm)
|
||||
print "%s_read.type COUNTER" % vm
|
||||
print "%s_read.min 0" % vm
|
||||
print "%s_read.info I/O used by virtual machine %s" % (vm, vm)
|
||||
print "%s_read.graph no" % vm
|
||||
print "%s_write.label %s" % (vm, vm)
|
||||
print "%s_write.type COUNTER" % vm
|
||||
print "%s_write.min 0" % vm
|
||||
print "%s_write.negative %s_read" % (vm, vm)
|
||||
print "%s_write.info I/O used by virtual machine %s" % (vm, vm)
|
||||
|
||||
def clean_vm_name(vm_name):
|
||||
''' Replace all special chars
|
||||
@param vm_name : a vm's name
|
||||
@return cleaned vm's name
|
||||
'''
|
||||
# suffix part defined in conf
|
||||
suffix = os.getenv('vmsuffix')
|
||||
if suffix:
|
||||
vm_name = re.sub(suffix,'',vm_name)
|
||||
# proxmox uses kvm with -name parameter
|
||||
parts = vm_name.split('\x00')
|
||||
if (parts[0].endswith('kvm')):
|
||||
try:
|
||||
return parts[parts.index('-name')+1]
|
||||
except ValueError:
|
||||
pass
|
||||
return re.sub(r"[^a-zA-Z0-9_]", "_", vm_name)
|
||||
|
||||
def fetch(vms):
|
||||
''' Fetch values for a list of pids
|
||||
@param dictionary {kvm_pid: cleaned vm name}
|
||||
'''
|
||||
res = {}
|
||||
for pid in vms:
|
||||
f = open("/proc/%s/io" % pid, "r")
|
||||
for line in f.readlines():
|
||||
if "read_bytes" in line:
|
||||
read = line.split()[1]
|
||||
print "%s_read.value %s" % (vms[pid], read)
|
||||
if "write_bytes" in line:
|
||||
write = line.split()[1]
|
||||
print "%s_write.value %s" % (vms[pid], write)
|
||||
break
|
||||
f.close()
|
||||
|
||||
def detect_kvm():
|
||||
''' Check if kvm is installed
|
||||
'''
|
||||
kvm = Popen("which kvm", shell=True, stdout=PIPE)
|
||||
kvm.communicate()
|
||||
return not bool(kvm.returncode)
|
||||
|
||||
def find_vm_names(pids):
|
||||
'''Find and clean vm names from pids
|
||||
@return a dictionary of {pids : cleaned vm name}
|
||||
'''
|
||||
result = {}
|
||||
for pid in pids:
|
||||
cmdline = open("/proc/%s/cmdline" % pid, "r")
|
||||
result[pid] = clean_vm_name(re.sub(r"^.*guest=([a-zA-Z0-9.-_-]*).*$",r"\1", cmdline.readline()))
|
||||
return result
|
||||
|
||||
def list_pids():
|
||||
''' Find the pid of kvm processes
|
||||
@return a list of pids from running kvm
|
||||
'''
|
||||
pid = Popen("pidof qemu-kvm qemu-system-x86_64 kvm", shell=True, stdout=PIPE)
|
||||
return pid.communicate()[0].split()
|
||||
|
||||
if __name__ == "__main__":
|
||||
if len(sys.argv) > 1:
|
||||
if sys.argv[1] in ['autoconf', 'detect']:
|
||||
if detect_kvm():
|
||||
print "yes"
|
||||
else:
|
||||
print "no"
|
||||
elif sys.argv[1] == "config":
|
||||
config(find_vm_names(list_pids()).values())
|
||||
else:
|
||||
fetch(find_vm_names(list_pids()))
|
||||
else:
|
||||
fetch(find_vm_names(list_pids()))
|
110
extern/kvm_mem
vendored
110
extern/kvm_mem
vendored
|
@ -1,110 +0,0 @@
|
|||
#!/usr/bin/env python3
|
||||
# -*- coding: utf-8 -*-
|
||||
# vim: set fileencoding=utf-8
|
||||
#
|
||||
# Munin plugin to show amount of memory used by vm
|
||||
#
|
||||
# Copyright Maxence Dunnewind, Rodolphe Quiédeville, Adrien Pujol
|
||||
#
|
||||
# License : GPLv3
|
||||
#
|
||||
# parsed environment variables:
|
||||
# vmsuffix: part of vm name to be removed
|
||||
#
|
||||
#%# capabilities=autoconf
|
||||
#%# family=contrib
|
||||
|
||||
import re, os, sys
|
||||
from subprocess import Popen, PIPE
|
||||
|
||||
def config(vm_names):
|
||||
''' Print the plugin's config
|
||||
@param vm_names : a list of "cleaned" vms' name
|
||||
'''
|
||||
base_config = """graph_title KVM Virtual Machine Memory usage
|
||||
graph_vlabel Bytes
|
||||
graph_category virtualization
|
||||
graph_info This graph shows the current amount of memory used by virtual machines
|
||||
graph_args --base 1024 -l 0"""
|
||||
print(base_config)
|
||||
for vm in vm_names:
|
||||
print("%s_mem.label %s" % (vm, vm))
|
||||
print("%s_mem.type GAUGE" % vm)
|
||||
print("%s_mem.draw %s" % (vm, "AREASTACK"))
|
||||
print("%s_mem.info memory used by virtual machine %s" % (vm, vm))
|
||||
|
||||
|
||||
def clean_vm_name(vm_name):
|
||||
''' Replace all special chars
|
||||
@param vm_name : a vm's name
|
||||
@return cleaned vm's name
|
||||
'''
|
||||
# suffix part defined in conf
|
||||
suffix = os.getenv('vmsuffix')
|
||||
if suffix:
|
||||
vm_name = re.sub(suffix,'',vm_name)
|
||||
|
||||
# proxmox uses kvm with -name parameter
|
||||
parts = vm_name.split('\x00')
|
||||
if (parts[0].endswith('kvm')):
|
||||
try:
|
||||
return parts[parts.index('-name')+1]
|
||||
except ValueError:
|
||||
pass
|
||||
|
||||
return re.sub(r"[^a-zA-Z0-9_]", "_", vm_name)
|
||||
|
||||
def fetch(vms):
|
||||
''' Fetch values for a list of pids
|
||||
@param dictionary {kvm_pid: cleaned vm name}
|
||||
'''
|
||||
res = {}
|
||||
for pid in vms:
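# the configured memory size is parsed from the '-m <megabytes>' argument on
# the kvm command line and converted from MiB to bytes below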
|
||||
try:
|
||||
cmdline = open("/proc/%s/cmdline" % pid, "r")
|
||||
amount = re.sub(r"^.*-m\x00(.*)\x00-smp.*$",r"\1", cmdline.readline())
|
||||
amount = int(amount) * 1024 * 1024
|
||||
print("%s_mem.value %s" % (vms[pid], amount))
|
||||
except:
|
||||
cmdline = open("/proc/%s/cmdline" % pid, "r")
|
||||
amount = re.sub(r"^.*-m\x00(\d+).*$",r"\1", cmdline.readline())
|
||||
amount = int(amount) * 1024 * 1024
|
||||
print("%s_mem.value %s" % (vms[pid], amount))
|
||||
|
||||
def detect_kvm():
|
||||
''' Check if kvm is installed
|
||||
'''
|
||||
kvm = Popen("which kvm", shell=True, stdout=PIPE)
|
||||
kvm.communicate()
|
||||
return not bool(kvm.returncode)
|
||||
|
||||
def find_vm_names(pids):
|
||||
'''Find and clean vm names from pids
|
||||
@return a dictionary of {pids : cleaned vm name}
|
||||
'''
|
||||
result = {}
|
||||
for pid in pids:
|
||||
cmdline = open("/proc/%s/cmdline" % pid, "r")
|
||||
result[pid] = clean_vm_name(re.sub(r"^.*guest=([a-zA-Z0-9.-_-]*).*$",r"\1", cmdline.readline()))
|
||||
return result
|
||||
|
||||
def list_pids():
|
||||
''' Find the pid of kvm processes
|
||||
@return a list of pids from running kvm
|
||||
'''
|
||||
pid = Popen("pidof qemu-kvm qemu-system-x86_64 kvm", shell=True, stdout=PIPE, text=True)
|
||||
return pid.communicate()[0].split()
|
||||
|
||||
if __name__ == "__main__":
|
||||
if len(sys.argv) > 1:
|
||||
if sys.argv[1] in ['autoconf', 'detect']:
|
||||
if detect_kvm():
|
||||
print("yes")
|
||||
else:
|
||||
print("no")
|
||||
elif sys.argv[1] == "config":
|
||||
config(find_vm_names(list_pids()).values())
|
||||
else:
|
||||
fetch(find_vm_names(list_pids()))
|
||||
else:
|
||||
fetch(find_vm_names(list_pids()))
|
240 extern/kvm_net vendored
@@ -1,240 +0,0 @@
#!/usr/bin/env python3
|
||||
"""
|
||||
|
||||
=head1 NAME
|
||||
|
||||
kvm_net - Munin plugin to show the network I/O per VM
|
||||
|
||||
|
||||
=head1 APPLICABLE SYSTEMS
|
||||
|
||||
Virtualization servers with KVM-based VMs may be able to track the network
traffic of their VMs, provided the KVM processes are started in a specific way.

Proxmox-based virtualization hosts typically fit into this category.
|
||||
|
||||
You can easily check if your KVM processes are started in the expected way, by
|
||||
running the following command:
|
||||
|
||||
ps -ef | grep "netdev.*ifname="
|
||||
|
||||
The plugin can be used, if the above command outputs one line for every
|
||||
currently running VM.
|
||||
|
||||
In all other cases you need to use other munin plugins instead, e.g. "libvirt".
|
||||
|
||||
|
||||
=head1 CONFIGURATION
|
||||
|
||||
parsed environment variables:
|
||||
|
||||
* vmsuffix: part of vm name to be removed
|
||||
|
||||
|
||||
=head1 AUTHOR
|
||||
|
||||
Copyright (C) 2012 - Igor Borodikhin
|
||||
Copyright (C) 2018 - Lars Kruse <devel@sumpfralle.de>
|
||||
|
||||
|
||||
=head1 LICENSE
|
||||
|
||||
GPLv3
|
||||
|
||||
|
||||
=head1 MAGIC MARKERS
|
||||
|
||||
#%# capabilities=autoconf
|
||||
#%# family=contrib
|
||||
|
||||
=cut
|
||||
"""
|
||||
|
||||
import os
|
||||
import re
|
||||
from subprocess import Popen, PIPE
|
||||
import sys
|
||||
|
||||
|
||||
VM_NAME_REGEX = re.compile("^.*\x00-{arg_name}\x00(.+)\x00.*$")
|
||||
KVM_INTERFACE_NAME_REGEX = re.compile("(?:^|,)ifname=([^,]+)(?:,|$)")
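# e.g. an illustrative -netdev value "tap,ifname=tap105i0,script=no" yields "tap105i0"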
|
||||
|
||||
|
||||
def config(vm_names):
|
||||
""" Print the plugin's config
|
||||
|
||||
@param vm_names : a list of "cleaned" vms' name
|
||||
"""
|
||||
print("graph_title KVM Network I/O")
|
||||
print("graph_vlabel Bytes rx(-)/tx(+) per second")
|
||||
print("graph_category virtualization")
|
||||
print("graph_args --base 1024")
|
||||
print("graph_info This graph shows the network I/O of the virtual "
|
||||
"machines. It is only usable for VMs that were started in a very "
|
||||
"specific way. If you see no values in the diagrams, then you "
|
||||
"should check, if the command \"ps -ef | grep 'netdev.*ifname='\" "
|
||||
"returns one line of output for every running VM. If there is no "
|
||||
"output, then you need to change the setup of your VMs or you need "
|
||||
"to use a different munin plugin for monitoring the network traffic "
|
||||
"(e.g. 'libvirt').")
|
||||
print()
|
||||
for vm in vm_names:
|
||||
print("%s_in.label %s" % (vm, vm))
|
||||
print("%s_in.type COUNTER" % vm)
|
||||
print("%s_in.min 0" % vm)
|
||||
print("%s_in.graph no" % vm)
|
||||
print("%s_out.negative %s_in" % (vm, vm))
|
||||
print("%s_out.label %s" % (vm, vm))
|
||||
print("%s_out.type COUNTER" % vm)
|
||||
print("%s_out.min 0" % vm)
|
||||
|
||||
|
||||
def clean_vm_name(vm_name):
|
||||
""" Replace all special chars
|
||||
|
||||
@param vm_name : a vm's name
|
||||
@return cleaned vm's name
|
||||
"""
|
||||
# suffix part defined in conf
|
||||
suffix = os.getenv("vmsuffix")
|
||||
if suffix:
|
||||
vm_name = re.sub(suffix, "", vm_name)
|
||||
# proxmox uses kvm with -name parameter
|
||||
parts = vm_name.split('\x00')
|
||||
if (parts[0].endswith('kvm')):
|
||||
try:
|
||||
return parts[parts.index('-name')+1]
|
||||
except ValueError:
|
||||
pass
|
||||
return re.sub(r"[^a-zA-Z0-9_]", "_", vm_name)
|
||||
|
||||
|
||||
def fetch(vms):
|
||||
""" Fetch values for a list of pids
|
||||
|
||||
@param dictionary {kvm_pid: cleaned vm name}
|
||||
"""
|
||||
for pid, vm_data in vms.items():
|
||||
vm_interface_names = get_vm_network_interface_names(pid)
|
||||
sum_incoming = 0
|
||||
sum_outgoing = 0
|
||||
interface_found = False
|
||||
with open("/proc/net/dev", "r") as net_file:
|
||||
for line in net_file.readlines():
|
||||
tokens = line.split()
|
||||
current_interface_name = tokens[0].rstrip(":").strip()
|
||||
if current_interface_name in vm_interface_names:
|
||||
sum_incoming += int(tokens[1])
|
||||
sum_outgoing += int(tokens[9])
|
||||
interface_found = True
|
||||
if not interface_found:
|
||||
# we want to distinguish "no traffic" from "not found"
|
||||
sum_incoming = "U"
|
||||
sum_outgoing = "U"
|
||||
print("%s_in.value %s" % (vm_data, sum_incoming))
|
||||
print("%s_out.value %s" % (vm_data, sum_outgoing))
|
||||
|
||||
|
||||
def get_vm_network_interface_names(pid):
|
||||
""" return the MAC addresses configured for network interfacs of a PID """
|
||||
result = set()
|
||||
for netdev_description in _get_kvm_process_arguments(pid, "netdev"):
|
||||
match = KVM_INTERFACE_NAME_REGEX.search(netdev_description)
|
||||
if match:
|
||||
result.add(match.groups()[0])
|
||||
return result
|
||||
|
||||
|
||||
def detect_kvm():
|
||||
""" Check if kvm is installed """
|
||||
kvm = Popen(["which", "kvm"], stdout=PIPE)
|
||||
kvm.communicate()
|
||||
return kvm.returncode == 0
|
||||
|
||||
|
||||
def find_vm_names(pids):
|
||||
"""Find and clean vm names from pids
|
||||
|
||||
@return a dictionary of {pids : cleaned vm name}
|
||||
"""
|
||||
result = {}
|
||||
for pid in pids:
|
||||
name = None
|
||||
name_arg_values = _get_kvm_process_arguments(pid, "name")
|
||||
if name_arg_values:
|
||||
name_arg_value = name_arg_values[0]
|
||||
if "," in name_arg_value:
|
||||
# the modern parameter format may look like this:
|
||||
# guest=foo,debug-threads=on
|
||||
for index, token in enumerate(name_arg_value.split(",")):
|
||||
if (index == 0) and ("=" not in token):
|
||||
# the first item may be the plain name
|
||||
name = token
|
||||
elif "=" in token:
|
||||
key, value = token.split("=", 1)
|
||||
if key == "guest":
|
||||
name = value
|
||||
else:
|
||||
# unknown format (no "mapping")
|
||||
pass
|
||||
else:
|
||||
name = name_arg_value
|
||||
if name is None:
|
||||
print("Failed to parse VM name from commandline of process: {}"
|
||||
.format(name_arg_values), file=sys.stderr)
|
||||
else:
|
||||
result[pid] = clean_vm_name(name)
|
||||
return result
|
||||
|
||||
|
||||
def _get_kvm_process_arguments(pid, arg_name):
|
||||
""" parse all value with the given name from the process identified by PID
|
||||
|
||||
The result is a list of the tokens that follow this argument name. The result
|
||||
is empty in case of problems.
|
||||
"""
|
||||
# the "cmdline" (e.g. /proc/self/cmdline) is a null-separated token list
|
||||
try:
|
||||
with open("/proc/%s/cmdline" % pid, "r") as cmdline_file:
|
||||
cmdline = cmdline_file.read()
|
||||
except IOError:
|
||||
# the process seems to have died meanwhile
|
||||
return []
|
||||
is_value = False
|
||||
result = []
|
||||
for arg_token in cmdline.split("\0"):
|
||||
if is_value:
|
||||
# the previous token was our argument name
|
||||
result.append(arg_token)
|
||||
is_value = False
|
||||
elif arg_token == "-{}".format(arg_name):
|
||||
# this is our argument name - we want to store the next value
|
||||
is_value = True
|
||||
else:
|
||||
# any other irrelevant value
|
||||
pass
|
||||
return result
|
||||
|
||||
|
||||
def list_pids():
|
||||
""" Find the pid of kvm processes
|
||||
|
||||
@return a list of pids from running kvm
|
||||
"""
|
||||
pid = Popen(["pidof", "qemu-kvm", "qemu-system-x86_64", "kvm"], stdout=PIPE)
|
||||
return pid.communicate()[0].decode().split()
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
action = sys.argv[1] if len(sys.argv) > 1 else None
|
||||
if action == "autoconf":
|
||||
if detect_kvm():
|
||||
print("yes")
|
||||
else:
|
||||
print("no")
|
||||
elif action == "config":
|
||||
vm_data = find_vm_names(list_pids())
|
||||
config(vm_data.values())
|
||||
else:
|
||||
vm_data = find_vm_names(list_pids())
|
||||
fetch(vm_data)
|
99 extern/lvm_ vendored
@@ -1,99 +0,0 @@
#!/bin/sh
|
||||
# -*- sh -*-
|
||||
|
||||
: << EOF
|
||||
=head1 NAME
|
||||
|
||||
lvm_ - Wildcard plugin for monitoring disk usage on LVM. Each Volume Group is graphed separately.
|
||||
|
||||
=head1 CONFIGURATION
|
||||
|
||||
This plugin needs to run as the root user in order to have permission to run sudo lvs and vgs
|
||||
|
||||
[lvm_*]
|
||||
user root
|
||||
|
||||
=head1 AUTHOR
|
||||
|
||||
=over 4
|
||||
|
||||
=item * PatrickDK (Original Author)
|
||||
|
||||
=item * Niall Donegan
|
||||
|
||||
=back
|
||||
|
||||
=head1 LICENSE
|
||||
|
||||
Unknown license
|
||||
|
||||
|
||||
=head1 MAGIC MARKERS
|
||||
|
||||
=begin comment
|
||||
|
||||
These magic markers are used by munin-node-configure when installing
|
||||
munin-node.
|
||||
|
||||
=end comment
|
||||
|
||||
#%# family=auto
|
||||
#%# capabilities=autoconf suggest
|
||||
|
||||
=cut
|
||||
|
||||
EOF
|
||||
|
||||
. $MUNIN_LIBDIR/plugins/plugin.sh
|
||||
|
||||
|
||||
if [ "$1" = "autoconf" ]; then
|
||||
if ! command -v sudo lvs >/dev/null; then
|
||||
echo "no (sudo lvs not found)"
|
||||
elif ! command -v vgs >/dev/null; then
|
||||
echo "no (vgs not found)"
|
||||
else
|
||||
echo "yes"
|
||||
fi
|
||||
exit 0
|
||||
fi
|
||||
|
||||
if [ "$1" = "suggest" ]; then
|
||||
sudo vgs -o vg_name --noheadings | sed -e 's/\ *//'
|
||||
exit 0
|
||||
fi
|
||||
|
||||
|
||||
vg=`echo $0 | awk '{ sub(".*lvm_","",\$1); print \$1; }'`
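# the wildcard suffix of the plugin's symlink name (after "lvm_") selects the volume group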
|
||||
|
||||
clean_name() {
|
||||
echo "$(clean_fieldname "$1")"
|
||||
}
|
||||
|
||||
|
||||
if [ "$1" = "config" ]; then
|
||||
|
||||
echo "graph_title Logical Volume Usage($vg)"
|
||||
echo 'graph_args --base 1024 -l 0'
|
||||
echo 'graph_category disk'
|
||||
echo 'graph_info This graph shows disk usage on the machine.'
|
||||
echo "free.label free"
|
||||
echo "free.draw AREA"
|
||||
sudo lvs --units b --nosuffix --noheadings | grep "$vg" | while read i; do
|
||||
name=`clean_name $i`
|
||||
echo -n "$name.label "
|
||||
echo $i | awk '{ print $1 }'
|
||||
echo "$name.draw STACK"
|
||||
done
|
||||
exit 0
|
||||
fi
|
||||
|
||||
i=`sudo vgs --units b --nosuffix --noheadings | grep "$vg"`
|
||||
echo -n "free.value "
|
||||
echo $i | awk '{ print $7 }'
|
||||
|
||||
sudo lvs --units b --nosuffix --noheadings | grep "$vg" | while read i; do
|
||||
name=`clean_name $i`
|
||||
echo -n "$name.value "
|
||||
echo $i | awk '{ print $4 }'
|
||||
done
|
65 extern/nfs4_client vendored Normal file
@@ -0,0 +1,65 @@
#!@@GOODSH@@
|
||||
# -*- sh -*-
|
||||
|
||||
: << =cut
|
||||
|
||||
=head1 NAME
|
||||
|
||||
nfs4_client - Plugin to monitor NFSv4 client traffic
|
||||
|
||||
=head1 CONFIGURATION
|
||||
|
||||
No configuration
|
||||
|
||||
=head1 AUTHOR
|
||||
|
||||
Unknown author
|
||||
|
||||
=head1 LICENSE
|
||||
|
||||
GPLv2
|
||||
|
||||
=head1 MAGIC MARKERS
|
||||
|
||||
#%# family=auto
|
||||
#%# capabilities=autoconf
|
||||
|
||||
=cut
|
||||
|
||||
NFS=/proc/net/rpc/nfs
|
||||
|
||||
proc="read write commit open open_confirm open_noattr open_downgrade close setattr fsinfo renew setclientid setclientid_confirm lock lockt locku access getattr lookup lookup_root remove rename link symlink create pathconf statfs readlink readdir server_caps delegreturn getacl setacl"
|
||||
|
||||
if [ "$1" = "autoconf" ]; then
|
||||
if [ -f "$NFS" ]; then
|
||||
if grep -q proc4 "$NFS"; then
|
||||
echo yes
|
||||
else
|
||||
echo "no (no proc4 in $NFS)"
|
||||
fi
|
||||
exit 0
|
||||
else
|
||||
echo "no (no $NFS)"
|
||||
exit 0
|
||||
fi
|
||||
fi
|
||||
|
||||
if [ "$1" = "config" ]; then
|
||||
|
||||
echo 'graph_title NFSv4 Client'
|
||||
echo 'graph_args --base 1000 -l 0'
|
||||
# shellcheck disable=SC2016
|
||||
echo 'graph_vlabel requests / ${graph_period}'
|
||||
echo 'graph_total total'
|
||||
echo 'graph_category NFS'
|
||||
for a in $proc ; do echo "$a.label $a" ; echo "$a.type DERIVE"; echo "$a.min 0"; done
|
||||
exit 0
|
||||
fi
|
||||
|
||||
awk '/proc4/ {
|
||||
split("'"$proc"'", names)
|
||||
split($0,values)
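# values[1] is the "proc4" tag, values[2] the op count and values[3] the NULL
# op counter, so the i-th named op lines up with values[i+3]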
|
||||
for (e in names) {
|
||||
printf("%s.value %d\n", names[++i], values[i+3]);
|
||||
}
|
||||
}' $NFS
|
65 extern/nfs_client vendored Normal file
@@ -0,0 +1,65 @@
#!@@GOODSH@@
|
||||
# -*- sh -*-
|
||||
|
||||
: << =cut
|
||||
|
||||
=head1 NAME
|
||||
|
||||
nfs_client - Plugin to monitor NFS client traffic
|
||||
|
||||
=head1 CONFIGURATION
|
||||
|
||||
No configuration
|
||||
|
||||
=head1 AUTHOR
|
||||
|
||||
Unknown author
|
||||
|
||||
=head1 LICENSE
|
||||
|
||||
GPLv2
|
||||
|
||||
=head1 MAGIC MARKERS
|
||||
|
||||
#%# family=auto
|
||||
#%# capabilities=autoconf
|
||||
|
||||
=cut
|
||||
|
||||
NFS=/proc/net/rpc/nfs
|
||||
|
||||
proc="getattr setattr lookup access readlink read write create mkdir symlink mknod remove rmdir rename link readdir readdirplus fsstat fsinfo pathconf commit"
|
||||
|
||||
if [ "$1" = "autoconf" ]; then
|
||||
if [ -f "$NFS" ]; then
|
||||
if grep -q proc3 "$NFS"; then
|
||||
echo yes
|
||||
else
|
||||
echo "no (no proc3 in $NFS)"
|
||||
fi
|
||||
exit 0
|
||||
else
|
||||
echo "no (no $NFS)"
|
||||
exit 0
|
||||
fi
|
||||
fi
|
||||
|
||||
if [ "$1" = "config" ]; then
|
||||
|
||||
echo 'graph_title NFS Client'
|
||||
echo 'graph_args --base 1000 -l 0'
|
||||
# shellcheck disable=SC2016
|
||||
echo 'graph_vlabel requests / ${graph_period}'
|
||||
echo 'graph_total total'
|
||||
echo 'graph_category NFS'
|
||||
for a in $proc ; do echo "$a.label $a" ; echo "$a.type DERIVE"; echo "$a.min 0"; done
|
||||
exit 0
|
||||
fi
|
||||
|
||||
awk '/proc3/ {
|
||||
split("'"$proc"'", names)
|
||||
split($0,values)
|
||||
for (e in names) {
|
||||
printf("%s.value %d\n", names[++i], values[i+3]);
|
||||
}
|
||||
}' $NFS
|
65 extern/nfsd vendored Normal file
@@ -0,0 +1,65 @@
#!@@GOODSH@@
|
||||
# -*- sh -*-
|
||||
|
||||
: << =cut
|
||||
|
||||
=head1 NAME
|
||||
|
||||
nfsd - Plugin to monitor NFS server activity
|
||||
|
||||
=head1 CONFIGURATION
|
||||
|
||||
No configuration
|
||||
|
||||
=head1 AUTHORS
|
||||
|
||||
Plugin created by Alexandre Dupouy, with the assistance of Mike Fedyk
|
||||
|
||||
=head1 LICENSE
|
||||
|
||||
GPLv2
|
||||
|
||||
=head1 MAGIC MARKERS
|
||||
|
||||
#%# family=auto
|
||||
#%# capabilities=autoconf
|
||||
|
||||
=cut
|
||||
|
||||
NFSD=/proc/net/rpc/nfsd
|
||||
|
||||
proc="getattr setattr lookup access readlink read write create mkdir symlink mknod remove rmdir rename link readdir readdirplus fsstat fsinfo pathconf commit"
|
||||
|
||||
if [ "$1" = "autoconf" ]; then
|
||||
if [ -f "$NFSD" ]; then
|
||||
if grep -q proc3 "$NFSD"; then
|
||||
echo yes
|
||||
else
|
||||
echo "no (no proc3 in $NFSD)"
|
||||
fi
|
||||
exit 0
|
||||
else
|
||||
echo "no (no $NFSD)"
|
||||
exit 0
|
||||
fi
|
||||
fi
|
||||
|
||||
if [ "$1" = "config" ]; then
|
||||
|
||||
echo 'graph_title NFS Server'
|
||||
echo 'graph_args --base 1000 -l 0'
|
||||
# shellcheck disable=SC2016
|
||||
echo 'graph_vlabel requests / ${graph_period}'
|
||||
echo 'graph_total total'
|
||||
echo 'graph_category NFS'
|
||||
for a in $proc ; do echo "$a.label $a" ; echo "$a.type DERIVE"; echo "$a.min 0"; done
|
||||
exit 0
|
||||
fi
|
||||
|
||||
awk '/proc3/ {
|
||||
split("'"$proc"'", names)
|
||||
split($0,values)
|
||||
for (e in names)
|
||||
printf("%s.value %d\n", names[++i], values[i+3]);
|
||||
}' $NFSD
|
78 extern/nsfd4 vendored Normal file
@@ -0,0 +1,78 @@
#!@@GOODSH@@
|
||||
# -*- sh -*-
|
||||
|
||||
: << =cut
|
||||
|
||||
=head1 NAME
|
||||
|
||||
nfsd4 - Plugin to graph NFSv4 server activity
|
||||
|
||||
=head1 CONFIGURATION
|
||||
|
||||
No configuration
|
||||
|
||||
=head1 AUTHOR
|
||||
|
||||
2007/10/28 21:30:00 Christian Kujau <lists@nerdbynature.de>
|
||||
|
||||
=head1 LICENSE
|
||||
|
||||
GPLv2
|
||||
|
||||
=head1 NOTES
|
||||
|
||||
[...] based on the nfsd plugin by Alexandre Dupouy
|
||||
=head1 MAGIC MARKERS
|
||||
|
||||
#%# family=auto
|
||||
#%# capabilities=autoconf
|
||||
|
||||
=cut
|
||||
|
||||
NFSD=/proc/net/rpc/nfsd
|
||||
|
||||
proc="access close commit create delegpurge delegreturn getattr getfh link lock \
|
||||
lockt locku lookup lookup_root nverify open openattr open_conf open_dgrd \
|
||||
putfh putpubfh putrootfh read readdir readlink remove rename renew restorefh \
|
||||
savefh secinfo setattr setcltid setcltidconf verify write rellockowner"
|
||||
|
||||
if [ "$1" = "autoconf" ]; then
|
||||
if [ -f "$NFSD" ]; then
|
||||
if grep -q proc4ops "$NFSD"; then
|
||||
echo yes
|
||||
else
|
||||
echo "no (no proc4ops in $NFSD)"
|
||||
fi
|
||||
exit 0
|
||||
else
|
||||
echo "no (no $NFSD)"
|
||||
exit 0
|
||||
fi
|
||||
fi
|
||||
|
||||
if [ ! -f "$NFSD" ] ; then
|
||||
echo graph_title no $NFSD
|
||||
exit 1
|
||||
fi
|
||||
|
||||
if [ "$1" = "config" ]; then
|
||||
|
||||
echo 'graph_title NFSv4 Server'
|
||||
echo 'graph_args --base 1000 -l 0'
|
||||
# shellcheck disable=SC2016
|
||||
echo 'graph_vlabel requests / ${graph_period}'
|
||||
echo 'graph_total total'
|
||||
echo 'graph_category NFS'
|
||||
for a in $proc ; do echo "$a.label $a" ; echo "$a.type DERIVE"; echo "$a.min 0"; done
|
||||
exit 0
|
||||
fi
|
||||
|
||||
i=6;
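# the proc4ops line reads "proc4ops <count> <op0> <op1> ..."; NFSv4 ops 0-2 are
# unused, so the first graphed op (access, op number 3) is in field 6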
|
||||
|
||||
for a in $proc; do
|
||||
echo -n "$a.value "
|
||||
grep proc4ops $NFSD \
|
||||
| cut -f $i -d ' ' \
|
||||
| awk '{print $1}'
|
||||
i=$((i + 1))
|
||||
done
|
44 extern/pihole/pihole_ads_percentage vendored Normal file
@@ -0,0 +1,44 @@
#!/bin/sh
|
||||
|
||||
# Sample setup for '/etc/munin/plugin-conf.d/munin-node':
|
||||
#
|
||||
# [pihole_*]
|
||||
# user root
|
||||
# env.host 127.0.0.1
|
||||
# env.port 80
|
||||
# env.api /admin/api.php
|
||||
|
||||
envhost=${host:-127.0.0.1}
|
||||
envport=${port:-80}
|
||||
envapi=${api:-/admin/api.php}
|
||||
|
||||
apicall=$(curl -s "$envhost:$envport$envapi" 2>/dev/null)
|
||||
stats=$(echo $apicall | sed 's/[{}"]//g' | tr "," "\n")
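# 'stats' now holds one key:value pair per line, e.g. ads_percentage_today:12.3 (value illustrative)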
|
||||
|
||||
case $1 in
|
||||
config)
|
||||
echo "graph_title Pihole blocked ads"
|
||||
echo "graph_vlabel %"
|
||||
echo "graph_category pihole"
|
||||
echo "graph_info This graph shows the Pihole percentage of blocked queries."
|
||||
|
||||
for stat in $stats
|
||||
do
|
||||
uid=$(echo $stat | sed 's/:.*//')
|
||||
if [ $uid = "ads_percentage_today" ]; then
|
||||
echo "$uid.label blocked queries"
|
||||
echo "$uid.warning 2:"
|
||||
fi
|
||||
done
|
||||
|
||||
exit 0;;
|
||||
esac
|
||||
|
||||
for stat in $stats
|
||||
do
|
||||
uid=$(echo $stat | sed 's/:.*//')
|
||||
if [ $uid = "ads_percentage_today" ]; then
|
||||
value=$(echo $stat | sed 's/.*://')
|
||||
echo "$uid.value $value"
|
||||
fi
|
||||
done
|
44 extern/pihole/pihole_blocked_domains vendored Normal file
@@ -0,0 +1,44 @@
#!/bin/sh
|
||||
|
||||
# Sample setup for '/etc/munin/plugin-conf.d/munin-node':
|
||||
#
|
||||
# [pihole_*]
|
||||
# user root
|
||||
# env.host 127.0.0.1
|
||||
# env.port 80
|
||||
# env.api /admin/api.php
|
||||
|
||||
envhost=${host:-127.0.0.1}
|
||||
envport=${port:-80}
|
||||
envapi=${api:-/admin/api.php}
|
||||
|
||||
apicall=$(curl -s "$envhost:$envport$envapi" 2>/dev/null)
|
||||
stats=$(echo $apicall | sed 's/[{}"]//g' | tr "," "\n")
|
||||
|
||||
case $1 in
|
||||
config)
|
||||
echo "graph_title Pihole Blocked Domains"
|
||||
echo "graph_vlabel count"
|
||||
echo "graph_category pihole"
|
||||
echo "graph_info This graph shows the Pihole unique blocked domains."
|
||||
|
||||
for stat in $stats
|
||||
do
|
||||
uid=$(echo $stat | sed 's/:.*//')
|
||||
if [ $uid = "domains_being_blocked" ]; then
|
||||
echo "$uid.label blocked domains"
|
||||
echo "$uid.draw AREA"
|
||||
fi
|
||||
done
|
||||
|
||||
exit 0;;
|
||||
esac
|
||||
|
||||
for stat in $stats
|
||||
do
|
||||
uid=$(echo $stat | sed 's/:.*//')
|
||||
if [ $uid = "domains_being_blocked" ]; then
|
||||
value=$(echo $stat | sed 's/.*://')
|
||||
echo "$uid.value $value"
|
||||
fi
|
||||
done
|
18 extern/samba vendored
@@ -1,5 +1,4 @@
#!@@GOODSH@@
|
||||
# -*- sh -*-
|
||||
#!/bin/bash
|
||||
|
||||
: << =cut
|
||||
|
||||
|
@ -40,7 +39,7 @@ GPLv2
|
|||
=cut
|
||||
|
||||
#SMBSTATUS=${smbstatus:-$(sudo command -v smbstatus)}
|
||||
SMBSTATUS=${SMBSTATUS:-$(sudo /usr/bin/smbstatus)}
|
||||
SMBSTATUS=$(which smbstatus)
|
||||
|
||||
if [ "$1" = "autoconf" ]; then
|
||||
if [ -x "$SMBSTATUS" ]; then
|
||||
|
@ -64,6 +63,7 @@ if [ "$1" = "config" ]; then
|
|||
echo 'proc.label processes'
|
||||
echo 'lock.label locked files'
|
||||
echo 'share.label Open shares'
|
||||
echo 'user.label Active Users'
|
||||
# Bug pointed out by kozik: there is no max field.
|
||||
# echo 'max.warning 900'
|
||||
# echo 'max.critical 960'
|
||||
|
@ -85,3 +85,15 @@ $SMBSTATUS -S 2>/dev/null | awk '
|
|||
'"$IGNORE"'
|
||||
{lines++}
|
||||
END {print "share.value " lines}'
|
||||
# mg@fileserver2 11:44 ~
|
||||
# > sudo smbstatus
|
||||
#
|
||||
# Samba version 4.11.6-Ubuntu
|
||||
# PID Username Group Machine Protocol Version Encryption Signing
|
||||
# ----------------------------------------------------------------------------------------------------------------------------------------
|
||||
# 3996190 michaelgrote smb_users 192.168.2.180 (ipv4:192.168.2.180:59878) SMB3_11 - partial(AES-128-CMAC)
|
||||
# 722 pve smb_users 192.168.2.15 (ipv4:192.168.2.15:40512) SMB3_00 - partial(HMAC-SHA256)
|
||||
# 736 navidrome smb_users 192.168.2.68 (ipv4:192.168.2.68:51130) SMB3_11 - partial(AES-128-CMAC)
|
||||
# 3048197 photoprism smb_users 192.168.2.35 (ipv4:192.168.2.35:33078) SMB3_11 - partial(AES-128-CMAC)
|
||||
# count active users by matching the protocol version (SMB...) column
|
||||
echo user.value "$($SMBSTATUS | grep -c SMB)"
|
||||
|
|
40 extern/samba_locked vendored
@@ -1,40 +0,0 @@
#!/bin/sh
|
||||
#
|
||||
# Plugin to monitor the number of Samba locked files on the machine.
|
||||
#
|
||||
# Parameters:
|
||||
#
|
||||
# config (required)
|
||||
# autoconf (optional - used by munin-config)
|
||||
#
|
||||
# $Log$
|
||||
# Revision 1.0 2007/04/16 Jon Higgs
|
||||
# Initial Release - Adapted from jimmyo's processes plugin.
|
||||
#
|
||||
# Magick markers (optional - used by munin-config and some installation
|
||||
# scripts):
|
||||
#%# family=auto
|
||||
#%# capabilities=autoconf
|
||||
|
||||
if [ "$1" = "autoconf" ]; then
|
||||
echo yes
|
||||
exit 0
|
||||
fi
|
||||
|
||||
if [ "$1" = "config" ]; then
|
||||
|
||||
echo 'graph_title Samba Locked Files'
|
||||
echo 'graph_args --base 1000 -l 0 '
|
||||
echo 'graph_vlabel number of locked files'
|
||||
echo 'graph_category fs'
|
||||
echo 'graph_info This graph shows the number locked Samba Files.'
|
||||
echo 'samba_locked.label Locked Files'
|
||||
echo 'samba_locked.draw LINE2'
|
||||
echo 'samba_locked.info The current number of locked files.'
|
||||
exit 0
|
||||
fi
|
||||
|
||||
echo "samba_locked.value $(smbstatus -L 2> /dev/null | grep -c DENY_)"
|
||||
|
||||
# If here, always return OK
|
||||
exit 0
|
46 extern/samba_users vendored
@@ -1,46 +0,0 @@
#!/bin/sh
|
||||
#
|
||||
# Plugin to monitor the number of Samba users on the machine.
|
||||
#
|
||||
# Parameters:
|
||||
#
|
||||
# config (required)
|
||||
# autoconf (optional - used by munin-config)
|
||||
#
|
||||
# $Log$
|
||||
# Revision 1.0 2007/04/16 Jon Higgs
|
||||
# Initial Release - Adapted from jimmyo's processes plugin.
|
||||
#
|
||||
# Revision 1.1 2014/07/24 MangaII
|
||||
# Add exit 0
|
||||
# WARNING : Samba 3.6 and newer block access to smbstatus for no root user
|
||||
# On Debian make a "chmod a+w /run/samba/sessionid.tdb"
|
||||
# smbstatus must open this file with RW option
|
||||
#
|
||||
# Magick markers (optional - used by munin-config and some installation
|
||||
# scripts):
|
||||
#%# family=auto
|
||||
#%# capabilities=autoconf
|
||||
|
||||
if [ "$1" = "autoconf" ]; then
|
||||
echo yes
|
||||
exit 0
|
||||
fi
|
||||
|
||||
if [ "$1" = "config" ]; then
|
||||
|
||||
echo 'graph_title Samba Users'
|
||||
echo 'graph_args --base 1000 -l 0 '
|
||||
echo 'graph_vlabel number of Samba users.'
|
||||
echo 'graph_category fs'
|
||||
echo 'graph_info This graph shows the number Samba users.'
|
||||
echo 'samba_users.label Samba Users'
|
||||
echo 'samba_users.draw LINE2'
|
||||
echo 'samba_users.info The current number of Samba users.'
|
||||
exit 0
|
||||
fi
|
||||
|
||||
echo -n "samba_users.value "
|
||||
smbstatus -b 2> /dev/null | grep -c -v -e "^Samba" -e "^---" -e "^PID" -e ^$
|
||||
|
||||
exit 0
|
111 extern/systemd_status vendored
@@ -1,111 +0,0 @@
#!/usr/bin/env python3
|
||||
# pylint: disable=invalid-name
|
||||
# pylint: enable=invalid-name
|
||||
|
||||
"""Munin plugin to monitor systemd service status.
|
||||
|
||||
=head1 NAME
|
||||
|
||||
systemd_status - monitor systemd service status, including normal services,
|
||||
mounts, hotplugs and socket activations
|
||||
|
||||
=head1 APPLICABLE SYSTEMS
|
||||
|
||||
Linux systems with systemd installed.
|
||||
|
||||
=head1 CONFIGURATION
|
||||
|
||||
No configuration is required for this plugin.
|
||||
|
||||
Warning level for systemd "failed" state is set to 0:0. If any of the services
|
||||
enters "failed" state, Munin will emit warning.
|
||||
|
||||
=head1 AUTHOR
|
||||
|
||||
Kim B. Heino <b@bbbs.net>
|
||||
|
||||
=head1 LICENSE
|
||||
|
||||
GPLv2
|
||||
|
||||
=head1 MAGIC MARKERS
|
||||
|
||||
#%# family=auto
|
||||
#%# capabilities=autoconf
|
||||
|
||||
=cut
|
||||
|
||||
"""
|
||||
|
||||
import os
|
||||
import re
|
||||
import subprocess
|
||||
import sys
|
||||
|
||||
|
||||
STATES = (
|
||||
'failed',
|
||||
'dead',
|
||||
'running',
|
||||
'exited',
|
||||
'active',
|
||||
'listening',
|
||||
'waiting',
|
||||
'plugged',
|
||||
'mounted',
|
||||
)
|
||||
|
||||
|
||||
def config():
|
||||
"""Autoconfig values."""
|
||||
print('graph_title systemd services')
|
||||
print('graph_vlabel Services')
|
||||
print('graph_category processes')
|
||||
print('graph_args --base 1000 --lower-limit 0')
|
||||
print('graph_scale no')
|
||||
print('graph_info Number of services in given activation state.')
|
||||
for state in STATES:
|
||||
print('{state}.label Services in {state} state'.format(state=state))
|
||||
print('failed.warning 0:0')
|
||||
if os.environ.get('MUNIN_CAP_DIRTYCONFIG') == '1':
|
||||
fetch()
|
||||
|
||||
|
||||
def fetch():
|
||||
"""Print runtime values."""
|
||||
# Get data
|
||||
try:
|
||||
# deb9/py3.5 doesn't have encoding parameter in subprocess
|
||||
output = subprocess.check_output(['/bin/systemctl', 'list-units'])
|
||||
except (OSError, subprocess.CalledProcessError):
|
||||
return
|
||||
output = output.decode('utf-8', 'ignore')
|
||||
|
||||
# Parse data
|
||||
states = {state: 0 for state in STATES}
|
||||
for line in output.splitlines():
|
||||
token = line.split()
|
||||
if len(token) < 4:
|
||||
continue
|
||||
if len(token[0]) < 3: # Skip failed-bullet
|
||||
token = token[1:]
|
||||
if token[0].endswith('.scope'):
|
||||
continue # Ignore scopes
|
||||
if re.match(r'user.*@\d+\.service', token[0]):
|
||||
continue # These fail randomly in older systemd
|
||||
if token[3] in states:
|
||||
states[token[3]] = states[token[3]] + 1
|
||||
|
||||
# Output
|
||||
for state in STATES:
|
||||
print('{}.value {}'.format(state, states[state]))
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
if len(sys.argv) > 1 and sys.argv[1] == 'autoconf':
|
||||
print('yes' if os.path.exists('/run/systemd/system') else
|
||||
'no (systemd is not running)')
|
||||
elif len(sys.argv) > 1 and sys.argv[1] == 'config':
|
||||
config()
|
||||
else:
|
||||
fetch()
|
2 extern/unifi vendored
@@ -1,6 +1,8 @@
#!/usr/bin/perl
|
||||
# -*- perl -*-
|
||||
|
||||
# NOTE: A hostname must be set for each AP!
|
||||
|
||||
=encoding utf8
|
||||
|
||||
=head1 NAME
|
||||
|
|
361 extern/zfs_arcstats vendored
@@ -1,361 +0,0 @@
#!/bin/bash
|
||||
|
||||
: << =cut
|
||||
|
||||
=head1 NAME
|
||||
|
||||
zfs_arcstats - Munin multi-graph plugin to monitor ZFS ARC statistics
|
||||
|
||||
These functions are implemented:
|
||||
size : to monitor ARC size
|
||||
activity : to monitor ARC activities
|
||||
actlist : to monitor ARC activities by cache list (MFU/MRU)
|
||||
actdata : to monitor ARC activities by data type (Demand/Prefetch)
|
||||
hitratio : to monitor ARC hit ratio
|
||||
|
||||
Tested with Solaris 10 and 11, OpenIndiana Hipster, FreeBSD 11, CentOS 7
|
||||
This plugin is inspired by arcstat.pl [https://github.com/mharsch/arcstat]
|
||||
|
||||
=head1 CONFIGURATION
|
||||
|
||||
Make symlink:
|
||||
cd /path/to/munin/etc/plugins
|
||||
ln -s /path/to/munin/lib/plugins/zfs_arcstats .
|
||||
|
||||
For FreeBSD, it should be necessary to change shebang /bin/bash -> /usr/local/bin/bash
|
||||
|
||||
=head1 ENVIRONMENT VARIABLES
|
||||
|
||||
None
|
||||
|
||||
=head1 AUTHOR
|
||||
|
||||
K.Cima https://github.com/shakemid
|
||||
|
||||
=head1 LICENSE
|
||||
|
||||
GPLv2
|
||||
|
||||
=head1 Magic markers
|
||||
|
||||
#%# family=contrib
|
||||
#%# capabilities=autoconf
|
||||
|
||||
=cut
|
||||
|
||||
# Include plugin.sh
|
||||
. "${MUNIN_LIBDIR:-}/plugins/plugin.sh"
|
||||
is_multigraph "$@"
|
||||
|
||||
# Shell options
|
||||
set -o nounset
|
||||
|
||||
# Set global variables
|
||||
plugin_name=zfs_arcstats
|
||||
functions='size activity actlist actdata hitratio'
|
||||
|
||||
# Functions
|
||||
|
||||
get_osname() {
|
||||
local osname osver
|
||||
|
||||
osname=$( uname -s )
|
||||
osver=$( uname -v )
|
||||
|
||||
case $osname in
|
||||
SunOS)
|
||||
case $osver in
|
||||
illumos*)
|
||||
osname=illumos
|
||||
;;
|
||||
esac
|
||||
;;
|
||||
esac
|
||||
|
||||
echo "$osname"
|
||||
}
|
||||
|
||||
preconfig() {
|
||||
local func=$1
|
||||
|
||||
# data_attr format: field type draw label
|
||||
# label can contain white-spaces.
|
||||
|
||||
case $func in
|
||||
size)
|
||||
global_attr="
|
||||
graph_title ZFS ARC - Size
|
||||
graph_category fs
|
||||
graph_args --base 1024 --lower-limit 0
|
||||
graph_vlabel Bytes
|
||||
graph_info ZFS ARC - Size
|
||||
"
|
||||
case $osname in
|
||||
SunOS)
|
||||
# For Solaris 10,11
|
||||
data_attr="
|
||||
data_size GAUGE AREASTACK Data size
|
||||
prefetch_meta_size GAUGE AREASTACK Prefetch meta size
|
||||
buf_size GAUGE AREASTACK Buf size
|
||||
other_size GAUGE AREASTACK Other size
|
||||
"
|
||||
;;
|
||||
*)
|
||||
# For illumos, FreeBSD, Linux (OpenZFS)
|
||||
data_attr="
|
||||
data_size GAUGE AREASTACK Data size
|
||||
metadata_size GAUGE AREASTACK Metadata size
|
||||
hdr_size GAUGE AREASTACK Hdr size
|
||||
other_size GAUGE AREASTACK Other size
|
||||
mru_size GAUGE LINE MRU size
|
||||
mfu_size GAUGE LINE MFU size
|
||||
"
|
||||
;;
|
||||
esac
|
||||
data_attr="
|
||||
$data_attr
|
||||
size GAUGE LINE ARC size
|
||||
c GAUGE LINE Target size
|
||||
p GAUGE LINE Target MRU size
|
||||
"
|
||||
;;
|
||||
activity)
|
||||
global_attr="
|
||||
graph_title ZFS ARC - Activities
|
||||
graph_category fs
|
||||
graph_args --base 1000 --lower-limit 0
|
||||
graph_vlabel misses (-) / hits (+) per second
|
||||
graph_info ZFS ARC - Activities
|
||||
|
||||
hits.negative misses
|
||||
l2_hits.negative l2_misses
|
||||
"
|
||||
data_attr="
|
||||
misses DERIVE LINE dummy
|
||||
hits DERIVE LINE ARC
|
||||
l2_misses DERIVE LINE dummy
|
||||
l2_hits DERIVE LINE L2ARC
|
||||
"
|
||||
;;
|
||||
actlist)
|
||||
global_attr="
|
||||
graph_title ZFS ARC - Activities by cache list
|
||||
graph_category fs
|
||||
graph_args --base 1000 --lower-limit 0
|
||||
graph_vlabel ghost hits (-) / hits (+) per second
|
||||
graph_info ZFS ARC - Activities by cache list
|
||||
|
||||
mfu_hits.negative mfu_ghost_hits
|
||||
mru_hits.negative mru_ghost_hits
|
||||
"
|
||||
data_attr="
|
||||
mfu_ghost_hits DERIVE LINE dummy
|
||||
mfu_hits DERIVE LINE MFU
|
||||
mru_ghost_hits DERIVE LINE dummy
|
||||
mru_hits DERIVE LINE MRU
|
||||
"
|
||||
;;
|
||||
actdata)
|
||||
global_attr="
|
||||
graph_title ZFS ARC - Activities by data type
|
||||
graph_category fs
|
||||
graph_args --base 1000 --lower-limit 0
|
||||
graph_vlabel misses (-) / hits (+) per second
|
||||
graph_info ZFS ARC - Activities by data type
|
||||
|
||||
demand_data_hits.negative demand_data_misses
|
||||
demand_metadata_hits.negative demand_metadata_misses
|
||||
prefetch_data_hits.negative prefetch_data_misses
|
||||
prefetch_metadata_hits.negative prefetch_metadata_misses
|
||||
"
|
||||
data_attr="
|
||||
demand_data_misses DERIVE LINE dummy
|
||||
demand_data_hits DERIVE LINE D data
|
||||
demand_metadata_misses DERIVE LINE dummy
|
||||
demand_metadata_hits DERIVE LINE D meta
|
||||
prefetch_data_misses DERIVE LINE dummy
|
||||
prefetch_data_hits DERIVE LINE P data
|
||||
prefetch_metadata_misses DERIVE LINE dummy
|
||||
prefetch_metadata_hits DERIVE LINE P meta
|
||||
"
|
||||
;;
|
||||
hitratio)
|
||||
global_attr="
|
||||
graph_title ZFS ARC - Hit ratio
|
||||
graph_category fs
|
||||
graph_args --base 1000 --lower-limit 0 --upper-limit 100 --rigid
|
||||
graph_vlabel % hits
|
||||
graph_info ZFS ARC - Hit ratio - The graph shows cache hit ratio between munin-update intervals (usually 5 minutes).
|
||||
|
||||
hitratio.cdef hits,DUP,misses,+,/,100,*
|
||||
l2_hitratio.cdef l2_hits,DUP,l2_misses,+,/,100,*
|
||||
demand_data_hitratio.cdef demand_data_hits,DUP,demand_data_misses,+,/,100,*
|
||||
demand_metadata_hitratio.cdef demand_metadata_hits,DUP,demand_metadata_misses,+,/,100,*
|
||||
prefetch_data_hitratio.cdef prefetch_data_hits,DUP,prefetch_data_misses,+,/,100,*
|
||||
prefetch_metadata_hitratio.cdef prefetch_metadata_hits,DUP,prefetch_metadata_misses,+,/,100,*
|
||||
"
|
||||
data_attr="
|
||||
hits DERIVE LINE dummy
|
||||
misses DERIVE LINE dummy
|
||||
l2_hits DERIVE LINE dummy
|
||||
l2_misses DERIVE LINE dummy
|
||||
demand_data_hits DERIVE LINE dummy
|
||||
demand_data_misses DERIVE LINE dummy
|
||||
demand_metadata_hits DERIVE LINE dummy
|
||||
demand_metadata_misses DERIVE LINE dummy
|
||||
prefetch_data_hits DERIVE LINE dummy
|
||||
prefetch_data_misses DERIVE LINE dummy
|
||||
prefetch_metadata_hits DERIVE LINE dummy
|
||||
prefetch_metadata_misses DERIVE LINE dummy
|
||||
hitratio GAUGE LINE2 ARC hits
|
||||
l2_hitratio GAUGE LINE L2ARC hits
|
||||
demand_data_hitratio GAUGE LINE Demand data hits
|
||||
demand_metadata_hitratio GAUGE LINE Demand metadata hits
|
||||
prefetch_data_hitratio GAUGE LINE Prefetch data hits
|
||||
prefetch_metadata_hitratio GAUGE LINE Prefetch metadata hits
|
||||
"
|
||||
;;
|
||||
*)
|
||||
echo "Unknown function: $func"
|
||||
exit 1
|
||||
;;
|
||||
esac
|
||||
}
|
||||
|
||||
do_config() {
|
||||
local func=$1
|
||||
local label_max_length=45
|
||||
local field type draw label
|
||||
|
||||
preconfig "$func"
|
||||
echo "multigraph ${plugin_name}_${func}"
|
||||
|
||||
# print global attributes
|
||||
echo "$global_attr" | sed -e 's/^ *//' -e '/^$/d'
|
||||
|
||||
# print data source attributes
|
||||
echo "$data_attr" | while read -r field type draw label
|
||||
do
|
||||
[ -z "$field" ] && continue
|
||||
|
||||
echo "${field}.type ${type}"
|
||||
echo "${field}.draw ${draw}"
|
||||
echo "${field}.label ${label:0:${label_max_length}}"
|
||||
if [ "$type" = 'DERIVE' ]; then
|
||||
echo "${field}.min 0"
|
||||
fi
|
||||
if [ "$label" = 'dummy' ]; then
|
||||
echo "${field}.graph no"
|
||||
fi
|
||||
done
|
||||
|
||||
echo
|
||||
}
|
||||
|
||||
get_stats() {
|
||||
local arcstats stat value
|
||||
|
||||
case $osname in
|
||||
SunOS|illumos)
|
||||
arcstats=$( kstat -p 'zfs:0:arcstats' | sed -e 's/:/ /g' | awk '{ print $4,$5 }' )
|
||||
# kstat output example:
|
||||
# $ kstat -p zfs:0:arcstats
|
||||
# zfs:0:arcstats:c 4135233544
|
||||
# ...
|
||||
;;
|
||||
*BSD)
|
||||
arcstats=$( /sbin/sysctl -a | sed -n -e 's/^kstat\.zfs\.misc\.arcstats\.//p' | awk -F: '{ print $1,$2 }' )
|
||||
# sysctl output example:
|
||||
# $ sysctl -a
|
||||
# ...
|
||||
# kstat.zfs.misc.arcstats.c: 632540160
|
||||
# ...
|
||||
;;
|
||||
Linux)
|
||||
arcstats=$( sed '1,2d' /proc/spl/kstat/zfs/arcstats | awk '{ print $1,$3 }' )
|
||||
# proc file output example:
|
||||
# $ cat /proc/spl/kstat/zfs/arcstats
|
||||
# ...
|
||||
# name type data
|
||||
# hits 4 62
|
||||
# ...
|
||||
;;
|
||||
*)
|
||||
echo "Unsupported OS: $osname"
|
||||
exit 1
|
||||
esac
|
||||
|
||||
while read -r stat value
|
||||
do
|
||||
printf -v "arcstats_${stat}" "%s" "$value"
|
||||
# printf -v means indirect variable assignment (similar to eval)
|
||||
done <<< "$arcstats"
|
||||
}
|
||||
|
||||
do_fetch() {
|
||||
local func=$1
|
||||
local field type draw label value ref
|
||||
|
||||
preconfig "$func"
|
||||
echo "multigraph ${plugin_name}_${func}"
|
||||
|
||||
echo "$data_attr" | while read -r field type draw label
|
||||
do
|
||||
[ -z "$field" ] && continue
|
||||
|
||||
ref="arcstats_${field}"
|
||||
value=${!ref:-0}
|
||||
# ${!varname} means indirect evaluation (similar to eval)
|
||||
|
||||
echo "${field}.value ${value}"
|
||||
done
|
||||
|
||||
echo
|
||||
}
|
||||
|
||||
autoconf() {
|
||||
if [ -x /sbin/zfs ]; then
|
||||
echo yes
|
||||
else
|
||||
echo "no (ZFS looks unavailable)"
|
||||
fi
|
||||
}
|
||||
|
||||
config() {
|
||||
local func
|
||||
|
||||
for func in $functions
|
||||
do
|
||||
do_config "$func"
|
||||
done
|
||||
}
|
||||
|
||||
fetch() {
|
||||
local func
|
||||
|
||||
get_stats
|
||||
|
||||
for func in $functions
|
||||
do
|
||||
do_fetch "$func"
|
||||
done
|
||||
}
|
||||
|
||||
# Main
|
||||
|
||||
osname=$( get_osname )
|
||||
|
||||
case ${1:-} in
|
||||
autoconf)
|
||||
autoconf
|
||||
;;
|
||||
config)
|
||||
config
|
||||
if [ "${MUNIN_CAP_DIRTYCONFIG:-0}" = "1" ]; then fetch; fi
|
||||
;;
|
||||
*)
|
||||
fetch
|
||||
;;
|
||||
esac
|
||||
|
||||
exit 0
|
99 extern/zfs_list vendored
@@ -1,99 +0,0 @@
#!/bin/bash
|
||||
#
|
||||
# Plugin to monitor ZFS Filesystems
|
||||
# Author: Adam Michel (elfurbe@furbism.com)
|
||||
# Description:
|
||||
# This is an extension of the zfs_fs plugin
|
||||
# modified as a multigraph to graph all zfs
|
||||
# filesystems it can find
|
||||
#
|
||||
# Tested on Ubuntu-14.04
|
||||
#
|
||||
# Parameters understood:
|
||||
#
|
||||
# config (required)
|
||||
# autoconf (optional - used by munin-config)
|
||||
#
|
||||
#%# family=auto
|
||||
|
||||
. "$MUNIN_LIBDIR/plugins/plugin.sh"
|
||||
|
||||
is_multigraph "$@"
|
||||
|
||||
if [ "$1" = "autoconf" ]; then
|
||||
# Makes little sense to autoconf if you can't suggest
|
||||
echo no
|
||||
exit 0
|
||||
fi
|
||||
|
||||
if [ "$1" = "suggest" ]; then
|
||||
exit 0
|
||||
fi
|
||||
|
||||
if [ "$1" = "config" ]; then
|
||||
for i in `zfs list -Hp | awk '{print $1}'`; do
|
||||
values=( $(zfs get -p usedbydataset,usedbychildren,usedbysnapshots,usedbyrefreservation,available,quota $i | awk 'BEGIN {total=0;} { if( NR==1 ) next; } !/quota/ {total=total+$3;} {print $3} END{print total;}') )
|
||||
fsname=$(clean_fieldname $(echo "$i" | sed 's/\//__/g'))
|
||||
|
||||
echo <<EOF "multigraph zfs_list_$fsname
|
||||
graph_title $fsname usage
|
||||
graph_order usedbydataset usedbychildren usedbysnapshots usedbyrefreservation available total quota
|
||||
graph_args --base 1024 -r -l 0 --vertical-label Bytes --upper-limit ${values[6]}
|
||||
graph_info This graph shows how is used a zfs filesystems.
|
||||
graph_category fs
|
||||
graph_period second
|
||||
usedbydataset.label UsedByDataset
|
||||
usedbydataset.draw AREA
|
||||
usedbydataset.info Used space by Dataset
|
||||
usedbydataset.colour FF0000
|
||||
usedbychildren.label UsedByChildren
|
||||
usedbychildren.draw STACK
|
||||
usedbychildren.info Used space by children
|
||||
usedbychildren.colour FFCC33
|
||||
usedbysnapshots.label UsedBySnapshots
|
||||
usedbysnapshots.draw STACK
|
||||
usedbysnapshots.info Used space by snapshot
|
||||
usedbysnapshots.colour 0000FF
|
||||
usedbyrefreservation.label Usedbyrefreservation
|
||||
usedbyrefreservation.draw STACK
|
||||
usedbyrefreservation.info Used space by Ref Reservation
|
||||
usedbyrefreservation.colour 33CCFF
|
||||
available.label Available
|
||||
available.draw STACK
|
||||
available.info Free space
|
||||
available.colour 00FF00
|
||||
total.label Total
|
||||
total.draw LINE1
|
||||
total.info Total
|
||||
total.colour 000000
|
||||
quota.label Quota
|
||||
quota.draw LINE1
|
||||
quota.info Quota
|
||||
quota.colour 555555"
|
||||
EOF
|
||||
done
|
||||
exit 0
|
||||
fi
|
||||
|
||||
for i in `zfs list -Hp | awk '{print $1}'`; do
|
||||
values=( $(zfs get -p usedbydataset,usedbychildren,usedbysnapshots,usedbyrefreservation,available,quota $i | awk 'BEGIN {total=0;} { if( NR==1 ) next; } !/quota/ {total=total+$3;} {print $3} END{print total;}') )
|
||||
fsname=$(clean_fieldname $(echo "$i" | sed 's/\//__/g'))
|
||||
|
||||
if [ ${values[5]} = "-" ]; then
|
||||
quota=0
|
||||
else
|
||||
quota=${values[5]}
|
||||
fi
|
||||
|
||||
echo <<EOF "multigraph zfs_list_$fsname
|
||||
usedbydataset.value ${values[0]}
|
||||
usedbysnapshots.value ${values[2]}
|
||||
usedbychildren.value ${values[1]}
|
||||
usedbyrefreservation.value ${values[3]}
|
||||
available.value ${values[4]}
|
||||
total.value ${values[6]}
|
||||
quota.value $quota"
|
||||
EOF
|
||||
done
|
||||
|
||||
exit 0
|
355 extern/zfsonlinux_stats_ vendored
@@ -1,355 +0,0 @@
#!/usr/bin/env bash
|
||||
# ZFS statistics for ZFSonLinux
|
||||
# Author: Adam Michel (elfurbe@furbism.com)
|
||||
#
|
||||
# Description:
|
||||
# This is a modification of the zfs_stats
|
||||
# plugin by David Bjornsson (which was a
|
||||
# rewrite of zfs-stats-for-freebsd scripts
|
||||
# by patpro) modified to work with ZFSonLinux.
|
||||
#
|
||||
# Tested on Ubuntu-14.04
|
||||
#
|
||||
# Usage: zfs_stats_FUNCTION
|
||||
#
|
||||
# Available functions:
|
||||
# efficiency - ARC efficiency
|
||||
# cachehitlist - Cache hit by cache list
|
||||
# cachehitdtype - Cache hit by data type
|
||||
# dmuprefetch - DMU prefetch
|
||||
# utilization - ARC size breakdown
|
||||
# l2utilization - L2ARC size breakdown
|
||||
# l2efficiency - L2ARC efficiency
|
||||
#
|
||||
#%# family=auto
|
||||
|
||||
FUNCTION=$(basename $0 | cut -d_ -f3)
|
||||
MEMMAX=$(awk '/MemTotal/ {print $2 * 1024}' /proc/meminfo)  # /proc/meminfo reports kB; convert to bytes for the byte-scaled graph limit
|
||||
BC='/usr/bin/bc -q'
|
||||
ARCSTATS="/proc/spl/kstat/zfs/arcstats"
|
||||
ZFETCHSTATS="/proc/spl/kstat/zfs/zfetchstats"
|
||||
|
||||
#
|
||||
# Pull all values from arcstats
|
||||
#
|
||||
|
||||
while read name type data
|
||||
do
|
||||
[[ $name =~ ^[0-9].* ]] && continue
|
||||
[[ $name == "name" ]] && continue
|
||||
[[ $name == "" ]] && continue
|
||||
case $name in
|
||||
"hits" )
|
||||
export ARC_HITS=$data
|
||||
;;
|
||||
"misses" )
|
||||
export ARC_MISSES=$data
|
||||
;;
|
||||
"p" )
|
||||
export MRU_SIZE=$data
|
||||
;;
|
||||
"c_max" )
|
||||
export MAX_SIZE=$data
|
||||
;;
|
||||
"c_min" )
|
||||
export MIN_SIZE=$data
|
||||
;;
|
||||
"c" )
|
||||
export TARGET_SIZE=$data
|
||||
;;
|
||||
* )
|
||||
VARNAME=`echo $name | tr '[:lower:]' '[:upper:]'`
|
||||
#declare $VARNAME=$data
|
||||
export $VARNAME=$data
|
||||
;;
|
||||
esac
|
||||
done < $ARCSTATS
|
||||
|
||||
#
|
||||
# Pull all values from zfetchstats
|
||||
#
|
||||
|
||||
while read name type data
|
||||
do
|
||||
[[ $name =~ ^[0-9].* ]] && continue
|
||||
[[ $name == "name" ]] && continue
|
||||
case $name in
|
||||
"hits" )
|
||||
export DMU_HITS=$data
|
||||
;;
|
||||
"misses" )
|
||||
export DMU_MISSES=$data
|
||||
;;
|
||||
* )
|
||||
VARNAME=`echo $name | tr '[:lower:]' '[:upper:]'`
|
||||
export $VARNAME=$data
|
||||
;;
|
||||
esac
|
||||
done < $ZFETCHSTATS
|
||||
|
||||
#
|
||||
# Calculation macros
|
||||
#
|
||||
|
||||
ANON_HITS=`echo "$ARC_HITS-($MFU_HITS+$MRU_HITS+$MFU_GHOST_HITS+$MRU_GHOST_HITS)" | $BC`
|
||||
ARC_ACCESSES_TOTAL=`echo "$ARC_HITS+$ARC_MISSES" | $BC`
|
||||
DEMAND_DATA_TOTAL=`echo "$DEMAND_DATA_HITS+$DEMAND_DATA_MISSES" | $BC`
|
||||
PREFETCH_DATA_TOTAL=`echo "$PREFETCH_DATA_HITS+$PREFETCH_DATA_MISSES" | $BC`
|
||||
REAL_HITS=`echo "$MFU_HITS+$MRU_HITS" | $BC`
|
||||
|
||||
if [ $ARC_ACCESSES_TOTAL != 0 ]; then
|
||||
CACHE_HIT_RATIO_PERC=`echo "scale=2 ; (100*$ARC_HITS/$ARC_ACCESSES_TOTAL)" | $BC`
|
||||
CACHE_MISS_RATIO_PERC=`echo "scale=2 ; (100*$ARC_MISSES/$ARC_ACCESSES_TOTAL)" | $BC`
|
||||
ACTUAL_HIT_RATIO_PERC=`echo "scale=2 ; (100*$REAL_HITS/$ARC_ACCESSES_TOTAL)" | $BC`
|
||||
else
|
||||
CACHE_HIT_RATIO_PERC=0
|
||||
CACHE_MISS_RATIO_PERC=0
|
||||
ACTUAL_HIT_RATIO_PERC=0
|
||||
fi
|
||||
|
||||
if [ $DEMAND_DATA_TOTAL != 0 ]; then DATA_DEMAND_EFFICIENCY_PERC=`echo "scale=2 ; (100*$DEMAND_DATA_HITS/$DEMAND_DATA_TOTAL)" | $BC`; else DATA_DEMAND_EFFICIENCY_PERC=0; fi
|
||||
if [ $PREFETCH_DATA_TOTAL != 0 ]; then DATA_PREFETCH_EFFICENCY_PERC=`echo "scale=2 ; (100*$PREFETCH_DATA_HITS/$PREFETCH_DATA_TOTAL)" | $BC`; else DATA_PREFETCH_EFFICENCY_PERC=0; fi
|
||||
|
||||
if [ $ARC_HITS != 0 ]; then
|
||||
ANONYMOUSLY_USED_PERC=`echo "scale=2 ; (100*$ANON_HITS/$ARC_HITS)" | $BC`
|
||||
MOST_RECENTLY_USED_PERC=`echo "scale=2 ; (100*$MRU_HITS/$ARC_HITS)" | $BC`
|
||||
MOST_FREQUENTLY_USED_PERC=`echo "scale=2 ; (100*$MFU_HITS/$ARC_HITS)" | $BC`
|
||||
MOST_RECENTLY_USED_GHOST_PERC=`echo "scale=2 ; (100*$MRU_GHOST_HITS/$ARC_HITS)" | $BC`
|
||||
MOST_FREQUENTLY_USED_GHOST_PERC=`echo "scale=2 ; (100*$MFU_GHOST_HITS/$ARC_HITS)" | $BC`
|
||||
|
||||
DEMAND_DATA_HIT_PERC=`echo "scale=2 ; (100*$DEMAND_DATA_HITS/$ARC_HITS)" | $BC`
|
||||
PREFETCH_DATA_HIT_PERC=`echo "scale=2 ; (100*$PREFETCH_DATA_HITS/$ARC_HITS)" | $BC`
|
||||
DEMAND_METADATA_HIT_PERC=`echo "scale=2 ; (100*$DEMAND_METADATA_HITS/$ARC_HITS)" | $BC`
|
||||
PREFETCH_METADATA_HIT_PERC=`echo "scale=2 ; (100*$PREFETCH_METADATA_HITS/$ARC_HITS)" | $BC`
|
||||
else
|
||||
ANONYMOUSLY_USED_PERC=0
|
||||
MOST_RECENTLY_USED_PERC=0
|
||||
MOST_FREQUENTLY_USED_PERC=0
|
||||
MOST_RECENTLY_USED_GHOST_PERC=0
|
||||
MOST_FREQUENTLY_USED_GHOST_PERC=0
|
||||
|
||||
DEMAND_DATA_HIT_PERC=0
|
||||
PREFETCH_DATA_HIT_PERC=0
|
||||
DEMAND_METADATA_HIT_PERC=0
|
||||
PREFETCH_METADATA_HIT_PERC=0
|
||||
fi
|
||||
|
||||
if [ $ARC_MISSES != 0 ]; then
|
||||
PREFETCH_METADATA_MISSES_PERC=`echo "scale=2 ; (100*$PREFETCH_METADATA_MISSES/$ARC_MISSES)" | $BC`
|
||||
DEMAND_DATA_MISS_PERC=`echo "scale=2 ; (100*$DEMAND_DATA_MISSES/$ARC_MISSES)" | $BC`
|
||||
PREFETCH_DATA_MISS_PERC=`echo "scale=2 ; (100*$PREFETCH_DATA_MISSES/$ARC_MISSES)" | $BC`
|
||||
DEMAND_METADATA_MISS_PERC=`echo "scale=2 ; (100*$DEMAND_METADATA_MISSES/$ARC_MISSES)" | $BC`
|
||||
else
|
||||
PREFETCH_METADATA_MISSES_PERC=0
|
||||
DEMAND_DATA_MISS_PERC=0
|
||||
PREFETCH_DATA_MISS_PERC=0
|
||||
DEMAND_METADATA_MISS_PERC=0
|
||||
fi
|
||||
|
||||
DMU_TOTAL=`echo "$DMU_HITS+$DMU_MISSES" | $BC`
|
||||
if [ $DMU_TOTAL != 0 ]; then
|
||||
DMU_HITS_PERC=`echo "scale=2 ; (100*$DMU_HITS/$DMU_TOTAL)" | $BC`
|
||||
DMU_MISSES_PERC=`echo "scale=2 ; (100*$DMU_MISSES/$DMU_TOTAL)" | $BC`
|
||||
else
|
||||
DMU_HITS_PERC=0
|
||||
DMU_MISSES_PERC=0
|
||||
fi
|
||||
|
||||
if [ $SIZE -gt $TARGET_SIZE ]; then
|
||||
MFU_SIZE=`echo "$SIZE-$MRU_SIZE" | $BC`
|
||||
else
|
||||
MFU_SIZE=`echo "$TARGET_SIZE-$MRU_SIZE" | $BC`
|
||||
fi
|
||||
|
||||
L2_ACCESSES_TOTAL=`echo "$L2_HITS+$L2_MISSES" | $BC`
|
||||
if [ $L2_ACCESSES_TOTAL -gt 0 ]; then
|
||||
L2_HIT_RATIO_PERC=`echo "scale=2 ; (100*$L2_HITS/$L2_ACCESSES_TOTAL)" | $BC`
|
||||
L2_MISS_RATIO_PERC=`echo "scale=2 ; (100*$L2_MISSES/$L2_ACCESSES_TOTAL)" | $BC`
|
||||
else
|
||||
L2_HIT_RATIO_PERC=0
|
||||
L2_MISS_RATIO_PERC=0
|
||||
fi
|
||||
|
||||
efficiency() {
|
||||
if [ "$1" = "config" ]; then
|
||||
echo 'graph_title ZFS ARC Efficiency'
|
||||
echo 'graph_args -u 100'
|
||||
echo 'graph_vlabel %'
|
||||
echo 'graph_info This graph shows the ARC Efficiency'
|
||||
|
||||
echo 'hits.label Hit Ratio'
|
||||
echo 'misses.label Miss Ratio'
|
||||
echo 'actual_hits.label Actual Hit Ratio'
|
||||
echo 'data_demand_efficiency.label Data Demand Efficiency'
|
||||
echo 'data_prefetch_efficiency.label Data Prefetch Efficiency'
|
||||
|
||||
exit 0
|
||||
else
|
||||
echo 'hits.value ' $CACHE_HIT_RATIO_PERC
|
||||
echo 'misses.value ' $CACHE_MISS_RATIO_PERC
|
||||
echo 'actual_hits.value ' $ACTUAL_HIT_RATIO_PERC
|
||||
echo 'data_demand_efficiency.value ' $DATA_DEMAND_EFFICIENCY_PERC
|
||||
echo 'data_prefetch_efficiency.value ' $DATA_PREFETCH_EFFICENCY_PERC
|
||||
fi
|
||||
}
|
||||
|
||||
cachehitlist() {
|
||||
if [ "$1" = "config" ]; then
|
||||
echo 'graph_title ZFS ARC Efficiency: Cache hits by cache list'
|
||||
echo 'graph_args -u 100'
|
||||
echo 'graph_vlabel %'
|
||||
echo 'graph_info This graph shows the ARC Efficiency'
|
||||
|
||||
echo 'cache_list_anon.label Anonymously Used'
|
||||
echo 'cache_list_most_rec.label Most Recently Used'
|
||||
echo 'cache_list_most_freq.label Most Frequently Used'
|
||||
echo 'cache_list_most_rec_ghost.label Most Recently Used Ghost'
|
||||
echo 'cache_list_most_freq_ghost.label Most Frequently Used Ghost'
|
||||
|
||||
exit 0
|
||||
else
|
||||
echo 'cache_list_anon.value ' $ANONYMOUSLY_USED_PERC
|
||||
echo 'cache_list_most_rec.value ' $MOST_RECENTLY_USED_PERC
|
||||
echo 'cache_list_most_freq.value ' $MOST_FREQUENTLY_USED_PERC
|
||||
echo 'cache_list_most_rec_ghost.value ' $MOST_RECENTLY_USED_GHOST_PERC
|
||||
echo 'cache_list_most_freq_ghost.value ' $MOST_FREQUENTLY_USED_GHOST_PERC
|
||||
fi
|
||||
}
|
||||
|
||||
cachehitdtype() {
|
||||
if [ "$1" = "config" ]; then
|
||||
echo 'graph_title ZFS ARC Efficiency: Cache hits by data type'
|
||||
echo 'graph_args -u 100'
|
||||
echo 'graph_vlabel %'
|
||||
echo 'graph_info This graph shows the ARC Efficiency'
|
||||
|
||||
echo 'data_type_demand_hits.label Demand Data Hit Ratio'
|
||||
echo 'data_type_demand_misses.label Demand Data Miss Ratio'
|
||||
echo 'data_type_prefetch_hits.label Prefetch Data Hit Ratio'
|
||||
echo 'data_type_prefetch_misses.label Prefetch Data Miss Ratio'
|
||||
echo 'data_type_demand_metadata_hits.label Demand Metadata Hit Ratio'
|
||||
echo 'data_type_demand_metadata_misses.label Demand Metadata Miss Ratio'
|
||||
echo 'data_type_prefetch_metadata_hits.label Prefetch Metadata Hit Ratio'
|
||||
echo 'data_type_prefetch_metadata_misses.label Prefetch Metadata Miss Ratio'
|
||||
|
||||
exit 0
|
||||
else
|
||||
echo 'data_type_demand_hits.value ' $DEMAND_DATA_HIT_PERC
|
||||
echo 'data_type_demand_misses.value ' $DEMAND_DATA_MISS_PERC
|
||||
echo 'data_type_prefetch_hits.value ' $PREFETCH_DATA_HIT_PERC
|
||||
echo 'data_type_prefetch_misses.value ' $PREFETCH_DATA_MISS_PERC
|
||||
echo 'data_type_demand_metadata_hits.value ' $DEMAND_METADATA_HIT_PERC
|
||||
echo 'data_type_demand_metadata_misses.value ' $DEMAND_METADATA_MISS_PERC
|
||||
echo 'data_type_prefetch_metadata_hits.value ' $PREFETCH_METADATA_HIT_PERC
|
||||
echo 'data_type_prefetch_metadata_misses.value ' $PREFETCH_METADATA_MISSES_PERC
|
||||
fi
|
||||
}
|
||||
|
||||
dmuprefetch() {
|
||||
if [ "$1" = "config" ]; then
|
||||
echo 'graph_title ZFS DMU prefetch stats'
|
||||
echo 'graph_args -u 100'
|
||||
echo 'graph_vlabel %'
|
||||
echo 'graph_info This graph shows the DMU prefetch stats'
|
||||
|
||||
echo 'hits.label Hit Ratio'
|
||||
echo 'misses.label Miss Ratio'
|
||||
|
||||
exit 0
|
||||
else
|
||||
echo 'hits.value ' $DMU_HITS_PERC
|
||||
echo 'misses.value ' $DMU_MISSES_PERC
|
||||
fi
|
||||
}
|
||||
|
||||
utilization() {
|
||||
if [ "$1" = "config" ]; then
|
||||
echo 'graph_title ZFS ARC Size'
|
||||
echo 'graph_args --base 1024 -l 0 --vertical-label Bytes --upper-limit '$MEMMAX
|
||||
echo 'graph_vlabel Size in MB'
|
||||
echo 'graph_info This graph shows the ARC Size utilization'
|
||||
|
||||
echo 'max_size.label Maximum Size'
|
||||
echo 'max_size.draw AREA'
|
||||
echo 'size.label Size'
|
||||
echo 'size.draw AREA'
|
||||
echo 'min_size.label Minimum Size'
|
||||
echo 'min_size.draw AREA'
|
||||
echo 'target_size.label Target Size'
|
||||
echo 'target_size.draw LINE1'
|
||||
echo 'recently_size.label Recently Used Cache Size'
|
||||
echo 'recently_size.draw LINE1'
|
||||
echo 'frequently_size.label Frequently Used Cache Size'
|
||||
echo 'frequently_size.draw LINE1'
|
||||
|
||||
exit 0
|
||||
else
|
||||
echo 'max_size.value ' $MAX_SIZE
|
||||
echo 'size.value ' $SIZE
|
||||
echo 'min_size.value ' $MIN_SIZE
|
||||
echo 'target_size.value ' $TARGET_SIZE
|
||||
echo 'recently_size.value ' $MRU_SIZE
|
||||
echo 'frequently_size.value ' $MFU_SIZE
|
||||
fi
|
||||
}
|
||||
|
||||
l2utilization() {
|
||||
if [ "$1" = "config" ]; then
|
||||
echo 'graph_title ZFS L2ARC Size'
|
||||
echo 'graph_args --base 1024 -r -l 0 --vertical-label Bytes'
|
||||
echo 'graph_vlabel Size in MB'
|
||||
echo 'graph_info This graph shows the L2ARC Size utilization'
|
||||
|
||||
echo 'size.label Size'
|
||||
echo 'size.draw AREA'
|
||||
echo 'hdr_size.label Header Size'
|
||||
echo 'hdr_size.draw AREA'
|
||||
|
||||
exit 0
|
||||
else
|
||||
echo 'size.value ' $L2_SIZE
|
||||
echo 'hdr_size.value ' $L2_HDR_SIZE
|
||||
fi
|
||||
}
|
||||
|
||||
l2efficiency() {
|
||||
if [ "$1" = "config" ]; then
|
||||
echo 'graph_title ZFS L2ARC Efficiency'
|
||||
echo 'graph_args -u 100'
|
||||
echo 'graph_vlabel %'
|
||||
echo 'graph_info This graph shows the L2ARC Efficiency'
|
||||
|
||||
echo 'l2_hits.label Hit Ratio'
|
||||
echo 'l2_misses.label Miss Ratio'
|
||||
else
|
||||
echo 'l2_hits.value ' $L2_HIT_RATIO_PERC
|
||||
echo 'l2_misses.value ' $L2_MISS_RATIO_PERC
|
||||
fi
|
||||
|
||||
}
|
||||
|
||||
[ "$1" = "config" ] && echo "graph_category fs"
|
||||
|
||||
case "$FUNCTION" in
|
||||
efficiency)
|
||||
efficiency $1
|
||||
;;
|
||||
cachehitlist)
|
||||
cachehitlist $1
|
||||
;;
|
||||
cachehitdtype)
|
||||
cachehitdtype $1
|
||||
;;
|
||||
dmuprefetch)
|
||||
dmuprefetch $1
|
||||
;;
|
||||
utilization)
|
||||
utilization $1
|
||||
;;
|
||||
l2utilization)
|
||||
l2utilization $1
|
||||
;;
|
||||
l2efficiency)
|
||||
l2efficiency $1
|
||||
;;
|
||||
esac
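# Illustrative fetch output of the "utilization" function (hypothetical byte
# values, shown only to document the expected field names):
#   max_size.value 8589934592
#   size.value 6442450944
#   min_size.value 1073741824
#   target_size.value 6442450944
#   recently_size.value 3221225472
#   frequently_size.value 3221225472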
|
267
extern/zpool_capacity
vendored
|
@ -1,267 +0,0 @@
|
|||
#!/bin/bash
|
||||
|
||||
: << =cut
|
||||
|
||||
=head1 NAME
|
||||
|
||||
zpool_capacity - Munin plugin to monitor ZFS capacity
|
||||
|
||||
These functions are implemented:
|
||||
capacity : to monitor zpool capacity %
|
||||
allocated : to monitor zpool allocated bytes
|
||||
dedup : to monitor zpool dedup and compress ratio
|
||||
|
||||
Tested with Solaris 10 and 11, OpenIndiana Hipster, FreeBSD 11, CentOS 7
|
||||
|
||||
=head1 CONFIGURATION
|
||||
|
||||
Make symlink:
|
||||
cd /path/to/munin/etc/plugins
|
||||
ln -s /path/to/munin/lib/plugins/zpool_capacity .
|
||||
|
||||
On FreeBSD, it may be necessary to change the shebang from /bin/bash to /usr/local/bin/bash.
|
||||
|
||||
On Linux, root privileges are required to run the zpool command:
|
||||
[zpool_capacity]
|
||||
user root
|
||||
|
||||
=head1 ENVIRONMENT VARIABLES
|
||||
|
||||
critical : default 90
|
||||
warning : default 80
|
||||
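As a sketch, both thresholds can be overridden from the munin-node plugin
configuration; the section name matches the plugin, and the values below are
purely illustrative:

  [zpool_capacity]
  user root
  env.warning 85
  env.critical 95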
|
||||
=head1 AUTHOR
|
||||
|
||||
K.Cima https://github.com/shakemid
|
||||
|
||||
=head1 LICENSE
|
||||
|
||||
GPLv2
|
||||
|
||||
=head1 Magic markers
|
||||
|
||||
#%# family=contrib
|
||||
#%# capabilities=autoconf
|
||||
|
||||
=cut
|
||||
|
||||
# Include plugin.sh
|
||||
. "${MUNIN_LIBDIR:-}/plugins/plugin.sh"
|
||||
is_multigraph "$@"
|
||||
|
||||
# Shell options
|
||||
set -o nounset
|
||||
|
||||
# Global variables
|
||||
plugin_name=zpool_capacity
|
||||
functions='capacity allocated dedup'
|
||||
zpool_cmd=/sbin/zpool
|
||||
zfs_cmd=/sbin/zfs
|
||||
|
||||
# Environment variables
|
||||
: "${warning:=80}"
|
||||
: "${critical:=90}"
|
||||
|
||||
# Note: The performance of ZFS may significantly degrade when zpool capacity > 90%
|
||||
# See also: https://docs.oracle.com/cd/E53394_01/html/E54801/zfspools-4.html
|
||||
|
||||
# Functions
|
||||
|
||||
preconfig() {
|
||||
local func="$1"
|
||||
local p c
|
||||
|
||||
# data_attr format: field type draw label
|
||||
# label can contain white-spaces.
|
||||
data_attr=
|
||||
|
||||
case $func in
|
||||
capacity)
|
||||
global_attr="
|
||||
graph_title ZFS storage pool - Capacity
|
||||
graph_category fs
|
||||
graph_args --base 1000 --lower-limit 0 --upper-limit 100
|
||||
graph_vlabel % allocated
|
||||
graph_info ZFS storage pool - Capacity
|
||||
warning ${warning}
|
||||
critical ${critical}
|
||||
"
|
||||
for p in $pool_list
|
||||
do
|
||||
data_attr="${data_attr}
|
||||
${p} GAUGE LINE2 ${p}"
|
||||
done
|
||||
;;
|
||||
allocated)
|
||||
global_attr="
|
||||
graph_title ZFS storage pool - Allocated bytes
|
||||
graph_category fs
|
||||
graph_args --base 1024 --lower-limit 0
|
||||
graph_vlabel Bytes
|
||||
graph_info ZFS storage pool - Allocated bytes
|
||||
"
|
||||
c=0
|
||||
for p in $pool_list
|
||||
do
|
||||
data_attr="${data_attr}
|
||||
${p}_size GAUGE LINE ${p} size
|
||||
${p}_allocated GAUGE LINE2 ${p} allocated"
|
||||
global_attr="${global_attr}
|
||||
${p}_size.colour COLOUR${c}
|
||||
${p}_allocated.colour COLOUR${c}"
|
||||
c=$(( c + 1 ))
|
||||
done
|
||||
;;
|
||||
dedup)
|
||||
global_attr="
|
||||
graph_title ZFS storage pool - Dedup and compress ratio
|
||||
graph_category fs
|
||||
graph_args --base 1000 --lower-limit 1
|
||||
graph_vlabel Ratio
|
||||
graph_info ZFS storage pool - Dedup and compress ratio
|
||||
"
|
||||
for p in $pool_list
|
||||
do
|
||||
data_attr="${data_attr}
|
||||
${p}_dedup GAUGE LINE ${p} dedup
|
||||
${p}_compress GAUGE LINE ${p} compress"
|
||||
done
|
||||
;;
|
||||
esac
|
||||
}
|
||||
|
||||
do_config() {
|
||||
local func="$1"
|
||||
local label_max_length=45
|
||||
local field type draw label
|
||||
|
||||
preconfig "$func"
|
||||
echo "multigraph ${plugin_name}_${func}"
|
||||
|
||||
# print global attributes
|
||||
echo "$global_attr" | sed -e 's/^ *//' -e '/^$/d'
|
||||
|
||||
# print data source attributes
|
||||
echo "$data_attr" | while read -r field type draw label
|
||||
do
|
||||
[ -z "$field" ] && continue
|
||||
|
||||
field=$( clean_fieldname "$field" )
|
||||
echo "${field}.type ${type}"
|
||||
echo "${field}.draw ${draw}"
|
||||
echo "${field}.label ${label:0:${label_max_length}}"
|
||||
if [ "$type" = 'DERIVE' ]; then
|
||||
echo "${field}.min 0"
|
||||
fi
|
||||
if [ "$label" = 'dummy' ]; then
|
||||
echo "${field}.graph no"
|
||||
fi
|
||||
done
|
||||
|
||||
echo
|
||||
}
|
||||
|
||||
get_stats() {
|
||||
local func="$1"
|
||||
|
||||
case $func in
|
||||
capacity)
|
||||
"$zpool_cmd" list -H -o name,capacity | sed 's/%$//'
|
||||
;;
|
||||
allocated)
|
||||
( "$zpool_cmd" list -H -o name,allocated \
|
||||
| awk '{ print $1"_allocated", $2 }'
|
||||
"$zpool_cmd" list -H -o name,size \
|
||||
| awk '{ print $1"_size", $2 }'
|
||||
) \
|
||||
| perl -ane '
|
||||
@unit{ qw/ K M G T P E / } = ( 1 .. 6 );
|
||||
$name = $F[0];
|
||||
$byteu = $F[1];
|
||||
( $n, $u ) = $byteu =~ /^([\d.]+)([KMGTPE]?)$/;
|
||||
$byte = int( $n * 1024 ** ( $u ? $unit{ $u } : 0 ) );
|
||||
print "$name $byte\n";
|
||||
'
|
||||
# Note: ZFS supports up to 16EB.
|
||||
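# Worked example of the unit conversion above (illustrative input line):
#   "tank_size 1.5G" -> 1.5 * 1024^3 = 1610612736 -> prints "tank_size 1610612736"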
;;
|
||||
dedup)
|
||||
"$zpool_cmd" list -H -o name,dedup \
|
||||
| sed 's/x$//' \
|
||||
| awk '{ print $1"_dedup", $2 }'
|
||||
# example output:
|
||||
# $ zpool list -H -o name,dedup
|
||||
# rpool 1.00x
|
||||
# ...
|
||||
|
||||
"$zpool_cmd" list -H -o name \
|
||||
| xargs "$zfs_cmd" get -H -o name,value compressratio \
|
||||
| sed 's/x$//' \
|
||||
| awk '{ print $1"_compress", $2 }'
|
||||
# example output:
|
||||
# $ zfs get -H -o name,value compressratio rpool
|
||||
# rpool 1.00x
|
||||
;;
|
||||
esac
|
||||
}
|
||||
|
||||
do_fetch() {
|
||||
local func="$1"
|
||||
local zpool_stats field value
|
||||
|
||||
# zpool_stats contains 'key value\n'
|
||||
zpool_stats=$( get_stats "$func" )
|
||||
|
||||
echo "multigraph ${plugin_name}_${func}"
|
||||
|
||||
echo "$zpool_stats" | while read -r field value
|
||||
do
|
||||
field=$( clean_fieldname "$field" )
|
||||
echo "${field}.value ${value}"
|
||||
done
|
||||
|
||||
echo
|
||||
}
|
||||
|
||||
autoconf() {
|
||||
if [ -x "$zpool_cmd" ]; then
|
||||
echo yes
|
||||
else
|
||||
echo "no (failed to find executable 'zpool')"
|
||||
fi
|
||||
}
|
||||
|
||||
config() {
|
||||
local func
|
||||
|
||||
pool_list=$( "$zpool_cmd" list -H -o name )
|
||||
|
||||
for func in $functions
|
||||
do
|
||||
do_config "$func"
|
||||
done
|
||||
}
|
||||
|
||||
fetch() {
|
||||
local func
|
||||
|
||||
for func in $functions
|
||||
do
|
||||
do_fetch "$func"
|
||||
done
|
||||
}
|
||||
|
||||
# Main
|
||||
case ${1:-} in
|
||||
autoconf)
|
||||
autoconf
|
||||
;;
|
||||
config)
|
||||
config
|
||||
if [ "${MUNIN_CAP_DIRTYCONFIG:-0}" = "1" ]; then fetch; fi
|
||||
;;
|
||||
*)
|
||||
fetch
|
||||
;;
|
||||
esac
|
||||
|
||||
exit 0
|
127
extern/zpool_iostat
vendored
|
@ -1,127 +0,0 @@
|
|||
#!/bin/sh
|
||||
# -*- sh -*-
|
||||
|
||||
set -eu
|
||||
|
||||
: <<=cut
|
||||
|
||||
=head1 NAME
|
||||
|
||||
zpool_iostat - Plugin to monitor transfer statistics of ZFS pools
|
||||
|
||||
=head1 APPLICABLE SYSTEMS
|
||||
|
||||
All systems with "zpool" installed.
|
||||
|
||||
=head1 CONFIGURATION
|
||||
|
||||
No configuration is required.
|
||||
|
||||
=head1 INTERPRETATION
|
||||
|
||||
This plugin shows a graph with read (positive) and write (negative) values
|
||||
for the IO transfer of each pool.
|
||||
|
||||
=head1 MAGIC MARKERS
|
||||
|
||||
#%# family=auto
|
||||
#%# capabilities=autoconf
|
||||
|
||||
=head1 AUTHOR
|
||||
|
||||
tsaavik <github@hellspark.com>
|
||||
Peter Doherty <peterd@acranox.org>
|
||||
Lars Kruse <devel@sumpfralle.de>
|
||||
|
||||
=head1 LICENSE
|
||||
|
||||
GPLv2
|
||||
|
||||
=cut
|
||||
|
||||
|
||||
# shellcheck source=/usr/share/munin/plugins/plugin.sh
|
||||
. "$MUNIN_LIBDIR/plugins/plugin.sh"
|
||||
|
||||
|
||||
ZPOOL_BIN=/sbin/zpool
|
||||
ACTION="${1:-}"
|
||||
|
||||
|
||||
if [ "$ACTION" = "autoconf" ]; then
|
||||
if [ -x "$ZPOOL_BIN" ]; then
|
||||
echo yes
|
||||
else
|
||||
echo "no (missing executable '$ZPOOL_BIN')"
|
||||
fi
|
||||
exit 0
|
||||
fi
|
||||
|
||||
zlines=$("$ZPOOL_BIN" iostat -v | wc -l | sed 's/ //g')
|
||||
iostats=$("$ZPOOL_BIN" iostat -v 1 1 | tail "-$zlines")
|
||||
zlist=$(echo "$iostats" \
|
||||
| awk '/alloc/ {next}; /avail/ {next}; /raid/ {next}; /mirror/ {next};
|
||||
{ if ( $4 >=0 ) print $1}' \
|
||||
| tr ' ' '\n')
|
||||
|
||||
# Parse the n'th column of the iostat output for a given pool or disk as a
|
||||
# number (interpreting K and M suffixes).
|
||||
get_device_iostat_column() {
|
||||
local device_label="$1"
|
||||
local stat_column="$2"
|
||||
# convert all numeric values into kB
|
||||
echo "$iostats" \
|
||||
| awk '{ if ($1 == "'"$device_label"'") print $'"$stat_column"'; }' \
|
||||
| awk '/M/ {print int($1)*1000};
|
||||
/K/ {print int($1)};
|
||||
/[0-9]$/ {print int($1)/1000}'
|
||||
}
|
||||
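# Illustrative conversions performed above (results in kB; note that int()
# truncates the number before scaling):
#   "3.5M" -> int(3.5) * 1000 = 3000
#   "512K" -> 512
#   "2048" -> 2048 / 1000 = 2.048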
|
||||
|
||||
get_device_fieldname() {
|
||||
local device_id="$1"
|
||||
# Backwards compatibility (until 2016): keep the unprefixed pool name
|
||||
# for the fieldname, except for pool names starting with digits.
|
||||
if echo "$device_id" | grep -q "^[0-9]"; then
|
||||
clean_fieldname "_$device_id"
|
||||
else
|
||||
clean_fieldname "$device_id"
|
||||
fi
|
||||
}
|
||||
|
||||
|
||||
if [ "$ACTION" = "config" ]; then
|
||||
echo 'graph_title zpool iostat'
|
||||
echo 'graph_args --base 1000 -l 0'
|
||||
echo 'graph_vlabel write (-) / read (+) KBytes/s'
|
||||
echo 'graph_category disk'
|
||||
echo 'graph_scale no'
|
||||
echo 'graph_info This graph shows zpool iostat'
|
||||
# Assemble the "graph_order" as a sorted list of read/write pairs for
|
||||
# each device.
|
||||
printf "graph_order"
|
||||
echo "$zlist" | while read -r device_id; do
|
||||
fieldname="$(get_device_fieldname "$device_id")"
|
||||
printf " %s_read %s_write" "$fieldname" "$fieldname"
|
||||
done
|
||||
# finalize the 'graph_order' with a newline
|
||||
echo
|
||||
# output all fields: write as negative numbers and read as positive
|
||||
echo "$zlist" | while read -r device_id; do
|
||||
fieldname="$(get_device_fieldname "$device_id")"
|
||||
echo "${fieldname}_read.label $device_id"
|
||||
echo "${fieldname}_read.type GAUGE"
|
||||
echo "${fieldname}_read.graph no"
|
||||
echo "${fieldname}_write.label $device_id"
|
||||
echo "${fieldname}_write.type GAUGE"
|
||||
echo "${fieldname}_write.negative ${fieldname}_read"
|
||||
done
|
||||
exit 0
|
||||
fi
|
||||
|
||||
|
||||
echo "$zlist" | while read -r device_id; do
|
||||
fieldname="$(get_device_fieldname "$device_id")"
|
||||
echo "${fieldname}_read.value $(get_device_iostat_column "$device_id" 6)"
|
||||
echo "${fieldname}_write.value $(get_device_iostat_column "$device_id" 7)"
|
||||
done
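# Illustrative fetch output for a hypothetical pool "tank" (kB/s):
#   tank_read.value 120
#   tank_write.value 45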
|
|
@ -1,431 +0,0 @@
|
|||
#!/bin/bash
|
||||
#%# family=auto
|
||||
#%# capabilities=autoconf
|
||||
|
||||
# Michael Grote
|
||||
# Mail: michael.grote ät posteo.de
|
||||
# This plugin fetches multiple values from a MikroTik device.
|
||||
# Tested with an RB750GR3 and a CRS309.
|
||||
# Dependencies:
|
||||
# - bc
|
||||
# - sshpass
|
||||
|
||||
# A user is needed for this plugin to work:
|
||||
# /user add name=munin group=read password=hallowelt address=<munin-server-ip>
|
||||
|
||||
# plugin config:
|
||||
# [mt_system_<name>]
|
||||
# user root
|
||||
# env.ssh_user munin
|
||||
# env.ssh_password hallowelt
|
||||
# env.ssh_host 192.168.2.1
|
||||
|
||||
# set variables with default values
|
||||
ssh_user=${ssh_user:-user}
|
||||
ssh_password=${ssh_password:-password}
|
||||
ssh_host=${ssh_host:-192.168.2.1}
|
||||
c=0 # counter; used by several loops
|
||||
|
||||
# functions
|
||||
function get_name {
|
||||
while read -r line; do
|
||||
if echo "$line" | grep 'name:' > /dev/null; then
|
||||
name=$(echo "$line" | grep name: | awk '{ print $2 }')
|
||||
fi
|
||||
done <<< "$data"
|
||||
}
|
||||
function get_cpu_count {
|
||||
while read -r line; do
|
||||
if echo "$line" | grep 'cpu-count' > /dev/null; then
|
||||
anzahl_cpu=$(echo "$line" | grep cpu-count: | sed -r 's/(cpu-count: )([0-9]+)/\2/g' | tr -dc '0-9')
|
||||
fi
|
||||
done <<< "$data"
|
||||
}
|
||||
function check_sshpass {
|
||||
if ! dpkg -l sshpass > /dev/null 2>&1; then
|
||||
echo "could not find sshpass"
|
||||
exit 1
|
||||
fi
|
||||
}
|
||||
function check_bc {
|
||||
if ! dpkg -l bc > /dev/null 2>&1; then
|
||||
echo "could not find bc"
|
||||
exit 1
|
||||
fi
|
||||
}
|
||||
function get_data {
|
||||
# fetch data via ssh
|
||||
data=$(sshpass -p "$ssh_password" ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no "$ssh_user"@"$ssh_host" -q ':delay 6s; /system health print; /system resource print; /system resource cpu print; /system identity print')
|
||||
}
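# Roughly equivalent manual test of the collection step, using the example
# credentials from the plugin config comment above (adjust to your device):
#   sshpass -p 'hallowelt' ssh -o StrictHostKeyChecking=no munin@192.168.2.1 ':delay 6s; /system resource print'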
|
||||
function get_mem_total {
|
||||
mem_total=$(
|
||||
while read -r line; do
|
||||
echo "$line" | awk '/total-memory:/{ gsub(/MiB/,"",$2); print $2 }' | tr -dc '0-9.'
|
||||
done <<< "$data")
|
||||
}
|
||||
function get_mem_free {
|
||||
mem_free=$(
|
||||
while read -r line; do
|
||||
echo "$line" | awk '/free-memory:/{ gsub(/MiB/,"",$2); print $2 }' | tr -dc '0-9.'
|
||||
done <<< "$data")
|
||||
}
|
||||
|
||||
function get_voltage_label {
|
||||
while read -r line; do # for every line in ...; see "done"
|
||||
# print the line
|
||||
# search for "voltage:" with awk
|
||||
# if found:
|
||||
# print each output line via print
|
||||
# external/bash variables are embedded as "'"<var>"'"
|
||||
echo "$line" | awk '/voltage:/{
|
||||
print "multigraph voltage_graph_""'"$name"'";
|
||||
print "graph_title voltage " "'"$name"'";
|
||||
print "graph_vlabel volt";
|
||||
print "graph_category mikrotik";
|
||||
print "graph_args -l 0";
|
||||
print "voltage.label voltage";
|
||||
print "graph_info Input Voltage."
|
||||
}'
|
||||
done <<< "$data" # der variable data
|
||||
}
|
||||
function get_voltage_value {
|
||||
while read -r line; do
|
||||
# works like the "label" functions
|
||||
# print the multigraph header if dirtyconfig is not set
|
||||
# because the call then does not happen inside "config"
|
||||
# if dirtyconfig is unset or =0, print the header
|
||||
if [ "$MUNIN_CAP_DIRTYCONFIG" = "0" ] || [ -z "$MUNIN_CAP_DIRTYCONFIG" ]; then
|
||||
echo "$line" | awk '/voltage:/{
|
||||
print "multigraph voltage_graph_""'"$name"'";
|
||||
}'
|
||||
fi
|
||||
# strip the V suffix from value $2 with gsub
|
||||
# print $2
|
||||
echo "$line" | awk '/voltage:/{
|
||||
gsub(/V/,"",$2);
|
||||
print "voltage.value " $2
|
||||
}'
|
||||
done <<< "$data"
|
||||
}
|
||||
|
||||
function get_bad_blocks_label {
|
||||
while read -r line; do
|
||||
echo "$line" | awk '/bad-blocks:/{
|
||||
print "multigraph bad_blocks_graph_""'"$name"'";
|
||||
print "graph_title bad blocks " "'"$name"'";
|
||||
print "graph_vlabel %";
|
||||
print "graph_category mikrotik";
|
||||
print "graph_args -l 0 --upper-limit 100";
|
||||
print "bad_blocks.label bad_blocks";
|
||||
print "bad_blocks.warning 50";
|
||||
print "bad_blocks.critical 90";
|
||||
print "graph_info Percentage of Bad Blocks."
|
||||
}'
|
||||
done <<< "$data"
|
||||
}
|
||||
function get_bad_blocks_value {
|
||||
while read -r line; do
|
||||
if [ "$MUNIN_CAP_DIRTYCONFIG" = "0" ] || [ -z "$MUNIN_CAP_DIRTYCONFIG" ]; then
|
||||
echo "$line" | awk '/bad-blocks:/{
|
||||
print "multigraph bad_blocks_graph_""'"$name"'";
|
||||
}'
|
||||
fi
|
||||
echo "$line" | awk '/bad-blocks:/{
|
||||
gsub(/%/,"",$2);
|
||||
print "bad_blocks.value " $2
|
||||
}'
|
||||
done <<< "$data"
|
||||
}
|
||||
|
||||
function get_write_sect_total_label {
|
||||
while read -r line; do
|
||||
echo "$line" | awk '/write-sect-total:/{
|
||||
print "multigraph write_sect_total_graph_""'"$name"'";
|
||||
print "graph_title Total sector writes " "'"$name"'";
|
||||
print "graph_vlabel count";
|
||||
print "graph_category mikrotik";
|
||||
print "graph_args -l 0";
|
||||
print "write_sect_total.label write_sect_total";
|
||||
print "graph_info Total sector writes."
|
||||
}'
|
||||
done <<< "$data"
|
||||
}
|
||||
function get_write_sect_total_value {
|
||||
while read -r line; do
|
||||
if [ "$MUNIN_CAP_DIRTYCONFIG" = "0" ] || [ -z "$MUNIN_CAP_DIRTYCONFIG" ]; then
|
||||
echo "$line" | awk '/write-sect-total:/{
|
||||
print "multigraph write_sect_total_graph_""'"$name"'";
|
||||
}'
|
||||
fi
|
||||
echo "$line" | awk '/write-sect-total:/{
|
||||
print "write_sect_total.value " $2
|
||||
}'
|
||||
done <<< "$data"
|
||||
}
|
||||
|
||||
function get_write_sect_since_reboot_label {
|
||||
while read -r line; do
|
||||
echo "$line" | awk '/write-sect-since-reboot:/{
|
||||
print "multigraph write_sect_since_reboot_graph_""'"$name"'";
|
||||
print "graph_title Sector writes since reboot " "'"$name"'";
|
||||
print "graph_vlabel count";
|
||||
print "graph_category mikrotik";
|
||||
print "graph_args -l 0";
|
||||
print "write_sect_since_reboot.label write_sect_since_reboot";
|
||||
print "graph_info Total Sector writes since last reboot."
|
||||
}'
|
||||
done <<< "$data"
|
||||
}
|
||||
function get_write_sect_since_reboot_value {
|
||||
while read -r line; do
|
||||
if [ "$MUNIN_CAP_DIRTYCONFIG" = "0" ] || [ -z "$MUNIN_CAP_DIRTYCONFIG" ]; then
|
||||
echo "$line" | awk '/write-sect-since-reboot:/{
|
||||
print "multigraph write_sect_since_reboot_graph_""'"$name"'";
|
||||
}'
|
||||
fi
|
||||
echo "$line" | awk '/write-sect-since-reboot:/{
|
||||
print "write_sect_since_reboot.value " $2
|
||||
}'
|
||||
done <<< "$data"
|
||||
}
|
||||
|
||||
function get_temperature_label {
|
||||
while read -r line; do
|
||||
echo "$line" | awk '/temperature:/{
|
||||
print "multigraph temperature_graph_""'"$name"'";
|
||||
print "graph_title temperature " "'"$name"'";
|
||||
print "graph_vlabel °C";
|
||||
print "graph_category mikrotik";
|
||||
print "graph_args -l 0";
|
||||
print "temperature.label cpu temperature";
|
||||
print "temperature.warning 75";
|
||||
print "temperature.critical 90"
|
||||
}'
|
||||
done <<< "$data"
|
||||
}
|
||||
function get_temperature_value {
|
||||
while read -r line; do
|
||||
if [ "$MUNIN_CAP_DIRTYCONFIG" = "0" ] || [ -z "$MUNIN_CAP_DIRTYCONFIG" ]; then
|
||||
echo "$line" | awk '/temperature:/{
|
||||
print "multigraph temperature_graph_""'"$name"'";
|
||||
}'
|
||||
fi
|
||||
echo "$line" | awk '/temperature:/{
|
||||
gsub(/C/,"",$2);
|
||||
print "temperature.value " $2
|
||||
}'
|
||||
done <<< "$data"
|
||||
}
|
||||
|
||||
function get_cpu_label {
|
||||
echo multigraph cpu_load_graph_"$name"
|
||||
echo graph_title cpu load "$name"
|
||||
echo graph_vlabel %
|
||||
echo graph_category mikrotik
|
||||
echo graph_args -l 0 --upper-limit 100
|
||||
echo cpu_total.label total load
|
||||
echo cpu_total.warning 75
|
||||
echo cpu_total.critical 90
|
||||
echo graph_info Total CPU Load and Load, IRQ and Disk per Core.
|
||||
|
||||
while [ "$c" -lt "$anzahl_cpu" ]; do
|
||||
while read -r line; do
|
||||
echo "$line" | grep cpu$c | awk '{
|
||||
print "cpu"'"$c"'"_load.label " "cpu" "'"$c"'" " load"
|
||||
print "cpu"'"$c"'"_irq.label " "cpu" "'"$c"'" " irq"
|
||||
print "cpu"'"$c"'"_disk.label " "cpu" "'"$c"'" " disk"
|
||||
}'
|
||||
done <<< "$data"
|
||||
c=$(( c + 1 ))
|
||||
done
|
||||
}
|
||||
function get_cpu_value {
|
||||
if [ "$MUNIN_CAP_DIRTYCONFIG" = "0" ] || [ -z "$MUNIN_CAP_DIRTYCONFIG" ]; then
|
||||
echo multigraph cpu_load_graph_"$name"
|
||||
fi
|
||||
while read -r line; do
|
||||
echo "$line" | awk '/cpu-load:/{
|
||||
gsub(/%/,"",$2);
|
||||
print "cpu_total.value " $2
|
||||
}'
|
||||
done <<< "$data"
|
||||
c=0
|
||||
while [ "$c" -lt "$anzahl_cpu" ]; do
|
||||
while read -r line; do
|
||||
echo "$line" | grep cpu$c | awk '{
|
||||
gsub(/%/,"",$3);
|
||||
gsub(/%/,"",$4);
|
||||
gsub(/%/,"",$5);
|
||||
print "cpu"'"$c"'"_load.value " $3
|
||||
print "cpu"'"$c"'"_irq.value " $4
|
||||
print "cpu"'"$c"'"_disk.value " $5
|
||||
}'
|
||||
done <<< "$data"
|
||||
c=$(( c + 1 ))
|
||||
done
|
||||
}
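# Illustrative fetch output for one core, matching the example table at the
# end of this script:
#   cpu_total.value 0
#   cpu0_load.value 0
#   cpu0_irq.value 0
#   cpu0_disk.value 0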
|
||||
|
||||
function get_memory_label {
|
||||
get_mem_total
|
||||
get_mem_free
|
||||
while read -r line; do
|
||||
echo "$line" | awk '/free-memory:/{
|
||||
print "multigraph memory_graph_""'"$name"'";
|
||||
print "graph_title memory " "'"$name"'";
|
||||
print "graph_vlabel MiB";
|
||||
print "graph_category mikrotik";
|
||||
print "graph_args -l 0";
|
||||
print "total_memory.label total memory";
|
||||
print "used_memory.label used memory";
|
||||
print "free_memory.label free memory";
|
||||
print "graph_info Total, Used & free RAM.";
|
||||
gsub(/MiB/,"",$2);
|
||||
print "used_memory.critical " $2*0.9;
|
||||
print "used_memory.warning " $2*0.7 }'
|
||||
done <<< "$data"
|
||||
}
|
||||
function get_memory_value {
|
||||
get_mem_total
|
||||
get_mem_free
|
||||
if [ "$MUNIN_CAP_DIRTYCONFIG" = "0" ] || [ -z "$MUNIN_CAP_DIRTYCONFIG" ]; then
|
||||
echo multigraph memory_graph_"$name"
|
||||
fi
|
||||
while read -r line; do
|
||||
echo "$line" | awk '/total-memory:/{
|
||||
gsub(/MiB/,"",$2);
|
||||
print "total_memory.value " $2
|
||||
}'
|
||||
done <<< "$data"
|
||||
while read -r line; do
|
||||
echo "$line" | awk '/free-memory:/{
|
||||
gsub(/MiB/,"",$2);
|
||||
print "free_memory.value " $2
|
||||
}'
|
||||
done <<< "$data"
|
||||
# calculate used memory
|
||||
# used = total - free
|
||||
echo used_memory.value "$(echo "$mem_total"-"$mem_free" | bc)"
|
||||
}
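# Illustrative calculation, using the example output at the end of this script:
#   total-memory 256.0MiB, free-memory 209.9MiB -> used_memory.value 46.1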
|
||||
|
||||
function get_disk_label {
|
||||
while read -r line; do
|
||||
echo "$line" | awk '/free-hdd-space:/{
|
||||
print "multigraph disk_graph_""'"$name"'";
|
||||
print "graph_title disk " "'"$name"'";
|
||||
print "graph_vlabel KiB";
|
||||
print "graph_category mikrotik";
|
||||
print "graph_args -l 0";
|
||||
print "total_disk.label total disk space";
|
||||
print "free_disk.label free disk space";
|
||||
print "graph_info Total & free disk space."}'
|
||||
done <<< "$data"
|
||||
}
|
||||
function get_disk_value {
|
||||
if [ "$MUNIN_CAP_DIRTYCONFIG" = "0" ] || [ -z "$MUNIN_CAP_DIRTYCONFIG" ]; then
|
||||
echo multigraph disk_graph_"$name"
|
||||
fi
|
||||
while read -r line; do
|
||||
echo "$line" | grep KiB | awk '/free-hdd-space:/ {
|
||||
gsub(/KiB/,"",$2)
|
||||
print "free_disk.value " $2 }'
|
||||
echo "$line" | grep MiB | awk '/free-hdd-space:/ {
|
||||
gsub(/MiB/,"",$2)
|
||||
print "free_disk.value " $2*1024}'
|
||||
echo "$line" | grep KiB | awk '/total-hdd-space:/ {
|
||||
gsub(/KiB/,"",$2)
|
||||
print "total_disk.value " $2 }'
|
||||
echo "$line" | grep MiB | awk '/total-hdd-space:/ {
|
||||
gsub(/MiB/,"",$2)
|
||||
print "total_disk.value " $2*1024 }'
|
||||
done <<< "$data"
|
||||
}
|
||||
|
||||
# call the functions; the order matters
|
||||
check_sshpass
|
||||
check_bc
|
||||
get_data
|
||||
get_name
|
||||
get_cpu_count
|
||||
|
||||
# munin logic
|
||||
# if $1 = X; then
|
||||
if [ "$1" = "autoconf" ]; then
|
||||
echo yes
|
||||
exit 0
|
||||
fi
|
||||
if [ "$1" = "config" ]; then
|
||||
# print the labels
|
||||
# if dirtyconfig is set, also call the value function
|
||||
get_voltage_label
|
||||
if [ "$MUNIN_CAP_DIRTYCONFIG" = "1" ]; then
|
||||
get_voltage_value
|
||||
fi
|
||||
get_bad_blocks_label
|
||||
if [ "$MUNIN_CAP_DIRTYCONFIG" = "1" ]; then
|
||||
get_bad_blocks_value
|
||||
fi
|
||||
get_write_sect_total_label
|
||||
if [ "$MUNIN_CAP_DIRTYCONFIG" = "1" ]; then
|
||||
get_write_sect_total_value
|
||||
fi
|
||||
get_write_sect_since_reboot_label
|
||||
if [ "$MUNIN_CAP_DIRTYCONFIG" = "1" ]; then
|
||||
get_write_sect_since_reboot_value
|
||||
fi
|
||||
get_temperature_label
|
||||
if [ "$MUNIN_CAP_DIRTYCONFIG" = "1" ]; then
|
||||
get_temperature_value
|
||||
fi
|
||||
get_cpu_label
|
||||
if [ "$MUNIN_CAP_DIRTYCONFIG" = "1" ]; then
|
||||
get_cpu_value
|
||||
fi
|
||||
get_memory_label
|
||||
if [ "$MUNIN_CAP_DIRTYCONFIG" = "1" ]; then
|
||||
get_memory_value
|
||||
fi
|
||||
get_disk_label
|
||||
if [ "$MUNIN_CAP_DIRTYCONFIG" = "1" ]; then
|
||||
get_disk_value
|
||||
fi
|
||||
exit 0
|
||||
fi
|
||||
# never reached if dirtyconfig is set
|
||||
# in that case only config is called and the values are printed along with it
|
||||
get_voltage_value
|
||||
get_bad_blocks_value
|
||||
get_write_sect_total_value
|
||||
get_write_sect_since_reboot_value
|
||||
get_temperature_value
|
||||
get_cpu_value
|
||||
get_memory_value
|
||||
get_disk_value
|
||||
exit 0
|
||||
|
||||
# Example output:
|
||||
# voltage: 24.5V
|
||||
# temperature: 46C
|
||||
# uptime: 1w6d17h12m25s
|
||||
# version: 6.48.4 (stable)
|
||||
# build-time: Aug/18/2021 06:43:27
|
||||
# factory-software: 6.46.3
|
||||
# free-memory: 209.9MiB
|
||||
# total-memory: 256.0MiB
|
||||
# cpu: MIPS 1004Kc V2.15
|
||||
# cpu-count: 4
|
||||
# cpu-frequency: 880MHz
|
||||
# cpu-load: 0%
|
||||
# free-hdd-space: 4760.0KiB
|
||||
# total-hdd-space: 16.3MiB
|
||||
# write-sect-since-reboot: 46412
|
||||
# write-sect-total: 62012
|
||||
# bad-blocks: 0%
|
||||
# architecture-name: mmips
|
||||
# board-name: hEX
|
||||
# platform: MikroTik
|
||||
# # CPU LOAD IRQ DISK
|
||||
# 0 cpu0 0% 0% 0%
|
||||
# 1 cpu1 1% 1% 0%
|
||||
# 2 cpu2 0% 0% 0%
|
||||
# 3 cpu3 1% 0% 0%
|
||||
# name: hEX
|
|
@ -1,63 +0,0 @@
|
|||
#!/bin/bash
|
||||
#%# family=auto
|
||||
#%# capabilities=autoconf
|
||||
|
||||
# Michael Grote
|
||||
# michael.grote ät posteo.de
|
||||
# Outputs the running, stopped and total counts of Proxmox VMs and LXCs.
|
||||
# needs to be run as user: root AND group: root
|
||||
# [proxmox_count]
|
||||
# user root
|
||||
# group root
|
||||
|
||||
# paths to binaries
|
||||
wqm=$(which qm)
|
||||
wpct=$(which pct)
|
||||
# count VMs
|
||||
total_vm=$($wqm list | sed 1d | wc -l)
|
||||
running_vm=$($wqm list | sed 1d | grep running | wc -l)
|
||||
stopped_vm=$($wqm list | sed 1d | grep stopped | wc -l)
|
||||
# count LXCs
|
||||
total_lxc=$($wpct list | sed 1d | wc -l)
|
||||
running_lxc=$($wpct list | sed 1d | grep running | wc -l)
|
||||
stopped_lxc=$($wpct list | sed 1d | grep stopped | wc -l)
|
||||
# calculate the total
|
||||
total=$(($total_vm + $total_lxc))
|
||||
|
||||
# if parameter = ...
|
||||
if [ "$1" = "autoconf" ]; then
|
||||
echo yes
|
||||
exit 0
|
||||
fi
|
||||
|
||||
if [ "$1" = "config" ]; then
|
||||
# set labels
|
||||
echo total_vm.label total Virtual Machines
|
||||
echo total.label total VMs and LXCs
|
||||
|
||||
echo running_vm.label running Virtual Machines
|
||||
echo running_lxc.label running LXCs
|
||||
|
||||
echo stopped_vm.label stopped Virtual Machines
|
||||
echo stopped_lxc.label stopped LXCs
|
||||
|
||||
echo total_lxc.label total LXCs
|
||||
|
||||
# set options
|
||||
echo 'graph_title ProxMox - Number of VMs and LXCs'
|
||||
echo 'graph_vlabel Count'
|
||||
echo 'graph_category virtualization'
|
||||
echo 'graph_args -l 0'
|
||||
exit 0
|
||||
fi
|
||||
echo total.value $total
|
||||
|
||||
echo total_vm.value $total_vm
|
||||
echo total_lxc.value $total_lxc
|
||||
|
||||
echo running_vm.value $running_vm
|
||||
echo running_lxc.value $running_lxc
|
||||
|
||||
echo stopped_vm.value $stopped_vm
|
||||
echo stopped_lxc.value $stopped_lxc
|
||||
exit 0
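# Example output (hypothetical counts):
#   total.value 5
#   total_vm.value 3
#   total_lxc.value 2
#   running_vm.value 2
#   running_lxc.value 2
#   stopped_vm.value 1
#   stopped_lxc.value 0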
|
45
zfs_count
|
@ -1,45 +0,0 @@
|
|||
#!/bin/bash
|
||||
#%# family=auto
|
||||
#%# capabilities=autoconf
|
||||
|
||||
# Michael Grote
|
||||
# michael.grote ät posteo.de
|
||||
# Outputs the count of zfs pools, datasets and snapshots.
|
||||
|
||||
# read all pools; sed deletes the first line (the header)
|
||||
list_pools=$(zpool list | sed 1d)
|
||||
|
||||
# if parameter = ...
|
||||
if [ "$1" = "autoconf" ]; then
|
||||
echo yes
|
||||
exit 0
|
||||
fi
|
||||
|
||||
if [ "$1" = "config" ]; then
|
||||
# https://superuser.com/questions/284187/bash-iterating-over-lines-in-a-variable
|
||||
while read -r line; do
|
||||
# set labels: <pool>_snapshot.label <pool> snapshots
|
||||
echo $line | awk '{print $1"_snapshot.label" $1 " snapshots"}'
|
||||
echo $line | awk '{print $1"_dataset.label" $1 " datasets"}'
|
||||
done <<< "$list_pools"
|
||||
echo 'pools.label pools'
|
||||
# set options
|
||||
echo 'graph_title zfs - pool, dataset and snapshot count' # title line
|
||||
echo 'graph_vlabel count' # text on the left, vertical
|
||||
echo 'graph_category fs' # category
|
||||
echo 'graph_args -l 0' # lower limit 0
|
||||
exit 0
|
||||
fi
|
||||
|
||||
# read each line of the variable $list_pools
|
||||
# for each line:
|
||||
# "echo" die Zeile, wende awk drauf, Spalte $1/$7
|
||||
echo pools.value $(zpool list | sed 1d | wc -l)
|
||||
while read -r line; do
|
||||
# set the pool name
|
||||
poolname=$(echo $line | awk '{ print $1 }')
|
||||
# count snapshots
|
||||
echo $poolname"_snapshot.value" $(zfs list -r -t snapshot $poolname| sed 1d | wc -l)
|
||||
echo $poolname"_dataset.value" $(zfs list -r $poolname| sed 1d | wc -l)
|
||||
done <<< "$list_pools"
|
||||
exit 0
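# Example fetch output for a hypothetical single pool "tank":
#   pools.value 1
#   tank_snapshot.value 12
#   tank_dataset.value 7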
|
|
@ -1,45 +0,0 @@
|
|||
#!/bin/bash
|
||||
#%# family=auto
|
||||
#%# capabilities=autoconf
|
||||
|
||||
# Michael Grote
|
||||
# michael.grote ät posteo.de
|
||||
# Outputs the zpool fragmentation per zfs pool.
|
||||
|
||||
# read all pools; sed deletes the first line (the header)
|
||||
# strip the % sign
|
||||
list=$(zpool list | sed 1d | tr -d %)
|
||||
|
||||
# if parameter = ...
|
||||
if [ "$1" = "autoconf" ]; then
|
||||
echo yes
|
||||
exit 0
|
||||
fi
|
||||
|
||||
if [ "$1" = "config" ]; then
|
||||
# https://superuser.com/questions/284187/bash-iterating-over-lines-in-a-variable
|
||||
while read -r line; do
|
||||
# set labels
|
||||
echo $line | awk '{print $1".label " $1}'
|
||||
# set warning limits
|
||||
echo $line | awk '{print $1".warning " 50}'
|
||||
echo $line | awk '{print $1".critical " 75}'
|
||||
done <<< "$list"
|
||||
# set options
|
||||
echo 'graph_title ZFS storage pool - fragmentation' # title line
|
||||
echo 'graph_vlabel fragmentation in %' # text on the left, vertical
|
||||
echo 'graph_category fs' # category
|
||||
echo 'graph_info This graph shows the ZFS Pool fragmentation.' # text above the table/info
|
||||
echo 'graph_args -l 0 --upper-limit 100' # limit values to 0-100
|
||||
exit 0
|
||||
fi
|
||||
|
||||
# read each line of the variable $list
|
||||
# for each line:
|
||||
# "echo" die Zeile, wende awk drauf, Spalte $1/$7
|
||||
while read -r line; do
|
||||
# print the value
|
||||
# <name>.value <value>
|
||||
echo $line | awk '{print $1".value " $7}'
|
||||
done <<< "$list"
|
||||
exit 0
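# Example fetch output for a hypothetical pool "tank" at 23% fragmentation:
#   tank.value 23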
|