Plugins hinzugefügt

This commit is contained in:
Michael Grote 2021-02-27 17:36:57 +01:00
commit 4a7c3000b2
5 changed files with 1209 additions and 0 deletions

361
zfs_arcstats Normal file
View file

@ -0,0 +1,361 @@
#!/bin/bash
: << =cut
=head1 NAME
zfs_arcstats - Munin multi-graph plugin to monitor ZFS ARC statistics
These functions are implemented:
size : to monitor ARC size
activity : to monitor ARC activities
actlist : to monitor ARC activities by cache list (MFU/MRU)
actdata : to monitor ARC activities by data type (Demand/Prefetch)
hitratio : to monitor ARC hit ratio
Tested with Solaris 10 and 11, OpenIndiana Hipster, FreeBSD 11, CentOS 7
This plugin is inspired by arcstat.pl [https://github.com/mharsch/arcstat]
=head1 CONFIGURATION
Make symlink:
cd /path/to/munin/etc/plugins
ln -s /path/to/munin/lib/plugins/zfs_arcstats .
For FreeBSD, it should be necessary to change shebang /bin/bash -> /usr/local/bin/bash
=head1 ENVIRONMENT VARIABLES
None
=head1 AUTHOR
K.Cima https://github.com/shakemid
=head1 LICENSE
GPLv2
=head1 Magic markers
#%# family=contrib
#%# capabilities=autoconf
=cut
# Include plugin.sh (munin shell helpers; provides is_multigraph)
. "${MUNIN_LIBDIR:-}/plugins/plugin.sh"
# Abort early unless the munin master supports multigraph plugins
is_multigraph "$@"
# Shell options
set -o nounset
# Set global variables
plugin_name=zfs_arcstats
# Sub-graphs emitted by this plugin; each becomes "multigraph zfs_arcstats_<func>"
functions='size activity actlist actdata hitratio'
# Functions
get_osname() {
    # Report the operating system name, distinguishing illumos from
    # Oracle Solaris (both answer "SunOS" to uname -s; illumos
    # identifies itself in uname -v).
    local name
    name=$( uname -s )
    if [ "$name" = SunOS ] && [[ $( uname -v ) == illumos* ]]; then
        name=illumos
    fi
    echo "$name"
}
preconfig() {
# Build the munin attribute strings for one sub-graph:
#   global_attr - graph-level attributes, one per line
#   data_attr   - data sources as "field type draw label" records
local func=$1
# data_attr format: field type draw label
# label can contain white-spaces.
case $func in
size)
global_attr="
graph_title ZFS ARC - Size
graph_category fs
graph_args --base 1024 --lower-limit 0
graph_vlabel Bytes
graph_info ZFS ARC - Size
"
# The size-breakdown kstat fields differ between implementations.
case $osname in
SunOS)
# For Solaris 10,11
data_attr="
data_size GAUGE AREASTACK Data size
prefetch_meta_size GAUGE AREASTACK Prefetch meta size
buf_size GAUGE AREASTACK Buf size
other_size GAUGE AREASTACK Other size
"
;;
*)
# For illumos, FreeBSD, Linux (OpenZFS)
data_attr="
data_size GAUGE AREASTACK Data size
metadata_size GAUGE AREASTACK Metadata size
hdr_size GAUGE AREASTACK Hdr size
other_size GAUGE AREASTACK Other size
mru_size GAUGE LINE MRU size
mfu_size GAUGE LINE MFU size
"
;;
esac
# Fields common to every platform are appended to the OS-specific list.
data_attr="
$data_attr
size GAUGE LINE ARC size
c GAUGE LINE Target size
p GAUGE LINE Target MRU size
"
;;
activity)
# The *.negative lines pair each hit field with its miss field so
# misses are drawn below the zero line.
global_attr="
graph_title ZFS ARC - Activities
graph_category fs
graph_args --base 1000 --lower-limit 0
graph_vlabel misses (-) / hits (+) per second
graph_info ZFS ARC - Activities
hits.negative misses
l2_hits.negative l2_misses
"
data_attr="
misses DERIVE LINE dummy
hits DERIVE LINE ARC
l2_misses DERIVE LINE dummy
l2_hits DERIVE LINE L2ARC
"
;;
actlist)
global_attr="
graph_title ZFS ARC - Activities by cache list
graph_category fs
graph_args --base 1000 --lower-limit 0
graph_vlabel ghost hits (-) / hits (+) per second
graph_info ZFS ARC - Activities by cache list
mfu_hits.negative mfu_ghost_hits
mru_hits.negative mru_ghost_hits
"
data_attr="
mfu_ghost_hits DERIVE LINE dummy
mfu_hits DERIVE LINE MFU
mru_ghost_hits DERIVE LINE dummy
mru_hits DERIVE LINE MRU
"
;;
actdata)
global_attr="
graph_title ZFS ARC - Activities by data type
graph_category fs
graph_args --base 1000 --lower-limit 0
graph_vlabel misses (-) / hits (+) per second
graph_info ZFS ARC - Activities by data type
demand_data_hits.negative demand_data_misses
demand_metadata_hits.negative demand_metadata_misses
prefetch_data_hits.negative prefetch_data_misses
prefetch_metadata_hits.negative prefetch_metadata_misses
"
data_attr="
demand_data_misses DERIVE LINE dummy
demand_data_hits DERIVE LINE D data
demand_metadata_misses DERIVE LINE dummy
demand_metadata_hits DERIVE LINE D meta
prefetch_data_misses DERIVE LINE dummy
prefetch_data_hits DERIVE LINE P data
prefetch_metadata_misses DERIVE LINE dummy
prefetch_metadata_hits DERIVE LINE P meta
"
;;
hitratio)
# Ratios are computed by the munin master via cdef from the raw
# counters; the raw counter fields are hidden (label "dummy").
global_attr="
graph_title ZFS ARC - Hit ratio
graph_category fs
graph_args --base 1000 --lower-limit 0 --upper-limit 100 --rigid
graph_vlabel % hits
graph_info ZFS ARC - Hit ratio - The graph shows cache hit ratio between munin-update intervals (usually 5 minutes).
hitratio.cdef hits,DUP,misses,+,/,100,*
l2_hitratio.cdef l2_hits,DUP,l2_misses,+,/,100,*
demand_data_hitratio.cdef demand_data_hits,DUP,demand_data_misses,+,/,100,*
demand_metadata_hitratio.cdef demand_metadata_hits,DUP,demand_metadata_misses,+,/,100,*
prefetch_data_hitratio.cdef prefetch_data_hits,DUP,prefetch_data_misses,+,/,100,*
prefetch_metadata_hitratio.cdef prefetch_metadata_hits,DUP,prefetch_metadata_misses,+,/,100,*
"
data_attr="
hits DERIVE LINE dummy
misses DERIVE LINE dummy
l2_hits DERIVE LINE dummy
l2_misses DERIVE LINE dummy
demand_data_hits DERIVE LINE dummy
demand_data_misses DERIVE LINE dummy
demand_metadata_hits DERIVE LINE dummy
demand_metadata_misses DERIVE LINE dummy
prefetch_data_hits DERIVE LINE dummy
prefetch_data_misses DERIVE LINE dummy
prefetch_metadata_hits DERIVE LINE dummy
prefetch_metadata_misses DERIVE LINE dummy
hitratio GAUGE LINE2 ARC hits
l2_hitratio GAUGE LINE L2ARC hits
demand_data_hitratio GAUGE LINE Demand data hits
demand_metadata_hitratio GAUGE LINE Demand metadata hits
prefetch_data_hitratio GAUGE LINE Prefetch data hits
prefetch_metadata_hitratio GAUGE LINE Prefetch metadata hits
"
;;
*)
echo "Unknown function: $func"
exit 1
;;
esac
}
do_config() {
    # Print munin "config" output for one sub-graph.
    local func=$1
    local label_max_length=45
    local field type draw label
    preconfig "$func"
    echo "multigraph ${plugin_name}_${func}"
    # Global attributes: strip the leading indent and drop blank lines.
    echo "$global_attr" | sed -e 's/^ *//' -e '/^$/d'
    # Data source attributes, one "field type draw label" record per line.
    while read -r field type draw label; do
        [ -n "$field" ] || continue
        echo "${field}.type ${type}"
        echo "${field}.draw ${draw}"
        echo "${field}.label ${label:0:${label_max_length}}"
        # DERIVE counters must never go negative.
        [ "$type" != 'DERIVE' ] || echo "${field}.min 0"
        # Fields labelled "dummy" exist only to feed .negative/.cdef pairs.
        [ "$label" != 'dummy' ] || echo "${field}.graph no"
    done <<< "$data_attr"
    echo
}
get_stats() {
# Read all ARC kstat counters into shell variables named arcstats_<stat>.
local arcstats stat value
case $osname in
SunOS|illumos)
arcstats=$( kstat -p 'zfs:0:arcstats' | sed -e 's/:/ /g' | awk '{ print $4,$5 }' )
# kstat output example:
# $ kstat -p zfs:0:arcstats
# zfs:0:arcstats:c 4135233544
# ...
;;
*BSD)
arcstats=$( /sbin/sysctl -a | sed -n -e 's/^kstat\.zfs\.misc\.arcstats\.//p' | awk -F: '{ print $1,$2 }' )
# sysctl output example:
# $ sysctl -a
# ...
# kstat.zfs.misc.arcstats.c: 632540160
# ...
;;
Linux)
# sed drops the two header lines of the proc file
arcstats=$( sed '1,2d' /proc/spl/kstat/zfs/arcstats | awk '{ print $1,$3 }' )
# proc file output example:
# $ cat /proc/spl/kstat/zfs/arcstats
# ...
# name type data
# hits 4 62
# ...
;;
*)
echo "Unsupported OS: $osname"
exit 1
esac
# The here-string keeps the loop in the current shell, so the variable
# assignments survive (piping into "while" would use a subshell).
while read -r stat value
do
printf -v "arcstats_${stat}" "%s" "$value"
# printf -v means indirect variable assignment (similar to eval)
done <<< "$arcstats"
}
do_fetch() {
    # Print munin "fetch" output (current values) for one sub-graph.
    local func=$1
    local field type draw label value ref
    preconfig "$func"
    echo "multigraph ${plugin_name}_${func}"
    while read -r field type draw label; do
        [ -n "$field" ] || continue
        # get_stats stored each counter as arcstats_<field>; counters
        # absent on this system (e.g. no L2ARC) default to 0.
        ref="arcstats_${field}"
        value=${!ref:-0}
        echo "${field}.value ${value}"
    done <<< "$data_attr"
    echo
}
autoconf() {
    # Munin autoconf: ZFS is assumed usable when /sbin/zfs is executable.
    if [ -x /sbin/zfs ]
    then
        echo yes
        return
    fi
    echo "no (ZFS looks unavailable)"
}
config() {
    # Emit the munin configuration for every sub-graph in turn.
    local f
    for f in $functions; do
        do_config "$f"
    done
}
fetch() {
    # Collect the kstat counters once, then emit every sub-graph's values.
    local f
    get_stats
    for f in $functions; do
        do_fetch "$f"
    done
}
# Main
osname=$( get_osname )
case ${1:-} in
autoconf)
autoconf
;;
config)
config
# With DIRTYCONFIG the master accepts values during the config run
if [ "${MUNIN_CAP_DIRTYCONFIG:-0}" = "1" ]; then fetch; fi
;;
*)
# default action: print current values
fetch
;;
esac
exit 0

99
zfs_list Normal file
View file

@ -0,0 +1,99 @@
#!/bin/bash
#
# Plugin to monitor ZFS Filesystems
# Author: Adam Michel (elfurbe@furbism.com)
# Description:
# This is an extension of the zfs_fs plugin
# modified as a multigraph to graph all zfs
# filesystems it can find
#
# Tested on Ubuntu-14.04
#
# Parameters understood:
#
# config (required)
# autoconf (optional - used by munin-config)
#
#%# family=auto
. "$MUNIN_LIBDIR/plugins/plugin.sh"

# BUG FIX: this previously read `need_multigraph()`, which -- followed by
# the if-block below -- *defined* a shell function (never called) instead
# of executing anything, so the autoconf/suggest handling never ran.
# Require multigraph capability the same way the sibling ZFS plugins do.
is_multigraph "$@"

if [ "$1" = "autoconf" ]; then
    # Makes little sense to autoconf if you can't suggest
    echo no
    exit 0
fi

if [ "$1" = "suggest" ]; then
    exit 0
fi
if [ "$1" = "config" ]; then
    for i in $(zfs list -Hp | awk '{print $1}'); do
        # One byte value per requested property (zfs-get order), with the
        # computed total (sum of everything except quota) appended by awk END.
        values=( $(zfs get -p usedbydataset,usedbychildren,usedbysnapshots,usedbyrefreservation,available,quota "$i" | awk 'BEGIN {total=0;} { if( NR==1 ) next; } !/quota/ {total=total+$3;} {print $3} END{print total;}') )
        # munin fieldnames must not contain '/', so flatten the dataset path
        fsname=$(clean_fieldname "$(echo "$i" | sed 's/\//__/g')")
        # NOTE: the original used `echo <<EOF "..."` -- a misleading
        # construct where the here-document was discarded and the quoted
        # string did all the work.  A plain here-document is clearer and
        # produces identical output.
        cat <<EOF
multigraph zfs_list_$fsname
graph_title $fsname usage
graph_order usedbydataset usedbychildren usedbysnapshots usedbyrefreservation available total quota
graph_args --base 1024 -r -l 0 --vertical-label Bytes --upper-limit ${values[6]}
graph_info This graph shows how is used a zfs filesystems.
graph_category fs
graph_period second
usedbydataset.label UsedByDataset
usedbydataset.draw AREA
usedbydataset.info Used space by Dataset
usedbydataset.colour FF0000
usedbychildren.label UsedByChildren
usedbychildren.draw STACK
usedbychildren.info Used space by children
usedbychildren.colour FFCC33
usedbysnapshots.label UsedBySnapshots
usedbysnapshots.draw STACK
usedbysnapshots.info Used space by snapshot
usedbysnapshots.colour 0000FF
usedbyrefreservation.label Usedbyrefreservation
usedbyrefreservation.draw STACK
usedbyrefreservation.info Used space by Ref Reservation
usedbyrefreservation.colour 33CCFF
available.label Available
available.draw STACK
available.info Free space
available.colour 00FF00
total.label Total
total.draw LINE1
total.info Total
total.colour 000000
quota.label Quota
quota.draw LINE1
quota.info Quota
quota.colour 555555
EOF
    done
    exit 0
fi
for i in $(zfs list -Hp | awk '{print $1}'); do
    # Same extraction as in the config branch: property bytes plus total.
    values=( $(zfs get -p usedbydataset,usedbychildren,usedbysnapshots,usedbyrefreservation,available,quota "$i" | awk 'BEGIN {total=0;} { if( NR==1 ) next; } !/quota/ {total=total+$3;} {print $3} END{print total;}') )
    fsname=$(clean_fieldname "$(echo "$i" | sed 's/\//__/g')")
    # zfs reports "-" when no quota is set; graph that as 0
    if [ "${values[5]}" = "-" ]; then
        quota=0
    else
        quota=${values[5]}
    fi
    # Plain here-document instead of the original `echo <<EOF "..."` trick.
    cat <<EOF
multigraph zfs_list_$fsname
usedbydataset.value ${values[0]}
usedbysnapshots.value ${values[2]}
usedbychildren.value ${values[1]}
usedbyrefreservation.value ${values[3]}
available.value ${values[4]}
total.value ${values[6]}
quota.value $quota
EOF
done
exit 0

355
zfsonlinux_stats_ Normal file
View file

@ -0,0 +1,355 @@
#!/usr/bin/env bash
# ZFS statistics for ZFSonLinux
# Author: Adam Michel (elfurbe@furbism.com)
#
# Description:
# This is a modification of the zfs_stats
# plugin by David Bjornsson (which was a
# rewrite of zfs-stats-for-freebsd scripts
# by patpro) modified to work with ZFSonLinux.
#
# Tested on Ubuntu-14.04
#
# Usage: zfs_stats_FUNCTION
#
# Available functions:
# efficiency - ARC efficiency
# cachehitlist - Cache hit by cache list
# cachehitdtype - Cache hit by data type
# dmuprefetch - DMU prefetch
# utilization - ARC size breakdown
# l2utilization - L2ARC size breakdown
# l2efficiency - L2ARC efficiency
#
#%# family=auto
# Plugin function is encoded in the symlink name: zfs_stats_<function>
FUNCTION=$(basename "$0" | cut -d_ -f3)
# Total system memory in kB (upper bound for the ARC size graph).
# Single awk call replaces the original `cat | grep | awk` pipeline.
MEMMAX=$(awk '/^MemTotal:/ {print $2}' /proc/meminfo)
BC='/usr/bin/bc -q'
ARCSTATS="/proc/spl/kstat/zfs/arcstats"
ZFETCHSTATS="/proc/spl/kstat/zfs/zfetchstats"
#
# Pull all values from arcstats
#
# Each proc-file line is "name type data"; selected counters get friendly
# variable names, everything else is exported as the uppercased kstat name.
while read name type data
do
# skip the kstat header lines and blanks
[[ $name =~ ^[0-9].* ]] && continue
[[ $name == "name" ]] && continue
[[ $name == "" ]] && continue
case $name in
"hits" )
export ARC_HITS=$data
;;
"misses" )
export ARC_MISSES=$data
;;
"p" )
# NOTE(review): kstat 'p' is the MRU *target* size; it is stored as
# MRU_SIZE and used that way in the MFU_SIZE calculation below
export MRU_SIZE=$data
;;
"c_max" )
export MAX_SIZE=$data
;;
"c_min" )
export MIN_SIZE=$data
;;
"c" )
export TARGET_SIZE=$data
;;
* )
VARNAME=`echo $name | tr '[:lower:]' '[:upper:]'`
#declare $VARNAME=$data
export $VARNAME=$data
;;
esac
done < $ARCSTATS
#
# Pull all values from zfetchstats
#
# Same scheme for the DMU prefetch statistics.
while read name type data
do
[[ $name =~ ^[0-9].* ]] && continue
[[ $name == "name" ]] && continue
case $name in
"hits" )
export DMU_HITS=$data
;;
"misses" )
export DMU_MISSES=$data
;;
* )
VARNAME=`echo $name | tr '[:lower:]' '[:upper:]'`
export $VARNAME=$data
;;
esac
done < $ZFETCHSTATS
#
# Calculation macros
#
# Derived totals and percentages; bc does the fixed-point arithmetic
# (scale=2).  Each percentage guards against division by zero.
ANON_HITS=`echo "$ARC_HITS-($MFU_HITS+$MRU_HITS+$MFU_GHOST_HITS+$MRU_GHOST_HITS)" | $BC`
ARC_ACCESSES_TOTAL=`echo "$ARC_HITS+$ARC_MISSES" | $BC`
DEMAND_DATA_TOTAL=`echo "$DEMAND_DATA_HITS+$DEMAND_DATA_MISSES" | $BC`
PREFETCH_DATA_TOTAL=`echo "$PREFETCH_DATA_HITS+$PREFETCH_DATA_MISSES" | $BC`
REAL_HITS=`echo "$MFU_HITS+$MRU_HITS" | $BC`
# overall ARC hit/miss ratios
if [ $ARC_ACCESSES_TOTAL != 0 ]; then
CACHE_HIT_RATIO_PERC=`echo "scale=2 ; (100*$ARC_HITS/$ARC_ACCESSES_TOTAL)" | $BC`
CACHE_MISS_RATIO_PERC=`echo "scale=2 ; (100*$ARC_MISSES/$ARC_ACCESSES_TOTAL)" | $BC`
ACTUAL_HIT_RATIO_PERC=`echo "scale=2 ; (100*$REAL_HITS/$ARC_ACCESSES_TOTAL)" | $BC`
else
CACHE_HIT_RATIO_PERC=0
CACHE_MISS_RATIO_PERC=0
ACTUAL_HIT_RATIO_PERC=0
fi
if [ $DEMAND_DATA_TOTAL != 0 ]; then DATA_DEMAND_EFFICIENCY_PERC=`echo "scale=2 ; (100*$DEMAND_DATA_HITS/$DEMAND_DATA_TOTAL)" | $BC`; else DATA_DEMAND_EFFICIENCY_PERC=0; fi
if [ $PREFETCH_DATA_TOTAL != 0 ]; then DATA_PREFETCH_EFFICENCY_PERC=`echo "scale=2 ; (100*$PREFETCH_DATA_HITS/$PREFETCH_DATA_TOTAL)" | $BC`; else DATA_PREFETCH_EFFICENCY_PERC=0; fi
# hit breakdown by cache list and data type (as a share of all ARC hits)
if [ $ARC_HITS != 0 ]; then
ANONYMOUSLY_USED_PERC=`echo "scale=2 ; (100*$ANON_HITS/$ARC_HITS)" | $BC`
MOST_RECENTLY_USED_PERC=`echo "scale=2 ; (100*$MRU_HITS/$ARC_HITS)" | $BC`
MOST_FREQUENTLY_USED_PERC=`echo "scale=2 ; (100*$MFU_HITS/$ARC_HITS)" | $BC`
MOST_RECENTLY_USED_GHOST_PERC=`echo "scale=2 ; (100*$MRU_GHOST_HITS/$ARC_HITS)" | $BC`
MOST_FREQUENTLY_USED_GHOST_PERC=`echo "scale=2 ; (100*$MFU_GHOST_HITS/$ARC_HITS)" | $BC`
DEMAND_DATA_HIT_PERC=`echo "scale=2 ; (100*$DEMAND_DATA_HITS/$ARC_HITS)" | $BC`
PREFETCH_DATA_HIT_PERC=`echo "scale=2 ; (100*$PREFETCH_DATA_HITS/$ARC_HITS)" | $BC`
DEMAND_METADATA_HIT_PERC=`echo "scale=2 ; (100*$DEMAND_METADATA_HITS/$ARC_HITS)" | $BC`
PREFETCH_METADATA_HIT_PERC=`echo "scale=2 ; (100*$PREFETCH_METADATA_HITS/$ARC_HITS)" | $BC`
else
ANONYMOUSLY_USED_PERC=0
MOST_RECENTLY_USED_PERC=0
MOST_FREQUENTLY_USED_PERC=0
MOST_RECENTLY_USED_GHOST_PERC=0
MOST_FREQUENTLY_USED_GHOST_PERC=0
DEMAND_DATA_HIT_PERC=0
PREFETCH_DATA_HIT_PERC=0
DEMAND_METADATA_HIT_PERC=0
PREFETCH_METADATA_HIT_PERC=0
fi
# miss breakdown (as a share of all ARC misses)
if [ $ARC_MISSES != 0 ]; then
PREFETCH_METADATA_MISSES_PERC=`echo "scale=2 ; (100*$PREFETCH_METADATA_MISSES/$ARC_MISSES)" | $BC`
DEMAND_DATA_MISS_PERC=`echo "scale=2 ; (100*$DEMAND_DATA_MISSES/$ARC_MISSES)" | $BC`
PREFETCH_DATA_MISS_PERC=`echo "scale=2 ; (100*$PREFETCH_DATA_MISSES/$ARC_MISSES)" | $BC`
DEMAND_METADATA_MISS_PERC=`echo "scale=2 ; (100*$DEMAND_METADATA_MISSES/$ARC_MISSES)" | $BC`
else
PREFETCH_METADATA_MISSES_PERC=0
DEMAND_DATA_MISS_PERC=0
PREFETCH_DATA_MISS_PERC=0
DEMAND_METADATA_MISS_PERC=0
fi
DMU_TOTAL=`echo "$DMU_HITS+$DMU_MISSES" | $BC`
if [ $DMU_TOTAL != 0 ]; then
DMU_HITS_PERC=`echo "scale=2 ; (100*$DMU_HITS/$DMU_TOTAL)" | $BC`
DMU_MISSES_PERC=`echo "scale=2 ; (100*$DMU_MISSES/$DMU_TOTAL)" | $BC`
else
DMU_HITS_PERC=0
DMU_MISSES_PERC=0
fi
# MFU size is inferred: ARC size (or the target size) minus the MRU target
if [ $SIZE -gt $TARGET_SIZE ]; then
MFU_SIZE=`echo "$SIZE-$MRU_SIZE" | $BC`
else
MFU_SIZE=`echo "$TARGET_SIZE-$MRU_SIZE" | $BC`
fi
L2_ACCESSES_TOTAL=`echo "$L2_HITS+$L2_MISSES" | $BC`
if [ $L2_ACCESSES_TOTAL -gt 0 ]; then
L2_HIT_RATIO_PERC=`echo "scale=2 ; (100*$L2_HITS/$L2_ACCESSES_TOTAL)" | $BC`
L2_MISS_RATIO_PERC=`echo "scale=2 ; (100*$L2_MISSES/$L2_ACCESSES_TOTAL)" | $BC`
else
L2_HIT_RATIO_PERC=0
L2_MISS_RATIO_PERC=0
fi
efficiency() {
    # ARC efficiency graph: "config" prints the graph definition (and
    # exits the plugin); any other argument prints the current values.
    if [ "$1" != "config" ]; then
        echo 'hits.value ' $CACHE_HIT_RATIO_PERC
        echo 'misses.value ' $CACHE_MISS_RATIO_PERC
        echo 'actual_hits.value ' $ACTUAL_HIT_RATIO_PERC
        echo 'data_demand_efficiency.value ' $DATA_DEMAND_EFFICIENCY_PERC
        echo 'data_prefetch_efficiency.value ' $DATA_PREFETCH_EFFICENCY_PERC
        return
    fi
    cat <<EOF
graph_title ZFS ARC Efficiency
graph_args -u 100
graph_vlabel %
graph_info This graph shows the ARC Efficiency
hits.label Hit Ratio
misses.label Miss Ratio
actual_hits.label Actual Hit Ratio
data_demand_efficiency.label Data Demand Efficiency
data_prefetch_efficiency.label Data Prefetch Efficiency
EOF
    exit 0
}
cachehitlist() {
    # Cache-hits-by-cache-list graph: "config" prints the definition and
    # exits; any other argument prints the current percentage values.
    if [ "$1" != "config" ]; then
        echo 'cache_list_anon.value ' $ANONYMOUSLY_USED_PERC
        echo 'cache_list_most_rec.value ' $MOST_RECENTLY_USED_PERC
        echo 'cache_list_most_freq.value ' $MOST_FREQUENTLY_USED_PERC
        echo 'cache_list_most_rec_ghost.value ' $MOST_RECENTLY_USED_GHOST_PERC
        echo 'cache_list_most_freq_ghost.value ' $MOST_FREQUENTLY_USED_GHOST_PERC
        return
    fi
    cat <<EOF
graph_title ZFS ARC Efficiency: Cache hits by cache list
graph_args -u 100
graph_vlabel %
graph_info This graph shows the ARC Efficiency
cache_list_anon.label Anonymously Used
cache_list_most_rec.label Most Recently Used
cache_list_most_freq.label Most Frequently Used
cache_list_most_rec_ghost.label Most Recently Used Ghost
cache_list_most_freq_ghost.label Most Frequently Used Ghost
EOF
    exit 0
}
cachehitdtype() {
    # Cache-hits-by-data-type graph: "config" prints the definition and
    # exits; any other argument prints the current percentage values.
    if [ "$1" != "config" ]; then
        echo 'data_type_demand_hits.value ' $DEMAND_DATA_HIT_PERC
        echo 'data_type_demand_misses.value ' $DEMAND_DATA_MISS_PERC
        echo 'data_type_prefetch_hits.value ' $PREFETCH_DATA_HIT_PERC
        echo 'data_type_prefetch_misses.value ' $PREFETCH_DATA_MISS_PERC
        echo 'data_type_demand_metadata_hits.value ' $DEMAND_METADATA_HIT_PERC
        echo 'data_type_demand_metadata_misses.value ' $DEMAND_METADATA_MISS_PERC
        echo 'data_type_prefetch_metadata_hits.value ' $PREFETCH_METADATA_HIT_PERC
        echo 'data_type_prefetch_metadata_misses.value ' $PREFETCH_METADATA_MISSES_PERC
        return
    fi
    cat <<EOF
graph_title ZFS ARC Efficiency: Cache hits by data type
graph_args -u 100
graph_vlabel %
graph_info This graph shows the ARC Efficiency
data_type_demand_hits.label Demand Data Hit Ratio
data_type_demand_misses.label Demand Data Miss Ratio
data_type_prefetch_hits.label Prefetch Data Hit Ratio
data_type_prefetch_misses.label Prefetch Data Miss Ratio
data_type_demand_metadata_hits.label Demand Metadata Hit Ratio
data_type_demand_metadata_misses.label Demand Metadata Miss Ratio
data_type_prefetch_metadata_hits.label Prefetch Metadata Hit Ratio
data_type_prefetch_metadata_misses.label Prefetch Metadata Miss Ratio
EOF
    exit 0
}
dmuprefetch() {
    # DMU prefetch graph: "config" prints the definition and exits;
    # any other argument prints the current hit/miss percentages.
    if [ "$1" != "config" ]; then
        echo 'hits.value ' $DMU_HITS_PERC
        echo 'misses.value ' $DMU_MISSES_PERC
        return
    fi
    cat <<EOF
graph_title ZFS DMU prefetch stats
graph_args -u 100
graph_vlabel %
graph_info This graph shows the DMU prefetch stats
hits.label Hit Ratio
misses.label Miss Ratio
EOF
    exit 0
}
utilization() {
    # ARC size graph: "config" prints the definition and exits;
    # any other argument prints the current sizes.
    if [ "$1" != "config" ]; then
        echo 'max_size.value ' $MAX_SIZE
        echo 'size.value ' $SIZE
        echo 'min_size.value ' $MIN_SIZE
        echo 'target_size.value ' $TARGET_SIZE
        echo 'recently_size.value ' $MRU_SIZE
        echo 'frequently_size.value ' $MFU_SIZE
        return
    fi
    # MEMMAX (total RAM in kB) caps the y-axis; expanded by the here-doc.
    cat <<EOF
graph_title ZFS ARC Size
graph_args --base 1024 -l 0 --vertical-label Bytes --upper-limit $MEMMAX
graph_vlabel Size in MB
graph_info This graph shows the ARC Size utilization
max_size.label Maximum Size
max_size.draw AREA
size.label Size
size.draw AREA
min_size.label Minimum Size
min_size.draw AREA
target_size.label Target Size
target_size.draw LINE1
recently_size.label Recently Used Cache Size
recently_size.draw LINE1
frequently_size.label Frequently Used Cache Size
frequently_size.draw LINE1
EOF
    exit 0
}
l2utilization() {
    # L2ARC size graph: "config" prints the definition and exits;
    # any other argument prints the current sizes.
    if [ "$1" != "config" ]; then
        echo 'size.value ' $L2_SIZE
        echo 'hdr_size.value ' $L2_HDR_SIZE
        return
    fi
    cat <<EOF
graph_title ZFS L2ARC Size
graph_args --base 1024 -r -l 0 --vertical-label Bytes
graph_vlabel Size in MB
graph_info This graph shows the L2ARC Size utilization
size.label Size
size.draw AREA
hdr_size.label Header Size
hdr_size.draw AREA
EOF
    exit 0
}
l2efficiency() {
    # L2ARC efficiency graph: "config" prints the graph definition;
    # any other argument prints the current hit/miss percentages.
    if [ "$1" = "config" ]; then
        echo 'graph_title ZFS L2ARC Efficiency'
        echo 'graph_args -u 100'
        echo 'graph_vlabel %'
        echo 'graph_info This graph shows the L2ARC Efficiency'
        echo 'l2_hits.label Hit Ratio'
        echo 'l2_misses.label Miss Ratio'
        # Consistency fix: every sibling function exits after "config";
        # this one previously fell through (harmless only because it is
        # the last statement the script would reach).
        exit 0
    else
        echo 'l2_hits.value ' $L2_HIT_RATIO_PERC
        echo 'l2_misses.value ' $L2_MISS_RATIO_PERC
    fi
}
# Every graph of this plugin belongs to the "fs" category
[ "$1" = "config" ] && echo "graph_category fs"
# Dispatch on the function name taken from the symlink suffix
# (zfs_stats_<function>); unknown suffixes fall through silently.
case "$FUNCTION" in
efficiency)
efficiency $1
;;
cachehitlist)
cachehitlist $1
;;
cachehitdtype)
cachehitdtype $1
;;
dmuprefetch)
dmuprefetch $1
;;
utilization)
utilization $1
;;
l2utilization)
l2utilization $1
;;
l2efficiency)
l2efficiency $1
;;
esac

267
zpool_capacity Normal file
View file

@ -0,0 +1,267 @@
#!/bin/bash
: << =cut
=head1 NAME
zpool_capacity - Munin plugin to monitor ZFS capacity
These functions are implemented:
capacity : to monitor zpool capacity %
allocated : to monitor zpool allocated bytes
dedup : to monitor zpool dedup and compress ratio
Tested with Solaris 10 and 11, OpenIndiana Hipster, FreeBSD 11, CentOS 7
=head1 CONFIGURATION
Make symlink:
cd /path/to/munin/etc/plugins
ln -s /path/to/munin/lib/plugins/zpool_capacity .
For FreeBSD, it should be necessary to change shebang /bin/bash -> /usr/local/bin/bash
For Linux, root privilege is necessary to run zpool command.
[zpool_capacity]
user root
=head1 ENVIRONMENT VARIABLES
critical : default 90
warning : default 80
=head1 AUTHOR
K.Cima https://github.com/shakemid
=head1 LICENSE
GPLv2
=head1 Magic markers
#%# family=contrib
#%# capabilities=autoconf
=cut
# Include plugin.sh (munin helpers: is_multigraph, clean_fieldname, ...)
. "${MUNIN_LIBDIR:-}/plugins/plugin.sh"
# Abort early unless the munin master supports multigraph plugins
is_multigraph "$@"
# Shell options
set -o nounset
# Global variables
plugin_name=zpool_capacity
# Sub-graphs emitted by this plugin
functions='capacity allocated dedup'
zpool_cmd=/sbin/zpool
zfs_cmd=/sbin/zfs
# Environment variables (capacity thresholds in percent)
: "${warning:=80}"
: "${critical:=90}"
# Note: The performance of ZFS may significantly degrade when zpool capacity > 90%
# See also: https://docs.oracle.com/cd/E53394_01/html/E54801/zfspools-4.html
# Functions
preconfig() {
# Build $global_attr / $data_attr for one sub-graph; the data sources
# are generated per pool from the caller-provided $pool_list.
local func="$1"
local p c
# data_attr format: field type draw label
# label can contain white-spaces.
data_attr=
case $func in
capacity)
global_attr="
graph_title ZFS storage pool - Capacity
graph_category fs
graph_args --base 1000 --lower-limit 0 --upper-limit 100
graph_vlabel % allocated
graph_info ZFS storage pool - Capacity
warning ${warning}
critical ${critical}
"
# one data source per pool
for p in $pool_list
do
data_attr="${data_attr}
${p} GAUGE LINE2 ${p}"
done
;;
allocated)
global_attr="
graph_title ZFS storage pool - Allocated bytes
graph_category fs
graph_args --base 1024 --lower-limit 0
graph_vlabel Bytes
graph_info ZFS storage pool - Allocated bytes
"
# size and allocated of the same pool share a colour (COLOUR<n>)
c=0
for p in $pool_list
do
data_attr="${data_attr}
${p}_size GAUGE LINE ${p} size
${p}_allocated GAUGE LINE2 ${p} allocated"
global_attr="${global_attr}
${p}_size.colour COLOUR${c}
${p}_allocated.colour COLOUR${c}"
c=$(( c + 1 ))
done
;;
dedup)
global_attr="
graph_title ZFS storage pool - Dedup and compress ratio
graph_category fs
graph_args --base 1000 --lower-limit 1
graph_vlabel Ratio
graph_info ZFS storage pool - Dedup and compress ratio
"
for p in $pool_list
do
data_attr="${data_attr}
${p}_dedup GAUGE LINE ${p} dedup
${p}_compress GAUGE LINE ${p} compress"
done
;;
esac
}
do_config() {
    # Print munin "config" output for one sub-graph.
    local func="$1"
    local label_max_length=45
    local field type draw label
    preconfig "$func"
    echo "multigraph ${plugin_name}_${func}"
    # Global attributes: strip the leading indent and drop blank lines.
    echo "$global_attr" | sed -e 's/^ *//' -e '/^$/d'
    # Data source attributes, one "field type draw label" record per line.
    while read -r field type draw label; do
        [ -n "$field" ] || continue
        field=$( clean_fieldname "$field" )
        echo "${field}.type ${type}"
        echo "${field}.draw ${draw}"
        echo "${field}.label ${label:0:${label_max_length}}"
        # DERIVE counters must never go negative.
        [ "$type" != 'DERIVE' ] || echo "${field}.min 0"
        # Fields labelled "dummy" are hidden helper series.
        [ "$label" != 'dummy' ] || echo "${field}.graph no"
    done <<< "$data_attr"
    echo
}
get_stats() {
# Print "key value" pairs for the given sub-graph to stdout.
local func="$1"
case $func in
capacity)
# strip the trailing percent sign: "45%" -> "45"
"$zpool_cmd" list -H -o name,capacity | sed 's/%$//'
;;
allocated)
# Emit <pool>_allocated and <pool>_size pairs, then convert the
# human-readable sizes (e.g. "1.5T") to plain bytes via perl.
( "$zpool_cmd" list -H -o name,allocated \
| awk '{ print $1"_allocated", $2 }'
"$zpool_cmd" list -H -o name,size \
| awk '{ print $1"_size", $2 }'
) \
| perl -ane '
@unit{ qw/ K M G T P E / } = ( 1 .. 6 );
$name = $F[0];
$byteu = $F[1];
( $n, $u ) = $byteu =~ /^([\d.]+)([KMGTPE]?)$/;
$byte = int( $n * 1024 ** ( $u ? $unit{ $u } : 0 ) );
print "$name $byte\n";
'
# Note: ZFS supports up to 16EB.
;;
dedup)
"$zpool_cmd" list -H -o name,dedup \
| sed 's/x$//' \
| awk '{ print $1"_dedup", $2 }'
# example output:
# $ zpool list -H -o name,dedup
# rpool 1.00x
# ...
"$zpool_cmd" list -H -o name \
| xargs "$zfs_cmd" get -H -o name,value compressratio \
| sed 's/x$//' \
| awk '{ print $1"_compress", $2 }'
# example output:
# $ zfs get -H -o name,value compressratio rpool
# rpool 1.00x
;;
esac
}
do_fetch() {
    # Print munin "fetch" output (current values) for one sub-graph.
    local func="$1"
    local zpool_stats field value
    # zpool_stats contains one "key value" pair per line
    zpool_stats=$( get_stats "$func" )
    echo "multigraph ${plugin_name}_${func}"
    while read -r field value; do
        field=$( clean_fieldname "$field" )
        echo "${field}.value ${value}"
    done <<< "$zpool_stats"
    echo
}
autoconf() {
    # Munin autoconf: usable when the zpool utility is executable.
    if [ -x "$zpool_cmd" ]
    then
        echo yes
        return
    fi
    echo "no (failed to find executable 'zpool')"
}
config() {
    # Emit configuration for every sub-graph; preconfig needs $pool_list.
    local f
    pool_list=$( "$zpool_cmd" list -H -o name )
    for f in $functions; do
        do_config "$f"
    done
}
fetch() {
    # Emit current values for every sub-graph in turn.
    local f
    for f in $functions; do
        do_fetch "$f"
    done
}
# Main
case ${1:-} in
autoconf)
autoconf
;;
config)
config
# With DIRTYCONFIG the master accepts values during the config run
if [ "${MUNIN_CAP_DIRTYCONFIG:-0}" = "1" ]; then fetch; fi
;;
*)
# default action: print current values
fetch
;;
esac
exit 0

127
zpool_iostat Normal file
View file

@ -0,0 +1,127 @@
#!/bin/sh
# -*- sh -*-
set -eu
: <<=cut
=head1 NAME
zpool_iostat - Plugin to monitor transfer statistics of ZFS pools
=head1 APPLICABLE SYSTEMS
All systems with "zpool" installed.
=head1 CONFIGURATION
No configuration is required.
=head1 INTERPRETATION
This plugin shows a graph with read (positive) and write (negative) values
for the IO transfer of each pool.
=head1 MAGIC MARKERS
#%# family=auto
#%# capabilities=autoconf
=head1 AUTHOR
tsaavik <github@hellspark.com>
Peter Doherty <peterd@acranox.org>
Lars Kruse <devel@sumpfralle.de>
=head1 LICENSE
GPLv2
=cut
# shellcheck source=/usr/share/munin/plugins/plugin.sh
. "$MUNIN_LIBDIR/plugins/plugin.sh"
ZPOOL_BIN=/sbin/zpool
ACTION="${1:-}"
# autoconf: report whether the zpool utility is available
if [ "$ACTION" = "autoconf" ]; then
if [ -x "$ZPOOL_BIN" ]; then
echo yes
else
echo "no (missing executable '$ZPOOL_BIN')"
fi
exit 0
fi
# Height (in lines) of one "zpool iostat -v" report; the sed strips the
# padding that some wc implementations prepend.
zlines=$("$ZPOOL_BIN" iostat -v | wc -l | sed 's/ //g')
# Keep the last $zlines lines of the run, i.e. one complete report.
# NOTE(review): with "-v 1 1" this appears to be the since-boot report;
# confirm the intended sampling on all supported platforms.
iostats=$("$ZPOOL_BIN" iostat -v 1 1 | tail "-$zlines")
# Device/pool names: skip header rows (containing alloc/avail) and
# raidz/mirror aggregation rows; keep rows carrying per-device columns.
zlist=$(echo "$iostats" \
| awk '/alloc/ {next}; /avail/ {next}; /raid/ {next}; /mirror/ {next};
{ if ( $4 >=0 ) print $1}' \
| tr ' ' '\n')
# Parse the n'th column of the iostat output for a given pool or disk as a
# number (interpreting K and M suffixes).
get_device_iostat_column() {
    # Parse the n'th iostat column for the given pool/disk label and
    # normalize the value to kB (so read/write rates graph as KBytes/s).
    local device_label="$1"
    local stat_column="$2"
    # Pass shell values into awk via -v instead of splicing them into the
    # program text (avoids quoting/injection problems with odd names).
    # Suffix handling: values may carry K/M/G/T suffixes; a plain number
    # is bytes.  G and T are new here -- previously such values produced
    # an empty output (and thus an empty munin value).
    echo "$iostats" \
        | awk -v label="$device_label" -v col="$stat_column" \
            '$1 == label { print $col }' \
        | awk '/T/ {print int($1)*1000000000};
            /G/ {print int($1)*1000000};
            /M/ {print int($1)*1000};
            /K/ {print int($1)};
            /[0-9]$/ {print int($1)/1000}'
}
get_device_fieldname() {
    # Map a pool/disk name to a munin fieldname.
    # Backwards compatibility (until 2016): keep the unprefixed pool name
    # for the fieldname, except for pool names starting with digits.
    local device_id="$1"
    case "$device_id" in
        [0-9]*)
            clean_fieldname "_$device_id"
            ;;
        *)
            clean_fieldname "$device_id"
            ;;
    esac
}
if [ "$ACTION" = "config" ]; then
echo 'graph_title zpool iostat'
echo 'graph_args --base 1000 -l 0'
echo 'graph_vlabel write (-) / read (+) KBytes/s'
echo 'graph_category disk'
echo 'graph_scale no'
echo 'graph_info This graph shows zpool iostat'
# Assemble the "graph_order" as a sorted list of read/write pairs for
# each device.
printf "graph_order"
echo "$zlist" | while read -r device_id; do
fieldname="$(get_device_fieldname "$device_id")"
printf " %s_read %s_write" "$fieldname" "$fieldname"
done
# finalize the 'graph_order' with a newline
echo
# output all fields: write as negative numbers and read as positive
echo "$zlist" | while read -r device_id; do
fieldname="$(get_device_fieldname "$device_id")"
echo "${fieldname}_read.label $device_id"
echo "${fieldname}_read.type GAUGE"
# the read series is drawn implicitly through the write field's .negative
echo "${fieldname}_read.graph no"
echo "${fieldname}_write.label $device_id"
echo "${fieldname}_write.type GAUGE"
echo "${fieldname}_write.negative ${fieldname}_read"
done
exit 0
fi
# Default action: print current values.  Columns 6 and 7 of the
# "zpool iostat -v" report are the read and write bandwidth.
echo "$zlist" | while read -r device_id; do
fieldname="$(get_device_fieldname "$device_id")"
echo "${fieldname}_read.value $(get_device_iostat_column "$device_id" 6)"
echo "${fieldname}_write.value $(get_device_iostat_column "$device_id" 7)"
done