diff --git a/README.md b/README.md
index 0edb2bb0..276e4662 100644
--- a/README.md
+++ b/README.md
@@ -1,4 +1,4 @@
-[![Build Status](https://secure.travis-ci.org/ganglia/gmond_python_modules.png)](http://travis-ci.org/ganglia/gmond_python_modules)
+[![Build Status](https://secure.travis-ci.org/ganglia/gmond_python_modules.png)](http://travis-ci.org/ganglia/gmond_python_modules)
 
 This is the official repository for hosting all user-contributed Gmond Python DSO metric modules.
 
@@ -9,7 +9,7 @@
 If you have any questions, you could reach us at:
 
 ganglia-developers@lists.sourceforge.net
-(subscription required: http://lists.sourceforge.net/lists/listinfo/ganglia-developers)
+(subscription required: http://lists.sourceforge.net/lists/listinfo/ganglia-developers)
 
 Alternatively, you could join our IRC channel on irc.freenode.net #ganglia and ping one of the developers.
diff --git a/vmax/vmax.py b/vmax/vmax.py
index 2bfdb28a..4e6d357e 100755
--- a/vmax/vmax.py
+++ b/vmax/vmax.py
@@ -1,9 +1,9 @@
-#!/usr/bin/python
-# Name: vmax.py
-# Desc: Ganglia module for polling the VMAX UniSphere REST interface for metrics
-# Author: Evan Fraser evan.fraser@trademe.co.nz
-# Date: July 2014
-# Copyright: GPL
+#!/usr/bin/python
+# Name: vmax.py
+# Desc: Ganglia module for polling the VMAX UniSphere REST interface for metrics
+# Author: Evan Fraser evan.fraser@trademe.co.nz
+# Date: July 2014
+# Copyright: GPL
 
 import json, os, pprint, re, requests, socket, sys, time
 
@@ -12,7 +12,7 @@
 unispherePort = 8443
 
-#This is the minimum interval between querying unisphere for metrics
+#This is the minimum interval between querying unisphere for metrics
 METRICS_CACHE_MAX = 300
 
 METRICS = {
@@ -103,8 +103,8 @@ def get_metric(name):
             print "Short response"
             pprint.pprint(responseObj)
 
-        ### Now get the pool based metrics for each vmax.
-        #Start by getting the list of pools
+        ### Now get the pool based metrics for each vmax.
+        #Start by getting the list of pools
         baseurl = 'https://' + vmax_dict[key]['unisphereIP'] + ':' + str(unispherePort) + '/univmax/restapi/performance/ThinPool/keys'
         requestObj = {'thinPoolKeyParam':
@@ -117,12 +117,12 @@ def get_metric(name):
         headers = {'content-type': 'application/json','accept':'application/json'} #set the headers for how we want the response
 
-        #make the actual request, specifying the URL, the JSON from above, standard basic auth, the headers and not to verify the SSL cert.
+        #make the actual request, specifying the URL, the JSON from above, standard basic auth, the headers and not to verify the SSL cert.
         r = requests.post(baseurl, requestJSON, auth=(vmax_dict[key]['user'], vmax_dict[key]['pass']), headers=headers, verify=False)
 
-        #take the raw response text and deserialize it into a python object.
+        #take the raw response text and deserialize it into a python object.
         try:
             responseObj = json.loads(r.text)
         except:
@@ -145,10 +145,10 @@ def get_metric(name):
         headers = {'content-type': 'application/json','accept':'application/json'} #set the headers for how we want the response
 
-        #make the actual request, specifying the URL, the JSON from above, standard basic auth, the headers and not to verify the SSL cert.
+        #make the actual request, specifying the URL, the JSON from above, standard basic auth, the headers and not to verify the SSL cert.
         r = requests.post(baseurl, requestJSON, auth=(vmax_dict[key]['user'], vmax_dict[key]['pass']), headers=headers, verify=False)
 
-        #take the raw response text and deserialize it into a python object.
+        #take the raw response text and deserialize it into a python object.
         try:
             responseObj = json.loads(r.text)
         except:
@@ -180,14 +180,14 @@ def get_metric(name):
 
     return METRICS['data'][name]
 
-# define_metrics will run an snmp query on an ipaddr, find interfaces, build descriptors and set spoof_host
-# define_metrics is called from metric_init
+# define_metrics will run an snmp query on an ipaddr, find interfaces, build descriptors and set spoof_host
+# define_metrics is called from metric_init
 def define_metrics(Desc_Skel, unisphereIP, sid, site):
     global vmax_dict
     spoof_string = unisphereIP + ':vmax_' + site
     vmax_name = 'vmax_' + site
-    #FE Cache Hits/s
+    #FE Cache Hits/s
     descriptors.append(create_desc(Desc_Skel, {
         "name" : vmax_name + '_cache_hits',
         "units" : "iops",
@@ -195,7 +195,7 @@ def define_metrics(Desc_Skel, unisphereIP, sid, site):
         "groups" : "iops",
         "spoof_host" : spoof_string,
     }))
-    #FE Read IOPs
+    #FE Read IOPs
     descriptors.append(create_desc(Desc_Skel, {
         "name" : vmax_name + '_fe_reads',
         "units" : "iops",
@@ -203,7 +203,7 @@ def define_metrics(Desc_Skel, unisphereIP, sid, site):
         "groups" : "iops",
         "spoof_host" : spoof_string,
     }))
-    #FE Write IOPs
+    #FE Write IOPs
     descriptors.append(create_desc(Desc_Skel, {
         "name" : vmax_name + '_fe_writes',
         "units" : "iops",
@@ -211,7 +211,7 @@ def define_metrics(Desc_Skel, unisphereIP, sid, site):
         "groups" : "iops",
         "spoof_host" : spoof_string,
     }))
-    #Array MB_READ_PER_SEC
+    #Array MB_READ_PER_SEC
     descriptors.append(create_desc(Desc_Skel, {
         "name" : vmax_name + '_megabytes_read',
         "units" : "MB/s",
@@ -219,7 +219,7 @@ def define_metrics(Desc_Skel, unisphereIP, sid, site):
         "groups" : "Throughput",
         "spoof_host" : spoof_string,
     }))
-    #Array MB_WRITE_PER_SEC
+    #Array MB_WRITE_PER_SEC
     descriptors.append(create_desc(Desc_Skel, {
         "name" : vmax_name + '_megabytes_written',
         "units" : "MB/s",
@@ -227,7 +227,7 @@ def define_metrics(Desc_Skel, unisphereIP, sid, site):
         "groups" : "Throughput",
         "spoof_host" : spoof_string,
     }))
-    #Array RESPONSE_TIME_READ
+    #Array RESPONSE_TIME_READ
     descriptors.append(create_desc(Desc_Skel, {
         "name" : vmax_name + '_response_time_read',
         "units" : "ms",
@@ -235,7 +235,7 @@ def define_metrics(Desc_Skel, unisphereIP, sid, site):
         "groups" : "Latency",
         "spoof_host" : spoof_string,
     }))
-    #Array RESPONSE_TIME_WRITE
+    #Array RESPONSE_TIME_WRITE
     descriptors.append(create_desc(Desc_Skel, {
         "name" : vmax_name + '_response_time_write',
         "units" : "ms",
@@ -243,7 +243,7 @@ def define_metrics(Desc_Skel, unisphereIP, sid, site):
         "groups" : "Latency",
         "spoof_host" : spoof_string,
     }))
-    #Total Volume IO_RATE
+    #Total Volume IO_RATE
     descriptors.append(create_desc(Desc_Skel, {
         "name" : vmax_name + '_vol_iorate',
         "units" : "iops",
@@ -252,7 +252,7 @@ def define_metrics(Desc_Skel, unisphereIP, sid, site):
         "spoof_host" : spoof_string,
     }))
 
-    ###Perform API query to get list of Thinpools
+    ###Perform API query to get list of Thinpools
 
     baseurl = 'https://' + unisphereIP + ':8443/univmax/restapi/performance/ThinPool/keys'
 
@@ -298,7 +298,7 @@ def define_metrics(Desc_Skel, unisphereIP, sid, site):
             "spoof_host" : spoof_string,
         }))
 
-        #BE_RESPONSE_TIME_READ
+        #BE_RESPONSE_TIME_READ
         descriptors.append(create_desc(Desc_Skel, {
             "name" : str(vmax_name + '_' + pool["poolId"] + '_response_time_reads'),
             "units" : "ms",
@@ -307,7 +307,7 @@ def define_metrics(Desc_Skel, unisphereIP, sid, site):
             "spoof_host" : spoof_string,
         }))
 
-        #BE_RESPONSE_TIME_WRITE
+        #BE_RESPONSE_TIME_WRITE
         descriptors.append(create_desc(Desc_Skel, {
             "name" : str(vmax_name + '_' + pool["poolId"] + '_response_time_writes'),
             "units" : "ms",
@@ -315,7 +315,7 @@ def define_metrics(Desc_Skel, unisphereIP, sid, site):
             "groups" : "Latency",
             "spoof_host" : spoof_string,
         }))
-        #BE_MB_READ_RATE
+        #BE_MB_READ_RATE
         descriptors.append(create_desc(Desc_Skel, {
             "name" : str(vmax_name + '_' + pool["poolId"] + '_megabytes_read'),
            "units" : "MB/s",
@@ -323,7 +323,7 @@ def define_metrics(Desc_Skel, unisphereIP, sid, site):
             "groups" : "Throughput",
             "spoof_host" : spoof_string,
         }))
-        #BE_MB_WRITE_RATE
+        #BE_MB_WRITE_RATE
         descriptors.append(create_desc(Desc_Skel, {
             "name" : str(vmax_name + '_' + pool["poolId"] + '_megabytes_written'),
             "units" : "MB/s",
@@ -341,7 +341,7 @@ def metric_init(params):
     print '[switch] Received the following parameters'
     print params
 
-    #Import the params into the global NIPARAMS
+    #Import the params into the global NIPARAMS
 
     Desc_Skel = {
         'name'        : 'XXX',
@@ -355,11 +355,11 @@ def metric_init(params):
         'groups'      : 'switch',
     }
 
-    # Find all the vmax's passed in params
+    # Find all the vmax's passed in params
     for vmax in vmax_dict:
         # pass skel, ip and name to define_metrics to create descriptors
         descriptors = define_metrics(Desc_Skel, vmax_dict[vmax]['unisphereIP'], vmax_dict[vmax]['sid'], vmax_dict[vmax]['site'])
 
-    #Return the descriptors back to gmond
+    #Return the descriptors back to gmond
     return descriptors
 
 def create_desc(skel, prop):
@@ -373,7 +373,7 @@ def metric_cleanup():
     '''Clean up the metric module.'''
     pass
 
-# For CLI Debuging:
+# For CLI Debuging:
 if __name__ == '__main__':
     params = {
     }
diff --git a/zfs_arc/python_modules/zfs_arc.py b/zfs_arc/python_modules/zfs_arc.py
index ff05fb9e..1911d700 100644
--- a/zfs_arc/python_modules/zfs_arc.py
+++ b/zfs_arc/python_modules/zfs_arc.py
@@ -1,24 +1,24 @@
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions are met:
-#     * Redistributions of source code must retain the above copyright
-#       notice, this list of conditions and the following disclaimer.
-#     * Redistributions in binary form must reproduce the above copyright
-#       notice, this list of conditions and the following disclaimer in the
-#       documentation and/or other materials provided with the distribution.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
-# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-# DISCLAIMED. IN NO EVENT SHALL THE UNIVERSITY OF CALIFORNIA BE LIABLE FOR ANY
-# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
-# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
-# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
-# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-# gmond python module for collection ZFS ARC stats. Based on the
-# arcstat command line tool: http://github.com/mharsch/arcstat
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are met:
+#     * Redistributions of source code must retain the above copyright
+#       notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above copyright
+#       notice, this list of conditions and the following disclaimer in the
+#       documentation and/or other materials provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+# DISCLAIMED. IN NO EVENT SHALL THE UNIVERSITY OF CALIFORNIA BE LIABLE FOR ANY
+# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+# gmond python module for collection ZFS ARC stats. Based on the
+# arcstat command line tool: http://github.com/mharsch/arcstat
 
 import abc
 import copy
@@ -44,125 +44,124 @@
     'groups'      : 'zfs_arc'
 }
 
-
-METRICS = [
-    {'name': 'hits',
-     'description': 'ARC reads per second',
-     'units': 'hits/s'},
-    {'name': 'misses',
-     'description': 'ARC misses per second',
-     'units': 'misses/s'},
-    {'name': 'read',
-     'description': 'Total ARC accesses per second',
-     'units': 'reads/s'},
-    {'name': 'hit_percent',
-     'description': 'ARC Hit percentage',
-     'units': 'percent',
-     'value_type': 'double',
-     'format': '%f'},
-    {'name': 'miss_percent',
-     'description': 'ARC miss percentage',
-     'units': 'percent',
-     'value_type': 'double',
-     'format': '%f'},
-    {'name': 'dhit',
-     'description': 'Demand Data hits per second',
-     'units': 'hits/s'},
-    {'name': 'dmis',
-     'description': 'Demand Data misses per second',
-     'units': 'misses/s'},
-    {'name': 'dh_percent',
-     'description': 'Demand Data hit percentage',
-     'units': 'percent',
-     'value_type': 'double',
-     'format': '%f'},
-    {'name': 'dm_percent',
-     'description': 'Demand Data miss percentage',
-     'units': 'percent',
-     'value_type': 'double',
-     'format': '%f'},
-    {'name': 'phit',
-     'description': 'Prefetch hits per second',
-     'units': 'hits/s'},
-    {'name': 'pmis',
-     'description': 'Prefetch misses per second',
-     'units': 'misses/s'},
-    {'name': 'ph_percent',
-     'description': 'Prefetch hits percentage',
-     'units': 'percent',
-     'value_type': 'double',
-     'format': '%f'},
-    {'name': 'pm_percent',
-     'description': 'Prefetch miss percentage',
-     'units': 'percent',
-     'value_type': 'double',
-     'format': '%f'},
-    {'name': 'mhit',
-     'description': 'Metadata hits per second',
-     'units': 'hits/s'},
-    {'name': 'mmis',
-     'description': 'Metadata misses per second',
-     'units': 'misses/s'},
-    {'name': 'mread',
-     'description': 'Metadata accesses per second',
-     'units': 'accesses/s'},
-    {'name': 'mh_percent',
-     'description': 'Metadata hit percentage',
-     'units': 'percent',
-     'value_type': 'double',
-     'format': '%f'},
-    {'name': 'mm_percent',
-     'description': 'Metadata miss percentage',
-     'units': 'percent',
-     'value_type': 'double',
-     'format': '%f'},
-    {'name': 'size',
-     'description': 'ARC Size',
-     'units': 'bytes'},
-    {'name': 'c',
-     'description': 'ARC Target Size',
-     'units': 'bytes'},
-    {'name': 'mfu',
-     'description': 'MFU List hits per second',
-     'units': 'hits/s'},
-    {'name': 'mru',
-     'description': 'MRU List hits per second',
-     'units': 'hits/s'},
-    {'name': 'mfug',
-     'description': 'MFU Ghost List hits per second',
-     'units': 'hits/s'},
-    {'name': 'mrug',
-     'description': 'MRU Ghost List hits per second',
-     'units': 'hits/s'},
-    {'name': 'eskip',
-     'description': 'evict_skip per second',
-     'units': 'hits/s'},
-    {'name': 'mtxmis',
-     'description': 'mutex_miss per second',
-     'units': 'misses/s'},
-    {'name': 'rmis',
-     'description': 'recycle_miss per second',
-     'units': 'misses/s'},
-    {'name': 'dread',
-     'description': 'Demand data accesses per second',
-     'units': 'accesses/s'},
-    {'name': 'pread',
-     'description': 'Prefetch accesses per second',
-     'units': 'accesses/s'},
-    {'name': 'l2hits',
-     'description': 'L2ARC hits per second',
-     'units': 'hits/s'},
-    {'name': 'l2misses',
-     'description': 'L2ARC misses per second',
-     'units': 'misses/s'},
-    {'name': 'l2read',
-     'description': 'Total L2ARC accesses per second',
-     'units': 'reads/s'},
-    {'name': 'l2hit_percent',
-     'description': 'L2ARC access hit percentage',
-     'units': 'percent',
-     'value_type': 'double',
-     'format': '%f'},
+
+METRICS = [
+    {'name': 'hits',
+     'description': 'ARC reads per second',
+     'units': 'hits/s'},
+    {'name': 'misses',
+     'description': 'ARC misses per second',
+     'units': 'misses/s'},
+    {'name': 'read',
+     'description': 'Total ARC accesses per second',
+     'units': 'reads/s'},
+    {'name': 'hit_percent',
+     'description': 'ARC Hit percentage',
+     'units': 'percent',
+     'value_type': 'double',
+     'format': '%f'},
+    {'name': 'miss_percent',
+     'description': 'ARC miss percentage',
+     'units': 'percent',
+     'value_type': 'double',
+     'format': '%f'},
+    {'name': 'dhit',
+     'description': 'Demand Data hits per second',
+     'units': 'hits/s'},
+    {'name': 'dmis',
+     'description': 'Demand Data misses per second',
+     'units': 'misses/s'},
+    {'name': 'dh_percent',
+     'description': 'Demand Data hit percentage',
+     'units': 'percent',
+     'value_type': 'double',
+     'format': '%f'},
+    {'name': 'dm_percent',
+     'description': 'Demand Data miss percentage',
+     'units': 'percent',
+     'value_type': 'double',
+     'format': '%f'},
+    {'name': 'phit',
+     'description': 'Prefetch hits per second',
+     'units': 'hits/s'},
+    {'name': 'pmis',
+     'description': 'Prefetch misses per second',
+     'units': 'misses/s'},
+    {'name': 'ph_percent',
+     'description': 'Prefetch hits percentage',
+     'units': 'percent',
+     'value_type': 'double',
+     'format': '%f'},
+    {'name': 'pm_percent',
+     'description': 'Prefetch miss percentage',
+     'units': 'percent',
+     'value_type': 'double',
+     'format': '%f'},
+    {'name': 'mhit',
+     'description': 'Metadata hits per second',
+     'units': 'hits/s'},
+    {'name': 'mmis',
+     'description': 'Metadata misses per second',
+     'units': 'misses/s'},
+    {'name': 'mread',
+     'description': 'Metadata accesses per second',
+     'units': 'accesses/s'},
+    {'name': 'mh_percent',
+     'description': 'Metadata hit percentage',
+     'units': 'percent',
+     'value_type': 'double',
+     'format': '%f'},
+    {'name': 'mm_percent',
+     'description': 'Metadata miss percentage',
+     'units': 'percent',
+     'value_type': 'double',
+     'format': '%f'},
+    {'name': 'size',
+     'description': 'ARC Size',
+     'units': 'bytes'},
+    {'name': 'c',
+     'description': 'ARC Target Size',
+     'units': 'bytes'},
+    {'name': 'mfu',
+     'description': 'MFU List hits per second',
+     'units': 'hits/s'},
+    {'name': 'mru',
+     'description': 'MRU List hits per second',
+     'units': 'hits/s'},
+    {'name': 'mfug',
+     'description': 'MFU Ghost List hits per second',
+     'units': 'hits/s'},
+    {'name': 'mrug',
+     'description': 'MRU Ghost List hits per second',
+     'units': 'hits/s'},
+    {'name': 'eskip',
+     'description': 'evict_skip per second',
+     'units': 'hits/s'},
+    {'name': 'mtxmis',
+     'description': 'mutex_miss per second',
+     'units': 'misses/s'},
+    {'name': 'rmis',
+     'description': 'recycle_miss per second',
+     'units': 'misses/s'},
+    {'name': 'dread',
+     'description': 'Demand data accesses per second',
+     'units': 'accesses/s'},
+    {'name': 'pread',
+     'description': 'Prefetch accesses per second',
+     'units': 'accesses/s'},
+    {'name': 'l2hits',
+     'description': 'L2ARC hits per second',
+     'units': 'hits/s'},
+    {'name': 'l2misses',
+     'description': 'L2ARC misses per second',
+     'units': 'misses/s'},
+    {'name': 'l2read',
+     'description': 'Total L2ARC accesses per second',
+     'units': 'reads/s'},
+    {'name': 'l2hit_percent',
+     'description': 'L2ARC access hit percentage',
+     'units': 'percent',
+     'value_type': 'double',
+     'format': '%f'},
     {'name': 'l2miss_percent',
      'description': 'L2ARC access miss percentage',
      'units': 'percent',