David Pilibosian
Cadet
- Joined
- Feb 4, 2017
- Messages
- 3
So I've spent some time rewriting the zfs-snmp module that's included with FreeNAS. I've fixed a couple of bugs and rewritten it to take advantage of the pass_persist method, this makes it significantly faster and consumes far fewer resources than the previous method. I've also added replication monitoring to it, I've done this to make use of at work but I figured I'd contribute it back.
There are a couple of issues though, primarily the version of snmp_passpersist that's included with FreeNAS 11 is not compatible with Python 3.6 and needs to be updated to the latest version available on GitHub, also I'm not sure what to do with the license on it, I based it on this example https://github.com/nagius/cxm/blob/master/misc/snmp_xen.py and it's licensed under GPL but the original script is BSD licensed.
I've attached the rewritten version, if anyone wants to test it I would appreciate it. To test it replace the file /usr/local/bin/freenas-snmp/zfs-snmp with zfs-snmp located in the attached zip file and change the line
pass .1.3.6.1.4.1.25359.1 in /usr/local/etc/snmpd.conf to pass_persist .1.3.6.1.4.1.25359.1 then restart the snmpd service
/usr/local/lib/python3.6/site-packages/snmp_passpersist.py will also need to be replaced with the attached version.
If anyone has anything else they would like added to it please let me know and I'll see what I can do. Also is there anything in particular I need to do to commit this to the FreeNAS GitHub once I feel it's ready?
Code for anyone who doesn't trust random zip files from internet strangers.
zfs-snmp
snmp_passpersist.py
There are a couple of issues though, primarily the version of snmp_passpersist that's included with FreeNAS 11 is not compatible with Python 3.6 and needs to be updated to the latest version available on GitHub, also I'm not sure what to do with the license on it, I based it on this example https://github.com/nagius/cxm/blob/master/misc/snmp_xen.py and it's licensed under GPL but the original script is BSD licensed.
I've attached the rewritten version, if anyone wants to test it I would appreciate it. To test it replace the file /usr/local/bin/freenas-snmp/zfs-snmp with zfs-snmp located in the attached zip file and change the line
pass .1.3.6.1.4.1.25359.1 in /usr/local/etc/snmpd.conf to pass_persist .1.3.6.1.4.1.25359.1 then restart the snmpd service
/usr/local/lib/python3.6/site-packages/snmp_passpersist.py will also need to be replaced with the attached version.
If anyone has anything else they would like added to it please let me know and I'll see what I can do. Also is there anything in particular I need to do to commit this to the FreeNAS GitHub once I feel it's ready?
Code for anyone who doesn't trust random zip files from internet strangers.
zfs-snmp
Code:
#!/usr/local/bin/python3.6 -u
# Copyright (c) 2012, Jakob Borg
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * The name of the author may not be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY JAKOB BORG ''AS IS'' AND ANY EXPRESS OR IMPLIED
# WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
# EVENT SHALL JAKOB BORG BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
# OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
# IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY
# OF SUCH DAMAGE.
# Initial code taken from: https://github.com/jm66/solaris-extra-snmp
import errno
import json
import os
import re
import socket
import subprocess
import sys
import time
import django
import libzfs
import syslog
import snmp_passpersist as snmp
# Django must be configured before the freenasUI models can be imported below.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'freenasUI.settings')
sys.path.append("/usr/local/www")
django.setup()
from freenasUI.storage.models import Replication
from freenasUI.tools.arc_summary import get_Kstat, get_arc_efficiency

POOLING_INTERVAL = 10  # Update interval, in seconds
MAX_RETRY = 10  # Number of successive retries allowed in case of error
OID_BASE = '.1.3.6.1.4.1.25359.1'  # Root of the exposed MIB subtree
pp = None  # snmp.PassPersist instance, created in main()
ARC = get_arc_efficiency(get_Kstat())  # ARC hit/miss ratios, sampled once at startup
FREENASSNMPDSOCK = '/var/run/freenas-snmpd.sock'  # freenas-snmpd UNIX socket path
# Suffix multipliers used by unprettyprint() to turn "1K"-style strings into bytes.
size_dict = {
    "K": 1024,
    "M": 1048576,
    "G": 1073741824,
    "T": 1099511627776
}
def get_from_freenas_snmpd_sock(val_to_obtain, sock_path='/var/run/freenas-snmpd.sock'):
    """
    Query the freenas-snmpd daemon over its UNIX socket and return the
    decoded JSON response.

    val_to_obtain -- bytes command understood by freenas-snmpd (e.g. b"get_all")
    sock_path -- path of the daemon's UNIX socket; defaults to the standard
                 FreeNAS location (see FREENASSNMPDSOCK)

    Returns a dict parsed from the daemon's JSON reply, or an empty dict
    when the socket is unreachable or the reply is not valid JSON.
    """
    data = b''
    try:
        # The context manager guarantees the socket is closed on every path.
        # The previous code called s.close() in a finally clause, which
        # raised an UnboundLocalError if socket.socket() itself failed.
        with socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) as s:
            s.connect(sock_path)
            s.sendall(val_to_obtain)
            while True:
                text = s.recv(4096)
                if not text:
                    break
                data += text
    except socket.error:
        # Best effort: an unreachable daemon simply yields empty data.
        pass
    try:
        return json.loads(data.decode('utf8'))
    except (ValueError, UnicodeDecodeError):
        return {}
def update_data():
    """
    Update callback invoked by snmp_passpersist every POOLING_INTERVAL seconds.

    Rebuilds the whole MIB subtree: per-pool rows (1.x), ARC statistics
    (2.x), L2ARC statistics (3.x), zvol rows (5.x), ZIL counters (6.x),
    dataset rows (7.x) and replication task rows (8.x).
    """
    global pp
    # "all" = counters since boot, 1 = the last one-second sample.
    all_iostat = zpoolio_interval("all")
    onesec_iostat = zpoolio_interval(1)
    zfs = libzfs.ZFS()
    pools = [pool for pool in zfs.pools]
    datasets = []
    zvols = []
    # Only enabled replication tasks are exposed over SNMP.
    qs = Replication.objects.filter(repl_enabled=True)
    # --- ARC statistics (subtree 2.x) ---
    pp.add_gau('2.1.0', zfs_arc_size())
    pp.add_gau('2.2.0', zfs_arc_meta())
    pp.add_gau('2.3.0', zfs_arc_data())
    pp.add_cnt_32bit('2.4.0', zfs_arc_hits())
    pp.add_cnt_32bit('2.5.0', zfs_arc_misses())
    pp.add_gau('2.6.0', zfs_arc_c())
    pp.add_gau('2.7.0', zfs_arc_p())
    pp.add_str('2.8.0', zfs_arc_miss_percent())
    pp.add_str('2.9.0', zfs_arc_cache_hit_ratio())
    pp.add_str('2.10.0', zfs_arc_cache_miss_ratio())
    # --- L2ARC statistics (subtree 3.x) ---
    pp.add_cnt_32bit('3.1.0', zfs_l2arc_hits())
    pp.add_cnt_32bit('3.2.0', zfs_l2arc_misses())
    pp.add_cnt_32bit('3.3.0', zfs_l2arc_read())
    pp.add_cnt_32bit('3.4.0', zfs_l2arc_write())
    pp.add_gau('3.5.0', zfs_l2arc_size())
    # --- ZIL operation counters (subtree 6.x) ---
    pp.add_cnt_64bit('6.1.0', zfs_zilstat_ops1())
    pp.add_cnt_64bit('6.2.0', zfs_zilstat_ops5())
    pp.add_cnt_64bit('6.3.0', zfs_zilstat_ops10())
    for res in map(zfs_segregate, [pool.root_dataset for pool in pools]):
        # Excluding the first item in the datasets list as it's always the
        # root_dataset and we already have the stats on that (i.e. the pool!)
        datasets.extend(res[0][1:])
        zvols.extend(res[1])
    # --- Per-pool rows (subtree 1.x) ---
    for i, zpool in enumerate(pools):
        stri = str(i + 1)
        pool = zpool.name
        pool_health = zpool.properties['health'].value
        # Dividing by 1024 to get to KB
        pool_used = unprettyprint(zpool.root_dataset.properties['used'].value) / 1024
        pool_available = unprettyprint(zpool.root_dataset.properties['available'].value) / 1024
        pool_size = unprettyprint(zpool.properties['size'].value) / 1024
        pp.add_str('1.1.' + stri, pool)
        pp.add_cnt_64bit('1.2.' + stri, pool_available)
        pp.add_cnt_64bit('1.3.' + stri, pool_used)
        pp.add_str('1.4.' + stri, pool_health)
        pp.add_cnt_64bit('1.5.' + stri, pool_size)
        # Gauges divide a further /1024, i.e. they are reported in MB.
        pp.add_gau('1.12.' + stri, pool_available / 1024)
        pp.add_gau('1.13.' + stri, pool_used / 1024)
        pp.add_gau('1.14.' + stri, pool_size / 1024)
        pp.add_cnt_64bit('1.15.' + stri, all_iostat[pool]['opread'])
        pp.add_cnt_64bit('1.16.' + stri, all_iostat[pool]['opwrite'])
        pp.add_cnt_64bit('1.17.' + stri, all_iostat[pool]['bwread'])
        pp.add_cnt_64bit('1.18.' + stri, all_iostat[pool]['bwrite'])
        # One-second samples may be missing right after startup; default to 0.
        pp.add_cnt_64bit('1.19.' + stri, onesec_iostat[pool].get('opread', 0))
        pp.add_cnt_64bit('1.20.' + stri, onesec_iostat[pool].get('opwrite', 0))
        pp.add_cnt_64bit('1.21.' + stri, onesec_iostat[pool].get('bwread', 0))
        pp.add_cnt_64bit('1.22.' + stri, onesec_iostat[pool].get('bwrite', 0))
    # --- Per-zvol rows (subtree 5.x) ---
    for i, zvol in enumerate(zvols):
        stri = str(i + 1)
        volsize = unprettyprint(zvol.properties['volsize'].value) / 1024
        vol_used = unprettyprint(zvol.properties['used'].value) / 1024
        vol_available = unprettyprint(zvol.properties['available'].value) / 1024
        pp.add_str('5.1.' + stri, zvol.name)
        pp.add_cnt_64bit('5.2.' + stri, vol_available)
        pp.add_cnt_64bit('5.3.' + stri, vol_used)
        pp.add_cnt_64bit('5.4.' + stri, volsize)
        pp.add_gau('5.12.' + stri, vol_available / 1024)
        pp.add_gau('5.13.' + stri, vol_used / 1024)
        pp.add_gau('5.14.' + stri, volsize / 1024)
    # --- Per-dataset rows (subtree 7.x) ---
    for i, ds in enumerate(datasets):
        stri = str(i + 1)
        ds_used = unprettyprint(ds.properties['used'].value) / 1024
        ds_available = unprettyprint(ds.properties['available'].value) / 1024
        ds_size = ds_used + ds_available
        pp.add_str('7.1.' + stri, ds.name)
        pp.add_cnt_64bit('7.2.' + stri, ds_available)
        pp.add_cnt_64bit('7.3.' + stri, ds_used)
        pp.add_cnt_64bit('7.4.' + stri, ds_size)
        pp.add_gau('7.12.' + stri, ds_available / 1024)
        pp.add_gau('7.13.' + stri, ds_used / 1024)
        pp.add_gau('7.14.' + stri, ds_size / 1024)
    # --- Replication task rows (subtree 8.x) ---
    for i, repl in enumerate(qs):
        stri = str(i + 1)
        name = repl.repl_filesystem
        status = repl.status
        enabled = repl.repl_enabled
        remote_addr = repl.repl_remote
        remote_zfs = repl.repl_zfs
        last = repl.repl_lastsnapshot
        pp.add_str('8.1.' + stri, name)
        pp.add_str('8.2.' + stri, '%s@%s' % (remote_zfs, remote_addr))
        pp.add_str('8.3.' + stri, enabled)
        pp.add_str('8.4.' + stri, status)
        pp.add_str('8.5.' + stri, last)
class ArgumentValidationError(ValueError):
    """Raised when an argument passed to a function has the wrong type."""

    def __init__(self, arg_num, func_name, accepted_arg_type):
        # Pre-render the message so __str__ stays trivial.
        self.error = f'The {arg_num} argument of {func_name}() is not a {accepted_arg_type}'

    def __str__(self):
        return self.error
def unprettyprint(ster):
    """
    Convert a human-readable size string to an integer number of bytes.

    Examples: "512" -> 512, "1K" -> 1024, "2.5M" -> 2621440.
    Unparsable input or an unknown suffix yields 0, matching the original
    best-effort behaviour.
    """
    # Local table of suffix multipliers (same values as the module-level
    # size_dict, kept here so the function is self-contained).
    multipliers = {
        "K": 1024,
        "M": 1048576,
        "G": 1073741824,
        "T": 1099511627776,
    }
    # Plain number, no suffix.
    try:
        return int(float(ster))
    except (TypeError, ValueError):
        pass
    # Number with a single-letter suffix. The bare excepts of the original
    # are narrowed so real bugs (e.g. KeyboardInterrupt) are not swallowed.
    try:
        return int(float(ster[:-1]) * multipliers[ster[-1]])
    except (TypeError, ValueError, KeyError, IndexError):
        return 0
def kstat(name):
    """
    Return the integer value of the sysctl node ``kstat.<name>``.

    Returns 0 when the sysctl cannot be read or its value is not an
    integer, so callers can treat missing statistics as zero.
    """
    output = subprocess.getoutput("sysctl kstat." + name)
    try:
        # Output looks like "kstat.zfs.misc.arcstats.size: 12345";
        # the second whitespace-separated token is the value.
        # r"\s+" avoids the invalid-escape-sequence deprecation of "\s+".
        return int(re.split(r"\s+", output)[1])
    except (IndexError, ValueError):
        # Narrowed from the original bare except: only parse failures
        # should map to 0.
        return 0
def zfs_arc_size():
    """Current ARC size, in KB."""
    size_bytes = kstat("zfs.misc.arcstats.size")
    return size_bytes / 1024


def zfs_arc_data():
    """ARC data size, in KB."""
    data_bytes = kstat("zfs.misc.arcstats.data_size")
    return data_bytes / 1024


def zfs_arc_meta():
    """ARC metadata size, in KB."""
    meta_bytes = kstat("zfs.misc.arcstats.arc_meta_used")
    return meta_bytes / 1024


def zfs_arc_hits():
    """ARC hit count, wrapped as a 32 bit counter."""
    return kstat("zfs.misc.arcstats.hits") % 4294967296


def zfs_arc_misses():
    """ARC miss count, wrapped as a 32 bit counter."""
    return kstat("zfs.misc.arcstats.misses") % 4294967296


def zfs_arc_miss_percent():
    """ARC miss percentage (floating point precision wrapped as a string)."""
    hits = kstat("zfs.misc.arcstats.hits")
    misses = kstat("zfs.misc.arcstats.misses")
    reads = hits + misses
    if reads <= 0:
        return "0"
    return str(100 - 100 * hits / reads)


def zfs_arc_c():
    """ARC target size (arcstats.c), in KB."""
    target_bytes = kstat("zfs.misc.arcstats.c")
    return target_bytes / 1024


def zfs_arc_p():
    """ARC MRU target size (arcstats.p), in KB."""
    mru_bytes = kstat("zfs.misc.arcstats.p")
    return mru_bytes / 1024


def zfs_arc_cache_hit_ratio():
    """ARC cache hit ratio without the trailing '%' (sampled at module load)."""
    return ARC['cache_hit_ratio']['per'][:-1]


def zfs_arc_cache_miss_ratio():
    """ARC cache miss ratio without the trailing '%' (sampled at module load)."""
    return ARC['cache_miss_ratio']['per'][:-1]
def zilstatd_ops(interval):
    """Number of ZIL operations over the given interval, per freenas-snmpd."""
    snapshot = get_from_freenas_snmpd_sock(b"get_all")
    try:
        return snapshot["zil_data"][str(interval)]['ops']
    except KeyError:
        # Interval not (yet) present in the daemon's data: report 0.
        return 0


# Note: Currently only 1 second interval and "all" is supported
# to add more make the appropriate worker in gui/tools/freenas-snmpd.py
def zpoolio_interval(interval):
    """Per-pool I/O statistics for the given interval ("all" or 1)."""
    snapshot = get_from_freenas_snmpd_sock(b"get_all")
    try:
        return snapshot["zpool_data"][str(interval)]
    except KeyError:
        # Interval not (yet) present in the daemon's data: report nothing.
        return {}


def zfs_zilstat_ops1():
    """ZIL operations over the last 1 second."""
    return zilstatd_ops(1)


def zfs_zilstat_ops5():
    """ZIL operations over the last 5 seconds."""
    return zilstatd_ops(5)


def zfs_zilstat_ops10():
    """ZIL operations over the last 10 seconds."""
    return zilstatd_ops(10)
def zfs_l2arc_size():
    """Current L2ARC size, in KB."""
    size_bytes = kstat("zfs.misc.arcstats.l2_size")
    return size_bytes / 1024


def zfs_l2arc_hits():
    """L2ARC hit count, wrapped as a 32 bit counter."""
    return kstat("zfs.misc.arcstats.l2_hits") % 4294967296


def zfs_l2arc_misses():
    """L2ARC miss count, wrapped as a 32 bit counter."""
    return kstat("zfs.misc.arcstats.l2_misses") % 4294967296


def zfs_l2arc_write():
    """Bytes written to L2ARC in KB, wrapped as a 32 bit counter."""
    written_kb = kstat("zfs.misc.arcstats.l2_write_bytes") / 1024
    return written_kb % 4294967296


def zfs_l2arc_read():
    """Bytes read from L2ARC in KB, wrapped as a 32 bit counter."""
    read_kb = kstat("zfs.misc.arcstats.l2_read_bytes") / 1024
    return read_kb % 4294967296
def zfs_segregate(zfs_dataset):
    """
    Recursively walk ``zfs_dataset`` and split it and all of its children
    into plain datasets and zvols.

    Feeding this a pool's root dataset yields a (datasets, zvols) tuple
    covering the whole pool; both elements are lists.

    Raises ArgumentValidationError unless ``zfs_dataset`` is a
    libzfs.ZFSDataset.
    """
    if type(zfs_dataset) is not libzfs.ZFSDataset:
        raise ArgumentValidationError(1, 'zfs_segregate', libzfs.ZFSDataset)
    # zvols have no children, so they terminate the recursion immediately.
    if zfs_dataset.properties['type'].value == 'volume':
        return [], [zfs_dataset]
    datasets = [zfs_dataset]
    zvols = []
    for child_datasets, child_zvols in map(zfs_segregate, list(zfs_dataset.children)):
        datasets.extend(child_datasets)
        zvols.extend(child_zvols)
    return datasets, zvols
def main():
    """
    Entry point: run the pass_persist handler, restarting it on errors.

    Restarts the monitoring loop (with a 15 s pause between attempts)
    until MAX_RETRY errors occur within one hour, then aborts.
    """
    global pp
    retry_timestamp = int(time.time())
    retry_counter = MAX_RETRY
    while retry_counter > 0:
        try:
            syslog.syslog(syslog.LOG_WARNING, "Starting FreeNAS monitoring...")
            # Load helpers
            pp = snmp.PassPersist(OID_BASE)
            # Blocks here serving SNMP requests until an error occurs.
            pp.start(update_data, POOLING_INTERVAL)
        except KeyboardInterrupt:
            print("Exiting on user request.")
            sys.exit(0)
        except IOError as e:
            if e.errno == errno.EPIPE:
                # snmpd closed its end of the pipe: normal shutdown.
                syslog.syslog(syslog.LOG_INFO, "Snmpd has close pipe, exiting...")
                sys.exit(0)
            else:
                syslog.syslog(syslog.LOG_WARNING, "Updater thread has died: IOError: %s" % e)
        except Exception as e:
            syslog.syslog(syslog.LOG_WARNING, "Main thread has died: %s: %s" % (e.__class__.__name__, e))
        else:
            # pp.start() returned without raising: the updater thread died.
            syslog.syslog(syslog.LOG_WARNING, "Updater thread has died: %s" % pp.error)
        syslog.syslog(syslog.LOG_WARNING, "Restarting monitoring in 15 sec...")
        time.sleep(15)
        # Errors frequency detection
        now = int(time.time())
        if (now - 3600) > retry_timestamp:  # If the previous error is older than 1H
            retry_counter = MAX_RETRY  # Reset the counter
        else:
            retry_counter -= 1  # Else countdown
        retry_timestamp = now
    syslog.syslog(syslog.LOG_ERR, "Too many retries, aborting!")
    sys.exit(1)


if __name__ == "__main__":
    main()
snmp_passpersist.py
Code:
# -*- coding:utf-8 -*-
# snmp_passpersist.py - SNMP passPersist backend for Net-SNMP
# Copyleft 2010-2013 - Nicolas AGIUS <nicolas.agius@lps-it.fr>
###########################################################################
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
###########################################################################
"""
This 'snmp_passpersist' module is a python backend for snmp's "pass_persist" function.
It is critical that the python interpreter be invoked with unbuffered STDIN and
STDOUT by use of the -u switch in the shebang line.
All the methods are in the PassPersist class.
"""
import sys, time, threading, os
__all__ = ["Error", "ErrorValues", "Type", "TypeValues", "PassPersist"]
__author__ = "Nicolas Agius"
__license__ = "GPL"
__version__ = "1.3.0"
__email__ = "nicolas.agius@lps-it.fr"
__status__ = "Production"
class Error(object):
    """
    Error values a SET command handler may return.
    As listed in the man snmpd.conf(5) page.
    """
    NotWritable = 'not-writable'
    WrongType = 'wrong-type'
    WrongValue = 'wrong-value'
    WrongLength = 'wrong-length'
    InconsistentValue = 'inconsistent-value'


# All valid error strings, used by set() to validate setter return values.
ErrorValues = Error.__dict__.values()
class Type:
    """
    Value types accepted in SET command requests.
    As listed in the man snmpd.conf(5) page.
    """
    Integer = 'integer'
    Gauge = 'gauge'
    Counter = 'counter'
    TimeTicks = 'timeticks'
    IPAddress = 'ipaddress'
    OID = 'objectid'
    ObjectID = 'objectid'  # alias of OID
    String = 'string'
    Octet = 'octet'


# All valid type strings (includes duplicates from the OID/ObjectID alias).
TypeValues = Type.__dict__.values()
class ResponseError(ValueError):
    """
    Wrong response from a user-provided function.
    NOTE(review): not raised anywhere in this module; apparently kept for
    API compatibility.
    """
class PassPersist:
    """
    Convenient way to create a MIB subtree and expose it to snmpd via its
    pass_persist protocol.

    Two threads are used: one talks with snmpd over stdin/stdout, a second
    triggers the update process at a fixed interval.

    The keyword 'DUMP' has been added to the protocol for testing purposes.

    Usage example: in a file /path/to/your/script.py :

    > #!/usr/bin/python -u
    > import snmp_passpersist as snmp
    >
    > def update():
    >     pp.add_int('0.1', 123)
    >
    > pp = snmp.PassPersist(".1.3.6.1.3.53.8")
    > pp.start(update, 30)  # Every 30s

    With the following line in snmpd.conf :

    pass_persist .1.3.6.1.3.53.8.0 /path/to/your/script.py
    """

    @staticmethod
    def encode(string):
        """
        Encode the given string as an OID (length-prefixed character ordinals).

        >>> import snmp_passpersist as snmp
        >>> snmp.PassPersist.encode("hello")
        '5.104.101.108.108.111'
        >>>
        """
        result = ".".join([str(ord(s)) for s in string])
        return "%s." % (len(string)) + result

    def __init__(self, base_oid):
        """
        Initialize internal structures.

        base_oid is the OID prefix used for all entries (the root of the
        MIB subtree).
        """
        self.data = dict()      # committed tree: OID -> {'type':..., 'value':...}
        self.data_idx = list()  # numerically sorted list of committed OIDs
        self.pending = dict()   # entries being built by the updater thread
        self.lock = threading.RLock()
        if not base_oid.endswith("."):
            base_oid += "."
        self.base_oid = base_oid
        self.setter = dict()    # OID (prefix) -> user setter function
        self.debug = False
        # The data structure is a dict that holds the unsorted MIB tree like:
        # data = {
        #     '1.1': { 'type':'INTEGER', 'value':4 },
        #     '1.3.2.1': { 'type':'STRING', 'value':'vm1' }
        # }

    def get(self, oid):
        """Return the snmp triplet (full OID, type, value) for the given OID."""
        try:
            self.lock.acquire()
            if oid not in self.data:
                return "NONE"
            else:
                return self.base_oid + oid + '\n' + self.data[oid]['type'] + '\n' + str(self.data[oid]['value'])
        finally:
            self.lock.release()

    def get_next(self, oid):
        """Return the snmp triplet for the OID following the given one."""
        try:  # Nested try..except because of Python 2.4
            self.lock.acquire()
            try:
                # Remove trailing zeroes from the oid
                while len(oid) > 0 and oid[-2:] == ".0" and oid not in self.data:
                    oid = oid[:-2]
                return self.get(self.data_idx[self.data_idx.index(oid) + 1])
            except ValueError:
                # Not found: try to match a partial oid
                for real_oid in self.data_idx:
                    if real_oid.startswith(oid):
                        return self.get(real_oid)
                return "NONE"  # Unknown OID
            except IndexError:
                return "NONE"  # End of MIB
        finally:
            self.lock.release()

    def get_first(self):
        """Return the snmp triplet for the first OID of the subtree."""
        try:  # Nested try..except because of Python 2.4
            self.lock.acquire()
            try:
                return self.get(self.data_idx[0])
            except (IndexError, ValueError):
                return "NONE"
        finally:
            self.lock.release()

    def cut_oid(self, full_oid):
        """
        Remove the base OID from the given string; return None when the
        string is not under the base OID.

        >>> import snmp_passpersist as snmp
        >>> pp=snmp.PassPersist(".1.3.6.1.3.53.8")
        >>> pp.cut_oid(".1.3.6.1.3.53.8.28.12")
        '28.12'
        """
        if not full_oid.startswith(self.base_oid.rstrip('.')):
            return None
        else:
            return full_oid[len(self.base_oid):]

    def add_oid_entry(self, oid, type, value):
        """General function to add an oid entry to the MIB subtree."""
        if self.debug:
            print('DEBUG: %s %s %s' % (oid, type, value))
        self.pending[oid] = {'type': str(type), 'value': str(value)}

    def add_oid(self, oid, value):
        """Short helper to add an object ID value to the MIB subtree."""
        self.add_oid_entry(oid, 'OBJECTID', value)

    def add_int(self, oid, value):
        """Short helper to add an integer value to the MIB subtree."""
        self.add_oid_entry(oid, 'INTEGER', value)

    def add_oct(self, oid, value):
        """Short helper to add an octet value to the MIB subtree."""
        self.add_oid_entry(oid, 'OCTET', value)

    def add_str(self, oid, value):
        """Short helper to add a string value to the MIB subtree."""
        self.add_oid_entry(oid, 'STRING', value)

    def add_ip(self, oid, value):
        """Short helper to add an IP address value to the MIB subtree."""
        self.add_oid_entry(oid, 'IPADDRESS', value)

    def add_cnt_32bit(self, oid, value):
        """Short helper to add a 32 bit counter value to the MIB subtree."""
        # Truncate integer to 32 bits max (modulo 2**32)
        self.add_oid_entry(oid, 'Counter32', int(value) % 4294967296)

    def add_cnt_64bit(self, oid, value):
        """Short helper to add a 64 bit counter value to the MIB subtree."""
        # Truncate integer to 64 bits max. BUG FIX: the modulus must be
        # 2**64 (18446744073709551616); the previous 2**64 - 1 mapped the
        # maximum counter value to 0.
        self.add_oid_entry(oid, 'Counter64', int(value) % 18446744073709551616)

    def add_gau(self, oid, value):
        """Short helper to add a gauge value to the MIB subtree."""
        self.add_oid_entry(oid, 'GAUGE', value)

    def add_tt(self, oid, value):
        """Short helper to add a timeticks value to the MIB subtree."""
        self.add_oid_entry(oid, 'TIMETICKS', value)

    def main_passpersist(self):
        """
        Main function that handles SNMP's pass_persist protocol, called by
        the start method.
        Direct call is unnecessary.
        """
        line = sys.stdin.readline().strip()
        if not line:
            raise EOFError()
        # 'getnext' must be tested before 'get': 'getnext' contains 'get'.
        if 'PING' in line:
            print("PONG")
        elif 'getnext' in line:
            oid = self.cut_oid(sys.stdin.readline().strip())
            if oid is None:
                print("NONE")
            elif oid == "":
                # Fall back to the first entry
                print(self.get_first())
            else:
                print(self.get_next(oid))
        elif 'get' in line:
            oid = self.cut_oid(sys.stdin.readline().strip())
            if oid is None:
                print("NONE")
            else:
                print(self.get(oid))
        elif 'set' in line:
            oid = sys.stdin.readline().strip()
            typevalue = sys.stdin.readline().strip()
            self.set(oid, typevalue)
        elif 'DUMP' in line:  # Just for debugging
            from pprint import pprint
            pprint(self.data)
        else:
            print("NONE")
        sys.stdout.flush()

    def commit(self):
        """
        Commit changes made by the add_* methods.
        All previous values with no update will be lost.
        This method is automatically called by the updater thread.
        """
        # Generate the index before acquiring the lock to keep the locked
        # section fast. Works because this thread is the only writer of
        # self.pending.
        pending_idx = sorted(self.pending.keys(), key=lambda k: tuple(int(part) for part in k.split('.')))
        # Commit new data
        try:
            self.lock.acquire()
            self.data = self.pending
            self.pending = dict()
            self.data_idx = pending_idx
        finally:
            self.lock.release()

    def main_update(self):
        """
        Main function called by the updater thread.
        Direct call is unnecessary.
        """
        # Renice updater thread to limit overload
        try:
            os.nice(1)
        except AttributeError:
            pass  # os.nice is not available on windows
        time.sleep(self.refresh)
        try:
            while True:
                # We pick a timestamp to take in account the time used by update()
                timestamp = time.time()
                # Update data with user's defined function
                self.update()
                # We use this trick because we cannot use signals in a backoffice threads
                # and alarm() mess up with readline() in the main thread.
                delay = (timestamp + self.refresh) - time.time()
                if delay > 0:
                    if delay > self.refresh:
                        time.sleep(self.refresh)
                    else:
                        time.sleep(delay)
                # Commit change exactly every 'refresh' seconds, whatever update() takes long.
                # Commited values are a bit old, but for RRD, punctuals values
                # are better than fresh-but-not-time-constants values.
                self.commit()
        except Exception as e:
            # Remember the error so the main thread can report it, then let
            # the thread die: start() stops serving once we are gone.
            self.error = e
            raise

    def get_setter(self, oid):
        """
        Retrieve the nearest parent setter function for an OID, falling
        back to default_setter.
        """
        # BUG FIX: the original used hasattr(self.setter, oid), which tests
        # object attributes, not dict keys, so exact matches never hit.
        if oid in self.setter:
            return self.setter[oid]
        parents = [poid for poid in self.setter.keys() if oid.startswith(poid)]
        if parents:
            return self.setter[max(parents)]
        return self.default_setter

    def register_setter(self, oid, setter_func):
        """
        Set reference to an user defined function for deal with set commands.
        The user function receives the OID, type (see Type class) and value
        and must return a true value on succes or one of errors in Error class
        """
        self.setter[oid] = setter_func

    def default_setter(self, oid, _type, value):
        """Reject any SET request by default."""
        return Error.NotWritable

    def set(self, oid, typevalue):
        """
        Call the default or user setter function if available
        """
        # BUG FIX: the original value = typevalue.lstrip(type_) stripped
        # *characters* of the type name, not the prefix; partition() splits
        # cleanly on the first space instead.
        type_, _, value = typevalue.partition(' ')
        value = value.strip().strip('"')
        ret_value = self.get_setter(oid)(oid, type_, value)
        if ret_value:
            if ret_value in ErrorValues or ret_value == 'DONE':
                print(ret_value)
            elif ret_value == True:
                print('DONE')
            elif ret_value == False:
                print(Error.NotWritable)
            else:
                raise RuntimeError("wrong return value: %s" % str(ret_value))
        else:
            print(Error.NotWritable)

    def start(self, user_func, refresh):
        """
        Start the SNMP's protocol handler and the updater thread
        user_func is a reference to an update function, ran every 'refresh' seconds.
        """
        self.update = user_func
        self.refresh = refresh
        self.error = None
        # First load
        self.update()
        self.commit()
        # Start updater thread
        up = threading.Thread(None, self.main_update, "Updater")
        up.daemon = True
        up.start()
        # Main loop: do not serve data if the Updater thread has died.
        # BUG FIX: is_alive() replaces isAlive(), removed in Python 3.9.
        while up.is_alive():
            self.main_passpersist()
# vim: ts=4:sw=4:ai

Attachments
Last edited: