pax_global_header00006660000000000000000000000064131557117670014527gustar00rootroot0000000000000052 comment=59fbd13ad4399cef96710141ccd6d2ecf478e9cd nixstatsagent-1.1.32/000077500000000000000000000000001315571176700145075ustar00rootroot00000000000000nixstatsagent-1.1.32/.gitignore000066400000000000000000000001211315571176700164710ustar00rootroot00000000000000nixstats.ini *.pyc *~ *.bak build debian dist nixstatsagent.egg-info README.html nixstatsagent-1.1.32/.travis.yml000066400000000000000000000017211315571176700166210ustar00rootroot00000000000000language: python python: - '2.6' - '2.7' install: python setup.py install script: python setup.py sdist bdist_egg bdist_wheel deploy: provider: pypi user: btbroot password: secure: by+wKHR5IU8Rc4tyBSn2y8oH/Wz3nuw5NVSafpD+pLL+PjJ98AkYZFjVk7li79tu2OdhrpIR6+rHZSVcIP2EqyRMoxUgp45F158GYJmgpmY8Vi5NbNR0D/kP2qxMVTY3/J29bjns32RnjNU8PYumY0qg6kOZn/rh1LJHNasPcK2xrgAkTnplvHRS1IpFCoONN1hobg1a34CvvLic22KBPBAdOgAD8IxdVVbnrEpy7urxKld+ClJw3tvrqWgf17SNuYG+0vVr25fC/fcTESG17AiC/tw6i3IXrspJzR5dV+MGatWknokF4NaElXlDmf1wD1o2iY/8P4Xa9JTPLh56J5hvViAxBogafEuH566QgrufA6xXFSzS0+lkOkmYSK7kLWB9nEcDbVB8nMXt2RC7aLGzr0XZijeduhuADtO62mA9THgBEor56T/cSBi9fYYh9CC95glwTUN8UgDJ98+CcD8bVK1oFscMZgk/LqC6EMccOFJvi5zJshtd5y5/53XG/jqFUCvVPsW5XzA7r+9//l3RQeIcSfAQgkkpxb6bpoKctDwBGcjEMT0I8kwVtKJFjhw1Hfbbhg2QYyiRAhk0oaYGrHNxdYcf9te4BdtTqzoukHuJvx6NOjy7XyHh8C+SLhCbzkvMRHJVH92YhdsGsP1Lw6tFYzA/B75zwPhN/SM= on: tags: true distributions: sdist bdist_egg bdist_wheel repo: NIXStats/nixstatsagent nixstatsagent-1.1.32/LICENSE000066400000000000000000000030331315571176700155130ustar00rootroot00000000000000BSD Simplified License Copyright (c) 2016, NIXStats All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 
* Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. * Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. nixstatsagent-1.1.32/MANIFEST.in000066400000000000000000000000571315571176700162470ustar00rootroot00000000000000include nixstats-example.ini LICENSE README.md nixstatsagent-1.1.32/README.md000066400000000000000000000040131315571176700157640ustar00rootroot00000000000000NixStats Agent ============== NixStats.com is a web service of monitoring and displaying statistics of your server performance. This software is an OS-agnostic agent compatible with Python 2.4, 2.5, 2.6 and 2.7. It's been optimized to have a small CPU consumption and comes with an extendable set of useful plugins. [![Build Status](https://travis-ci.org/NIXStats/nixstatsagent.svg?branch=master)](https://travis-ci.org/NIXStats/nixstatsagent) Installation ------------ Depending on your platform, many installation options are possible. 
We are listing them more or less in the order from the most specific (and preferred) to the most generic ones. ### Debian GNU/Linux The package is existing in Debian Sid and should appear soon in other releases and derivaives. If your suite is already supported, simply do: ``` apt-get install nixstatsagent ``` Until then, use our [packagecloud repository](https://packagecloud.io/btbroot/nixstats/install#bash) Current status: - [x] Ubuntu 16.04.1 LTS (packagecloud:ubuntu/xenial) - [x] Ubuntu 14.04.5 LTS (packagecloud:ubuntu/trusty) - [x] Ubuntu 12.04.5 LTS (packagecloud:ubuntu/precise) - [x] Debian 8 (packagecloud:debian/jessie) - [x] Debian 7 (packagecloud:debian/wheezy) - [ ] Debian 6 - [x] Debian 9 (sid, stretch, packagecloud:debian/stretch) - [x] Ubuntu 17.04 (zesty) ### Fedora - CentOS 7 - CentOS 6 - CentOS 5 - Fedora 25 (low priority) - Fedora 24 (low priority) - Fedora 23 (low priority) ### Windows - Windows 2016 (low priority) - Windows 2012 (low priority) ### Python 2.6 or 2.7 environment As the binary packages are published on [PyPI](https://pypi.python.org/pypi), provided that you've obtained [pip](https://pip.pypa.io/en/latest/installing/), simply do: ``` pip install nixstatsagent ``` ### Python 2.4 or 2.5 environment As the source package is published on [PyPI](https://pypi.python.org/pypi), provided that you've obtained [setuptools](https://pypi.python.org/pypi/setuptools#installation-instructions), simply do: ``` easy_install nixstatsagent ``` nixstatsagent-1.1.32/nixstats-example.ini000066400000000000000000000025261315571176700205230ustar00rootroot00000000000000# [DEFAULT] ; Values here *override* the hardcoded defaults (listed) # max_data_span = 60 ; Collected data span threshold for sending, sec # max_data_age = 600 ; Collected data age threshold for sending, sec # logging_level = 30 ; (logging.WARNING) Logging level, defined in logging # threads = 100 ; Maximun amount of threads # interval = 60 ; Interval betveen iterations, sec # plugins = 
plugins ; Path to plugins # enabled = no ; Toggle plugin # subprocess = no ; Run plugin as a subprocess # ttl = 60 ; Time to live (for a subprocess), sec # user = '' ; API user name # server = '' ; API server name # # ###################### Default sections # [agent] ; Main thread # # [execution] ; Plugin execution threads # ttl = 15 ; Example: plugins will be killed after 15 sec # # [data] ; Collected data sening thread # interval = 600 ; Example: the data collected will be checked every minute for thresholds # # ##################### Plugin sections examples # [cpu] ; Example: Shortname for 'plugins/cpu.py' plugin # enabled = yes ; Example: cpu plugin enabled # ttl = 5 ; Example: cpu plugin time to live # # [sleeper] ; Example: Shortname for 'plugins/sleeper.py' plugin # enabled = yes ; Example: sleeper plugin disabled nixstatsagent-1.1.32/nixstatsagent/000077500000000000000000000000001315571176700174035ustar00rootroot00000000000000nixstatsagent-1.1.32/nixstatsagent/__init__.py000066400000000000000000000000001315571176700215020ustar00rootroot00000000000000nixstatsagent-1.1.32/nixstatsagent/nixstatsagent.py000077500000000000000000000534721315571176700226670ustar00rootroot00000000000000#!/usr/bin/env python # -*- coding: utf-8 -*- # by Al Nikolov import bz2 import ConfigParser import glob import httplib import imp try: import json except ImportError: import simplejson as json import logging import os import pickle import Queue import signal import socket import StringIO import subprocess import sys import threading import time import types import urllib import urllib2 __version__ = '1.1.32' __FILEABSDIRNAME__ = os.path.dirname(os.path.abspath(__file__)) ini_files = ( os.path.join('/etc', 'nixstats.ini'), os.path.join('/etc', 'nixstats-token.ini'), os.path.join(os.path.dirname(__FILEABSDIRNAME__), 'nixstats.ini'), os.path.join(os.path.dirname(__FILEABSDIRNAME__), 'nixstats-token.ini'), os.path.abspath('nixstats.ini'), os.path.abspath('nixstats-token.ini'), ) def 
info(): ''' Return string with info about nixstatsagent: - version - plugins enabled - absolute path to plugin directory - server id from configuration file ''' agent = Agent(dry_instance=True) plugins_path = agent.config.get('agent', 'plugins') plugins_enabled = agent._get_plugins(state='enabled') return '\n'.join(( 'Version: %s' % __version__, 'Plugins enabled: %s' % ', '.join(plugins_enabled), 'Plugins directory: %s' % plugins_path, 'Server: %s' % agent.config.get('agent', 'server'), )) def hello(proto='https'): user_id = sys.argv[1] if len(sys.argv) > 2: token_filename = sys.argv[2] else: token_filename = os.path.join(__FILEABSDIRNAME__, 'nixstats-token.ini') if '_' in user_id: server_id = user_id.split('_')[1] user_id = user_id.split('_')[0] elif os.path.isfile('/etc/nixstats/token'): oldconfigfile = open('/etc/nixstats/token','r') server_id = oldconfigfile.readline() print 'Upgrading from old monitoring agent' print 'Remove the old agent from the crontab (crontab -e -u nixstats)' elif os.path.isfile('/opt/nixstats/nixstats.cfg'): oldconfigfile = open('/opt/nixstats/nixstats.cfg') lines=oldconfigfile.readlines() server_id = lines[1].replace('server=', '').strip() print 'Upgrading from old python client.' print 'Run :\nchkconfig --del nixstats \nor \nupdate-rc.d -f nixstats remove \nto remove the old service.' 
else: try: hostname = os.uname()[1] except AttributeError: hostname = socket.getfqdn() server_id = urllib2.urlopen( proto + '://api.nixstats.com/hello.php', data=urllib.urlencode({ 'user': user_id, 'hostname': hostname }) ).read() print('Got server_id: %s' % server_id) open(token_filename, 'w').\ write('[DEFAULT]\nuser=%s\nserver=%s\n' % (user_id, server_id)) # def run_agent(): # Agent().run() def _plugin_name(plugin): if isinstance(plugin, basestring): basename = os.path.basename(plugin) return os.path.splitext(basename)[0] else: return plugin.__name__ def test_plugins(plugins=[]): ''' Test specified plugins and print their data output after single check. If plugins list is empty test all enabled plugins. ''' agent = Agent(dry_instance=True) plugins_path = agent.config.get('agent', 'plugins') if plugins_path not in sys.path: sys.path.insert(0, plugins_path) if not plugins: plugins = agent._get_plugins(state='enabled') print 'Check all enabled plugins: %s' % ', '.join(plugins) for plugin_name in plugins: print '%s:' % plugin_name try: fp, pathname, description = imp.find_module(plugin_name) except Exception as e: print 'Find error:', e continue try: module = imp.load_module(plugin_name, fp, pathname, description) except Exception as e: print 'Load error:', e continue finally: # Since we may exit via an exception, close fp explicitly. 
if fp: fp.close() try: payload = module.Plugin().run(agent.config) print json.dumps(payload, indent=4, sort_keys=True) except Exception as e: print 'Execution error:', e class Agent: execute = Queue.Queue() metrics = Queue.Queue() data = Queue.Queue() shutdown = False def __init__(self, dry_instance=False): ''' Initialize internal strictures ''' self._config_init() # Cache for plugins so they can store values related to previous checks self.plugins_cache = {} if dry_instance: return self._logging_init() self._plugins_init() self._data_worker_init() self._dump_config() def _config_init(self): ''' Initialize configuration object ''' defaults = { 'max_data_span': 60, 'max_data_age': 60 * 10, 'logging_level': logging.INFO, 'threads': 100, 'ttl': 60, 'interval': 60, 'plugins': os.path.join(__FILEABSDIRNAME__, 'plugins'), 'enabled': 'no', 'subprocess': 'no', 'user': '', 'server': '', 'api_host': 'api.nixstats.com', 'api_path': '/v2/server/poll', 'log_file': '/var/log/nixstatsagent.log', 'log_file_mode': 'a', 'max_cached_collections': 10, } sections = [ 'agent', 'execution', 'data', ] config = ConfigParser.RawConfigParser(defaults) config.read(ini_files) self.config = config for section in sections: self._config_section_create(section) if section is 'data': self.config.set(section, 'interval', 1) if section is 'agent': self.config.set(section, 'interval', .5) def _config_section_create(self, section): ''' Create an addition section in the configuration object if it's not exists ''' if not self.config.has_section(section): self.config.add_section(section) def _logging_init(self): ''' Initialize logging faculty ''' level = self.config.getint('agent', 'logging_level') log_file = self.config.get('agent', 'log_file') log_file_mode = self.config.get('agent', 'log_file_mode') if log_file_mode in ('w', 'a'): pass elif log_file_mode == 'truncate': log_file_mode = 'w' elif log_file_mode == 'append': log_file_mode = 'a' else: log_file_mode = 'a' if log_file == '-': 
logging.basicConfig(level=level) # Log to sys.stderr by default else: try: logging.basicConfig(filename=log_file, filemode=log_file_mode, level=level, format="%(asctime)-15s %(levelname)s %(message)s") except IOError as e: logging.basicConfig(level=level) logging.info('IOError: %s', e) logging.info('Drop logging to stderr') logging.info('Agent logging_level %i', level) def _plugins_init(self): ''' Discover the plugins ''' logging.info('_plugins_init') plugins_path = self.config.get('agent', 'plugins') filenames = glob.glob(os.path.join(plugins_path, '*.py')) if plugins_path not in sys.path: sys.path.insert(0, plugins_path) self.schedule = {} for filename in filenames: name = _plugin_name(filename) if name == 'plugins': continue self._config_section_create(name) if self.config.getboolean(name, 'enabled'): if self.config.getboolean(name, 'subprocess'): self.schedule[filename] = 0 else: fp, pathname, description = imp.find_module(name) try: module = imp.load_module(name, fp, pathname, description) except Exception: module = None finally: # Since we may exit via an exception, close fp explicitly. 
if fp: fp.close() if module: self.schedule[module] = 0 else: logging.error('import_plugin:%s:%s', name, sys.exc_type) def _subprocess_execution(self, task): ''' Execute /task/ in a subprocess ''' process = subprocess.Popen((sys.executable, task), stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True) logging.debug('%s:process:%i', threading.currentThread(), process.pid) interval = self.config.getint('execution', 'interval') name = _plugin_name(task) ttl = self.config.getint(name, 'ttl') ticks = ttl / interval or 1 process.poll() while process.returncode is None and ticks > 0: logging.debug('%s:tick:%i', threading.currentThread(), ticks) time.sleep(interval) ticks -= 1 process.poll() if process.returncode is None: logging.error('%s:kill:%i', threading.currentThread(), process.pid) os.kill(process.pid, signal.SIGTERM) stdout, stderr = process.communicate() if process.returncode != 0 or stderr: logging.error('%s:%s:%s:%s', threading.currentThread(), task, process.returncode, stderr) if stdout: ret = pickle.loads(stdout) else: ret = None return ret def _execution(self): ''' Take queued execution requests, execute plugins and queue the results ''' while True: if self.shutdown: logging.info('%s:shutdown', threading.currentThread()) break logging.debug('%s:exec_queue:%i', threading.currentThread(), self.execute.qsize()) try: task = self.execute.get_nowait() except Queue.Empty: break logging.debug('%s:task:%s', threading.currentThread(), task) name = _plugin_name(task) ts = time.time() if isinstance(task, basestring): payload = self._subprocess_execution(task) else: try: # Setup cache for plugin instance # if name not in self.plugins_cache.iterkeys(): # self.plugins_cache[name] = [] self.plugins_cache.update({ name: self.plugins_cache.get(name, []) }) plugin = task.Plugin(agent_cache=self.plugins_cache[name]) payload = plugin.run(self.config) except Exception: logging.exception('plugin_exception') payload = {'exception': str(sys.exc_info()[0])} 
self.metrics.put({ 'ts': ts, 'task': task, 'name': name, 'payload': payload, }) self.hire.release() def _data(self): ''' Take and collect data, send and clean if needed ''' logging.info('%s', threading.currentThread()) api_host = self.config.get('data', 'api_host') api_path = self.config.get('data', 'api_path') max_age = self.config.getint('agent', 'max_data_age') max_span = self.config.getint('agent', 'max_data_span') server = self.config.get('agent', 'server') user = self.config.get('agent', 'user') interval = self.config.getint('data', 'interval') max_cached_collections = self.config.get('agent', 'max_cached_collections') cached_collections = [] collection = [] while True: loop_ts = time.time() if self.shutdown: logging.info('%s:shutdown', threading.currentThread()) break logging.debug('%s:data_queue:%i:collection:%i', threading.currentThread(), self.data.qsize(), len(collection)) while self.data.qsize(): try: collection.append(self.data.get_nowait()) except Exception as e: logging.error('Data queue error: %s' % e) if collection: first_ts = min((e['ts'] for e in collection)) last_ts = max((e['ts'] for e in collection)) now = time.time() send = False if last_ts - first_ts >= max_span: logging.debug('Max data span') send = True clean = False elif now - first_ts >= max_age: logging.warning('Max data age') send = True clean = True if send: headers = { "Content-type": "application/json", "Authorization": "ApiKey %s:%s" % (user, server), } logging.debug('collection: %s', json.dumps(collection, indent=2, sort_keys=True)) if not (server and user): logging.warning('Empty server or user, nowhere to send.') clean = True else: try: connection = httplib.HTTPSConnection(api_host, timeout=15) # Trying to send cached collections if any if cached_collections: logging.info('Sending cached collections: %i', len(cached_collections)) while cached_collections: connection.request('PUT', '%s?version=%s' % (api_path, __version__), cached_collections[0], headers=headers) response = 
connection.getresponse() response.read() if response.status == 200: del cached_collections[0] # Remove just sent collection logging.debug('Successful response: %s', response.status) else: raise ValueError('Unsuccessful response: %s' % response.status) logging.info('All cached collections sent') # Send recent collection (reuse existing connection) connection.request('PUT', '%s?version=%s' % (api_path, __version__), bz2.compress(str(json.dumps(collection)) + "\n"), headers=headers) response = connection.getresponse() response.read() if response.status == 200: logging.debug('Successful response: %s', response.status) clean = True else: raise ValueError('Unsuccessful response: %s' % response.status) except Exception as e: logging.error('Failed to submit collection: %s' % e) # Store recent collection in cached_collections if send failed if max_cached_collections > 0: if len(cached_collections) >= max_cached_collections: del cached_collections[0] # Remove oldest collection logging.info('Reach max_cached_collections (%s): oldest cached collection dropped', max_cached_collections) logging.info('Cache current collection to resend next time') cached_collections.append(bz2.compress(str(json.dumps(collection)) + "\n")) collection = [] finally: connection.close() if clean: collection = [] sleep_interval = interval - (time.time() - loop_ts) if sleep_interval > 0: time.sleep(sleep_interval) def _data_worker_init(self): ''' Initialize data worker thread ''' logging.info('_data_worker_init') threading.Thread(target=self._data).start() def _dump_config(self): ''' Dumps configuration object ''' buf = StringIO.StringIO() self.config.write(buf) logging.info('Config: %s', buf.getvalue()) def _get_plugins(self, state='enabled'): ''' Return list with plugins names ''' plugins_path = self.config.get('agent', 'plugins') plugins = [] for filename in glob.glob(os.path.join(plugins_path, '*.py')): plugin_name = _plugin_name(filename) if plugin_name == 'plugins': continue 
self._config_section_create(plugin_name) if state == 'enabled': if self.config.getboolean(plugin_name, 'enabled'): plugins.append(plugin_name) elif state == 'disabled': if not self.config.getboolean(plugin_name, 'enabled'): plugins.append(plugin_name) return plugins def run(self): ''' Start all the worker threads ''' logging.info('Agent main loop') interval = self.config.getint('agent', 'interval') self.hire = threading.Semaphore( self.config.getint('execution', 'threads')) try: while True: now = time.time() logging.debug('%i threads', threading.activeCount()) while self.metrics.qsize(): metrics = self.metrics.get_nowait() name = metrics['name'] logging.debug('metrics:%s', name) plugin = metrics.get('task') if plugin: self.schedule[plugin] = \ int(now) + self.config.getint(name, 'interval') if isinstance(plugin, types.ModuleType): metrics['task'] = plugin.__file__ self.data.put(metrics) execute = [ what for what, when in self.schedule.items() if when <= now ] for name in execute: logging.debug('scheduling:%s', name) del self.schedule[name] self.execute.put(name) if self.hire.acquire(False): try: thread = threading.Thread(target=self._execution) thread.start() logging.debug('new_execution_worker_thread:%s', thread) except Exception as e: logging.warning('Can not start new thread: %s', e) else: logging.warning('threads_capped') self.metrics.put({ 'ts': now, 'name': 'agent_internal', 'payload': { 'threads_capping': self.config.getint('execution', 'threads')} }) time.sleep(.5) except KeyboardInterrupt: logging.warning(sys.exc_info()[0]) while True: wait_for = [thread for thread in threading.enumerate() if not thread.isDaemon() and not isinstance(thread, threading._MainThread)] logging.info('Shutdown, waiting for %i threads to exit', len(wait_for)) logging.info('Remaining threads: %s', threading.enumerate()) if len(wait_for) == 0: sys.exit(0) self.shutdown = True sleep_interval = interval-(time.time()-now) if sleep_interval > 0: time.sleep(sleep_interval) except 
Exception as e: logging.error('Worker error: %s' % e) def main(): if len(sys.argv) > 1: if sys.argv[1].startswith('--'): sys.argv[1] = sys.argv[1][2:] if sys.argv[1] == 'help': print '\n'.join(( 'Run without options to run agent.', 'Acceptable options (leading -- is optional):', ' help, info, version, hello, insecure-hello, test', )) sys.exit() elif sys.argv[1] == 'info': print info() sys.exit() elif sys.argv[1] == 'version': print __version__ sys.exit() elif sys.argv[1] == 'hello': del sys.argv[1] sys.exit(hello()) elif sys.argv[1] == 'insecure-hello': del sys.argv[1] sys.exit(hello(proto='http')) elif sys.argv[1] == 'test': sys.exit(test_plugins(sys.argv[2:])) else: print >>sys.stderr, 'Invalid option:', sys.argv[1] sys.exit(1) else: Agent().run() if __name__ == '__main__': main() nixstatsagent-1.1.32/nixstatsagent/plugins/000077500000000000000000000000001315571176700210645ustar00rootroot00000000000000nixstatsagent-1.1.32/nixstatsagent/plugins/__init__.py000066400000000000000000000000001315571176700231630ustar00rootroot00000000000000nixstatsagent-1.1.32/nixstatsagent/plugins/cpu.py000077500000000000000000000011151315571176700222260ustar00rootroot00000000000000#!/usr/bin/env python # -*- coding: utf-8 -*- import psutil import plugins class Plugin(plugins.BasePlugin): __name__ = 'cpu' def run(self, *unused): results = {} data = psutil.cpu_times_percent(interval=1, percpu=True) cpu_number = -1 for cpu in data: core = {} cpu_number = cpu_number+1 results[cpu_number] = {} for key in cpu._fields: core[key] = getattr(cpu, key) results[cpu_number] = core return results if __name__ == '__main__': Plugin().execute() nixstatsagent-1.1.32/nixstatsagent/plugins/diskusage.py000077500000000000000000000032651315571176700234260ustar00rootroot00000000000000#!/usr/bin/env python # -*- coding: utf-8 -*- import os import psutil import plugins class Plugin(plugins.BasePlugin): __name__ = 'diskusage' def run(self, config): disk = {} disk['df-psutil'] = [] for part in 
psutil.disk_partitions(False): if os.name == 'nt': if 'cdrom' in part.opts or part.fstype == '': # skip cd-rom drives with no disk in it; they may raise # ENOENT, pop-up a Windows GUI error for a non-ready # partition or just hang. continue try: usage = psutil.disk_usage(part.mountpoint) diskdata = {} diskdata['info'] = part for key in usage._fields: diskdata[key] = getattr(usage, key) disk['df-psutil'].append(diskdata) except: pass try: force_df = config.get('diskusage', 'force_df') except: force_df = 'no' if len(disk['df-psutil']) == 0 or force_df == 'yes': try: disk['df-psutil'] = [] df_output_lines = [s.split() for s in os.popen("df -Pl").read().splitlines()] del df_output_lines[0] for row in df_output_lines: if row[0] == 'tmpfs': continue disk['df-psutil'].append({'info': [row[0], row[5],'',''], 'total': int(row[1])*1024, 'used': int(row[2])*1024, 'free': int(row[3])*1024, 'percent': row[4][:-1]}) except: pass return disk if __name__ == '__main__': Plugin().execute() nixstatsagent-1.1.32/nixstatsagent/plugins/httpd.py000066400000000000000000000046661315571176700225750ustar00rootroot00000000000000#!/usr/bin/env python # -*- coding: utf-8 -*- import urllib2 import time import plugins import re class Plugin(plugins.BasePlugin): __name__ = 'httpd' def run(self, config): ''' Apache/httpd status page metrics ''' prev_cache = {} next_cache = dict() next_cache['ts'] = time.time() prev_cache = self.get_agent_cache() # Get absolute values from previous check try: data = urllib2.urlopen(config.get('httpd', 'status_page_url')).read() except Exception, e: return False exp = re.compile('^([A-Za-z ]+):\s+(.+)$') results = {} def parse_score_board(sb): ret = [] ret.append(('IdleWorkers', sb.count('_'))) ret.append(('ReadingWorkers', sb.count('R'))) ret.append(('WritingWorkers', sb.count('W'))) ret.append(('KeepaliveWorkers', sb.count('K'))) ret.append(('DnsWorkers', sb.count('D'))) ret.append(('ClosingWorkers', sb.count('C'))) ret.append(('LoggingWorkers', sb.count('L'))) 
ret.append(('FinishingWorkers', sb.count('G'))) ret.append(('CleanupWorkers', sb.count('I'))) return ret for line in data.split('\n'): if line: m = exp.match(line) if m: k = m.group(1) v = m.group(2) # Ignore the following values if k == 'IdleWorkers' or k == 'Server Built' or k == 'Server Built' \ or k == 'CurrentTime' or k == 'RestartTime' or k == 'ServerUptime' \ or k == 'CPULoad' or k == 'CPUUser' or k == 'CPUSystem' \ or k == 'CPUChildrenUser' or k == 'CPUChildrenSystem' \ or k == 'ReqPerSec': continue if k == 'Total Accesses': results['requests_per_second'] = self.absolute_to_per_second(k, int(v), prev_cache) next_cache['Total Accesses'] = int(v) if k == 'Scoreboard': for sb_kv in parse_score_board(v): results[sb_kv[0]] = sb_kv[1] else: results[k] = v self.set_agent_cache(next_cache) return results if __name__ == '__main__': Plugin().execute() nixstatsagent-1.1.32/nixstatsagent/plugins/iostat.py000077500000000000000000000213351315571176700227500ustar00rootroot00000000000000#!/usr/bin/env python # -*- coding: utf-8 -*- # # collectd-iostat-python # ====================== # # Collectd-iostat-python is an iostat plugin for collectd that allows you to # graph Linux iostat metrics in Graphite or other output formats that are # supported by collectd. 
# # https://github.com/powdahound/redis-collectd-plugin # - was used as template # https://github.com/keirans/collectd-iostat/ # - was used as inspiration and contains some code from # https://bitbucket.org/jakamkon/python-iostat # - by Kuba Kończyk # import os import signal import subprocess import sys import psutil import plugins __version__ = '0.0.3' __author__ = 'denis.zhdanov@gmail.com' class IOStatError(Exception): pass class CmdError(IOStatError): pass class ParseError(IOStatError): pass class IOStat(object): def __init__(self, path='iostat', interval=2, count=2, disks=[]): self.path = path self.interval = interval self.count = count self.disks = disks def parse_diskstats(self, input): ''' Parse iostat -d and -dx output.If there are more than one series of statistics, get the last one. By default parse statistics for all avaliable block devices. @type input: C{string} @param input: iostat output @type disks: list of C{string}s @param input: lists of block devices that statistics are taken for. @return: C{dictionary} contains per block device statistics. Statistics are in form of C{dictonary}. Main statistics: tps Blk_read/s Blk_wrtn/s Blk_read Blk_wrtn Extended staistics (available with post 2.5 kernels): rrqm/s wrqm/s r/s w/s rsec/s wsec/s rkB/s wkB/s avgrq-sz \ avgqu-sz await svctm %util See I{man iostat} for more details. ''' dstats = {} dsi = input.rfind('Device:') if dsi == -1: raise ParseError('Unknown input format: %r' % input) ds = input[dsi:].splitlines() hdr = ds.pop(0).split()[1:] for d in ds: if d: d = d.split() dev = d.pop(0) if (dev in self.disks) or not self.disks: dstats[dev] = dict([(k, float(v)) for k, v in zip(hdr, d)]) return dstats def sum_dstats(self, stats, smetrics): ''' Compute the summary statistics for chosen metrics. 
''' avg = {} for disk, metrics in stats.iteritems(): for mname, metric in metrics.iteritems(): if mname not in smetrics: continue if mname in avg: avg[mname] += metric else: avg[mname] = metric return avg def _run(self, options=None): ''' Run iostat command. ''' close_fds = 'posix' in sys.builtin_module_names args = '%s %s %s %s %s' % ( self.path, ''.join(options), self.interval, self.count, ' '.join(self.disks), ) return subprocess.Popen( args, bufsize=1, shell=True, stdout=subprocess.PIPE, close_fds=close_fds) @staticmethod def _get_childs_data(child): ''' Return child's data when avaliable. ''' (stdout, stderr) = child.communicate() ecode = child.poll() if ecode != 0: raise CmdError('Command %r returned %d' % (child.cmd, ecode)) return stdout def get_diskstats(self): ''' Get all avaliable disks statistics that we can get. ''' dstats = self._run(options=['-kNd']) extdstats = self._run(options=['-kNdx']) dsd = self._get_childs_data(dstats) edd = self._get_childs_data(extdstats) ds = self.parse_diskstats(dsd) eds = self.parse_diskstats(edd) for dk, dv in ds.iteritems(): if dk in eds: ds[dk].update(eds[dk]) return ds class IOMon(object): def __init__(self): self.plugin_name = 'collectd-iostat-python' self.iostat_path = '/usr/bin/iostat' self.iostat_interval = 2 self.iostat_count = 2 self.iostat_disks = [] self.iostat_nice_names = False self.iostat_disks_regex = '' self.verbose_logging = False self.names = { 'tps': {'t': 'transfers_per_second'}, 'Blk_read/s': {'t': 'blocks_per_second', 'ti': 'read'}, 'kB_read/s': {'t': 'bytes_per_second', 'ti': 'read', 'm': 10e3}, 'MB_read/s': {'t': 'bytes_per_second', 'ti': 'read', 'm': 10e6}, 'Blk_wrtn/s': {'t': 'blocks_per_second', 'ti': 'write'}, 'kB_wrtn/s': {'t': 'bytes_per_second', 'ti': 'write', 'm': 10e3}, 'MB_wrtn/s': {'t': 'bytes_per_second', 'ti': 'write', 'm': 10e6}, 'Blk_read': {'t': 'blocks', 'ti': 'read'}, 'kB_read': {'t': 'bytes', 'ti': 'read', 'm': 10e3}, 'MB_read': {'t': 'bytes', 'ti': 'read', 'm': 10e6}, 
'Blk_wrtn': {'t': 'blocks', 'ti': 'write'}, 'kB_wrtn': {'t': 'bytes', 'ti': 'write', 'm': 10e3}, 'MB_wrtn': {'t': 'bytes', 'ti': 'write', 'm': 10e6}, 'rrqm/s': {'t': 'requests_merged_per_second', 'ti': 'read'}, 'wrqm/s': {'t': 'requests_merged_per_second', 'ti': 'write'}, 'r/s': {'t': 'per_second', 'ti': 'read'}, 'w/s': {'t': 'per_second', 'ti': 'write'}, 'rsec/s': {'t': 'sectors_per_second', 'ti': 'read'}, 'rkB/s': {'t': 'bytes_per_second', 'ti': 'read', 'm': 10e3}, 'rMB/s': {'t': 'bytes_per_second', 'ti': 'read', 'm': 10e6}, 'wsec/s': {'t': 'sectors_per_second', 'ti': 'write'}, 'wkB/s': {'t': 'bytes_per_second', 'ti': 'write', 'm': 10e3}, 'wMB/s': {'t': 'bytes_per_second', 'ti': 'write', 'm': 10e6}, 'avgrq-sz': {'t': 'avg_request_size'}, 'avgqu-sz': {'t': 'avg_request_queue'}, 'await': {'t': 'avg_wait_time'}, 'r_await': {'t': 'avg_wait_time', 'ti': 'read'}, 'w_await': {'t': 'avg_wait_time', 'ti': 'write'}, 'svctm': {'t': 'avg_service_time'}, '%util': {'t': 'percent', 'ti': 'util'} } def restore_sigchld(): ''' Restore SIGCHLD handler for python <= v2.6 It will BREAK exec plugin!!! 
See https://github.com/deniszh/collectd-iostat-python/issues/2 for details ''' if sys.version_info[0] == 2 and sys.version_info[1] <= 6: signal.signal(signal.SIGCHLD, signal.SIG_DFL) def diskstats_parse(dev=None): file_path = '/proc/diskstats' result = {} # ref: http://lxr.osuosl.org/source/Documentation/iostats.txt columns_disk = ['m', 'mm', 'dev', 'reads', 'rd_mrg', 'rd_sectors', 'ms_reading', 'writes', 'wr_mrg', 'wr_sectors', 'ms_writing', 'cur_ios', 'ms_doing_io', 'ms_weighted'] columns_partition = ['m', 'mm', 'dev', 'reads', 'rd_sectors', 'writes', 'wr_sectors'] lines = open(file_path, 'r').readlines() for line in lines: if line == '': continue split = line.split() if len(split) == len(columns_disk): columns = columns_disk elif len(split) == len(columns_partition): columns = columns_partition else: # No match continue data = dict(zip(columns, split)) if data['dev'][-1:].isdigit() is True: continue if "loop" in data['dev'] or "ram" in data['dev']: continue if dev is not None and dev != data['dev']: continue for key in data: if key != 'dev': data[key] = int(data[key]) result[data['dev']] = data return result class Plugin(plugins.BasePlugin): __name__ = 'iostat' def run(self, *unused): if(os.path.isfile("/proc/diskstats")): return diskstats_parse() elif(os.path.isfile("/usr/bin/iostat")): iostat = IOStat() ds = iostat.get_diskstats() if ds: return ds else: results = {} try: diskdata = psutil.disk_io_counters(perdisk=True) for device, values in diskdata.items(): device_stats = {} for key_value in values._fields: device_stats[key_value] = getattr(values, key_value) results[device] = device_stats except Exception as e: results = e.message return results if __name__ == '__main__': Plugin().execute() nixstatsagent-1.1.32/nixstatsagent/plugins/litespeed.py000066400000000000000000000063171315571176700234230ustar00rootroot00000000000000#!/usr/bin/env python # -*- coding: utf-8 -*- import plugins import os import time import re import urllib2 import base64 class 
# --- nixstatsagent/plugins/litespeed.py ---
#!/usr/bin/env python
# -*- coding: utf-8 -*-

import plugins
import os
import time
import re
import urllib2
import base64


class Plugin(plugins.BasePlugin):
    __name__ = 'litespeed'

    '''
    Litespeed monitoring plugin.
    Add the following section to /etc/nixstats.ini

    [litespeed]
    enabled=yes
    host=localhost
    port=7080
    username=admin
    password=pass
    '''

    def run(self, config):
        # Absolute per-vhost counters parsed from the report
        result = {}
        # Derived per-vhost metrics returned to the agent
        results = {}
        data = False
        prev_cache = self.get_agent_cache()  # Get absolute values from previous check

        # Fetch the summary report from the LiteSpeed admin interface,
        # authenticating with HTTP Basic credentials from the ini file.
        request = urllib2.Request(
            "http://%s:%s/status?rpt=summary"
            % (config.get('litespeed', 'host'),
               config.get('litespeed', 'port')))
        base64string = base64.b64encode(
            '%s:%s' % (config.get('litespeed', 'username'),
                       config.get('litespeed', 'password')))
        request.add_header("Authorization", "Basic %s" % base64string)
        response = urllib2.urlopen(request).read()

        for line in response.split('\n'):
            match = re.search('REQ_RATE \[(.*)\]', line)
            if match is not None and match.group(1):
                data = True
                vhost = match.group(1)
                if vhost not in result:
                    result[vhost] = {}
                # Strip the "REQ_RATE [...]" prefix, then split the rest
                # into "KEY: VALUE" pairs and accumulate them per vhost.
                parts = line.replace('\n', '').replace(match.group(0), '').split(', ')
                for part in parts:
                    keyval = part.strip(':').strip().split(':')
                    try:
                        result[vhost][keyval[0]] += float(keyval[1])
                    except KeyError:
                        result[vhost][keyval[0]] = float(keyval[1])

        # Gauge-style metrics that are reported as-is
        metrics = (
            'SSL_BPS_IN', 'BPS_OUT', 'MAXSSL_CONN', 'PLAINCONN',
            'BPS_IN', 'SSLCONN', 'AVAILSSL', 'IDLECONN',
            'SSL_BPS_OUT', 'AVAILCONN', 'MAXCONN', 'REQ_PROCESSING')

        if data is True:
            for vhost, statistics in result.items():
                try:
                    # Share the global cache timestamp with this vhost entry
                    prev_cache[vhost]['ts'] = prev_cache['ts']
                except KeyError:
                    prev_cache[vhost] = {}
                results[vhost] = {}
                for key, value in statistics.items():
                    # Cumulative hit counters become per-second rates
                    if key == 'TOT_REQS':
                        results[vhost]['RPS'] = self.absolute_to_per_second(key, value, prev_cache[vhost])
                    if key == 'TOTAL_STATIC_HITS':
                        results[vhost]['STATIC_RPS'] = self.absolute_to_per_second(key, value, prev_cache[vhost])
                    if key == 'TOTAL_PUB_CACHE_HITS':
                        results[vhost]['PUB_CACHE_RPS'] = self.absolute_to_per_second(key, value, prev_cache[vhost])
                    if key == 'TOTAL_PRIVATE_CACHE_HITS':
                        results[vhost]['PRIVATE_CACHE_RPS'] = self.absolute_to_per_second(key, value, prev_cache[vhost])
                    if key in metrics:
                        results[vhost][key] = value
        # NOTE(review): indentation in the flattened source is ambiguous here;
        # unconditional caching matches the statement order seen — confirm.
        result['ts'] = time.time()
        self.set_agent_cache(result)
        return results


if __name__ == '__main__':
    Plugin().execute()


# --- nixstatsagent/plugins/loadavg.py ---
#!/usr/bin/env python
# -*- coding: utf-8 -*-

import os

import plugins


class Plugin(plugins.BasePlugin):
    __name__ = 'loadavg'

    def run(self, *unused):
        # (1, 5, 15)-minute load average triple straight from the OS
        return os.getloadavg()


if __name__ == '__main__':
    Plugin().execute()


# --- nixstatsagent/plugins/memory.py ---
#!/usr/bin/env python
# -*- coding: utf-8 -*-

import psutil

import plugins


class Plugin(plugins.BasePlugin):
    __name__ = 'memory'

    def run(self, *unused):
        # Expose every field of psutil.virtual_memory() as a plain dict
        stats = psutil.virtual_memory()
        memory = {}
        for field in stats._fields:
            memory[field] = getattr(stats, field)
        return memory


if __name__ == '__main__':
    Plugin().execute()
# --- nixstatsagent/plugins/mongodb.py ---
#!/usr/bin/env python

import urllib2
import time
import plugins
from pymongo import MongoClient


class Plugin(plugins.BasePlugin):
    __name__ = 'mongodb'

    def run(self, config):
        '''
        Mongodb monitoring.

        Reports connection, opcounter, opLatencies and assert statistics
        from the serverStatus admin command.  Cumulative counters are
        converted to per-second rates via the agent cache; replication
        info is included when the server is a replica-set member.
        '''
        client = MongoClient(config.get('mongodb', 'connection_string'))
        db = client.admin
        statistics = db.command("serverStatus")
        prev_cache = self.get_agent_cache()  # Get absolute values from previous check
        data = {}          # absolute counters, cached for the next run
        results = dict()   # values reported to the agent
        data['connections.totalCreated'] = statistics['connections']['totalCreated']
        results['connections.available'] = statistics['connections']['available']
        results['connections.current'] = statistics['connections']['current']
        data['opcounters.command'] = statistics['opcounters']['command']
        data['opcounters.delete'] = statistics['opcounters']['delete']
        data['opcounters.getmore'] = statistics['opcounters']['getmore']
        data['opcounters.insert'] = statistics['opcounters']['insert']
        data['opcounters.query'] = statistics['opcounters']['query']
        data['opcounters.update'] = statistics['opcounters']['update']
        data['opLatencies.commands.latency'] = statistics['opLatencies']['commands']['latency']
        data['opLatencies.commands.ops'] = statistics['opLatencies']['commands']['ops']
        data['opLatencies.reads.latency'] = statistics['opLatencies']['reads']['latency']
        data['opLatencies.reads.ops'] = statistics['opLatencies']['reads']['ops']
        data['opLatencies.writes.latency'] = statistics['opLatencies']['writes']['latency']
        data['opLatencies.writes.ops'] = statistics['opLatencies']['writes']['ops']
        data['asserts.msg'] = statistics['asserts']['msg']
        data['asserts.regular'] = statistics['asserts']['regular']
        data['asserts.rollovers'] = statistics['asserts']['rollovers']
        data['asserts.user'] = statistics['asserts']['user']
        data['asserts.warning'] = statistics['asserts']['warning']
        try:
            # FIX: the original assigned into results['repl']['hosts'] without
            # ever creating results['repl'], so the guaranteed KeyError was
            # swallowed by the handler below and replication metrics were
            # silently dropped even on replica-set members.  Build the
            # sub-dict first, then attach it.
            repl = {}
            repl['hosts'] = statistics['repl']['hosts']
            repl['isMaster'] = statistics['repl']['isMaster']
            repl['secondary'] = statistics['repl']['secondary']
            repl['setName'] = statistics['repl']['setName']
            results['repl'] = repl
            data['opcountersRepl.command'] = statistics['opcountersRepl']['command']
            data['opcountersRepl.delete'] = statistics['opcountersRepl']['delete']
            data['opcountersRepl.getmore'] = statistics['opcountersRepl']['getmore']
            data['opcountersRepl.insert'] = statistics['opcountersRepl']['insert']
            data['opcountersRepl.query'] = statistics['opcountersRepl']['query']
            data['opcountersRepl.update'] = statistics['opcountersRepl']['update']
        except KeyError:
            # Not a replica-set member: serverStatus has no 'repl' section
            pass
        for key, val in data.items():
            results[key] = self.absolute_to_per_second(key, val, prev_cache)
        try:
            # Average latency per operation over the sampling interval
            results['opLatencies.commands'] = results['opLatencies.commands.latency'] / results['opLatencies.commands.ops']
            results['opLatencies.writes'] = results['opLatencies.writes.latency'] / results['opLatencies.writes.ops']
            results['opLatencies.reads'] = results['opLatencies.reads.latency'] / results['opLatencies.reads.ops']
        except Exception:
            # FIX: narrowed from a bare "except:"; ZeroDivisionError is
            # expected on an idle server and must not abort the run.
            pass
        next_cache = data
        next_cache['ts'] = time.time()
        self.set_agent_cache(next_cache)
        results['mem'] = statistics['mem']
        return results


if __name__ == '__main__':
    Plugin().execute()
# --- nixstatsagent/plugins/mysql.py ---
#!/usr/bin/env python
# -*- coding: utf-8 -*-

import time

import MySQLdb

import plugins


class Plugin(plugins.BasePlugin):
    __name__ = 'mysql'

    def run(self, config):
        '''
        MySQL metrics plugin.

        Reads SHOW GLOBAL STATUS; gauges (non_delta) are reported as-is,
        cumulative counters (delta_keys) become per-second rates using
        the agent cache.  Connection settings come from the [mysql]
        section of the ini file, with sensible fallbacks.
        '''
        prev_cache = self.get_agent_cache()  # Get absolute values from previous check
        auth = {}
        # FIX: the bare "except:" clauses below were narrowed to
        # "except Exception:" so missing ini options no longer swallow
        # SystemExit/KeyboardInterrupt on their way to the fallback.
        try:
            auth['port'] = int(config.get('mysql', 'port'))
        except Exception:
            auth['port'] = 3306
        try:
            auth['user'] = config.get('mysql', 'username')
        except Exception:
            auth['user'] = 'root'
        try:
            auth['passwd'] = config.get('mysql', 'password')
        except Exception:
            auth['passwd'] = ''
        try:
            auth['host'] = config.get('mysql', 'host')
        except Exception:
            # No host configured: fall back to a local UNIX socket
            auth['unix_socket'] = config.get('mysql', 'socket')
        try:
            auth['db'] = config.get('mysql', 'database')
        except Exception:
            auth['db'] = 'mysql'
        db = MySQLdb.connect(**auth)
        cursor = db.cursor()
        cursor.execute("SHOW GLOBAL STATUS;")
        query_result = cursor.fetchall()
        # Gauges reported verbatim (checked before delta_keys, so these
        # names win even though some also appear there)
        non_delta = (
            'max_used_connections',
            'open_files',
            'open_tables',
            'qcache_free_blocks',
            'qcache_free_memory',
            'qcache_total_blocks',
            'slave_open_temp_tables',
            'threads_cached',
            'threads_connected',
            'threads_running',
            'uptime')
        # Cumulative counters converted to per-second rates
        delta_keys = (
            'aborted_clients',
            'aborted_connects',
            'binlog_cache_disk_use',
            'binlog_cache_use',
            'bytes_received',
            'bytes_sent',
            'com_delete',
            'com_delete_multi',
            'com_insert',
            'com_insert_select',
            'com_load',
            'com_replace',
            'com_replace_select',
            'com_select',
            'com_update',
            'com_update_multi',
            'connections',
            'created_tmp_disk_tables',
            'created_tmp_files',
            'created_tmp_tables',
            'key_reads',
            'key_read_requests',
            'key_writes',
            'key_write_requests',
            'max_used_connections',
            'open_files',
            'open_tables',
            'opened_tables',
            'qcache_free_blocks',
            'qcache_free_memory',
            'qcache_hits',
            'qcache_inserts',
            'qcache_lowmem_prunes',
            'qcache_not_cached',
            'qcache_queries_in_cache',
            'qcache_total_blocks',
            'questions',
            'select_full_join',
            'select_full_range_join',
            'select_range',
            'select_range_check',
            'select_scan',
            'slave_open_temp_tables',
            'slave_retried_transactions',
            'slow_launch_threads',
            'slow_queries',
            'sort_range',
            'sort_rows',
            'sort_scan',
            'table_locks_immediate',
            'table_locks_waited',
            'threads_cached',
            'threads_connected',
            'threads_created',
            'threads_running')
        results = {}
        data = {}
        for key, value in query_result:
            key = key.lower()
            if key in non_delta:
                results[key] = value
            elif key in delta_keys:
                results[key] = self.absolute_to_per_second(key, float(value), prev_cache)
                data[key] = float(value)
            else:
                continue
        db.close()
        data['ts'] = time.time()
        self.set_agent_cache(data)
        return results


if __name__ == '__main__':
    Plugin().execute()


# --- nixstatsagent/plugins/network.py ---
#!/usr/bin/env python
# -*- coding: utf-8 -*-

import psutil

import plugins


class Plugin(plugins.BasePlugin):
    __name__ = 'network'

    def run(self, *unused):
        # Per-interface I/O counters straight from psutil
        return psutil.net_io_counters(pernic=True)


if __name__ == '__main__':
    Plugin().execute()


# --- nixstatsagent/plugins/nginx.py ---
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# import psutil
import urllib2
import time

import plugins


class Plugin(plugins.BasePlugin):
    __name__ = 'nginx'

    def run(self, config):
        '''
        Provides the following metrics (example):
            "accepts": 588462,
            "accepts_per_second": 0.0,
            "active_connections": 192,
            "handled": 588462,
            "handled_per_second": 0.0,
            "reading": 0,
            "requests": 9637106,
            "requests_per_second": 0.0,
            "waiting": 189,
            "writing": 3
        requests, accepts, handled are values since the start of nginx.
        *_per_second values calculated from them using cached values from
        previous call.
        '''
        try:
            results = dict()
            next_cache = dict()
            request = urllib2.Request(config.get('nginx', 'status_page_url'))
            raw_response = urllib2.urlopen(request)
            next_cache['ts'] = time.time()
            prev_cache = self.get_agent_cache()  # Get absolute values from previous check
            response = raw_response.readlines()
            # Active connections: N
            # active_connections = response[0].split(':')[1].strip()
            active_connections = response[0].split()[-1]
            results['active_connections'] = int(active_connections)
            # server accepts handled requests
            keys = response[1].split()[1:]
            values = response[2].split()
            for key, value in zip(keys, values):
                next_cache[key] = int(value)
                results[key] = next_cache[key]  # Keep absolute values in results
                try:
                    if next_cache[key] >= prev_cache[key]:
                        results['%s_per_second' % key] = \
                            (next_cache[key] - prev_cache[key]) / \
                            (next_cache['ts'] - prev_cache['ts'])
                    else:
                        # Nginx was restarted after previous caching
                        results['%s_per_second' % key] = \
                            next_cache[key] / \
                            (next_cache['ts'] - prev_cache['ts'])
                except KeyError:
                    # No cache yet, can't calculate
                    results['%s_per_second' % key] = 0.0
            # Reading: X Writing: Y Waiting: Z
            keys = response[3].split()[0::2]
            keys = [entry.strip(':').lower() for entry in keys]
            values = response[3].split()[1::2]
            for key, value in zip(keys, values):
                results[key] = int(value)
            # Cache absolute values for next check calculations
            self.set_agent_cache(next_cache)
            return results
        except Exception:
            return False


if __name__ == '__main__':
    Plugin().execute()
# --- nixstatsagent/plugins/phpfpm.py ---
#!/usr/bin/env python
# -*- coding: utf-8 -*-

import urllib2
import time
import plugins
import json


class Plugin(plugins.BasePlugin):
    __name__ = 'phpfpm'

    def run(self, config):
        '''
        php-fpm status page metrics
        '''
        def ascii_encode_dict(data):
            # JSON decodes to unicode on Python 2; force plain ascii keys/values
            ascii_encode = lambda x: x.encode('ascii') if isinstance(x, unicode) else x
            return dict(map(ascii_encode, pair) for pair in data.items())

        results = dict()
        next_cache = dict()
        request = urllib2.Request(config.get('phpfpm', 'status_page_url'))
        raw_response = urllib2.urlopen(request)
        next_cache['ts'] = time.time()
        prev_cache = self.get_agent_cache()  # Get absolute values from previous check
        try:
            decoded = json.loads(raw_response.read(), object_hook=ascii_encode_dict)
            for key, value in decoded.items():
                results[key.replace(" ", "_")] = value
            next_cache['accepted_conn'] = int(results['accepted_conn'])
        except Exception:
            return False
        try:
            if next_cache['accepted_conn'] >= prev_cache['accepted_conn']:
                results['accepted_conn_per_second'] = \
                    (next_cache['accepted_conn'] - prev_cache['accepted_conn']) / \
                    (next_cache['ts'] - prev_cache['ts'])
            else:
                # Was restarted after previous caching
                results['accepted_conn_per_second'] = \
                    next_cache['accepted_conn'] / \
                    (next_cache['ts'] - prev_cache['ts'])
        except KeyError:
            # No cache yet, can't calculate
            results['accepted_conn_per_second'] = 0.0
        # Cache absolute values for next check calculations
        self.set_agent_cache(next_cache)
        return results


if __name__ == '__main__':
    Plugin().execute()


# --- nixstatsagent/plugins/ping.py ---
#!/usr/bin/env python
# -*- coding: utf-8 -*-

import re
from subprocess import Popen, PIPE, CalledProcessError
import sys

import plugins


def _get_match_groups(ping_output, regex):
    '''Return regex groups from ping_output, or False when no match.'''
    match = regex.search(ping_output)
    if not match:
        return False
    return match.groups()


def system_command(Command, newlines=True):
    '''Run Command, returning (stdout_lines_or_bytes, stderr_lines).'''
    Output = ""
    Error = ""
    try:
        proc = Popen(Command.split(), stdout=PIPE)
        Output = proc.communicate()[0]
    except Exception:
        pass
    if Output:
        if newlines is True:
            # NOTE(review): splits on the literal two characters "\n", not a
            # newline; every visible caller passes newlines=False so this
            # branch looks unexercised — confirm before relying on it.
            Stdout = Output.split("\\n")
        else:
            Stdout = Output
    else:
        Stdout = []
    if Error:
        Stderr = Error.split("\n")
    else:
        Stderr = []
    return (Stdout, Stderr)


def collect_ping(hostname):
    '''
    Ping hostname once and return {'avgping': avg_ms, 'host': hostname};
    avgping is -1 on any failure.
    '''
    if sys.platform.startswith('linux') or sys.platform.startswith('freebsd'):
        # if sys.platform == "linux" or sys.platform == "linux2":
        response = str(system_command("ping -W 5 -c 1 " + hostname, False)[0])
        try:
            matcher = re.compile(r'(\d+.\d+)/(\d+.\d+)/(\d+.\d+)/(\d+.\d+)')
            minping, avgping, maxping, jitter = _get_match_groups(response, matcher)
            response = avgping
        except Exception:
            # response = 9999
            response = -1
    elif sys.platform == "darwin":
        response = str(system_command("ping -c 1 " + hostname, False)[0])
        # matcher = re.compile(r'min/avg/max/stddev = (\d+)/(\d+)/(\d+)/(\d+) ms')
        # min, avg, max, stddev = _get_match_groups(response, matcher)
        matcher = re.compile(r'(\d+.\d+)/(\d+.\d+)/(\d+.\d+)/(\d+.\d+)')
        matched = _get_match_groups(response, matcher)
        if matched is False:
            # response = 0
            response = -1
        else:
            minping, avgping, maxping, jitter = matched
            response = avgping
    elif sys.platform == "win32":
        # response = 0
        response = -1
        try:
            ping = Popen(["ping", "-n", "1 ", hostname], stdout=PIPE, stderr=PIPE)
            out, error = ping.communicate()
            if out:
                try:
                    response = int(re.findall(r"Average = (\d+)", out)[0])
                except Exception:
                    pass
            else:
                # response = 0
                response = -1
        except CalledProcessError:
            pass
    else:
        # response = float(system_command("ping -W -c 1 " + hostname))
        response = -1
    return {'avgping': response, 'host': hostname}


class Plugin(plugins.BasePlugin):
    __name__ = 'ping'

    def run(self, config):
        # Ping every comma-separated host from the [ping] section
        data = {}
        my_hosts = config.get('ping', 'hosts').split(',')
        data['ping'] = []
        for host in my_hosts:
            data['ping'].append(collect_ping(host))
        return data['ping']


if __name__ == '__main__':
    Plugin().execute()
# --- nixstatsagent/plugins/plugins.py ---
#!/usr/bin/env python
# -*- coding: utf-8 -*-

# FIX: import the config parser so it works on both Python 2
# (ConfigParser) and Python 3 (configparser); it is only used inside
# execute(), never at class-definition time.
try:
    import ConfigParser
except ImportError:
    import configparser as ConfigParser
import pickle
import sys
import time
# import os


class BasePlugin:
    '''
    Abstract class for plugins
    '''

    __name__ = ''

    def __init__(self, agent_cache=None):
        '''
        agent_cache: list shared with the agent; element 0 holds this
        plugin's cached state between runs.  Raises TypeError when
        anything other than a list is passed.
        '''
        # FIX: the default used to be the mutable literal [], which Python
        # creates once, so every instance constructed without an explicit
        # cache silently shared one list and leaked cached state across
        # unrelated plugin instances.  Create a fresh list per instance.
        if agent_cache is None:
            agent_cache = []
        if isinstance(agent_cache, list):
            self.agent_cache = agent_cache
        else:
            raise TypeError('Type of agent_cache have to be list')
        # if not self.__name__:
        #     self.__name__ = os.path.splitext(os.path.basename(__file__))[0]

    def run(self, config=None):
        '''
        Virtual method for running the plugin
        '''
        pass

    def execute(self):
        '''
        Execution wrapper for the plugin
        argv[1]: ini_file
        '''
        config = None
        if len(sys.argv) > 1:
            config = ConfigParser.RawConfigParser()
            config.read(sys.argv[1])
        pickle.dump(self.run(config), sys.stdout)

    def get_agent_cache(self):
        '''
        Return agent cached value for this specific plugin.
        Falls back to an empty dict when nothing has been cached yet.
        '''
        try:
            return self.agent_cache[0]
        except Exception:
            return {}

    def set_agent_cache(self, cache):
        '''
        Set agent cache value previously passed to this plugin instance.
        To enable caching existing agent_cache list have to be passed to
        Plugin on initialization.  Minimally it should be list().
        Agent will be able to see only changes in zero element of
        agent_cache, so do not manually override self.agent_cache,
        othervice cache will not be saved!
        If self.agent_cache is not a list appropriate exception will be
        raised.
        '''
        try:
            self.agent_cache[0] = cache
        except IndexError:
            self.agent_cache.append(cache)

    def absolute_to_per_second(self, key, val, prev_cache):
        '''
        Convert the absolute counter val into a per-second rate using the
        previous cached value prev_cache[key] and its 'ts' timestamp.
        Returns 0 when no usable cache exists yet.
        '''
        try:
            if val >= prev_cache[key]:
                value = \
                    (val - prev_cache[key]) / \
                    (time.time() - prev_cache['ts'])
            else:
                # previous cached value should not be higher than current
                # value (service was restarted?)
                value = val / \
                    (time.time() - prev_cache['ts'])
        except Exception:
            # No cache yet, can't calculate
            value = 0
        return value
# --- nixstatsagent/plugins/process.py ---
#!/usr/bin/env python
# -*- coding: utf-8 -*-

import psutil

import plugins


class Plugin(plugins.BasePlugin):
    __name__ = 'process'

    def run(self, *unused):
        # One attribute dict per live process; vanished processes are skipped
        process = []
        for proc in psutil.process_iter():
            try:
                pinfo = proc.as_dict(attrs=[
                    'pid', 'name', 'ppid', 'exe', 'cmdline', 'username',
                    'cpu_percent', 'memory_percent', 'io_counters'
                ])
            except psutil.NoSuchProcess:
                # Process terminated between listing and inspection
                pass
            else:
                process.append(pinfo)
        return process


if __name__ == '__main__':
    Plugin().execute()


# --- nixstatsagent/plugins/sleeper.py ---
#!/usr/bin/env python
# -*- coding: utf-8 -*-

import time

import plugins


class Plugin(plugins.BasePlugin):
    __name__ = 'sleeper'

    def run(self, *unused):
        # Block for 24 hours
        time.sleep(60 * 60 * 24)


if __name__ == '__main__':
    # NOTE(review): calls run() directly instead of execute() like the
    # other plugins — kept as-is, but confirm it is intentional.
    Plugin().run()


# --- nixstatsagent/plugins/swap.py ---
#!/usr/bin/env python
# -*- coding: utf-8 -*-

import psutil

import plugins


class Plugin(plugins.BasePlugin):
    __name__ = 'swap'

    def run(self, *unused):
        # Expose every field of psutil.swap_memory() as a plain dict
        stats = psutil.swap_memory()
        swap = {}
        for field in stats._fields:
            swap[field] = getattr(stats, field)
        return swap


if __name__ == '__main__':
    Plugin().execute()
# --- nixstatsagent/plugins/system.py ---
#!/usr/bin/env python
# -*- coding: utf-8 -*-

import netifaces
import os
import platform
from subprocess import Popen, PIPE
import sys
import time

import psutil

import plugins


def systemCommand(Command, newlines=True):
    '''
    Run Command via Popen; return (Stdout, Stderr).
    Stdout is a list of lines when newlines is True, otherwise the raw
    output; NOTE(review): Error is never populated (stderr is not
    captured), so Stderr is always [] — kept for interface compatibility.
    '''
    Output = ""
    Error = ""
    try:
        # Output = subprocess.check_output(Command, stderr = subprocess.STDOUT, shell='True')
        proc = Popen(Command.split(), stdout=PIPE)
        Output = proc.communicate()[0]
    except Exception:
        pass
    if Output:
        if newlines is True:
            Stdout = Output.split("\n")
        else:
            Stdout = Output
    else:
        Stdout = []
    if Error:
        Stderr = Error.split("\n")
    else:
        Stderr = []
    return (Stdout, Stderr)


def ip_addresses():
    '''Collect configured IPv4/IPv6 addresses per interface via netifaces.'''
    ip_list = {}
    ip_list['v4'] = {}
    ip_list['v6'] = {}
    for interface in netifaces.interfaces():
        link = netifaces.ifaddresses(interface)
        if netifaces.AF_INET in link:
            if interface not in ip_list['v4']:
                ip_list['v4'][interface] = []
            ip_list['v4'][interface].append(link[netifaces.AF_INET])
        if netifaces.AF_INET6 in link:
            if interface not in ip_list['v6']:
                ip_list['v6'][interface] = []
            ip_list['v6'][interface].append(link[netifaces.AF_INET6])
    return ip_list


class Plugin(plugins.BasePlugin):
    __name__ = 'system'

    def run(self, *unused):
        '''
        Gather static host information: OS, CPU, memory total, uptime
        and IP addresses.
        '''
        systeminfo = {}
        cpu = {}
        if os.path.isfile("/proc/cpuinfo"):
            # FIX: the /proc/cpuinfo handle was never closed; try/finally
            # keeps the project's Python 2.4 floor (no "with" statement).
            # The old "if f:" guard was dropped: open() either raises or
            # returns a truthy file object, so it was always taken.
            f = open('/proc/cpuinfo')
            try:
                for line in f:
                    # Ignore the blank line separating the information between
                    # details about two processing units
                    if line.strip():
                        if "model name" == line.rstrip('\n').split(':')[0].strip():
                            cpu['brand'] = line.rstrip('\n').split(':')[1].strip()
                        if "Processor" == line.rstrip('\n').split(':')[0].strip():
                            cpu['brand'] = line.rstrip('\n').split(':')[1].strip()
                        if "processor" == line.rstrip('\n').split(':')[0].strip():
                            # NOTE(review): ends up as the LAST processor
                            # index as a string (e.g. '3' on 4 cores) —
                            # preserved as upstream behaviour; confirm.
                            cpu['count'] = line.rstrip('\n').split(':')[1].strip()
            finally:
                f.close()
        else:
            cpu['brand'] = "Unknown CPU"
            cpu['count'] = 0
        mem = psutil.virtual_memory()
        if sys.platform == "linux" or sys.platform == "linux2":
            systeminfo['os'] = str(' '.join(platform.linux_distribution()))
        elif sys.platform == "darwin":
            systeminfo['os'] = "Mac OS %s" % platform.mac_ver()[0]
            cpu['brand'] = str(systemCommand('sysctl machdep.cpu.brand_string', False)[0]).split(': ')[1]
            # NOTE(review): this stores the whole (Stdout, Stderr) tuple,
            # not a number — preserved as upstream behaviour; confirm.
            cpu['count'] = systemCommand('sysctl hw.ncpu')
        elif sys.platform == "freebsd10" or sys.platform == "freebsd11":
            systeminfo['os'] = "FreeBSD %s" % platform.release()
            cpu['brand'] = str(systemCommand('sysctl hw.model', False)[0]).split(': ')[1]
            cpu['count'] = systemCommand('sysctl hw.ncpu')
        elif sys.platform == "win32":
            systeminfo['os'] = "{} {}".format(platform.uname()[0], platform.uname()[2])
        systeminfo['cpu'] = cpu['brand']
        systeminfo['cores'] = cpu['count']
        systeminfo['memory'] = mem.total
        systeminfo['psutil'] = '.'.join(map(str, psutil.version_info))
        systeminfo['platform'] = platform.platform()
        systeminfo['uptime'] = int(time.time()-psutil.boot_time())
        systeminfo['ip_addresses'] = ip_addresses()
        return systeminfo


if __name__ == '__main__':
    Plugin().execute()
# --- setup.py ---
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# by Al Nikolov

from nixstatsagent.nixstatsagent import __version__
import os
import sys

import setuptools

here = os.path.abspath(os.path.dirname(__file__))
# NOTE(review): handle left unclosed on purpose — the declared Python 2.4
# floor predates the "with" statement; a one-shot setup script tolerates it.
readme = open(os.path.join(here, 'README.md')).read()

# Python 2.4 needs the last releases that still supported it
if sys.version.startswith('2.4'):
    install_requires = ['psutil==2.1.3', 'netifaces==0.8', 'simplejson==2.1.0']
else:
    install_requires = ['psutil', 'netifaces']

setuptools.setup(
    name='nixstatsagent',
    version=__version__,
    description='NixStats agent',
    long_description=readme,
    url='https://github.com/NIXStats/nixstatsagent',
    author='NIXStats',
    author_email='vincent@nixstats.com',
    maintainer='Al Nikolov',
    maintainer_email='root@toor.fi.eu.org',
    license='BSD-3-Clause',
    classifiers=[
        'Development Status :: 5 - Production/Stable',
        'Environment :: No Input/Output (Daemon)',
        'Intended Audience :: System Administrators',
        'License :: OSI Approved :: BSD License',
        'Natural Language :: English',
        'Operating System :: POSIX :: Linux',
        'Programming Language :: Python :: 2.4',
        'Programming Language :: Python :: 2.5',
        'Programming Language :: Python :: 2.6',
        'Programming Language :: Python :: 2.7',
        'Topic :: System :: Monitoring',
    ],
    keywords='nixstats system monitoring agent',
    install_requires=install_requires,
    packages=setuptools.find_packages(),
    entry_points={
        'console_scripts': [
            'nixstatsagent=nixstatsagent.nixstatsagent:main',
            'nixstatshello=nixstatsagent.nixstatsagent:hello',
        ],
    },
    data_files=[('share/doc/nixstatsagent', [
        'nixstats-example.ini',
        'LICENSE',
        'README.md',
    ])],
)