diff --git a/plugins/command/check_elasticsearch.py b/plugins/command/check_elasticsearch.py
deleted file mode 100755
index 6616b67..0000000
--- a/plugins/command/check_elasticsearch.py
+++ /dev/null
@@ -1,626 +0,0 @@
-#!/usr/bin/env python2
-
-# Forked from https://github.com/anchor/nagios-plugin-elasticsearch
-# commit c7ec4d76778c3ad0404d3af86c70ae3ddf7072cf
-
-from nagioscheck import NagiosCheck, UsageError
-from nagioscheck import PerformanceMetric, Status
-import urllib2
-
-try:
-    import json
-except ImportError:
-    import simplejson as json
-
-HEALTH = {'red':    0,
-          'yellow': 1,
-          'green':  2}
-
-RED = HEALTH['red']
-YELLOW = HEALTH['yellow']
-GREEN = HEALTH['green']
-
-HEALTH_MAP = {0: 'critical',
-              1: 'warning',
-              2: 'ok'}
-
-SHARD_STATE = {'UNASSIGNED':   1,
-               'INITIALIZING': 2,
-               'STARTED':      3,
-               'RELOCATING':   4}
-
-
-class ESShard(object):
-    def __init__(self, state):
-        self.state = state
-
-
-class ESIndex(object):
-    def __init__(self, name, n_shards, n_replicas):
-        self.name = name
-        self.n_shards = n_shards
-        self.n_replicas = n_replicas
-
-
-class ESNode(object):
-    def __init__(self, name=None, esid=None, attributes={}):
-        self.esid = esid
-        self.name = name
-        self.attributes = attributes
-
-
-class ElasticSearchCheck(NagiosCheck):
-    version = '0.2.0'
-
-    def __init__(self):
-        NagiosCheck.__init__(self)
-
-        self.health = HEALTH['green']
-
-        self.add_option('f', 'failure-domain', 'failure_domain', "A "
-                        "comma-separated list of ElasticSearch "
-                        "attributes that make up your cluster's "
-                        "failure domain. This should be the same list "
-                        "of attributes that ElasticSearch's location-"
-                        "aware shard allocator has been configured "
-                        "with. If this option is supplied, additional "
-                        "checks are carried out to ensure that primary "
-                        "and replica shards are not stored in the same "
-                        "failure domain.")
-
-        self.add_option('H', 'host', 'host', "Hostname or network "
-                        "address to probe. The ElasticSearch API "
-                        "should be listening here. Defaults to "
-                        "'localhost'.")
-
-        self.add_option('m', 'master-nodes', 'master_nodes', "Issue a "
-                        "warning if the number of master-eligible "
-                        "nodes in the cluster drops below this "
-                        "number. By default, do not monitor the "
-                        "number of nodes in the cluster.")
-
-        self.add_option('p', 'port', 'port', "TCP port to probe. "
-                        "The ElasticSearch API should be listening "
-                        "here. Defaults to 9200.")
-
-        self.add_option('r', 'routing_allocation', None,
-                        "Check if routing allocation is enabled.")
-
-    def check(self, opts, args):
-        host = opts.host or "localhost"
-        port = int(opts.port or '9200')
-
-        failure_domain = []
-        if (isinstance(opts.failure_domain, str) and
-                len(opts.failure_domain) > 0):
-            failure_domain.extend(opts.failure_domain.split(","))
-
-        if opts.master_nodes is not None:
-            try:
-                if int(opts.master_nodes) < 1:
-                    raise ValueError("'master_nodes' must be greater "
-                                     "than zero")
-            except ValueError:
-                raise UsageError("Argument to -m/--master-nodes must "
-                                 "be a natural number")
-
-        #
-        # Data retrieval
-        #
-
-        # Request "about" info, so we can figure out the ES version,
-        # to allow for version-specific API changes.
-        es_about = get_json(r'http://%s:%d/' % (host, port))
-        es_version = es_about['version']['number']
-
-        # Request cluster 'health'. /_cluster/health is like a tl;dr
-        # for /_cluster/state (see below). There is very little useful
-        # information here. We are primarily interested in ES' cluster
-        # 'health colour': a little rating ES gives itself to describe
-        # how much pain it is in.
-        es_health = get_json(r'http://%s:%d/_cluster/health' %
-                             (host, port))
-
-        their_health = HEALTH[es_health['status'].lower()]
-
-        # Request cluster 'state'. This be where all the meat at, yo.
-        # Here, we can see a list of all nodes, indexes, and shards in
-        # the cluster. This response will also contain a map detailing
-        # where all shards are living at this point in time.
-        es_state = get_json(r'http://%s:%d/_cluster/state' %
-                            (host, port))
-
-        if opts.r:
-            # Check for routing allocation being disabled by transient
-            # settings.
-            #
-            # Routing allocation will often be disabled manually during
-            # maintenance operations (eg, rebooting nodes). We want an
-            # alert when it's disabled, so that we don't forget to turn
-            # it back on again. Also, if it's disabled, there's little
-            # point running further checks because it implies known
-            # maintenance is in progress.
-            es_settings = get_json(r'http://%s:%d/_cluster/settings' %
-                                   (host, port))
-
-            try:
-                allocation = es_settings['transient']['cluster']['routing']['allocation']['enable']
-            except KeyError:
-                # We assume this means routing allocation is not set
-                # and everything is ok.
-                #
-                # This is an unsatisfying check, because it will
-                # default to being a silent pass if elasticsearch
-                # changes its API, and can also pass if allocation is
-                # disabled with some other deprecated settings (eg, by
-                # setting
-                # cluster.routing.allocation.disable_allocation)
-                #
-                # However, because the keys don't exist in the index
-                # settings API unless allocation has been disabled
-                # since the cluster started up, we can't require the
-                # keys to be present. In the absence of a way to make
-                # the default be "fail", this check that we've not
-                # accidentally left allocation disabled is better than
-                # nothing.
-                pass
-            else:
-                if allocation == 'none':
-                    raise Status('critical',
-                                 ("Routing allocation disabled for "
-                                  "cluster",))
-
-        # Request a bunch of useful numbers that we export as perfdata.
-        # Details like the number of get, search, and indexing
-        # operations come from here.
-        es_stats = get_json(r'http://%s:%d/_nodes/_local/'
-                            'stats?all=true' % (host, port))
-
-        myid = es_stats['nodes'].keys()[0]
-
-        n_nodes = es_health['number_of_nodes']
-        n_dnodes = es_health['number_of_data_nodes']
-
-        # Unlike n_dnodes (the number of data nodes), we must compute
-        # the number of master-eligible nodes ourselves.
-        n_mnodes = 0
-        for esid in es_state['nodes']:
-            master_elig = True
-
-            # ES will never elect 'client' nodes as masters.
-            try:
-                master_elig = not (booleanise(
-                    es_state['nodes'][esid]['attributes']['client']))
-            except KeyError, e:
-                if e.args[0] != 'client':
-                    raise
-
-            try:
-                master_elig = (booleanise(
-                    es_state['nodes'][esid]['attributes']['master']))
-            except KeyError, e:
-                if e.args[0] != 'master':
-                    raise
-
-            if master_elig:
-                n_mnodes += 1
-
-        n_active_shards = es_health['active_shards']
-        n_relocating_shards = es_health['relocating_shards']
-        n_initialising_shards = es_health['initializing_shards']
-        n_unassigned_shards = es_health['unassigned_shards']
-        n_shards = (n_active_shards + n_relocating_shards +
-                    n_initialising_shards + n_unassigned_shards)
-
-        #
-        # Map construction
-        #
-
-        # String all the dumb ES* objects into a bunch of transitive
-        # associations so that we may make some useful assertions about
-        # them.
-        esid_node_map       = {}  # ESID      : <ESNode>
-        index_primary_map   = {}  # <ESIndex> : { 0: <ESShard>, ... }
-        name_index_map      = {}  # 'bar'     : <ESIndex>
-        name_node_map       = {}  # 'foo'     : <ESNode>
-        node_esid_map       = {}  # <ESNode>  : ESID
-        node_location_map   = {}  # <ESNode>  : ('mars',)
-        node_shard_map      = {}  # <ESNode>  : [ <ESShard>, ... ]
-        primary_replica_map = {}  # <ESShard> : [ <ESShard>, ... ]
-        shard_location_map  = {}  # <ESShard> : ('mars',)
-
-        # Build node maps:
-        #
-        #     - esid_node_map
-        #     - name_node_map
-        #     - node_esid_map
-        #     - node_location_map (data nodes only)
-        #
-        nodes = es_state['nodes']
-        for n in nodes:
-            name = nodes[n]['name']
-            attrs = nodes[n]['attributes']
-            node = ESNode(name, n, attrs)
-
-            name_node_map[name] = node
-            esid_node_map[n] = node
-            node_esid_map[node] = n
-            node_shard_map[node] = []
-
-            if len(failure_domain) > 0:
-                node_location_map[node] = tuple()
-                try:
-                    node_location_map[node] = (
-                        tuple(map(lambda a: attrs[a], failure_domain)))
-                except KeyError, e:
-                    # Nodes that do not store shards (e.g.: 'client'
-                    # nodes) cannot be expected to have been configured
-                    # with locational attributes.
-                    if 'data' not in attrs or booleanise(attrs['data']):
-                        missing_attr = e.args[0]
-                        raise Status('warning',
-                                     ("Node '%s' missing location "
-                                      "attribute '%s'" %
-                                      (name, missing_attr),))
-
-        # Build index maps:
-        #
-        #     - name_index_map
-        #
-        indices = es_state['metadata']['indices']
-        for i in indices:
-            if indices[i]["state"] == "close":
-                continue
-            idx_stns = indices[i]['settings']
-            if version(es_version) < version("1.0.0"):
-                idx = ESIndex(i,
-                              int(idx_stns['index.number_of_shards']),
-                              int(idx_stns['index.number_of_replicas']))
-            else:
-                idx = ESIndex(i,
-                              int(idx_stns['index']['number_of_shards']),
-                              int(idx_stns['index']['number_of_replicas']))
-
-            name_index_map[i] = idx
-
-        # Build shard maps:
-        #
-        #     - index_primary_map
-        #     - node_shard_map
-        #     - primary_replica_map
-        #     - shard_location_map
-        #
-        for i in name_index_map:
-            idx = name_index_map[i]
-
-            if idx not in index_primary_map:
-                index_primary_map[idx] = dict(map(lambda n: (n, None),
-                                                  range(idx.n_shards)))
-
-            idx_shards = (es_state['routing_table']['indices']
-                          [i]['shards'])
-            for d in idx_shards:
-                primary = None
-                replicas = []
-                for s in idx_shards[d]:
-                    shard = ESShard(SHARD_STATE[s['state'].upper()])
-
-                    if s['primary']:
-                        primary = shard
-                    else:
-                        replicas.append(shard)
-
-                    if s['state'] != 'UNASSIGNED':
-                        node = esid_node_map[s['node']]
-
-                        node_shard_map[node].append(shard)
-
-                        if len(failure_domain) > 0:
-                            loc = node_location_map[esid_node_map[s['node']]]
-                            shard_location_map[shard] = loc
-
-                index_primary_map[idx][int(d)] = primary
-
-                if primary is not None:
-                    primary_replica_map[primary] = replicas
-
-        #
-        # Perfdata
-        #
-
-        perfdata = []
-
-        def dict2perfdata(base, metrics):
-            for metric in metrics:
-                if len(metric) == 2:
-                    label, path = metric
-                    unit = ""
-                elif len(metric) > 2:
-                    label, path, unit = metric
-                else:
-                    continue
-
-                keys = path.split(".")
-
-                value = base
-                for key in keys:
-                    if value is None:
-                        break
-                    try:
-                        value = value[key]
-                    except KeyError:
-                        value = None
-                        break
-
-                if value is not None:
-                    metric = PerformanceMetric(label=label,
-                                               value=value,
-                                               unit=unit)
-                    perfdata.append(metric)
-
-        def other2perfdata(metrics):
-            for metric in metrics:
-                if len(metric) == 2:
-                    label, value = metric
-                    unit = ""
-                elif len(metric) > 2:
-                    label, value, unit = metric
-                else:
-                    continue
-
-                if value is not None:
-                    metric = PerformanceMetric(label=label,
-                                               value=value,
-                                               unit=unit)
-                    perfdata.append(metric)
-
-        # Add cluster-wide metrics first. If you monitor all of your ES
-        # cluster nodes with this plugin, they should all report the
-        # same figures for these labels.
-        # Not ideal, but 'tis better to graph this data multiple
-        # times than not graph it at all.
-        metrics = [["cluster_nodes", n_nodes],
-                   ["cluster_master_eligible_nodes", n_mnodes],
-                   ["cluster_data_nodes", n_dnodes],
-                   ["cluster_active_shards", n_active_shards],
-                   ["cluster_relocating_shards", n_relocating_shards],
-                   ["cluster_initialising_shards",
-                    n_initialising_shards],
-                   ["cluster_unassigned_shards", n_unassigned_shards],
-                   ["cluster_total_shards", n_shards]]
-
-        other2perfdata(metrics)
-
-        metrics = [["storesize", 'indices.store.size_in_bytes', "B"],
-                   ["documents", 'indices.docs.count'],
-
-                   ["index_ops", 'indices.indexing.index_total', "c"],
-                   ["index_time", 'indices.indexing.'
-                    'index_time_in_millis', "c"],
-
-                   ["flush_ops", 'indices.flush.total', "c"],
-                   ["flush_time", 'indices.flush.'
-                    'total_time_in_millis', "c"],
-
-                   ["throttle_time",
-                    "indices.store.throttle_time_in_millis", "c"],
-
-                   ["index_ops", "indices.indexing.index_total", "c"],
-                   ["index_time",
-                    "indices.indexing.index_time_in_millis", "c"],
-                   ["delete_ops", "indices.indexing.delete_total", "c"],
-                   ["delete_time",
-                    "indices.indexing.delete_time_in_millis", "c"],
-
-                   ["get_ops", "indices.get.total", "c"],
-                   ["get_time", "indices.get.time_in_millis", "c"],
-                   ["exists_ops", "indices.get.exists_total", "c"],
-                   ["exists_time",
-                    "indices.get.exists_time_in_millis", "c"],
-                   ["missing_ops", "indices.get.missing_total", "c"],
-                   ["missing_time",
-                    "indices.get.missing_time_in_millis", "c"],
-
-                   ["query_ops", 'indices.search.query_total', "c"],
-                   ["query_time",
-                    'indices.search.query_time_in_millis', "c"],
-                   ["fetch_ops", "indices.search.fetch_total", "c"],
-                   ["fetch_time",
-                    "indices.search.fetch_time_in_millis", "c"],
-
-                   ["merge_ops", "indices.merges.total", "c"],
-                   ["merge_time", "indices.merges.time_in_millis", "c"],
-
-                   ["refresh_ops", "indices.refresh.total", "c"],
-                   ["refresh_time",
-                    "indices.refresh.total_time_in_millis", "c"],
-                   ]
-
-        dict2perfdata(es_stats['nodes'][myid], metrics)
-
-        #
-        # Assertions
-        #
-
-        detail = []  # Collect error messages into this list
-
-        msg = "Monitoring cluster '%s'" % es_health['cluster_name']
-
-        # Assertion: Each shard has one primary in STARTED or
-        # RELOCATING state.
-        downgraded = False
-
-        for idx_name, idx in name_index_map.iteritems():
-            for shard_no in range(idx.n_shards):
-                primary = index_primary_map[idx][shard_no]
-                if primary is None:
-                    downgraded |= self.downgrade_health(RED)
-                    detail.append("Index '%s' missing primary on "
-                                  "shard %d" % (idx_name, shard_no))
-                else:
-                    if primary.state not in (SHARD_STATE['STARTED'],
-                                             SHARD_STATE['RELOCATING']):
-                        downgraded |= self.downgrade_health(RED)
-                        detail.append("Index '%s' primary down on "
-                                      "shard %d" % (idx_name, shard_no))
-
-        if downgraded:
-            msg = ("One or more indexes are missing primary shards. "
-                   "Use -vv to list them.")
-
-        # Assertion: Each primary has replicas in STARTED state.
-        downgraded = False
-
-        for idx_name, idx in name_index_map.iteritems():
-            expect_replicas = idx.n_replicas
-
-            for shard_no in range(idx.n_shards):
-                primary = index_primary_map[idx][shard_no]
-
-                if primary is None:
-                    continue
-
-                has_replicas = len(primary_replica_map[primary])
-
-                if has_replicas < expect_replicas:
-                    downgraded |= self.downgrade_health(YELLOW)
-                    detail.append("Index '%s' missing replica on "
-                                  "shard %d" % (idx_name, shard_no))
-
-                for replica in primary_replica_map[primary]:
-                    if replica.state != SHARD_STATE['STARTED']:
-                        downgraded |= self.downgrade_health(YELLOW)
-                        detail.append("Index '%s' replica down on "
-                                      "shard %d" % (idx_name, shard_no))
-
-        if downgraded:
-            msg = ("One or more indexes are missing replica shards. "
-                   "Use -vv to list them.")
-
-        # Assertion: You have as many master-eligible nodes in the
-        # cluster as you think you ought to.
-        #
-        # To be of any use in detecting split-brains, this value must
-        # be set to the *total* number of master-eligible nodes in the
-        # cluster, not whatever you set in ElasticSearch's
-        # 'discovery.zen.minimum_master_nodes' configuration parameter.
-        # (See ES issue #2488.) Of course, this will trip whenever a
-        # node is taken down for maintenance, so we raise only a
-        # warning -- not a critical -- status condition.
-        downgraded = False
-
-        if opts.master_nodes is not None:
-            if n_mnodes < int(opts.master_nodes):
-                downgraded |= self.downgrade_health(YELLOW)
-                detail.append("Expected to find %d master-eligible "
-                              "nodes in the cluster but only found "
-                              "%d" % (int(opts.master_nodes), n_mnodes))
-
-        if downgraded:
-            msg = ("Missing master-eligible nodes")
-
-        # Assertion: Replicas are not stored in the same failure domain
-        # as their primary.
-        downgraded = False
-
-        if len(failure_domain) > 0:
-            for idx_name, idx in name_index_map.iteritems():
-
-                # Suppress this test if the index has not been
-                # configured with replicas.
-                if idx.n_replicas == 0:
-                    continue
-
-                for shard_no in range(idx.n_shards):
-                    loc_redundancy = set()
-                    vulnerable_shards = set()
-
-                    primary = index_primary_map[idx][shard_no]
-
-                    if primary is None:
-                        continue
-
-                    try:
-                        loc = shard_location_map[primary]
-                    except KeyError:
-                        continue
-                    loc_redundancy.add(loc)
-                    vulnerable_shards.add(primary)
-
-                    for replica in primary_replica_map[primary]:
-                        try:
-                            loc = shard_location_map[replica]
-                        except KeyError:
-                            continue
-                        loc_redundancy.add(loc)
-                        vulnerable_shards.add(replica)
-
-                    # Suppress the problem unless at least one of the
-                    # vulnerable shards is on this data node.
-                    my_shards = set(node_shard_map[esid_node_map[myid]])
-                    if vulnerable_shards.isdisjoint(my_shards):
-                        continue
-
-                    if len(loc_redundancy) == 1:
-                        downgraded |= self.downgrade_health(YELLOW)
-                        loc = ",".join(list(loc_redundancy)[0])
-                        detail.append("Index '%s' shard %d only exists "
-                                      "in location '%s'" %
-                                      (idx_name, shard_no, loc))
-
-        if downgraded:
-            msg = ("One or more index shards are not being replicated "
-                   "across failure domains. Use -vv to list them.")
-
-        # ES detected a problem that we did not. This should never
-        # happen. (If it does, you should work out what happened, then
-        # fix this code so that we can detect the problem if it happens
-        # again.) Obviously, in this case, we cannot provide any useful
-        # output to the operator.
-        if their_health < self.health:
-            raise Status('critical',
-                         ("Cluster reports degraded health: '%s'" %
-                          es_health['status'],),
-                         perfdata)
-
-        raise Status(HEALTH_MAP[self.health],
-                     (msg, None,
-                      "%s\n\n%s" % (msg, "\n".join(detail))),
-                     perfdata)
-
-    def downgrade_health(self, new_health):
-        if new_health < self.health:
-            self.health = new_health
-            return True
-        return False
-
-
-def booleanise(b):
-    """Normalise a 'stringified' Boolean to a proper Python Boolean.
-
-    ElasticSearch has a habit of returning "true" and "false" in its
-    JSON responses when it should be returning `true` and `false`. If
-    `b` looks like a stringified Boolean true, return True. If `b`
-    looks like a stringified Boolean false, return False.
-
-    Raise ValueError if we don't know what `b` is supposed to
-    represent.
-
-    """
-    s = str(b)
-    if s.lower() == "true":
-        return True
-    if s.lower() == "false":
-        return False
-
-    raise ValueError("I don't know how to coerce %r to a bool" % b)
-
-
-def get_json(uri):
-    try:
-        f = urllib2.urlopen(uri)
-    except urllib2.HTTPError, e:
-        raise Status('unknown', ("API failure",
-                                 None,
-                                 "API failure:\n\n%s" % str(e)))
-    except urllib2.URLError, e:
-        # The server could be down; make this CRITICAL.
-        raise Status('critical', (e.reason,))
-
-    body = f.read()
-
-    try:
-        j = json.loads(body)
-    except ValueError:
-        raise Status('unknown', ("API returned nonsense",))
-
-    return j
-
-
-def version(version_string):
-    """Accept a typical version string (ex: 1.0.1) and return a tuple
-    of ints, allowing for reasonable comparison."""
-    return tuple([int(i) for i in version_string.split('.')])
-
-
-def main():
-    ElasticSearchCheck().run()
-
-
-if __name__ == '__main__':
-    main()
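Aside on the `version()` helper the removed plugin used to switch between the pre- and post-1.0.0 index-settings layouts: converting each dotted component to an int before comparing avoids the lexicographic trap, where "1.10.0" sorts before "1.9.2" as a string. A minimal sketch of the idiom (not part of the diff):

    def version(version_string):
        # Compare versions component-wise as integers, not as text.
        return tuple(int(i) for i in version_string.split('.'))

    assert "1.10.0" < "1.9.2"                    # string compare: wrong order
    assert version("1.10.0") > version("1.9.2")  # tuple compare: correct
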
diff --git a/plugins/command/check_reboot_required.py b/plugins/command/check_reboot_required.py
deleted file mode 100644
index 6c51ab5..0000000
--- a/plugins/command/check_reboot_required.py
+++ /dev/null
@@ -1,131 +0,0 @@
-# Read marker files created by `notify-reboot-required` of the
-# `update-notifier-common` package. These indicate that a package has
-# requested the machine to be rebooted at a convenient time.
-
-from datetime import datetime, date
-from fileinput import FileInput
-import os
-import re
-import sys
-
-from plugins.output import (CheckException,
-                            nagios_ok,
-                            nagios_warning,
-                            nagios_critical,
-                            nagios_unknown,
-                            nagios_message)
-
-
-def dpkg_log_lines(log_files):
-    """Parse the package install logs into a list of lines."""
-    valid_log_files = []
-    for log_file in log_files:
-        if os.path.exists(log_file):
-            valid_log_files.append(log_file)
-    if len(valid_log_files) > 0:
-        try:
-            return list(FileInput(valid_log_files))
-        except IOError:
-            nagios_unknown("Cannot find/open the dpkg log files")
-    else:
-        nagios_unknown("None of the listed dpkg log files are "
-                       "available")
-
-
-def grep(string, list):
-    """Find a string within a list of lines."""
-    expr = re.compile(string)
-    return [elem for elem in list if expr.match(elem)]
-
-
-def parse_files(warning_days, critical_days,
-                dpkg_log_files=['/var/log/dpkg.log.1',
-                                '/var/log/dpkg.log'],
-                reboot_required_file='/var/run/reboot-required',
-                reboot_required_pkgs_file='/var/run/reboot-required.pkgs'):
-    full_message = ""
-    install_dates = []
-    dpkg_log = dpkg_log_lines(dpkg_log_files)
-
-    # Check if the reboot-required flag file exists.
-    # To silence the check, delete this file.
-    if not os.path.exists(reboot_required_file):
-        nagios_ok("Reboot required file (%s) does not exist" %
-                  reboot_required_file)
-
-    if not os.path.exists(reboot_required_pkgs_file):
-        nagios_ok("No packages listed requiring reboot")
-
-    with open(reboot_required_pkgs_file) as f:
-        # For each package, attempt to find what time it was installed.
-        for line in f.readlines():
-            package = line.rstrip()
-            log_lines = grep(".*status installed %s.*" % package,
-                             dpkg_log)
-            if len(log_lines) >= 1:
-                install_date = log_lines[0].split(' ')[0]
-            else:
-                # This is a nasty nasty hack. If I can't find the
-                # install date in the dpkg log (for whatever reason),
-                # I still want to reboot to install that package, so I
-                # will fake the date. Later on I will check the date
-                # and if I find the minimum is 2199-12-12, I will go
-                # UNKNOWN instead.
-                install_date = '2199-12-12'
-            full_message += "%s: %s\n" % (install_date, package)
-            install_dates.append(install_date)
-
-    # Find the age in days of the oldest package install
-    if min(install_dates) == '2199-12-12':
-        nagios_unknown('There are packages requiring reboot that I '
-                       'can find no install date for, so I have '
-                       'assumed 2199-12-12\n\n%s' % full_message)
-    oldest_install_date = datetime.strptime(min(install_dates),
-                                            "%Y-%m-%d").date()
-    today = date.today()
-    install_age = today - oldest_install_date
-
-    # Spit out the correct message
-    if int(install_age.days) >= int(critical_days):
-        nagios_critical("Packages requiring reboot outstanding for "
-                        "longer than %s days:\n\n%s"
-                        % (critical_days, full_message.rstrip()))
-    elif int(install_age.days) >= int(warning_days):
-        nagios_warning("Packages requiring reboot outstanding for "
-                       "longer than %s days:\n\n%s"
-                       % (warning_days, full_message.rstrip()))
-    else:
-        nagios_ok("Packages requiring reboot, but inside the "
-                  "threshold of %s days\n\n%s"
-                  % (warning_days, full_message.rstrip()))
-
-
-usage_message = """
-Usage: ./check_reboot_required [critical_days] [warning_days]
-
-When given no arguments, the default threshold is 0 days.
-One argument will raise a critical alert at that number of days.
-Two arguments will raise a warning at the first number of days
-and a critical at the second number of days.
-"""
-
-
-def main():
-    try:
-        if len(sys.argv) >= 3:
-            warning_days = sys.argv[2]
-            critical_days = sys.argv[1]
-        elif len(sys.argv) == 2:
-            if sys.argv[1] == "-h":
-                print usage_message
-                sys.exit(0)
-            else:
-                warning_days = sys.argv[1]
-                critical_days = sys.argv[1]
-        else:
-            warning_days = 0
-            critical_days = 0
-
-        parse_files(warning_days, critical_days)
-
-    except CheckException as e:
-        nagios_message(e.message, e.severity)
-    except Exception as e:
-        # Catching all other exceptions
-        nagios_message("Exception: %s" % e, 3)
diff --git a/setup.py b/setup.py
index 2f06e59..6232213 100755
--- a/setup.py
+++ b/setup.py
@@ -42,9 +42,7 @@
         'console_scripts': [
             'check_apt_security_updates='
             'plugins.command.check_apt_security_updates:main',
-            'check_reboot_required=plugins.command.check_reboot_required:main',
             'check_puppetdb_ssh_host_keys=plugins.command.check_puppetdb_ssh_host_keys:main',
-            'check_elasticsearch=plugins.command.check_elasticsearch:main',
             'check_elasticsearch_aws=plugins.command.check_elasticsearch_aws:main'
         ]
     }
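The two `console_scripts` entries have to be removed together with the modules: for each `name=package.module:func` entry, setuptools generates a wrapper executable that imports the module at startup, so a stale entry leaves behind a `check_reboot_required`/`check_elasticsearch` command that dies with ImportError. Roughly what such a wrapper does (a sketch; `check_example` is a hypothetical entry, and real generated wrappers resolve the function via the entry-point machinery rather than a direct import):

    # Approximate expansion of
    # 'check_example=plugins.command.check_example:main'
    import sys
    from plugins.command.check_example import main  # hypothetical module

    if __name__ == '__main__':
        sys.exit(main())
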
diff --git a/tests/command/test_check_reboot_required.py b/tests/command/test_check_reboot_required.py
deleted file mode 100644
index 9980cf4..0000000
--- a/tests/command/test_check_reboot_required.py
+++ /dev/null
@@ -1,95 +0,0 @@
-import unittest
-import os
-
-from freezegun import freeze_time
-
-from plugins.output import CheckException
-from plugins.command.check_reboot_required import (dpkg_log_lines,
-                                                   grep,
-                                                   parse_files)
-
-script_directory = os.path.dirname(__file__)
-dpkg_file = os.path.join(script_directory, '../fixtures/dpkg.log')
-reboot_file = os.path.join(script_directory,
-                           '../fixtures/reboot-required')
-pkgs_file = os.path.join(script_directory,
-                         '../fixtures/reboot-required.pkgs')
-pkgs_unknown_file = os.path.join(script_directory,
-                                 '../fixtures/reboot-required.pkgs.unknown')
-
-
-class TestCheckRebootCommand(unittest.TestCase):
-    def test_dpkg_log_lines_no_files(self):
-        with self.assertRaisesRegexp(CheckException, 'None') as context:
-            dpkg_log_lines(['/asasdfasdfasdf', '/34ggqg3q4g'])
-
-        self.assertEqual(context.exception.severity, 3)
-
-    def test_grep(self):
-        grepped = grep('foo', ['foo', 'bar'])
-        self.assertEqual(len(grepped), 1)
-        self.assertEqual(grepped[0], 'foo')
-
-    @freeze_time("2014-01-14")
-    def test_parse_files_warning(self):
-        with self.assertRaisesRegexp(CheckException, 'longer') as context:
-            parse_files(warning_days=0,
-                        critical_days=20,
-                        dpkg_log_files=[dpkg_file],
-                        reboot_required_file=reboot_file,
-                        reboot_required_pkgs_file=pkgs_file)
-
-        self.assertEqual(context.exception.severity, 1)
-
-    @freeze_time("2014-01-14")
-    def test_parse_files_critical(self):
-        with self.assertRaisesRegexp(CheckException, 'longer') as context:
-            parse_files(warning_days=0,
-                        critical_days=2,
-                        dpkg_log_files=[dpkg_file],
-                        reboot_required_file=reboot_file,
-                        reboot_required_pkgs_file=pkgs_file)
-
-        self.assertEqual(context.exception.severity, 2)
-
-    @freeze_time("2014-01-14")
-    def test_parse_files_ok(self):
-        with self.assertRaisesRegexp(CheckException, 'inside') as context:
-            parse_files(warning_days=15,
-                        critical_days=30,
-                        dpkg_log_files=[dpkg_file],
-                        reboot_required_file=reboot_file,
-                        reboot_required_pkgs_file=pkgs_file)
-
-        self.assertEqual(context.exception.severity, 0)
-
-    @freeze_time("2014-01-14")
-    def test_parse_files_no_install_date(self):
-        with self.assertRaisesRegexp(CheckException, 'assumed') as context:
-            parse_files(warning_days=15,
-                        critical_days=30,
-                        dpkg_log_files=[dpkg_file],
-                        reboot_required_file=reboot_file,
-                        reboot_required_pkgs_file=pkgs_unknown_file)
-
-        self.assertEqual(context.exception.severity, 3)
-
-    def test_parse_files_no_reboot_file(self):
-        with self.assertRaisesRegexp(CheckException, 'exist') as context:
-            parse_files(warning_days=15,
-                        critical_days=30,
-                        dpkg_log_files=[dpkg_file],
-                        reboot_required_file='/lieruhgealrugh',
-                        reboot_required_pkgs_file=pkgs_unknown_file)
-
-        self.assertEqual(context.exception.severity, 0)
-
-    def test_parse_files_no_pkgs_file(self):
-        with self.assertRaisesRegexp(CheckException, 'requiring') as context:
-            parse_files(warning_days=15,
-                        critical_days=30,
-                        dpkg_log_files=[dpkg_file],
-                        reboot_required_file=reboot_file,
-                        reboot_required_pkgs_file='/erlgiuhaerg')
-
-        self.assertEqual(context.exception.severity, 0)
diff --git a/tests/fixtures/reboot-required b/tests/fixtures/reboot-required
deleted file mode 100644
index e088937..0000000
--- a/tests/fixtures/reboot-required
+++ /dev/null
@@ -1 +0,0 @@
-*** System restart required ***
diff --git a/tests/fixtures/reboot-required.pkgs b/tests/fixtures/reboot-required.pkgs
deleted file mode 100644
index 6659db9..0000000
--- a/tests/fixtures/reboot-required.pkgs
+++ /dev/null
@@ -1,3 +0,0 @@
-linux-image-3.2.0-58-generic
-linux-base
-libssl1.0.0
diff --git a/tests/fixtures/reboot-required.pkgs.unknown b/tests/fixtures/reboot-required.pkgs.unknown
deleted file mode 100644
index 441c1f6..0000000
--- a/tests/fixtures/reboot-required.pkgs.unknown
+++ /dev/null
@@ -1 +0,0 @@
-linux-base
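Note the second `test_parse_files_no_reboot_file` above has been renamed to `test_parse_files_no_pkgs_file`: the original file defined the same method name twice, so the first definition was silently shadowed and never ran. The deleted tests pin the clock with freezegun so that the day-count thresholds computed by `parse_files` are deterministic. The same pattern in isolation (a minimal sketch; `check_age` is a hypothetical stand-in for the age calculation inside `parse_files`):

    import unittest
    from datetime import date

    from freezegun import freeze_time

    def check_age(install_date):
        # Same age calculation parse_files performs.
        return (date.today() - install_date).days

    class TestAge(unittest.TestCase):
        @freeze_time("2014-01-14")
        def test_age_is_deterministic(self):
            # date.today() is patched to 2014-01-14, so the result is
            # stable no matter when the test actually runs.
            self.assertEqual(check_age(date(2014, 1, 1)), 13)

    if __name__ == '__main__':
        unittest.main()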