From ebe3ebf33fe45a1214034ab2ccad3a6ea5605c7e Mon Sep 17 00:00:00 2001 From: lstoppa <77723162+lstoppa@users.noreply.github.com> Date: Wed, 21 Jul 2021 15:08:09 +0200 Subject: [PATCH] Feature/redo walk (#95) * feat: ADDON-37033 (add sysUpTimeInstance initial support) - added some code for handling sysUpTimeInstance and a general function that should decide -based on some SNMP data- when a walk should be executed. * feat: ADDON-37033 (add support for mongodb real-time data storage) * feat: ADDON-37033 (add real-time task that handles SNMP WALK) - added a new task that will potentially redo a SNMPWALK based on sysUpTimeInstance - general refactoring (Poller) - fixed some issues with mongo when retrieving REAL-TIME-DATA-COLLECTION * feat: ADDON-37033 (add sync SNMPGET) - added a way to get 1.3.6.1.2.1.1.3.0 before checking whether a WALK is needed * feat: ADDON-37033 (refactoring) - created a named method for creating scheduler map keys (create_poller_scheduler_entry_key) * feat: ADDON-37033 (refactoring) - scheduler was set to one second (fixed that) * fix: ADDON-37033 (sanitizers #1) * fix: ADDON-37033 (sanitizers #2) * fix: ADDON-37033 (sanitizers #3) * fix: ADDON-37033 (sanitizers #4) * fix: ADDON-37033 (code-review #1) * fix: ADDON-37033 (code-review #2) - again lint was complaining about non-sorted imports * fix: ADDON-37033 (code-review #3) - we were calling int(frequency) even if frequency was already an int. 
--- .../manager/poller.py | 207 +++++++----------- .../manager/poller_utilities.py | 186 ++++++++++++++++ .../manager/realtime/__init__.py | 15 ++ .../manager/realtime/oid_constant.py | 17 ++ .../manager/realtime/real_time_data.py | 76 +++++++ splunk_connect_for_snmp_poller/mongo.py | 58 ++++- splunk_connect_for_snmp_poller/utilities.py | 6 + tests/test_real_time_data.py | 48 ++++ 8 files changed, 480 insertions(+), 133 deletions(-) create mode 100644 splunk_connect_for_snmp_poller/manager/poller_utilities.py create mode 100644 splunk_connect_for_snmp_poller/manager/realtime/__init__.py create mode 100644 splunk_connect_for_snmp_poller/manager/realtime/oid_constant.py create mode 100644 splunk_connect_for_snmp_poller/manager/realtime/real_time_data.py create mode 100644 tests/test_real_time_data.py diff --git a/splunk_connect_for_snmp_poller/manager/poller.py b/splunk_connect_for_snmp_poller/manager/poller.py index 1cf6e2d..442a5e3 100644 --- a/splunk_connect_for_snmp_poller/manager/poller.py +++ b/splunk_connect_for_snmp_poller/manager/poller.py @@ -13,17 +13,18 @@ # See the License for the specific language governing permissions and # limitations under the License. 
# -import csv import functools import logging.config import time import schedule -from splunk_connect_for_snmp_poller.manager.tasks import snmp_polling -from splunk_connect_for_snmp_poller.manager.validator.inventory_validator import ( - is_valid_inventory_line_from_dict, - should_process_inventory_line, +from pysnmp.hlapi import SnmpEngine +from splunk_connect_for_snmp_poller.manager.poller_utilities import ( + automatic_realtime_task, + create_poller_scheduler_entry_key, + parse_inventory_file, ) +from splunk_connect_for_snmp_poller.manager.tasks import snmp_polling from splunk_connect_for_snmp_poller.mongo import WalkedHostsRepository from splunk_connect_for_snmp_poller.utilities import ( file_was_modified, @@ -46,133 +47,102 @@ def __init__(self, args, server_config): self._mongo_walked_hosts_coll = WalkedHostsRepository( self._server_config["mongo"] ) + self._local_snmp_engine = SnmpEngine() - def get_splunk_indexes(self): - index = { + def __get_splunk_indexes(self): + return { "event_index": self._args.event_index, "metric_index": self._args.metric_index, } - return index + + def __get_realtime_task_frequency(self): + return self._args.realtime_task_frequency def run(self): + self.__start_realtime_scheduler_task() counter = 0 while True: if counter == 0: - self.check_inventory() + self.__check_inventory() counter = int(self._args.refresh_interval) schedule.run_pending() time.sleep(1) counter -= 1 - def should_process_current_line(self, host, version, community, profile, frequency): - return should_process_inventory_line( - host - ) and is_valid_inventory_line_from_dict( - host, version, community, profile, frequency - ) - - def check_inventory(self): - splunk_indexes = self.get_splunk_indexes() - - # check if config was modified + def __check_inventory(self): server_config_modified, self._config_mod_time = file_was_modified( self._args.config, self._config_mod_time ) if server_config_modified: self._server_config = parse_config_file(self._args.config) - # 
check if inventory was modified inventory_config_modified, self._inventory_mod_time = file_was_modified( self._args.inventory, self._inventory_mod_time ) # update job when either inventory changes or config changes if server_config_modified or inventory_config_modified: - with open(self._args.inventory, newline="") as csvfile: - inventory = csv.DictReader(csvfile, delimiter=",") - - inventory_hosts = set() - - for agent in inventory: - host = agent["host"] - version = agent["version"] - community = agent["community"] - profile = agent["profile"] - frequency_str = agent["freqinseconds"] - if self.should_process_current_line( - host, version, community, profile, frequency_str - ): - entry_key = host + "#" + profile - frequency = int(agent["freqinseconds"]) - - if entry_key in inventory_hosts: - logger.error( - ( - f"{host},{version},{community},{profile},{frequency_str} has duplicated " - f"hostname {host} and {profile} in the inventory," - f" cannot use the same profile twice for the same device" - ) - ) - continue - - inventory_hosts.add(entry_key) - - logger.info( - f"[-] server_config['profiles']: {self._server_config['profiles']}" + inventory_hosts = set() + for ir in parse_inventory_file(self._args.inventory): + entry_key = create_poller_scheduler_entry_key(ir.host, ir.profile) + frequency = int(ir.frequency_str) + if entry_key in inventory_hosts: + logger.error( + ( + f"{ir.host},{ir.version},{ir.community},{ir.profile},{ir.frequency_str} has duplicated " + f"hostname {ir.host} and {ir.profile} in the inventory," + f" cannot use the same profile twice for the same device" ) - # perform one-time walk for the entire tree for each un-walked host - self.one_time_walk( - host, - version, - community, - Poller.universal_base_oid, + ) + continue + + inventory_hosts.add(entry_key) + logger.info( + f"[-] server_config['profiles']: {self._server_config['profiles']}" + ) + if entry_key not in self._jobs_map: + logger.debug(f"Adding configuration for job {entry_key}") + 
job_reference = schedule.every(frequency).seconds.do( + scheduled_task, + ir.host, + ir.version, + ir.community, + ir.profile, + self._server_config, + self.__get_splunk_indexes(), + ) + self._jobs_map[entry_key] = job_reference + else: + old_conf = self._jobs_map.get(entry_key).job_func.args + if ( + old_conf + != ( + ir.host, + ir.version, + ir.community, + ir.profile, self._server_config, - splunk_indexes, + self.__get_splunk_indexes(), + ) + or frequency != self._jobs_map.get(entry_key).interval + ): + self.__update_schedule( + ir.community, + ir.frequency, + ir.host, + ir.profile, + ir.version, + self._server_config, + self.__get_splunk_indexes(), ) - - if entry_key not in self._jobs_map: - logger.debug(f"Adding configuration for job {entry_key}") - job_reference = schedule.every(int(frequency)).seconds.do( - scheduled_task, - host, - version, - community, - profile, - self._server_config, - splunk_indexes, - ) - self._jobs_map[entry_key] = job_reference - else: - old_conf = self._jobs_map.get(entry_key).job_func.args - if ( - old_conf - != ( - host, - version, - community, - profile, - self._server_config, - splunk_indexes, - ) - or frequency != self._jobs_map.get(entry_key).interval - ): - self.update_schedule( - community, - frequency, - host, - profile, - version, - self._server_config, - splunk_indexes, - ) for entry_key in list(self._jobs_map): if entry_key not in inventory_hosts: logger.debug(f"Removing job for {entry_key}") schedule.cancel_job(self._jobs_map.get(entry_key)) del self._jobs_map[entry_key] - def update_schedule( + def __update_schedule( self, community, frequency, @@ -206,25 +176,17 @@ def update_schedule( old_next_run if new_next_run > old_next_run else new_next_run ) - def one_time_walk( - self, host, version, community, profile, server_config, splunk_indexes - ): - logger.debug( - f"[-]walked flag: {self._mongo_walked_hosts_coll.contains_host(host)}" + def __start_realtime_scheduler_task(self): + # schedule.every().second.do( + # For 
debugging purposes better change it to "one second" + schedule.every(self._args.realtime_task_frequency).seconds.do( + automatic_realtime_task, + self._mongo_walked_hosts_coll, + self._args.inventory, + self.__get_splunk_indexes(), + self._server_config, + self._local_snmp_engine, ) - if self._mongo_walked_hosts_coll.contains_host(host) == 0: - schedule.every().second.do( - onetime_task, - host, - version, - community, - profile, - server_config, - splunk_indexes, - ) - self._mongo_walked_hosts_coll.add_host(host) - else: - logger.debug(f"[-] One time walk executed for {host}!") def scheduled_task(host, version, community, profile, server_config, splunk_indexes): @@ -233,20 +195,3 @@ def scheduled_task(host, version, community, profile, server_config, splunk_inde ) snmp_polling.delay(host, version, community, profile, server_config, splunk_indexes) - - -def onetime_task(host, version, community, profile, server_config, splunk_indexes): - logger.debug( - f"Executing onetime_task for {host} version={version} community={community} profile={profile}" - ) - - snmp_polling.delay( - host, - version, - community, - profile, - server_config, - splunk_indexes, - one_time_flag=True, - ) - return schedule.CancelJob diff --git a/splunk_connect_for_snmp_poller/manager/poller_utilities.py b/splunk_connect_for_snmp_poller/manager/poller_utilities.py new file mode 100644 index 0000000..999bcf2 --- /dev/null +++ b/splunk_connect_for_snmp_poller/manager/poller_utilities.py @@ -0,0 +1,186 @@ +# +# Copyright 2021 Splunk Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# +import csv +import logging.config +from dataclasses import dataclass + +import schedule +from pysnmp.hlapi import ObjectIdentity, ObjectType, UdpTransportTarget, getCmd +from splunk_connect_for_snmp_poller.manager.realtime.oid_constant import ( + OidConstant, +) +from splunk_connect_for_snmp_poller.manager.realtime.real_time_data import ( + should_redo_walk, +) +from splunk_connect_for_snmp_poller.manager.tasks import snmp_polling +from splunk_connect_for_snmp_poller.manager.validator.inventory_validator import ( + is_valid_inventory_line_from_dict, + should_process_inventory_line, +) + +logger = logging.getLogger(__name__) + + +@dataclass +class InventoryRecord: + host: str + version: str + community: str + profile: str + frequency_str: str + + +def _should_process_current_line(inventory_record): + return should_process_inventory_line( + inventory_record.host + ) and is_valid_inventory_line_from_dict( + inventory_record.host, + inventory_record.version, + inventory_record.community, + inventory_record.profile, + inventory_record.frequency_str, + ) + + +def onetime_task(host, version, community, profile, server_config, splunk_indexes): + logger.debug( + f"Executing onetime_task for {host} version={version} community={community} profile={profile}" + ) + + snmp_polling.delay( + host, + version, + community, + profile, + server_config, + splunk_indexes, + one_time_flag=True, + ) + return schedule.CancelJob + + +def parse_inventory_file(inventory_file_path): + with open(inventory_file_path, newline="") as inventory_file: + for agent in csv.DictReader(inventory_file, delimiter=","): + inventory_record = InventoryRecord( + agent["host"], + agent["version"], + agent["community"], + agent["profile"], + agent["freqinseconds"], + ) + if _should_process_current_line(inventory_record): + yield inventory_record + + +def _extract_sys_uptime_instance( + local_snmp_engine, 
host, version, community, server_config +): + from splunk_connect_for_snmp_poller.manager.task_utilities import ( + parse_port, + ) + from splunk_connect_for_snmp_poller.manager.tasks import ( + build_authData, + build_contextData, + ) + + auth_data = build_authData(version, community, server_config) + context_data = build_contextData(version, community, server_config) + device_hostname, device_port = parse_port(host) + result = getCmd( + local_snmp_engine, + auth_data, + UdpTransportTarget((device_hostname, device_port)), + context_data, + ObjectType(ObjectIdentity(OidConstant.SYS_UP_TIME_INSTANCE)), + ) + error_indication, error_status, error_index, var_binds = next(result) + sys_up_time_value = 0 + if not error_indication and not error_status: + for a, b in var_binds: + if str(a) == OidConstant.SYS_UP_TIME_INSTANCE: + # class_name = b.__class__.__name__ + sys_up_time_value = b.prettyPrint() + return { + OidConstant.SYS_UP_TIME_INSTANCE: { + "value": str(sys_up_time_value), + "type": "TimeTicks", + }, + } + + +def _walk_info(all_walked_hosts_collection, host, current_sys_up_time): + host_already_walked = all_walked_hosts_collection.contains_host(host) != 0 + should_do_walk = not host_already_walked + if host_already_walked: + previous_sys_up_time = all_walked_hosts_collection.real_time_data_for(host) + should_do_walk = should_redo_walk(previous_sys_up_time, current_sys_up_time) + return host_already_walked, should_do_walk + + +def _update_mongo( + all_walked_hosts_collection, host, host_already_walked, current_sys_up_time +): + if not host_already_walked: + all_walked_hosts_collection.add_host(host) + all_walked_hosts_collection.update_real_time_data_for(host, current_sys_up_time) + + +""" +This is he realtime task responsible for executing an SNMPWALK when +* we discover an host for the first time, or +* upSysTimeInstance has changed. 
+""" + + +def automatic_realtime_task( + all_walked_hosts_collection, + inventory_file_path, + splunk_indexes, + server_config, + local_snmp_engine, +): + for inventory_record in parse_inventory_file(inventory_file_path): + sys_up_time = _extract_sys_uptime_instance( + local_snmp_engine, + inventory_record.host, + inventory_record.version, + inventory_record.community, + server_config, + ) + host_already_walked, should_do_walk = _walk_info( + all_walked_hosts_collection, inventory_record.host, sys_up_time + ) + if should_do_walk: + schedule.every().second.do( + onetime_task, + inventory_record.host, + inventory_record.version, + inventory_record.community, + inventory_record.profile, + server_config, + splunk_indexes, + ) + _update_mongo( + all_walked_hosts_collection, + inventory_record.host, + host_already_walked, + sys_up_time, + ) + + +def create_poller_scheduler_entry_key(host, profile): + return host + "#" + profile diff --git a/splunk_connect_for_snmp_poller/manager/realtime/__init__.py b/splunk_connect_for_snmp_poller/manager/realtime/__init__.py new file mode 100644 index 0000000..72d4509 --- /dev/null +++ b/splunk_connect_for_snmp_poller/manager/realtime/__init__.py @@ -0,0 +1,15 @@ +# +# Copyright 2021 Splunk Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# diff --git a/splunk_connect_for_snmp_poller/manager/realtime/oid_constant.py b/splunk_connect_for_snmp_poller/manager/realtime/oid_constant.py new file mode 100644 index 0000000..b0c7bc8 --- /dev/null +++ b/splunk_connect_for_snmp_poller/manager/realtime/oid_constant.py @@ -0,0 +1,17 @@ +# +# Copyright 2021 Splunk Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +class OidConstant: + SYS_UP_TIME_INSTANCE = "1.3.6.1.2.1.1.3.0" diff --git a/splunk_connect_for_snmp_poller/manager/realtime/real_time_data.py b/splunk_connect_for_snmp_poller/manager/realtime/real_time_data.py new file mode 100644 index 0000000..9458f43 --- /dev/null +++ b/splunk_connect_for_snmp_poller/manager/realtime/real_time_data.py @@ -0,0 +1,76 @@ +# +# Copyright 2021 Splunk Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +from pysnmp.proto.rfc1902 import TimeTicks +from splunk_connect_for_snmp_poller.manager.realtime.oid_constant import ( + OidConstant, +) + + +class __RealTimeData: + def __init__(self, element_type, element_value): + self.element_type = element_type + self.element_value = element_value + + def value(self): + return self.element_value + + +def _device_probably_restarted(old_sysuptime, new_sysuptime): + try: + return TimeTicks(int(old_sysuptime.value())) > TimeTicks( + int(new_sysuptime.value()) + ) + except ValueError: + return False + + +"""" +With virtualization becoming more and more common, we need some way of detecting when, for the same IP, a new device +was redeployed. One common way of doing this is to analyze DISMAN-EVENT-MIB::sysUpTimeInstance. +If its new value is less than the previous one, it probably means a device was re-deployed and the DHCP probably +assigned it the same IP. In this case we need to re-do an SNMP WALK. + +Parameters +---------- +realtime_collection: dict + This is a dictionary in the format {"OID": {"type": "your-oid-type", "value": "value as string"}, ... } +input_data_collection: dict + This is a dictionary in the format {"OID": {"type": "your-oid-type", "value": "value as string"}, ... } + +Returns +------- +True if both dictionaries have a "SYS_UP_TIME_INSTANCE" key, and the input_data_collection has a value that is +less than realtime_collection. False otherwise. 
+""" + + +def _device_restarted(realtime_collection, input_data_collection): + if OidConstant.SYS_UP_TIME_INSTANCE in realtime_collection: + if OidConstant.SYS_UP_TIME_INSTANCE in input_data_collection: + old_value = realtime_collection[OidConstant.SYS_UP_TIME_INSTANCE] + old_rt_record = __RealTimeData(old_value["type"], old_value["value"]) + new_value = input_data_collection[OidConstant.SYS_UP_TIME_INSTANCE] + new_rt_record = __RealTimeData(new_value["type"], new_value["value"]) + return _device_probably_restarted(old_rt_record, new_rt_record) + return False + + +def should_redo_walk(realtime_collection, input_data): + if realtime_collection and input_data: + return _device_restarted(realtime_collection, input_data) + else: + return False diff --git a/splunk_connect_for_snmp_poller/mongo.py b/splunk_connect_for_snmp_poller/mongo.py index c2c049e..32d9fad 100644 --- a/splunk_connect_for_snmp_poller/mongo.py +++ b/splunk_connect_for_snmp_poller/mongo.py @@ -30,13 +30,49 @@ # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. +import os -from pymongo import MongoClient +from pymongo import MongoClient, ReturnDocument from pymongo.errors import ConnectionFailure -import os + +""" +In order to store some general data into Mongo we use the following structure. +Each WalkedHostsRepository can contain the following fields: +* _id: a unique key that represents a concrete host (always present) +* MIB-STATIC-DATA: a dictionary that contains some required-MIB data for further processing. 
For example, when + enriching the OIDs related to network interfaces, as part of the intial walk for a given host we plan to store + in this dictionary -at least- the following information: + "MIB-STATIC-DATA": { <----- GENERAL STATIC MIB DATA + "IF-MIB": { <----- NETWORK INTERFACES DATA + "ifNumber": 2, <----- TOTAL NUMBER OF NETWORK INTERFACES + "ifIndex": [1, 2], <----- INDEX MAPPING FOR OIDs + "ifDescr": ["lo", "eth0"], <----- INDEX MAPPING FOR OIDs (IF-MIB*.1 -> "lo", IF-MIB*.2 -> "eth0", ...) + } + } + + For example: + IF-MIB::ifNumber.0 = INTEGER: 2 + IF-MIB::ifIndex.1 = INTEGER: 1 + IF-MIB::ifIndex.2 = INTEGER: 2 + IF-MIB::ifDescr.1 = STRING: lo + IF-MIB::ifDescr.2 = STRING: eth0 + IF-MIB::ifType.1 = INTEGER: softwareLoopback(24) + IF-MIB::ifType.2 = INTEGER: ethernetCsmacd(6) + IF-MIB::ifPhysAddress.1 = STRING: + IF-MIB::ifPhysAddress.2 = STRING: 0:12:79:62:f9:40 + IF-MIB::ifAdminStatus.1 = INTEGER: up(1) + IF-MIB::ifAdminStatus.2 = INTEGER: up(1) + +* MIB_STATIC_DATA: a dictionary that contains some MIB real-time data that needs to be collected constantly. + At the moment, we only need to collect sysUpTimeInstance data in order to decide when we need to re-walk + a given host. 
+""" class WalkedHostsRepository: + MIB_REAL_TIME_DATA = "MIB-REAL-TIME-DATA" + MIB_STATIC_DATA = "MIB-STATIC-DATA" + def __init__(self, mongo_config): self._client = MongoClient( os.environ["MONGO_SERVICE_SERVICE_HOST"], @@ -69,3 +105,21 @@ def delete_host(self, host): def clear(self): self._walked_hosts.remove() + + def real_time_data_for(self, host): + full_collection = self._walked_hosts.find_one({"_id": host}) + if WalkedHostsRepository.MIB_REAL_TIME_DATA in full_collection: + return full_collection[WalkedHostsRepository.MIB_REAL_TIME_DATA] + else: + return None + + def update_real_time_data_for(self, host, input_dictionary): + if input_dictionary: + real_time_data_dictionary = { + WalkedHostsRepository.MIB_REAL_TIME_DATA: input_dictionary + } + self._walked_hosts.find_one_and_update( + {"_id": host}, + {"$set": real_time_data_dictionary}, + return_document=ReturnDocument.AFTER, + ) diff --git a/splunk_connect_for_snmp_poller/utilities.py b/splunk_connect_for_snmp_poller/utilities.py index e387649..48aa3a6 100644 --- a/splunk_connect_for_snmp_poller/utilities.py +++ b/splunk_connect_for_snmp_poller/utilities.py @@ -97,6 +97,12 @@ def parse_command_line_arguments(): parser.add_argument( "--meta_index", default="##META_INDEX##", help="Meta index for polling data" ) + parser.add_argument( + "--realtime_task_frequency", + type=int, + default=60, + help="Frequency in seconds for each real-time scheduler task", + ) return parser.parse_args() diff --git a/tests/test_real_time_data.py b/tests/test_real_time_data.py new file mode 100644 index 0000000..02f9696 --- /dev/null +++ b/tests/test_real_time_data.py @@ -0,0 +1,48 @@ +# +# Copyright 2021 Splunk Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from unittest import TestCase + +from splunk_connect_for_snmp_poller.manager.realtime.oid_constant import ( + OidConstant, +) +from splunk_connect_for_snmp_poller.manager.realtime.real_time_data import ( + should_redo_walk, +) + + +class ShouldRedoWalkTest(TestCase): + def test_invalid_collection(self): + self.assertFalse(should_redo_walk(None, None)) + self.assertFalse(should_redo_walk(None, {})) + self.assertFalse(should_redo_walk({}, None)) + + def test_when_restart_did_not_happen(self): + realtime_collection = { + OidConstant.SYS_UP_TIME_INSTANCE: {"value": "123", "type": "TimeTicks"}, + } + current_get_result = { + OidConstant.SYS_UP_TIME_INSTANCE: {"value": "123", "type": "TimeTicks"}, + } + self.assertFalse(should_redo_walk(realtime_collection, current_get_result)) + + def test_when_restart_happened(self): + realtime_collection = { + OidConstant.SYS_UP_TIME_INSTANCE: {"value": "123", "type": "TimeTicks"}, + } + current_get_result = { + OidConstant.SYS_UP_TIME_INSTANCE: {"value": "100", "type": "TimeTicks"}, + } + self.assertTrue(should_redo_walk(realtime_collection, current_get_result))