diff --git a/splunk_connect_for_snmp_poller/manager/poller.py b/splunk_connect_for_snmp_poller/manager/poller.py
index 04b320b..4548e5c 100644
--- a/splunk_connect_for_snmp_poller/manager/poller.py
+++ b/splunk_connect_for_snmp_poller/manager/poller.py
@@ -56,6 +56,10 @@ def __init__(self, args, server_config):
         self._local_snmp_engine = SnmpEngine()
         self._unmatched_devices = {}
         self._lock = threading.Lock()
+        self._force_refresh = False
+
+    def force_inventory_refresh(self):
+        self._force_refresh = True
 
     def __get_splunk_indexes(self):
         return {
@@ -88,7 +92,14 @@ def __check_inventory(self):
         )
 
         # update job when either inventory changes or config changes
-        if server_config_modified or inventory_config_modified:
+        if server_config_modified or inventory_config_modified or self._force_refresh:
+            logger.info(
+                f"Refreshing inventory and config: server_config_modified = {server_config_modified}, "
+                f"inventory_config_modified = {inventory_config_modified}, "
+                f"force_refresh = {self._force_refresh}"
+            )
+            self._force_refresh = False
+
             inventory_hosts = set()
             profiles = get_profiles(self._server_config)
             for ir in parse_inventory_file(self._args.inventory, profiles):
@@ -195,6 +206,8 @@ def __start_realtime_scheduler_task(self):
             self.__get_splunk_indexes(),
             self._server_config,
             self._local_snmp_engine,
+            self.force_inventory_refresh,
+            False,
         )
 
         schedule.every(self._args.matching_task_frequency).seconds.do(
@@ -208,6 +221,8 @@
             self.__get_splunk_indexes(),
             self._server_config,
             self._local_snmp_engine,
+            self.force_inventory_refresh,
+            True,
         )
 
     def add_device_for_profile_matching(self, device: InventoryRecord):
diff --git a/splunk_connect_for_snmp_poller/manager/poller_utilities.py b/splunk_connect_for_snmp_poller/manager/poller_utilities.py
index a5a699c..11a6101 100644
--- a/splunk_connect_for_snmp_poller/manager/poller_utilities.py
+++ b/splunk_connect_for_snmp_poller/manager/poller_utilities.py
@@ -16,7 +16,6 @@
 import csv
 import logging.config
 import threading
-from pathlib import Path
 
 import schedule
 from pysnmp.hlapi import ObjectIdentity, ObjectType, UdpTransportTarget, getCmd
@@ -62,13 +61,12 @@ def onetime_task(inventory_record: InventoryRecord, server_config, splunk_indexe
     return schedule.CancelJob
 
 
-def refresh_inventory(inventory_file_path):
-    Path(inventory_file_path).touch()
-
+def refresh_inventory(force_inventory_refresh):
+    force_inventory_refresh()
     return schedule.CancelJob
 
 
-def parse_inventory_file(inventory_file_path, profiles):
+def parse_inventory_file(inventory_file_path, profiles, fetch_frequency):
     with open(inventory_file_path, newline="") as inventory_file:
         for agent in csv.DictReader(inventory_file, delimiter=","):
             if _should_process_current_line(agent):
@@ -77,7 +75,7 @@
                     agent["version"],
                     agent["community"],
                     agent["profile"],
-                    get_frequency(agent, profiles, 60),
+                    get_frequency(agent, profiles, 60) if fetch_frequency else None,
                 )
 
 
@@ -162,6 +160,8 @@ def automatic_realtime_job(
     splunk_indexes,
     server_config,
     local_snmp_engine,
+    force_inventory_refresh,
+    initial_walk,
 ):
     job_thread = threading.Thread(
         target=automatic_realtime_task,
@@ -171,6 +171,8 @@
             splunk_indexes,
             server_config,
             local_snmp_engine,
+            force_inventory_refresh,
+            initial_walk,
         ],
     )
     job_thread.start()
@@ -182,10 +184,12 @@ def automatic_realtime_task(
     splunk_indexes,
     server_config,
     local_snmp_engine,
+    force_inventory_refresh,
+    initial_walk,
 ):
     try:
         for inventory_record in parse_inventory_file(
-            inventory_file_path, profiles=None
+            inventory_file_path, profiles=None, fetch_frequency=False
         ):
             db_host_id = return_database_id(inventory_record.host)
             sys_up_time = _extract_sys_uptime_instance(
@@ -207,11 +211,11 @@ def automatic_realtime_task(
                     server_config,
                     splunk_indexes,
                 )
-                # touch inventory file after 2 min to trigger reloading of inventory with new walk data
-                schedule.every(2).minutes.do(
-                    refresh_inventory,
-                    inventory_file_path,
-                )
+                if not initial_walk:
+                    # force inventory reloading after 2 min with new walk data
+                    schedule.every(2).minutes.do(
+                        refresh_inventory, force_inventory_refresh
+                    )
             _update_mongo(
                 all_walked_hosts_collection,
                 db_host_id,
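Note on the refresh path introduced above: `refresh_inventory` no longer touches the inventory file to trigger a reload; it calls the poller's `force_inventory_refresh` callback, which only sets `self._force_refresh` for the next `__check_inventory` run, and it still returns `schedule.CancelJob` so the job fires exactly once. Below is a minimal, standalone sketch of that one-shot callback pattern with the `schedule` library; the `request_refresh` stub and the run loop are illustrative stand-ins, not part of this patch:

```python
import time

import schedule


def request_refresh():
    # Illustrative stand-in for Poller.force_inventory_refresh (assumption for
    # this sketch): the real method only flips self._force_refresh, which
    # __check_inventory consumes on its next scheduled run.
    print("inventory refresh requested")


def refresh_inventory(force_inventory_refresh):
    # Same shape as the patched helper: invoke the callback, then return
    # CancelJob so the schedule library drops this job after a single run.
    force_inventory_refresh()
    return schedule.CancelJob


initial_walk = False
if not initial_walk:
    # Mirrors the patched guard: the one-time refresh is queued two minutes
    # after the walk only when initial_walk is False.
    schedule.every(2).minutes.do(refresh_inventory, request_refresh)

while schedule.jobs:
    schedule.run_pending()
    time.sleep(1)
```

Because the callback only flips a flag, the delayed refresh no longer depends on file modification timestamps being noticed by the poller.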