@@ -48,79 +48,79 @@ def __init__(self, name, init_config, instances):
             'name': 'aws_host',
             'default': urlparse(self.instance['url']).hostname,
         }
+        self.config = from_instance(self.instance)

-    def check(self, instance):
-        config = from_instance(instance)
-        admin_forwarder = config.admin_forwarder
-        jvm_rate = instance.get('gc_collectors_as_rate', False)
+    def check(self, _):
+        admin_forwarder = self.config.admin_forwarder
+        jvm_rate = self.instance.get('gc_collectors_as_rate', False)

         # Check ES version for this instance and define parameters
         # (URLs and metrics) accordingly
         try:
-            version = self._get_es_version(config)
+            version = self._get_es_version()
         except AuthenticationError:
             self.log.exception("The ElasticSearch credentials are incorrect")
             raise

-        health_url, stats_url, pshard_stats_url, pending_tasks_url = self._get_urls(version, config.cluster_stats)
+        health_url, stats_url, pshard_stats_url, pending_tasks_url = self._get_urls(version)
         stats_metrics = stats_for_version(version, jvm_rate)
-        if config.cluster_stats:
+        if self.config.cluster_stats:
             # Include Node System metrics
             stats_metrics.update(node_system_stats_for_version(version))
         pshard_stats_metrics = pshard_stats_for_version(version)

         # Load stats data.
         # This must happen before other URL processing as the cluster name
         # is retrieved here, and added to the tag list.
-        stats_url = self._join_url(config.url, stats_url, admin_forwarder)
-        stats_data = self._get_data(stats_url, config)
+        stats_url = self._join_url(stats_url, admin_forwarder)
+        stats_data = self._get_data(stats_url)
         if stats_data.get('cluster_name'):
             # retrieve the cluster name from the data, and append it to the
             # master tag list.
             cluster_name_tag = "cluster_name:{}".format(stats_data['cluster_name'])
-            config.tags.append(cluster_name_tag)
-            config.health_tags.append(cluster_name_tag)
-        self._process_stats_data(stats_data, stats_metrics, config)
+            self.config.tags.append(cluster_name_tag)
+            self.config.health_tags.append(cluster_name_tag)
+        self._process_stats_data(stats_data, stats_metrics)

         # Load cluster-wise data
         # Note: this is a cluster-wide query, might TO.
-        if config.pshard_stats:
-            send_sc = bubble_ex = not config.pshard_graceful_to
-            pshard_stats_url = self._join_url(config.url, pshard_stats_url, admin_forwarder)
+        if self.config.pshard_stats:
+            send_sc = bubble_ex = not self.config.pshard_graceful_to
+            pshard_stats_url = self._join_url(pshard_stats_url, admin_forwarder)
             try:
-                pshard_stats_data = self._get_data(pshard_stats_url, config, send_sc=send_sc)
-                self._process_pshard_stats_data(pshard_stats_data, config, pshard_stats_metrics)
+                pshard_stats_data = self._get_data(pshard_stats_url, send_sc=send_sc)
+                self._process_pshard_stats_data(pshard_stats_data, pshard_stats_metrics)
             except requests.ReadTimeout as e:
                 if bubble_ex:
                     raise
                 self.log.warning("Timed out reading pshard-stats from servers (%s) - stats will be missing", e)

         # Load the health data.
-        health_url = self._join_url(config.url, health_url, admin_forwarder)
-        health_data = self._get_data(health_url, config)
-        self._process_health_data(health_data, config, version)
+        health_url = self._join_url(health_url, admin_forwarder)
+        health_data = self._get_data(health_url)
+        self._process_health_data(health_data, version)

-        if config.pending_task_stats:
+        if self.config.pending_task_stats:
             # Load the pending_tasks data.
-            pending_tasks_url = self._join_url(config.url, pending_tasks_url, admin_forwarder)
-            pending_tasks_data = self._get_data(pending_tasks_url, config)
-            self._process_pending_tasks_data(pending_tasks_data, config)
+            pending_tasks_url = self._join_url(pending_tasks_url, admin_forwarder)
+            pending_tasks_data = self._get_data(pending_tasks_url)
+            self._process_pending_tasks_data(pending_tasks_data)

-        if config.index_stats and version >= [1, 0, 0]:
+        if self.config.index_stats and version >= [1, 0, 0]:
             try:
-                self._get_index_metrics(config, admin_forwarder, version)
+                self._get_index_metrics(admin_forwarder, version)
             except requests.ReadTimeout as e:
                 self.log.warning("Timed out reading index stats from servers (%s) - stats will be missing", e)

         # If we're here we did not have any ES conn issues
-        self.service_check(self.SERVICE_CHECK_CONNECT_NAME, AgentCheck.OK, tags=config.service_check_tags)
+        self.service_check(self.SERVICE_CHECK_CONNECT_NAME, AgentCheck.OK, tags=self.config.service_check_tags)

-    def _get_es_version(self, config):
+    def _get_es_version(self):
         """
         Get the running version of elasticsearch.
         """
         try:
-            data = self._get_data(config.url, config, send_sc=False)
+            data = self._get_data(self.config.url, send_sc=False)
             raw_version = data['version']['number']
             self.set_metadata('version', raw_version)
             # pre-release versions of elasticearch are suffixed with -rcX etc..
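Note on the hunk above: instance parsing has moved out of check(). from_instance(self.instance) now runs once in __init__ and is cached on self.config, so check() ignores its positional argument and every helper reads the shared config instead of receiving it as a parameter. A minimal, self-contained sketch of that pattern (the Config fields and the MyCheck class are placeholders for illustration, not the real check):

    from collections import namedtuple

    Config = namedtuple('Config', ['url', 'cluster_stats', 'tags'])

    def from_instance(instance):
        # Stand-in for the integration's real from_instance() helper.
        return Config(
            url=instance['url'],
            cluster_stats=instance.get('cluster_stats', False),
            tags=instance.get('tags', []),
        )

    class MyCheck(object):  # hypothetical class name
        def __init__(self, instance):
            self.instance = instance
            # Parsed once at construction; check() and the helpers reuse it.
            self.config = from_instance(instance)

        def check(self, _):
            # The per-run argument is ignored; all state lives on self.config.
            return self.config.url, self.config.cluster_stats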
@@ -130,30 +130,30 @@ def _get_es_version(self, config):
         except AuthenticationError:
             raise
         except Exception as e:
-            self.warning("Error while trying to get Elasticsearch version from %s %s", config.url, e)
+            self.warning("Error while trying to get Elasticsearch version from %s %s", self.config.url, e)
             version = [1, 0, 0]

         self.log.debug("Elasticsearch version is %s", version)
         return version

-    def _join_url(self, base, url, admin_forwarder=False):
+    def _join_url(self, url, admin_forwarder=False):
         """
         overrides `urlparse.urljoin` since it removes base url path
         https://docs.python.org/2/library/urlparse.html#urlparse.urljoin
         """
         if admin_forwarder:
-            return base + url
+            return self.config.url + url
         else:
-            return urljoin(base, url)
+            return urljoin(self.config.url, url)

-    def _get_index_metrics(self, config, admin_forwarder, version):
+    def _get_index_metrics(self, admin_forwarder, version):
         cat_url = '/_cat/indices?format=json&bytes=b'
-        index_url = self._join_url(config.url, cat_url, admin_forwarder)
-        index_resp = self._get_data(index_url, config)
+        index_url = self._join_url(cat_url, admin_forwarder)
+        index_resp = self._get_data(index_url)
         index_stats_metrics = index_stats_for_version(version)
         health_stat = {'green': 0, 'yellow': 1, 'red': 2}
         for idx in index_resp:
-            tags = config.tags + ['index_name:' + idx['index']]
+            tags = self.config.tags + ['index_name:' + idx['index']]
             # we need to remap metric names because the ones from elastic
             # contain dots and that would confuse `_process_metric()` (sic)
             index_data = {
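Aside on _join_url, further up in this hunk: when admin_forwarder is enabled, the endpoint path is appended to the configured URL verbatim so that any path prefix on the base URL (for example a forwarder route) survives; plain urljoin() drops that prefix because the Elasticsearch endpoints start with "/". A quick illustration of the difference (the proxy-style base URL below is made up for the example):

    from urllib.parse import urljoin  # urlparse.urljoin on Python 2

    base = "http://localhost:5000/proxy/es"  # hypothetical admin-forwarder base URL
    path = "/_cluster/health"

    print(urljoin(base, path))  # http://localhost:5000/_cluster/health (prefix lost)
    print(base + path)          # http://localhost:5000/proxy/es/_cluster/health (prefix kept)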
@@ -181,7 +181,7 @@ def _get_index_metrics(self, config, admin_forwarder, version):
                 desc = index_stats_metrics[metric]
                 self._process_metric(index_data, metric, *desc, tags=tags)

-    def _get_urls(self, version, cluster_stats):
+    def _get_urls(self, version):
         """
         Compute the URLs we need to hit depending on the running ES version
         """
@@ -190,18 +190,22 @@ def _get_urls(self, version, cluster_stats):

         if version >= [0, 90, 10]:
             pending_tasks_url = "/_cluster/pending_tasks"
-            stats_url = "/_nodes/stats" if cluster_stats else "/_nodes/_local/stats"
+            stats_url = "/_nodes/stats" if self.config.cluster_stats else "/_nodes/_local/stats"
             if version < [5, 0, 0]:
                 # version 5 errors out if the `all` parameter is set
                 stats_url += "?all=true"
         else:
             # legacy
             pending_tasks_url = None
-            stats_url = "/_cluster/nodes/stats?all=true" if cluster_stats else "/_cluster/nodes/_local/stats?all=true"
+            stats_url = (
+                "/_cluster/nodes/stats?all=true"
+                if self.config.cluster_stats
+                else "/_cluster/nodes/_local/stats?all=true"
+            )

         return health_url, stats_url, pshard_stats_url, pending_tasks_url

-    def _get_data(self, url, config, send_sc=True):
+    def _get_data(self, url, send_sc=True):
         """
         Hit a given URL and return the parsed json
         """
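Note on _get_urls above: only the source of cluster_stats changed (it is now read from self.config); the version gating is untouched. Elasticsearch 5.x and later reject the ?all=true parameter, so it is only appended for older versions, and pre-0.90.10 clusters fall back to the legacy /_cluster/nodes/... endpoints with no pending-tasks URL. A small standalone restatement of that branching with the outcomes spelled out (a free function for illustration, not the method itself):

    def stats_url_for(version, cluster_stats):
        # Mirrors the branching in _get_urls for the stats URL only.
        if version >= [0, 90, 10]:
            url = "/_nodes/stats" if cluster_stats else "/_nodes/_local/stats"
            if version < [5, 0, 0]:
                url += "?all=true"
            return url
        return "/_cluster/nodes/stats?all=true" if cluster_stats else "/_cluster/nodes/_local/stats?all=true"

    assert stats_url_for([7, 9, 2], False) == "/_nodes/_local/stats"
    assert stats_url_for([1, 7, 5], False) == "/_nodes/_local/stats?all=true"
    assert stats_url_for([0, 90, 5], True) == "/_cluster/nodes/stats?all=true"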
@@ -219,15 +223,15 @@ def _get_data(self, url, config, send_sc=True):
                     self.SERVICE_CHECK_CONNECT_NAME,
                     AgentCheck.CRITICAL,
                     message="Error {} when hitting {}".format(e, url),
-                    tags=config.service_check_tags,
+                    tags=self.config.service_check_tags,
                 )
             raise

         self.log.debug("request to url %s returned: %s", url, resp)

         return resp.json()

-    def _process_pending_tasks_data(self, data, config):
+    def _process_pending_tasks_data(self, data):
         p_tasks = defaultdict(int)
         average_time_in_queue = 0

@@ -247,23 +251,23 @@ def _process_pending_tasks_data(self, data, config):
         for metric in CLUSTER_PENDING_TASKS:
             # metric description
             desc = CLUSTER_PENDING_TASKS[metric]
-            self._process_metric(node_data, metric, *desc, tags=config.tags)
+            self._process_metric(node_data, metric, *desc, tags=self.config.tags)

-    def _process_stats_data(self, data, stats_metrics, config):
+    def _process_stats_data(self, data, stats_metrics):
         for node_data in itervalues(data.get('nodes', {})):
             metric_hostname = None
-            metrics_tags = list(config.tags)
+            metrics_tags = list(self.config.tags)

             # Resolve the node's name
             node_name = node_data.get('name')
             if node_name:
                 metrics_tags.append('node_name:{}'.format(node_name))

             # Resolve the node's hostname
-            if config.node_name_as_host:
+            if self.config.node_name_as_host:
                 if node_name:
                     metric_hostname = node_name
-            elif config.cluster_stats:
+            elif self.config.cluster_stats:
                 for k in ['hostname', 'host']:
                     if k in node_data:
                         metric_hostname = node_data[k]
@@ -272,9 +276,9 @@ def _process_stats_data(self, data, stats_metrics, config):
             for metric, desc in iteritems(stats_metrics):
                 self._process_metric(node_data, metric, *desc, tags=metrics_tags, hostname=metric_hostname)

-    def _process_pshard_stats_data(self, data, config, pshard_stats_metrics):
+    def _process_pshard_stats_data(self, data, pshard_stats_metrics):
         for metric, desc in iteritems(pshard_stats_metrics):
-            self._process_metric(data, metric, *desc, tags=config.tags)
+            self._process_metric(data, metric, *desc, tags=self.config.tags)

     def _process_metric(self, data, metric, xtype, path, xform=None, tags=None, hostname=None):
         """
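Note on the *desc unpacking used throughout: each metric description tuple expands into _process_metric's xtype, path and optional xform arguments. The helper's body sits outside this diff, but the debug message visible in the next hunk ("Metric not found: ...") shows the idea: walk a dot-separated path into the JSON payload and submit the value under the metric's type, skipping metrics that are absent. A rough, hypothetical sketch of that behaviour (simplified signature, not the check's actual implementation):

    def process_metric(submit_gauge, submit_rate, data, metric, xtype, path, xform=None, tags=None):
        # Walk the dotted path into the nested JSON payload.
        value = data
        for key in path.split('.'):
            if not isinstance(value, dict) or key not in value:
                return  # metric not present in this payload
            value = value[key]
        if xform is not None:
            value = xform(value)
        if xtype == 'gauge':
            submit_gauge(metric, value, tags=tags)
        else:
            submit_rate(metric, value, tags=tags)

    def emit(name, value, tags=None):
        print(name, value, tags)

    payload = {'indices': {'docs': {'count': 42}}}
    process_metric(emit, emit, payload, 'elasticsearch.docs.count', 'gauge', 'indices.docs.count')
    # -> elasticsearch.docs.count 42 None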
@@ -302,23 +306,23 @@ def _process_metric(self, data, metric, xtype, path, xform=None, tags=None, host
         else:
             self.log.debug("Metric not found: %s -> %s", path, metric)

-    def _process_health_data(self, data, config, version):
+    def _process_health_data(self, data, version):
         cluster_status = data.get('status')
-        if not self.cluster_status.get(config.url):
-            self.cluster_status[config.url] = cluster_status
+        if not self.cluster_status.get(self.config.url):
+            self.cluster_status[self.config.url] = cluster_status
             if cluster_status in ["yellow", "red"]:
-                event = self._create_event(cluster_status, tags=config.tags)
+                event = self._create_event(cluster_status, tags=self.config.tags)
                 self.event(event)

-        if cluster_status != self.cluster_status.get(config.url):
-            self.cluster_status[config.url] = cluster_status
-            event = self._create_event(cluster_status, tags=config.tags)
+        if cluster_status != self.cluster_status.get(self.config.url):
+            self.cluster_status[self.config.url] = cluster_status
+            event = self._create_event(cluster_status, tags=self.config.tags)
             self.event(event)

         cluster_health_metrics = health_stats_for_version(version)

         for metric, desc in iteritems(cluster_health_metrics):
-            self._process_metric(data, metric, *desc, tags=config.tags)
+            self._process_metric(data, metric, *desc, tags=self.config.tags)

         # Process the service check
         if cluster_status == 'green':
@@ -349,7 +353,10 @@ def _process_health_data(self, data, config, version):
             )

         self.service_check(
-            self.SERVICE_CHECK_CLUSTER_STATUS, status, message=msg, tags=config.service_check_tags + config.health_tags
+            self.SERVICE_CHECK_CLUSTER_STATUS,
+            status,
+            message=msg,
+            tags=self.config.service_check_tags + self.config.health_tags,
         )

     def _create_event(self, status, tags=None):
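Taken together, the check object is now fully configured at construction time and check() can be invoked with a throwaway argument. A hypothetical driver sketch (the ESCheck import path and the instance keys are assumptions based on the surrounding code, not confirmed by this diff):

    from datadog_checks.elastic import ESCheck  # assumed import path

    instance = {
        'url': 'http://localhost:9200',  # assumed option names
        'cluster_stats': False,
        'pshard_stats': True,
        'tags': ['env:dev'],
    }

    check = ESCheck('elastic', {}, [instance])  # from_instance() runs once, in __init__
    check.check(None)                           # the argument is ignored by the new signature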