Commit
This commit does not belong to any branch on this repository, and may belong to a fork outside of the repository.
fixes bug 962638 - Create the LagLog implementation and endpoint, r=twobraids
- Loading branch information
Showing 3 changed files with 176 additions and 0 deletions.
There are no files selected for viewing
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,78 @@ | ||
# This Source Code Form is subject to the terms of the Mozilla Public | ||
# License, v. 2.0. If a copy of the MPL was not distributed with this | ||
# file, You can obtain one at http://mozilla.org/MPL/2.0/. | ||
|
||
import calendar | ||
from collections import defaultdict | ||
|
||
from socorro.external.postgresql.base import PostgreSQLBase | ||
|
||
|
||
class LagLog(PostgreSQLBase):
    """Expose replication lag readings recorded in the ``lag_log`` table."""

    def get(self, **kwargs):
        """Return per-replica lag series with 12-point rolling averages.

        Returns a dict ``{'replicas': [...]}``, one entry per replica
        (sorted by name), each carrying:

        * ``rows`` -- raw points ``{'x': unix_timestamp, 'y': lag,
          'master': master}`` in ascending ``moment`` order
        * ``averages`` -- rolling average points ``{'x': ..., 'y': int}``
          over the current reading and the 11 preceding ones
        * ``last_value`` / ``last_average`` -- most recent raw/average lag
        * ``message`` -- ``'CRITICAL'``, ``'WARNING'`` or ``None`` depending
          on how ``last_average`` compares to the configured byte thresholds

        ``kwargs`` is accepted for API uniformity but ignored.
        """
        sql = """
            /* socorro.external.postgresql.laglog.LagLog.get */
            SELECT
                replica_name,
                lag,
                moment,
                master,
                AVG(lag)
                    OVER (
                        PARTITION BY replica_name
                        ORDER BY moment DESC
                        ROWS BETWEEN 0 FOLLOWING AND 11 FOLLOWING
                    ) AS average
            FROM lag_log
            ORDER BY moment
        """

        results = self.query(sql)
        averages = defaultdict(list)
        # `all_rows` (not `all`) so we don't shadow the builtin
        all_rows = defaultdict(list)
        for replica_name, lag, moment, master, average in results:
            timestamp = calendar.timegm(moment.utctimetuple())
            all_rows[replica_name].append({
                'x': timestamp,
                'y': lag,
                'master': master
            })
            averages[replica_name].append({
                'x': timestamp,
                # AVG() can come back NULL (None); treat that as zero lag
                'y': int(average or 0)
            })

        max_bytes_critical = self.context.laglog.max_bytes_critical
        max_bytes_warning = self.context.laglog.max_bytes_warning

        replicas = []
        for name, rows in all_rows.items():
            message = None
            last_average = averages[name][-1]['y']
            last_value = rows[-1]['y']
            if last_average > max_bytes_critical:
                message = 'CRITICAL'
            elif last_average > max_bytes_warning:
                message = 'WARNING'

            replicas.append({
                'name': name,
                'rows': rows,
                'averages': averages[name],
                'message': message,
                'last_average': last_average,
                'last_value': last_value,
            })
        replicas.sort(key=lambda replica: replica['name'])

        return {'replicas': replicas}
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,83 @@ | ||
# This Source Code Form is subject to the terms of the Mozilla Public | ||
# License, v. 2.0. If a copy of the MPL was not distributed with this | ||
# file, You can obtain one at http://mozilla.org/MPL/2.0/. | ||
|
||
import datetime | ||
import random | ||
|
||
from nose.plugins.attrib import attr | ||
|
||
from socorro.external.postgresql.laglog import LagLog | ||
from socorro.lib.util import DotDict | ||
|
||
from unittestbase import PostgreSQLTestCase | ||
|
||
|
||
@attr(integration='postgres')  # for nosetests
class IntegrationTestLagLog(PostgreSQLTestCase):
    """Integration tests for the LagLog service against a real Postgres."""

    def tearDown(self):
        """Clean up the database."""
        cursor = self.connection.cursor()
        cursor.execute("TRUNCATE lag_log")
        self.connection.commit()
        super(IntegrationTestLagLog, self).tearDown()

    def _generate_random_data(self, names, points=40):
        """Insert `points` random lag_log rows per replica in `names`,
        one reading per minute leading up to now.

        The first replica's lag trends downward over time; the others
        trend upward, so tests see distinguishable series.
        """
        now = datetime.datetime.utcnow()

        rows = []
        for i in range(points):
            moment = now - datetime.timedelta(seconds=(points - i) * 60)
            for name in names:
                # `lag_bytes` (not `bytes`) so we don't shadow the builtin
                if name == names[0]:  # first replica: shrinking lag
                    lag_bytes = random.randint(180, 500 - i)
                else:  # remaining replicas: growing lag
                    lag_bytes = random.randint(170 + i * 10, 300 + i * 12)
                rows.append((name, moment, lag_bytes))

        cursor = self.connection.cursor()
        # parameterized batch insert instead of %-interpolated SQL strings
        cursor.executemany(
            "INSERT INTO lag_log "
            "(replica_name, moment, lag, master) "
            "VALUES (%s, %s, %s, 'master1')",
            rows
        )
        self.connection.commit()

    def _get_model(self, **overrides):
        """Build a LagLog instance with test thresholds, applying any
        config `overrides` on top."""
        config = self.config
        config['laglog'] = DotDict({
            'max_bytes_warning': 1000,
            'max_bytes_critical': 2000,
        })
        config.update(overrides)
        return LagLog(config=config)

    def test_get_empty(self):
        """An empty lag_log table yields no replicas."""
        laglog = self._get_model()
        res = laglog.get()
        self.assertEqual(res, {'replicas': []})

    def test_get(self):
        """Seeded data comes back grouped and sorted by replica name,
        with rolling averages matching a 12-point window."""
        self._generate_random_data(['DB1', 'DB2'], points=20)
        laglog = self._get_model()
        res = laglog.get()
        self.assertTrue(res['replicas'])

        names = [x['name'] for x in res['replicas']]
        self.assertEqual(names, ['DB1', 'DB2'])
        db1s, db2s = res['replicas']

        self.assertTrue(db1s['rows'])
        self.assertTrue(db1s['averages'])
        self.assertEqual(db1s['name'], 'DB1')
        self.assertEqual(
            db1s['last_average'],
            db1s['averages'][-1]['y']
        )
        # recompute the final rolling average from the raw points and
        # check it agrees with the SQL window-function result
        last = db1s['rows'][-12:]
        assert len(last) == 12
        sum_ys = sum(x['y'] for x in last)
        calculated_last_average = int(1.0 * sum_ys / len(last))
        self.assertEqual(calculated_last_average, db1s['last_average'])