setup.py
import json
import logging
import sys

import requests
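

# The config mapping consumed below is expected to look roughly like the
# following sketch, inferred from the keys this module reads; the URL and
# values shown are illustrative placeholders, not defaults from the source:
#
#   cerberus:
#       cerberus_enabled: True
#       cerberus_url: http://0.0.0.0:8080
#       check_applicaton_routes: False
#   kraken:
#       exit_on_failure: False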
def get_status(config, start_time, end_time):
    """
    Get Cerberus status
    """
    cerberus_status = True
    check_application_routes = False
    application_routes_status = True
    if config["cerberus"]["cerberus_enabled"]:
        cerberus_url = config["cerberus"]["cerberus_url"]
        check_application_routes = \
            config["cerberus"]["check_applicaton_routes"]
        if not cerberus_url:
            logging.error(
                "URL where Cerberus publishes the True/False signal "
                "is not provided."
            )
            sys.exit(1)
        cerberus_status = requests.get(cerberus_url, timeout=60).content
        cerberus_status = cerberus_status == b"True"
        # Fail if the application routes monitored by Cerberus
        # experienced downtime during the chaos
        if check_application_routes:
            application_routes_status, unavailable_routes = application_status(
                cerberus_url,
                start_time,
                end_time
            )
            if not application_routes_status:
                logging.error(
                    "Application routes: %s monitored by Cerberus "
                    "encountered downtime during the run, failing",
                    unavailable_routes
                )
            else:
                logging.info(
                    "Application routes being monitored "
                    "didn't encounter any downtime during the run!"
                )
        if not cerberus_status:
            logging.error(
                "Received a no-go signal from Cerberus, looks like "
                "the cluster is unhealthy. Please check the Cerberus "
                "report for more details. Test failed."
            )
        if not application_routes_status or not cerberus_status:
            sys.exit(1)
        else:
            logging.info(
                "Received a go signal from Cerberus, the cluster is healthy. "
                "Test passed."
            )
    return cerberus_status


def publish_kraken_status(config, failed_post_scenarios, start_time, end_time):
    """
    Publish kraken status to Cerberus
    """
    cerberus_status = get_status(config, start_time, end_time)
    if not cerberus_status:
        if failed_post_scenarios:
            if config["kraken"]["exit_on_failure"]:
                logging.info(
                    "Cerberus status is not healthy and post action scenarios "
                    "are still failing, exiting kraken run"
                )
                sys.exit(1)
            else:
                logging.info(
                    "Cerberus status is not healthy and post action scenarios "
                    "are still failing"
                )
    else:
        if failed_post_scenarios:
            if config["kraken"]["exit_on_failure"]:
                logging.info(
                    "Cerberus status is healthy but post action scenarios "
                    "are still failing, exiting kraken run"
                )
                sys.exit(1)
            else:
                logging.info(
                    "Cerberus status is healthy but post action scenarios "
                    "are still failing"
                )


def application_status(cerberus_url, start_time, end_time):
    """
    Check application availability
    """
    if not cerberus_url:
        logging.error(
            "URL where Cerberus publishes the True/False signal "
            "is not provided."
        )
        sys.exit(1)
    else:
        duration = (end_time - start_time) / 60
        url = "{baseurl}/history?loopback={duration}".format(
            baseurl=cerberus_url,
            duration=str(duration)
        )
        logging.info(
            "Scraping the metrics for the test "
            "duration from Cerberus URL: %s", url
        )
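        # Expected response shape, inferred from the parsing below (the real
        # Cerberus payload may carry more fields than shown here):
        #
        #   {"history": {"failures": [{"component": "route",
        #                              "name": "<route-name>"}, ...]}}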
        try:
            failed_routes = []
            status = True
            metrics = requests.get(url, timeout=60).content
            metrics_json = json.loads(metrics)
            for entry in metrics_json["history"]["failures"]:
                if entry["component"] == "route":
                    failed_routes.append(entry["name"])
                    status = False
        except Exception as e:
            logging.error(
                "Failed to scrape metrics from Cerberus API at %s: %s",
                url,
                e
            )
            sys.exit(1)
        return status, set(failed_routes)
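

# A minimal standalone sketch of how these helpers might be invoked; the
# config values and timestamps below are illustrative assumptions, not
# defaults shipped with the tool.
if __name__ == "__main__":
    import time

    logging.basicConfig(level=logging.INFO)
    sample_config = {
        "cerberus": {
            # Assumed values; set cerberus_enabled to True and point
            # cerberus_url at a live Cerberus instance to exercise the checks.
            "cerberus_enabled": False,
            "cerberus_url": "",
            "check_applicaton_routes": False,
        },
        "kraken": {"exit_on_failure": False},
    }
    start = time.time()
    # ... run chaos scenarios here ...
    end = time.time()
    publish_kraken_status(sample_config, [], start, end)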