More tests (#60)
* test get_pod_usage

* test application slack

* report to coveralls
hjacobs committed Dec 3, 2018
1 parent faa3731 commit 0d56c49
Showing 5 changed files with 164 additions and 74 deletions.
.travis.yml: 4 changes (3 additions, 1 deletion)
@@ -1,5 +1,5 @@
dist: xenial
sudo: required
sudo: yes
language: python
python:
- "3.7"
@@ -10,3 +10,5 @@ install:
- pipenv install --dev
script:
- make test docker
after_success:
- coveralls
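
(Note, not part of the diff: Travis runs after_success only for builds whose script step passed, so the coveralls command uploads the coverage data collected during the test run, presumably produced by pytest-cov inside "make test", to coveralls.io.)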
Pipfile: 1 change (1 addition, 0 deletions)
@@ -18,6 +18,7 @@ pytest-watch = "*"
black = "*"
"boto3" = "*"
pytest-cov = "*"
coveralls = "*"

[requires]
python_version = "3.7"
Pipfile.lock: 39 changes (38 additions, 1 deletion)

(Generated file; diff not rendered.)

kube_resource_report/report.py: 154 changes (85 additions, 69 deletions)
@@ -62,6 +62,10 @@
RESOURCE_PATTERN = re.compile(r"^(\d*)(\D*)$")


def new_resources():
return {"cpu": 0, "memory": 0}
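
(Note, not part of the diff: new_resources returns a fresh {"cpu": 0, "memory": 0} dict on each call, so every node and pod below gets its own counters rather than sharing a single mutable dict.)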


def parse_resource(v):
"""
>>> parse_resource('100m')
@@ -114,6 +118,69 @@ def json_default(obj):
logger = logging.getLogger(__name__)


def get_node_usage(cluster, nodes: dict):
try:
# https://github.com/kubernetes/community/blob/master/contributors/design-proposals/instrumentation/resource-metrics-api.md
for i, url in enumerate(
[
"/apis/metrics.k8s.io/v1beta1/nodes",
"/api/v1/namespaces/kube-system/services/heapster/proxy/apis/metrics/v1alpha1/nodes",
]
):
try:
response = request(cluster, url)
response.raise_for_status()
except Exception as e:
if i == 0:
logger.warning("Failed to query metrics: %s", e)
else:
raise
if response.ok:
break
for item in response.json()["items"]:
key = item["metadata"]["name"]
node = nodes.get(key)
if node:
usage = collections.defaultdict(float)
for k, v in item.get("usage", {}).items():
usage[k] += parse_resource(v)
node["usage"] = usage
except Exception:
logger.exception("Failed to get node usage metrics")
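
(Note, not part of the diff: get_node_usage and get_pod_usage first query the metrics.k8s.io resource-metrics API and only fall back to the Heapster proxy endpoint if that fails; an error on the first URL is logged as a warning, while an error on the fallback is re-raised and caught by the outer handler, which logs it and leaves the usage values untouched.)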


def get_pod_usage(cluster, pods: dict):
try:
# https://github.com/kubernetes/community/blob/master/contributors/design-proposals/instrumentation/resource-metrics-api.md
for i, url in enumerate(
[
"/apis/metrics.k8s.io/v1beta1/pods",
"/api/v1/namespaces/kube-system/services/heapster/proxy/apis/metrics/v1alpha1/pods",
]
):
try:
response = request(cluster, url)
response.raise_for_status()
except Exception as e:
if i == 0:
logger.warning("Failed to query metrics: %s", e)
else:
raise
if response.ok:
break
for item in response.json()["items"]:
key = (item["metadata"]["namespace"], item["metadata"]["name"])
pod = pods.get(key)
if pod:
usage = collections.defaultdict(float)
for container in item["containers"]:
for k, v in container.get("usage", {}).items():
usage[k] += parse_resource(v)
pod["usage"] = usage
except Exception:
logger.exception("Failed to get pod usage metrics")


def query_cluster(
cluster, executor, system_namespaces, additional_cost_per_cluster, no_ingress_status, node_label
):
@@ -142,15 +209,14 @@ def query_cluster(
cluster_allocatable = collections.defaultdict(float)
cluster_requests = collections.defaultdict(float)
user_requests = collections.defaultdict(float)
cluster_usage = collections.defaultdict(float)
node_count = collections.defaultdict(int)
cluster_cost = additional_cost_per_cluster
for node in response.json()["items"]:
nodes[node["metadata"]["name"]] = node
node["capacity"] = {}
node["allocatable"] = {}
node["requests"] = {"cpu": 0, "memory": 0}
node["usage"] = {"cpu": 0, "memory": 0}
node["requests"] = new_resources()
node["usage"] = new_resources()
for k, v in node["status"].get("capacity", {}).items():
parsed = parse_resource(v)
node["capacity"][k] = parsed
@@ -173,35 +239,12 @@
node["cost"] = pricing.get_node_cost(region, instance_type, is_spot)
cluster_cost += node["cost"]

try:
# https://github.com/kubernetes/community/blob/master/contributors/design-proposals/instrumentation/resource-metrics-api.md
for i, url in enumerate(
[
"/apis/metrics.k8s.io/v1beta1/nodes",
"/api/v1/namespaces/kube-system/services/heapster/proxy/apis/metrics/v1alpha1/nodes",
]
):
try:
response = request(cluster, url)
response.raise_for_status()
except Exception as e:
if i == 0:
logger.warning("Failed to query metrics: %s", e)
else:
raise
if response.ok:
break
for item in response.json()["items"]:
key = item["metadata"]["name"]
node = nodes.get(key)
if node:
usage = collections.defaultdict(float)
for k, v in item.get("usage", {}).items():
usage[k] += parse_resource(v)
cluster_usage[k] += parse_resource(v)
node["usage"] = usage
except Exception:
logger.exception("Failed to query Heapster metrics")
get_node_usage(cluster, nodes)

cluster_usage = collections.defaultdict(float)
for node in nodes.values():
for k, v in node['usage'].items():
cluster_usage[k] += v

cost_per_cpu = cluster_cost / cluster_allocatable["cpu"]
cost_per_memory = cluster_cost / cluster_allocatable["memory"]
@@ -231,7 +274,7 @@ def query_cluster(
"requests": requests,
"application": application,
"cost": cost,
"usage": {"cpu": 0, "memory": 0},
"usage": new_resources(),
}

hourly_cost = cluster_cost / HOURS_PER_MONTH
@@ -266,43 +309,16 @@ def query_cluster(
"ingresses": [],
}

cluster_slack_cost = 0
get_pod_usage(cluster, pods)

try:
# https://github.com/kubernetes/community/blob/master/contributors/design-proposals/instrumentation/resource-metrics-api.md
for i, url in enumerate(
[
"/apis/metrics.k8s.io/v1beta1/pods",
"/api/v1/namespaces/kube-system/services/heapster/proxy/apis/metrics/v1alpha1/pods",
]
):
try:
response = request(cluster, url)
response.raise_for_status()
except Exception as e:
if i == 0:
logger.warning("Failed to query metrics: %s", e)
else:
raise
if response.ok:
break
for item in response.json()["items"]:
key = (item["metadata"]["namespace"], item["metadata"]["name"])
pod = pods.get(key)
if pod:
usage = collections.defaultdict(float)
for container in item["containers"]:
for k, v in container.get("usage", {}).items():
usage[k] += parse_resource(v)
pod["usage"] = usage
usage_cost = max(
pod["usage"]["cpu"] * cost_per_cpu,
pod["usage"]["memory"] * cost_per_memory,
)
pod["slack_cost"] = pod["cost"] - usage_cost
cluster_slack_cost += pod["slack_cost"]
except Exception:
logger.exception("Failed to query Heapster metrics")
cluster_slack_cost = 0
for pod in pods.values():
usage_cost = max(
pod["usage"]["cpu"] * cost_per_cpu,
pod["usage"]["memory"] * cost_per_memory,
)
pod["slack_cost"] = pod["cost"] - usage_cost
cluster_slack_cost += pod["slack_cost"]

cluster_summary["slack_cost"] = min(cluster_cost, cluster_slack_cost)
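
A worked example (not part of the diff) of the slack computation above, using the figures from tests/test_report.py below; the 100.0/month cluster cost follows from the test's assertions, while the single allocatable CPU core is an assumption:

# cluster: 100.0/month total cost, 1Gi allocatable memory (per the test fixture),
# assumed 1 allocatable CPU core
cost_per_cpu = 100.0
cost_per_memory = 100.0 / (1024 ** 3)

pod_cost = 50.0                                      # data['myapp']['cost'] in the test
usage = {"cpu": 0.05, "memory": 256 * 1024 ** 2}     # 50m / 256Mi, half of the requests
usage_cost = max(usage["cpu"] * cost_per_cpu,        # 5.0
                 usage["memory"] * cost_per_memory)  # 25.0
slack_cost = pod_cost - usage_cost                   # 25.0, i.e. half of the pod cost is slack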

tests/test_report.py: 40 changes (37 additions, 3 deletions)
@@ -4,7 +4,7 @@
from pathlib import Path

from kube_resource_report.cluster_discovery import Cluster
from kube_resource_report.report import generate_report, HOURS_PER_MONTH, parse_resource
from kube_resource_report.report import generate_report, HOURS_PER_MONTH, parse_resource, get_pod_usage, new_resources

from unittest.mock import MagicMock

@@ -65,6 +65,25 @@ def fake_responses():
}


@pytest.fixture
def fake_metric_responses():
return {
"/apis/metrics.k8s.io/v1beta1/pods": {
"items": [
{
"metadata": {"namespace": "default", "name": "pod-1"},
"containers": [
{
# 50% of requested resources are used
"usage": {"cpu": "50m", "memory": "256Mi"}
}
]
}
]
}
}
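
(Note, not part of the diff: the 50m CPU / 256Mi usage above is exactly half of the 100m / 512Mi that the pod-1 fixture requests, per the requests assertion in test_application_report below, which is what makes half of the pod's cost show up as slack.)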


@pytest.fixture
def output_dir(tmpdir):
return tmpdir.mkdir("output")
@@ -130,8 +149,11 @@ def test_cluster_cost(fake_generate_report, fake_responses):
# assert cost_per_hour == cost_per_user_request_hour_cpu + cost_per_user_request_hour_memory


def test_application_report(output_dir, fake_generate_report, fake_responses):
fake_generate_report(fake_responses)
def test_application_report(output_dir, fake_generate_report, fake_responses, fake_metric_responses):

# merge responses to get usage metrics and slack costs
all_responses = {**fake_responses, **fake_metric_responses}
fake_generate_report(all_responses)

expected = set(['index.html', 'applications.html', 'application-metrics.json'])
paths = set()
@@ -148,3 +170,15 @@ def test_application_report(output_dir, fake_generate_report, fake_responses):
assert data['myapp']['requests'] == {'cpu': 0.1, 'memory': 512 * 1024**2}
# the "myapp" pod consumes 1/2 of cluster capacity (512Mi of 1Gi memory)
assert data['myapp']['cost'] == 50.0
# only 1/2 of requested resources are used => 50% of costs are slack
assert data['myapp']['slack_cost'] == 25.0


def test_get_pod_usage(monkeypatch, fake_metric_responses):
monkeypatch.setattr(
"kube_resource_report.report.request",
lambda cluster, path: MagicMock(json=lambda: fake_metric_responses.get(path)),
)
pods = {('default', 'pod-1'): {'usage': new_resources()}}
get_pod_usage(None, pods)
assert pods[('default', 'pod-1')]['usage']['cpu'] == 0.05
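
Not part of the commit: get_node_usage gets no direct test here. A sketch of an analogous test, under the assumption that parse_resource converts "500m" to 0.5 cores just as it converts the "50m" above to 0.05, could look like this (it would also need get_node_usage added to the import from kube_resource_report.report at the top of the module):

def test_get_node_usage(monkeypatch):
    fake_node_metrics = {
        "/apis/metrics.k8s.io/v1beta1/nodes": {
            "items": [
                {
                    "metadata": {"name": "node-1"},
                    "usage": {"cpu": "500m", "memory": "1Gi"},
                }
            ]
        }
    }
    monkeypatch.setattr(
        "kube_resource_report.report.request",
        lambda cluster, path: MagicMock(json=lambda: fake_node_metrics.get(path)),
    )
    nodes = {"node-1": {"usage": new_resources()}}
    get_node_usage(None, nodes)
    # 500m -> 0.5 cores; 1Gi -> 1024**3 bytes
    assert nodes["node-1"]["usage"]["cpu"] == 0.5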
