Bug 1184966 - perfherder should let harness do summarization
jmaher authored and wlach committed Aug 7, 2015
1 parent 6130e1f commit 61a4291
Showing 3 changed files with 59 additions and 2 deletions.
24 changes: 24 additions & 0 deletions tests/etl/test_perf_data_adapters.py
@@ -3,6 +3,7 @@
# file, you can obtain one at http://mozilla.org/MPL/2.0/.

import json
import zlib

from tests.sampledata import SampleData
from treeherder.etl.perf_data_adapters import TalosDataAdapter
@@ -50,4 +51,27 @@ def test_adapt_and_load():
        datum['blob'] = json.dumps({'talos_data': [datum['blob']]})
        tda.adapt_and_load(reference_data, job_data, datum)

        # we upload a summary with a suite and subtest values, +1 for suite
        if 'summary' in datum['blob']:
            results = json.loads(zlib.decompress(tda.performance_artifact_placeholders[-1][4]))
            data = json.loads(datum['blob'])['talos_data'][0]
            assert results["blob"]["performance_series"]["geomean"] == data['summary']['suite']

            # deal with the subtests now
            subtests = len(data['summary']['subtests'])
            for iter in range(0, subtests):
                subresults = json.loads(zlib.decompress(tda.performance_artifact_placeholders[-1 - iter][4]))
                if 'subtest_signatures' in subresults["blob"]['signature_properties']:
                    # ignore summary signatures
                    continue

                subdata = data['summary']['subtests'][subresults["blob"]['signature_properties']['test']]
                for datatype in ['min', 'max', 'mean', 'median', 'std']:
                    print datatype
                    assert subdata[datatype] == subresults["blob"]["performance_series"][datatype]
        else:
            # FIXME: the talos data blob we're currently using contains datums with summaries and those without
            # we should probably test non-summarized data as well
            pass

    assert result_count == len(tda.performance_artifact_placeholders)
7 changes: 7 additions & 0 deletions tests/sample_data/artifacts/performance/talos_perf.json
@@ -26,6 +26,13 @@
                "rss": false
            }
        },
        "summary": {
            "suite": 3141.00,
            "subtests": {
                "dhtml.html": {"min": 1, "max": 100, "std": 0.75, "mean": 50, "median": 50},
                "tablemutation.html": {"min": 1, "max": 100, "std": 0.75, "mean": 50, "median": 50}
            }
        },
        "results": {
            "dhtml.html": [
                1273,
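
The "summary" block added above is what the harness now ships alongside the raw replicates: per-subtest min/max/mean/median/std plus a single suite value. As a rough, hypothetical sketch only (the helper names are invented, the replicate values are made up, and the exact statistics talos computes may differ), a harness-side summarizer could look like this:

    import math

    def summarize_subtest(replicates):
        # Per-subtest statistics matching the keys the adapter extracts:
        # min, max, mean, median, std (population standard deviation here).
        n = len(replicates)
        mean = sum(replicates) / float(n)
        std = math.sqrt(sum((x - mean) ** 2 for x in replicates) / float(n))
        ordered = sorted(replicates)
        if n % 2:
            median = ordered[n // 2]
        else:
            median = (ordered[n // 2 - 1] + ordered[n // 2]) / 2.0
        return {"min": min(replicates), "max": max(replicates),
                "mean": mean, "median": median, "std": std}

    def summarize_suite(subtests):
        # Suite roll-up as a geometric mean of the subtest medians
        # (assumes all medians are positive).
        medians = [s["median"] for s in subtests.values()]
        return math.exp(sum(math.log(m) for m in medians) / len(medians))

    # Replicate values below are invented for illustration.
    results = {"dhtml.html": [1273, 1280, 1290],
               "tablemutation.html": [91, 99, 102]}
    subtests = dict((name, summarize_subtest(reps))
                    for name, reps in results.items())
    summary = {"suite": summarize_suite(subtests), "subtests": subtests}

With a blob shaped like this, Perfherder no longer has to derive the numbers itself; the adapter changes below simply copy the harness values through.
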
30 changes: 28 additions & 2 deletions treeherder/etl/perf_data_adapters.py
@@ -89,6 +89,11 @@ def _round(num):
        # that inflate the size of the stored data structure
        return round(num, 2)

    @staticmethod
    def _extract_summary_data(suite_data, summary):
        suite_data["geomean"] = summary["suite"]
        return suite_data

    @staticmethod
    def _calculate_summary_data(job_id, result_set_id, push_timestamp, results):
        values = []
@@ -108,6 +113,19 @@ def _calculate_summary_data(job_id, result_set_id, push_timestamp, results):
            "geomean": PerformanceDataAdapter._round(geomean)
        }

    @staticmethod
    def _extract_test_data(series_data, summary):
        if not isinstance(summary, dict):
            return series_data

        series_data["min"] = PerformanceDataAdapter._round(summary["min"])
        series_data["max"] = PerformanceDataAdapter._round(summary["max"])
        series_data["std"] = PerformanceDataAdapter._round(summary["std"])
        series_data["median"] = PerformanceDataAdapter._round(summary["median"])
        series_data["mean"] = PerformanceDataAdapter._round(summary["mean"])

        return series_data

    @staticmethod
    def _calculate_test_data(job_id, result_set_id, push_timestamp,
                             replicates):
Expand Down Expand Up @@ -328,8 +346,12 @@ def adapt_and_load(self, reference_data, job_data, datum):

series_data = self._calculate_test_data(
job_id, result_set_id, push_timestamp,
talos_datum["results"][_test]
)
talos_datum["results"][_test])

if "summary" in talos_datum and talos_datum["summary"]["subtests"][_test]:
summary_data = talos_datum["summary"]["subtests"][_test]
series_data = self._extract_test_data(series_data,
summary_data)

obj = self._get_base_perf_obj(_job_guid, _name, _type,
talos_datum,
@@ -357,6 +379,10 @@ def adapt_and_load(self, reference_data, job_data, datum):
            summary_data = self._calculate_summary_data(
                job_id, result_set_id, push_timestamp, talos_datum["results"])

            if "summary" in talos_datum and "suite" in talos_datum["summary"]:
                summary_data = self._extract_summary_data(summary_data,
                                                          talos_datum["summary"])

            obj = self._get_base_perf_obj(_job_guid, _name, _type,
                                          talos_datum,
                                          summary_signature,
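
The net effect of the adapter changes: summary statistics are still calculated from the raw replicates, but whenever the harness supplies its own summary, those values take precedence. A condensed sketch of that precedence (hypothetical helper names; the real adapter also applies _round and handles signatures, which are omitted here):

    def build_subtest_series(calculated, talos_datum, test_name):
        # Start from the values derived from the raw replicates, then
        # overwrite them with harness-provided per-test statistics when a
        # summary block is present.
        series = dict(calculated)
        harness = talos_datum.get("summary", {}).get("subtests", {}).get(test_name)
        if harness:
            for key in ("min", "max", "mean", "median", "std"):
                series[key] = harness[key]
        return series

    def build_suite_series(calculated, talos_datum):
        # Same idea at the suite level: a harness-provided "suite" value
        # replaces the calculated geomean.
        series = dict(calculated)
        if "suite" in talos_datum.get("summary", {}):
            series["geomean"] = talos_datum["summary"]["suite"]
        return series
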
