Add base_rev and new_rev to PerfCompare API + removed unnecessary fields (#7733)
beatrice-acasandrei committed Jul 5, 2023
1 parent 97d5b7c commit c422f7c
Showing 4 changed files with 18 additions and 44 deletions.
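
For orientation, a hedged sketch of what one row of the PerfCompare results payload looks like after this change. The field names are taken from the diffs below; the revision hashes, platform, suite and numeric values are invented for illustration, and only a subset of the row's fields is shown.

# Illustrative shape of a single PerfCompare results row after this commit.
# 'base_rev' and 'new_rev' are the newly added fields; the removed fields
# (confidence_text_long, t_value_confidence, t_value_care_min) no longer appear.
example_row = {
    'base_rev': '9b2a6e1f0c3d',                 # hypothetical base push revision
    'new_rev': '4d7c0a8e55b1',                  # hypothetical new push revision
    'framework_id': 1,
    'platform': 'linux1804-64-shippable-qr',    # example platform name
    'suite': 'a11yr',                           # example suite name
    'confidence': 2.13,
    'confidence_text': 'Medium',
    'delta_value': 0.46,
    'delta_percentage': 0.65,
    'magnitude': 3.24,
    'is_improvement': False,
    'is_regression': False,
    'is_meaningful': True,
}
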
23 changes: 8 additions & 15 deletions tests/webapp/api/test_perfcompare_api.py
@@ -99,6 +99,8 @@ def test_perfcompare_results_against_no_base(

expected = [
{
'base_rev': None,
'new_rev': test_perfcomp_push_2.revision,
'framework_id': base_sig.framework.id,
'platform': base_sig.platform.platform,
'suite': base_sig.suite,
@@ -126,9 +128,6 @@ def test_perfcompare_results_against_no_base(
'new_stddev_pct': round(response['new_stddev_pct'], 2),
'confidence': round(response['confidence'], 2),
'confidence_text': response['confidence_text'],
'confidence_text_long': response['confidence_text_long'],
't_value_confidence': perfcompare_utils.T_VALUE_CONFIDENCE,
't_value_care_min': perfcompare_utils.T_VALUE_CARE_MIN,
'delta_value': round(response['delta_value'], 2),
'delta_percentage': round(response['delta_pct'], 2),
'magnitude': round(response['magnitude'], 2),
@@ -249,6 +248,8 @@ def test_perfcompare_results_with_only_one_run_and_diff_repo(

expected = [
{
'base_rev': test_perfcomp_push.revision,
'new_rev': test_perfcomp_push_2.revision,
'framework_id': base_sig.framework.id,
'platform': base_sig.platform.platform,
'suite': base_sig.suite,
@@ -276,9 +277,6 @@ def test_perfcompare_results_with_only_one_run_and_diff_repo(
'new_stddev_pct': round(response['new_stddev_pct'], 2),
'confidence': round(response['confidence'], 2),
'confidence_text': response['confidence_text'],
'confidence_text_long': response['confidence_text_long'],
't_value_confidence': perfcompare_utils.T_VALUE_CONFIDENCE,
't_value_care_min': perfcompare_utils.T_VALUE_CARE_MIN,
'delta_value': round(response['delta_value'], 2),
'delta_percentage': round(response['delta_pct'], 2),
'magnitude': round(response['magnitude'], 2),
@@ -406,6 +404,8 @@ def test_perfcompare_results_multiple_runs(

expected = [
{
'base_rev': test_perfcomp_push.revision,
'new_rev': test_perfcomp_push_2.revision,
'framework_id': sig1.framework.id,
'platform': sig1.platform.platform,
'suite': sig1.suite,
@@ -433,9 +433,6 @@ def test_perfcompare_results_multiple_runs(
'new_stddev_pct': round(first_row['new_stddev_pct'], 2),
'confidence': round(first_row['confidence'], 2),
'confidence_text': first_row['confidence_text'],
'confidence_text_long': first_row['confidence_text_long'],
't_value_confidence': perfcompare_utils.T_VALUE_CONFIDENCE,
't_value_care_min': perfcompare_utils.T_VALUE_CARE_MIN,
'delta_value': round(first_row['delta_value'], 2),
'delta_percentage': round(first_row['delta_pct'], 2),
'magnitude': round(first_row['magnitude'], 2),
@@ -451,6 +448,8 @@ def test_perfcompare_results_multiple_runs(
'is_meaningful': first_row['is_meaningful'],
},
{
'base_rev': test_perfcomp_push.revision,
'new_rev': test_perfcomp_push_2.revision,
'framework_id': sig3.framework.id,
'platform': sig3.platform.platform,
'suite': sig3.suite,
@@ -478,9 +477,6 @@ def test_perfcompare_results_multiple_runs(
'new_stddev_pct': round(second_row['new_stddev_pct'], 2),
'confidence': round(second_row['confidence'], 2),
'confidence_text': second_row['confidence_text'],
'confidence_text_long': second_row['confidence_text_long'],
't_value_confidence': perfcompare_utils.T_VALUE_CONFIDENCE,
't_value_care_min': perfcompare_utils.T_VALUE_CARE_MIN,
'delta_value': round(second_row['delta_value'], 2),
'delta_percentage': round(second_row['delta_pct'], 2),
'magnitude': round(second_row['magnitude'], 2),
@@ -615,9 +611,6 @@ def get_expected(
len(base_perf_data_values), len(new_perf_data_values), response['confidence']
)
response['confidence_text'] = perfcompare_utils.get_confidence_text(response['confidence'])
response['confidence_text_long'] = perfcompare_utils.confidence_detailed_info(
response['confidence_text']
)
response['is_complete'] = True
response['more_runs_are_needed'] = perfcompare_utils.more_runs_are_needed(
response['is_complete'], response['is_confident'], len(base_perf_data_values)
23 changes: 3 additions & 20 deletions treeherder/webapp/api/perfcompare_utils.py
@@ -147,32 +147,15 @@ def get_abs_ttest_value(control_values, test_values):
return res


def confidence_detailed_info(confidence):
"""Returns more explanations on what confidence text means"""
text = 'Result of running t-test on base versus new result distribution: '
switcher = {
'low': text + 'A value of \'low\' suggests less confidence that there is a sustained,'
' significant change between the two revisions.',
'med': text
+ 'A value of \'med\' indicates uncertainty that there is a significant change. '
'If you haven\'t already, consider retriggering the job to be more sure.',
'high': text
+ 'A value of \'high\' indicates uncertainty that there is a significant change. '
'If you haven\'t already, consider retriggering the job to be more sure.',
}

return switcher.get(confidence, '')


def get_confidence_text(abs_tvalue):
if abs_tvalue == 0 or abs_tvalue is None:
return ''
if abs_tvalue < T_VALUE_CARE_MIN:
confidence_text = 'low'
confidence_text = 'Low'
elif abs_tvalue < T_VALUE_CONFIDENCE:
confidence_text = 'med'
confidence_text = 'Medium'
else:
confidence_text = 'high'
confidence_text = 'High'
return confidence_text


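
As a usage sketch of the helpers that remain in perfcompare_utils.py (function names taken from the diff above; the replicate values are made up), the confidence label is now derived directly from the absolute t-value and comes back capitalised:

# Hedged usage sketch, not a verbatim Treeherder call site. get_abs_ttest_value()
# and get_confidence_text() are the helpers shown above; an empty string means
# no usable t-value, otherwise the label is 'Low', 'Medium' or 'High'.
from treeherder.webapp.api import perfcompare_utils

base_runs = [100.2, 101.5, 99.8]   # hypothetical base replicates
new_runs = [110.4, 109.7, 111.0]   # hypothetical new replicates

abs_t = perfcompare_utils.get_abs_ttest_value(base_runs, new_runs)
label = perfcompare_utils.get_confidence_text(abs_t)   # e.g. 'High'
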
5 changes: 3 additions & 2 deletions treeherder/webapp/api/performance_data.py
@@ -871,7 +871,6 @@ def list(self, request):
base_perf_data_values, new_perf_data_values
)
confidence_text = perfcompare_utils.get_confidence_text(confidence)
detailed_confidence = perfcompare_utils.confidence_detailed_info(confidence_text)
sig_hash = (
base_sig.get('signature_hash', '')
if base_sig
@@ -892,11 +891,14 @@
class_name = perfcompare_utils.get_class_name(
new_is_better, base_avg_value, new_avg_value, confidence
)

is_improvement = class_name == 'success'
is_regression = class_name == 'danger'
is_meaningful = class_name == ''

row_result = {
'base_rev': base_rev,
'new_rev': new_rev,
'header_name': header,
'platform': platform,
'suite': base_sig.get('suite', ''), # same suite for base_result and new_result
@@ -926,7 +928,6 @@ def list(self, request):
'new_retriggerable_job_ids': new_grouped_job_ids.get(new_sig_id, []),
'confidence': confidence,
'confidence_text': confidence_text,
'confidence_text_long': detailed_confidence,
'delta_value': delta_value,
'delta_percentage': delta_percentage,
'magnitude': magnitude,
11 changes: 4 additions & 7 deletions treeherder/webapp/api/performance_serializers.py
@@ -18,7 +18,6 @@
PerformanceTag,
)
from treeherder.webapp.api.utils import to_timestamp
from treeherder.webapp.api.perfcompare_utils import T_VALUE_CONFIDENCE, T_VALUE_CARE_MIN


class OptionalBooleanField(serializers.BooleanField):
@@ -423,6 +422,8 @@ def validate(self, data):


class PerfCompareResultsSerializer(serializers.ModelSerializer):
base_rev = serializers.CharField()
new_rev = serializers.CharField()
is_empty = serializers.BooleanField()
is_complete = serializers.BooleanField()
platform = serializers.CharField()
@@ -452,9 +453,6 @@ class PerfCompareResultsSerializer(serializers.ModelSerializer):
new_stddev_pct = PerfCompareDecimalField()
confidence = PerfCompareDecimalField()
confidence_text = serializers.CharField()
confidence_text_long = serializers.CharField()
t_value_confidence = serializers.IntegerField(default=T_VALUE_CONFIDENCE)
t_value_care_min = serializers.IntegerField(default=T_VALUE_CARE_MIN)
delta_value = PerfCompareDecimalField()
delta_percentage = PerfCompareDecimalField()
magnitude = PerfCompareDecimalField()
@@ -470,6 +468,8 @@
class Meta:
model = PerformanceSignature
fields = [
'base_rev',
'new_rev',
'framework_id',
'platform',
'suite',
@@ -497,9 +497,6 @@ class Meta:
'new_stddev_pct',
'confidence',
'confidence_text',
'confidence_text_long',
't_value_confidence',
't_value_care_min',
'graphs_link',
'delta_value',
'delta_percentage',
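
Finally, a hedged sketch (standard DRF usage, not necessarily the exact call site in performance_data.py) of running a computed row dict such as row_result through the updated serializer; the row now carries 'base_rev' and 'new_rev', and the removed t-value fields are no longer expected:

# Illustrative only: validate one computed row with the updated serializer.
# row_result is the dict assembled in performance_data.py above and must supply
# every field named in Meta.fields, including the new 'base_rev' / 'new_rev'.
from treeherder.webapp.api.performance_serializers import PerfCompareResultsSerializer

serializer = PerfCompareResultsSerializer(data=row_result)
if serializer.is_valid():
    payload = serializer.data      # includes 'base_rev' and 'new_rev'
else:
    errors = serializer.errors     # e.g. missing or malformed fields
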
