WT-8438 Latencies stat for the perf tests are printed repeatedly (#7246)
ajmorton committed Nov 29, 2021
1 parent 12f9c15 commit 4df8522
Showing 3 changed files with 33 additions and 34 deletions.
35 changes: 21 additions & 14 deletions bench/perf_run_py/perf_stat.py
@@ -85,18 +85,19 @@ def get_value_list(self, brief: bool):
         return [as_dict]
 
 
-class PerfStatMin(PerfStat):
-    def get_value(self):
-        """Return the averaged minimum of all gathered values"""
-        min_3_vals = sorted(self.values)[:3]
-        return self.average(min_3_vals)
+class PerfStatMinMax(PerfStat):
+    def get_value_list(self, brief: bool):
+        avg_min_3_vals = self.average(sorted(self.values)[:3])
+        avg_max_3_vals = self.average(sorted(self.values)[-3:])
+
+        as_list = [
+            {'name': f"Min {self.output_label}", 'value': avg_min_3_vals},
+            {'name': f"Max {self.output_label}", 'value': avg_max_3_vals},
+        ]
 
-
-class PerfStatMax(PerfStat):
-    def get_value(self):
-        """Return the averaged maximum of all gathered values"""
-        max_3_vals = sorted(self.values)[-3:]
-        return self.average(max_3_vals)
+        if not brief:
+            as_list.append({'name': f"All {self.output_label}s", 'values': sorted(self.values)})
+        return as_list
 
 
 class PerfStatCount(PerfStat):
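
A single PerfStatMinMax now replaces the PerfStatMin/PerfStatMax pair: one stat reports both extremes, and outside brief mode it emits the full sample list exactly once. Below is a minimal runnable sketch of the new method; the PerfStat stand-in is reduced to just the fields get_value_list() touches (values, output_label, average()) and is not the real base class from perf_stat.py.

    # Minimal stand-in for the real PerfStat base class; it keeps only the
    # state that get_value_list() reads.
    class PerfStat:
        def __init__(self, output_label, values):
            self.output_label = output_label
            self.values = values

        def average(self, vals):
            return sum(vals) / len(vals)


    class PerfStatMinMax(PerfStat):
        def get_value_list(self, brief: bool):
            # Average the three smallest and three largest samples so a
            # single outlier run cannot define the reported extremes.
            avg_min_3_vals = self.average(sorted(self.values)[:3])
            avg_max_3_vals = self.average(sorted(self.values)[-3:])

            as_list = [
                {'name': f"Min {self.output_label}", 'value': avg_min_3_vals},
                {'name': f"Max {self.output_label}", 'value': avg_max_3_vals},
            ]
            if not brief:
                # The raw samples are emitted once, alongside both summaries.
                as_list.append({'name': f"All {self.output_label}s",
                                'values': sorted(self.values)})
            return as_list


    stat = PerfStatMinMax('update throughput', [120, 90, 150, 100, 130, 110])
    for entry in stat.get_value_list(brief=True):
        print(entry)
    # {'name': 'Min update throughput', 'value': 100.0}
    # {'name': 'Max update throughput', 'value': 133.33333333333334}

Averaging the three smallest and three largest samples, rather than taking the raw extremes, keeps one anomalous run from defining the reported min and max.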
@@ -139,9 +140,12 @@ def get_value_list(self, brief: bool):
                 'name': self.output_label + str(i),
                 'value': self.get_value(i)
             }
-            if not brief:
-                as_dict['values'] = self.values
             as_list.append(as_dict)
+        if not brief:
+            as_list.append({
+                'name': "Latencies",
+                'values': sorted(self.values)
+            })
         return as_list


@@ -158,7 +162,10 @@ def get_value_list(self, brief: bool):
                 'name': self.output_label + str(i),
                 'value': self.get_value(i)
             }
-            if not brief:
-                as_dict['values'] = self.values
             as_list.append(as_dict)
+        if not brief:
+            as_list.append({
+                'name': "Latencies",
+                'values': sorted(self.values)
+            })
         return as_list
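
The two hunks above apply the same fix to PerfStatLatency and PerfStatLatencyWorkgen. The `if not brief` branch used to run inside the per-maximum loop, so every as_dict carried a copy of the full sample list and the latencies were printed once per tracked maximum; it now runs once, after the loop, as a single "Latencies" entry. A sketch of the output shape before and after, assuming for illustration that get_value(i) returns the i-th largest sample (a simplification of the real method):

    # Illustrative only: output shape before and after the fix, for num_max = 3.
    values = [12, 7, 30, 18, 25]
    top = sorted(values, reverse=True)[:3]

    # Before: the full sample list rode along in every per-maximum entry,
    # so the same latencies were printed num_max times.
    before = [{'name': f'Latency{i}', 'value': v, 'values': values}
              for i, v in enumerate(top)]

    # After: per-maximum entries stay small; the samples appear exactly once.
    after = [{'name': f'Latency{i}', 'value': v} for i, v in enumerate(top)]
    after.append({'name': 'Latencies', 'values': sorted(values)})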
26 changes: 9 additions & 17 deletions bench/perf_run_py/perf_stat_collection.py
@@ -29,7 +29,7 @@
 # OTHER DEALINGS IN THE SOFTWARE.
 
 import os
-from perf_stat import PerfStat, PerfStatCount, PerfStatLatency, PerfStatMax, PerfStatMin, PerfStatLatencyWorkgen
+from perf_stat import PerfStat, PerfStatCount, PerfStatLatency, PerfStatMinMax, PerfStatLatencyWorkgen
 from typing import List


@@ -82,14 +82,10 @@ def all_stats():
                       pattern=r'Executed \d+ checkpoint operations',
                       input_offset=1,
                       output_label='Checkpoint count'),
-        PerfStatMax(short_label="max_update_throughput",
-                    pattern=r'updates,',
-                    input_offset=8,
-                    output_label='Max update throughput'),
-        PerfStatMin(short_label="min_update_throughput",
-                    pattern=r'updates,',
-                    input_offset=8,
-                    output_label='Min update throughput'),
+        PerfStatMinMax(short_label="min_max_update_throughput",
+                       pattern=r'updates,',
+                       input_offset=8,
+                       output_label='update throughput'),
         PerfStatCount(short_label="warnings",
                       pattern='WARN',
                       output_label='Latency warnings'),
@@ -112,14 +108,10 @@
                         output_label='Latency(read, update) Max',
                         ops=['read', 'update'],
                         num_max=1),
-        PerfStatMax(short_label="max_read_throughput",
-                    pattern=r'updates,',
-                    input_offset=4,
-                    output_label='Max read throughput'),
-        PerfStatMin(short_label="min_read_throughput",
-                    pattern=r'updates,',
-                    input_offset=4,
-                    output_label='Min read throughput'),
+        PerfStatMinMax(short_label="min_max_read_throughput",
+                       pattern=r'updates,',
+                       input_offset=4,
+                       output_label='read throughput'),
         PerfStatCount(short_label="warning_operations",
                       stat_file='../stdout_file.txt',
                       pattern='max latency exceeded',
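Both new registrations scan the same wtperf progress line, the one matching `updates,`, and differ only in which whitespace-separated token they read: input_offset=4 for read throughput and input_offset=8 for update throughput. The sketch below illustrates that idea; the line format and the find_stat() helper are both made up for this example, and the real parsing in perf_run_py may differ in detail.

    # Hypothetical sketch of how a (pattern, input_offset) pair could pull a
    # number out of a matching output line; illustrative, not the real parser.
    import re

    def find_stat(line: str, pattern: str, input_offset: int) -> int:
        # If 'pattern' matches, return the whitespace-separated token at
        # 'input_offset' as an integer.
        if re.search(pattern, line):
            return int(line.split()[input_offset])
        raise ValueError(f"no match for {pattern!r}")

    # Assumed progress-line shape (made up for this example):
    line = "42 sec: 100 ops; 5214 reads, 0 inserts, 2191 updates, 0 checkpoints"
    print(find_stat(line, r'updates,', 4))  # -> 5214  (read throughput)
    print(find_stat(line, r'updates,', 8))  # -> 2191  (update throughput)
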
6 changes: 3 additions & 3 deletions test/evergreen.yml
@@ -606,7 +606,7 @@ functions:
           ${virtualenv_binary} -p ${python_binary} venv
           source venv/bin/activate
           ${pip3_binary} install psutil pygit2
-          JSON_TASK_INFO='{ "evergreen_task_info": { "is_patch": "'${is_patch}'", "task_id": "'${task_id}'", "distro_id": "'${distro_id}'", "execution": "'${execution}'" } }'
+          JSON_TASK_INFO='{ "evergreen_task_info": { "is_patch": "'${is_patch}'", "task_id": "'${task_id}'", "distro_id": "'${distro_id}'", "execution": "'${execution}'", "task_info": "'${task_name}'" } }'
           echo "JSON_TASK_INFO: $JSON_TASK_INFO"
           ${test_env_vars|} ${python_binary} ../../../bench/perf_run_py/perf_run.py --${test_type|wtperf} -e ${exec_path|./wtperf} -t ${perf-test-path|../../../bench/wtperf/runners}/${perf-test-name} -ho WT_TEST -m ${maxruns} -g "../.." -v -i "$JSON_TASK_INFO" -b -o test_stats/evergreen_out_${perf-test-name}.json ${wtarg}
           ${test_env_vars|} ${python_binary} ../../../bench/perf_run_py/perf_run.py --${test_type|wtperf} -e ${exec_path|./wtperf} -t ${perf-test-path|../../../bench/wtperf/runners}/${perf-test-name} -ho WT_TEST -m ${maxruns} -g "../.." -v -i "$JSON_TASK_INFO" -re -o test_stats/atlas_out_${perf-test-name}.json ${wtarg}
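
The new task_info field threads the Evergreen ${task_name} expansion through to the stats payload that perf_run.py uploads. A sketch of the resulting JSON shape, with placeholder values standing in for every Evergreen expansion:

    # Illustrative only: the shape of JSON_TASK_INFO after this change, with
    # made-up values standing in for the Evergreen expansions.
    import json

    task_info = {
        "evergreen_task_info": {
            "is_patch": "false",             # ${is_patch}
            "task_id": "wt_perf_log_123",    # ${task_id}   (placeholder)
            "distro_id": "ubuntu2004-test",  # ${distro_id} (placeholder)
            "execution": "0",                # ${execution}
            "task_info": "perf-test-log",    # new: ${task_name} (placeholder)
        }
    }
    print(json.dumps(task_info))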
@@ -3514,7 +3514,7 @@ tasks:
         vars:
           perf-test-name: log.wtperf
           maxruns: 1
-          wtarg: -ops ['"update", "max_update_throughput", "min_update_throughput"']
+          wtarg: -ops ['"update", "min_max_update_throughput"']
     - func: "upload-perf-test-stats"
       vars:
         perf-test-name: log.wtperf
@@ -3648,7 +3648,7 @@
           perf-test-name: 500m-btree-rdonly.wtperf
           maxruns: 1
           no_create: true
-          wtarg: -args ['"-C create,statistics=(fast),statistics_log=(json,wait=1,sources=[file:])"'] -ops ['"read", "warnings", "max_latency_read_update", "min_read_throughput", "max_read_throughput"']
+          wtarg: -args ['"-C create,statistics=(fast),statistics_log=(json,wait=1,sources=[file:])"'] -ops ['"read", "warnings", "max_latency_read_update", "min_max_read_throughput"']
     - func: "upload-perf-test-stats"
       vars:
         perf-test-name: 500m-btree-rdonly.wtperf
