24 changes: 13 additions & 11 deletions hpcbench/benchmark/ior.py
@@ -15,7 +15,7 @@
 from hpcbench.toolbox.process import find_executable


-class Extractor(MetricsExtractor):
+class IORMetricsExtractor(MetricsExtractor):
     """Parser for IOR outputs
     """

@@ -58,26 +58,28 @@ class Extractor(MetricsExtractor):
     @cached_property
     def metrics(self):
         metrics = {}
-        for operation in Extractor.OPERATIONS:
-            for meta, desc in Extractor.METAS.items():
-                name = Extractor.get_meta_name(operation, desc.get('name') or meta)
+        for operation in IORMetricsExtractor.OPERATIONS:
+            for meta, desc in IORMetricsExtractor.METAS.items():
+                name = IORMetricsExtractor.get_meta_name(
+                    operation, desc.get('name') or meta
+                )
                 metrics[name] = desc['metric']
         return metrics

     def extract_metrics(self, metas):
         columns = None
         metrics = {}
         with open(self.stdout) as istr:
-            Extractor._skip_output_header(istr)
+            IORMetricsExtractor._skip_output_header(istr)
             for line in istr:
                 line = line.strip()
-                if line.startswith(Extractor.RESULTS_HEADER_START):
-                    columns = Extractor.parse_results_header(line)
+                if line.startswith(IORMetricsExtractor.RESULTS_HEADER_START):
+                    columns = IORMetricsExtractor.parse_results_header(line)
                 elif line == '':
                     # end of results
                     break
                 else:
-                    Extractor.parse_result_line(columns, line, metrics)
+                    IORMetricsExtractor.parse_result_line(columns, line, metrics)
         return metrics

     @classmethod
@@ -103,7 +105,7 @@ def parse_results_header(cls, header):
         :param header: content of the results header line
         :return: list of string providing columns
         """
-        header = Extractor.RE_MULTIPLE_SPACES.sub(' ', header)
+        header = IORMetricsExtractor.RE_MULTIPLE_SPACES.sub(' ', header)
         header = header.split(' ')
         return header

@@ -115,7 +117,7 @@ def parse_result_line(cls, columns, line, metrics):
         :param line: string of results below the columns line
         :param metrics: output dict where metrics are written
         """
-        line = Extractor.RE_MULTIPLE_SPACES.sub(' ', line)
+        line = IORMetricsExtractor.RE_MULTIPLE_SPACES.sub(' ', line)
         line = line.split(' ')
         operation = line[0]
         assert len(line) == len(columns)
@@ -240,4 +242,4 @@ def srun_nodes(self):
     @cached_property
     def metrics_extractors(self):
         # Use same extractor for all categories of commands
-        return Extractor()
+        return IORMetricsExtractor()
8 changes: 4 additions & 4 deletions hpcbench/driver/base.py
@@ -24,6 +24,9 @@
 ConstraintTag = namedtuple('ConstraintTag', ['name', 'constraint'])


+Top = namedtuple('top', ['campaign', 'node', 'logger', 'root', 'name'])
+
+
 def write_yaml_report(func):
     """Decorator used in campaign node post-processing
     """
@@ -125,7 +128,7 @@ def _wrap(self, *args, **kwargs):
             return func(self, *args, **kwargs)
         except Exception:
             self.logger.exception('While executing benchmark')
-            if not self.catch_child_exception:
+            if not (self.catch_child_exception or False):
                 raise

     return _wrap
@@ -161,9 +164,6 @@ def children(self):
         return []


-Top = namedtuple('top', ['campaign', 'node', 'logger', 'root', 'name'])
-
-
 class ClusterWrapper(Cluster):
     def __init__(self, network, tag, node):
         self._network = network
26 changes: 13 additions & 13 deletions hpcbench/driver/benchmark.py
@@ -23,7 +23,7 @@
 from hpcbench.api import ExecutionContext, NoMetricException, Metric
 from hpcbench.campaign import YAML_REPORT_FILE, JSON_METRICS_FILE
 from .executor import ExecutionDriver, SrunExecutionDriver
-from .base import Enumerator, ClusterWrapper, write_yaml_report
+from .base import Enumerator, ClusterWrapper, write_yaml_report, Leaf
 from hpcbench.toolbox.buildinfo import extract_build_info
 from hpcbench.toolbox.collections_ext import nameddict
 from hpcbench.toolbox.contextlib_ext import pushd
@@ -166,7 +166,7 @@ def _extract_metrics(self, **kwargs):
             child_config.pop('children', None)
             runs.setdefault(self.category, []).append(child)
             with pushd(child):
-                MetricsDriver(self.campaign, self.benchmark)(**kwargs)
+                MetricsDriver(self, self.benchmark)(**kwargs)
         self.gather_metrics(runs)

     def _add_build_info(self, execution):
@@ -235,22 +235,23 @@ def metrics(self):
             return json.load(istr)


-class MetricsDriver(object):
+class MetricsDriver(Leaf):
     """Abstract representation of metrics already
     built by a previous run
     """

-    def __init__(self, campaign, benchmark):
-        self.campaign = campaign
+    def __init__(self, parent, benchmark):
+        super(MetricsDriver, self).__init__(parent)
+        self.campaign = parent.campaign
         self.benchmark = benchmark
-        with open(YAML_REPORT_FILE) as istr:
-            self.report = yaml.safe_load(istr)

     @write_yaml_report
     @Enumerator.call_decorator
     def __call__(self, **kwargs):
-        cat = self.report.get('category')
-        metas = self.report.get('metas')
+        with open(YAML_REPORT_FILE) as istr:
+            report = yaml.safe_load(istr)
+        cat = report.get('category')
+        metas = report.get('metas')
         all_extractors = self.benchmark.metrics_extractors
         if isinstance(all_extractors, Mapping):
             if cat not in all_extractors:
@@ -260,7 +261,7 @@ def __call__(self, **kwargs):
             extractors = all_extractors
         if not isinstance(extractors, list):
             extractors = [extractors]
-        all_metrics = self.report.setdefault('metrics', [])
+        all_metrics = report.setdefault('metrics', [])
         for log in self.logs:
             metrics = {}
             for extractor in extractors:
@@ -278,7 +279,7 @@ def __call__(self, **kwargs):
         if self.benchmark.metric_required and not all_metrics:
             # at least one of the logs must provide metrics
             raise NoMetricException()
-        return self.report
+        return report

 class LocalLog(namedtuple('LocalLog', ['path', 'log_prefix'])):
     @property
@@ -389,8 +390,7 @@ def _wrap(**kwargs):
             driver = self.execution_layer()
             driver(**kwargs)
             if self.report['command_succeeded']:
-                mdriver = MetricsDriver(self.campaign, self.benchmark)
-                mdriver(**kwargs)
+                MetricsDriver(self, self.benchmark)(**kwargs)
             return self.report

         return _wrap
24 changes: 21 additions & 3 deletions tests/benchmark/benchmark.py
@@ -1,5 +1,5 @@
 from abc import ABCMeta, abstractmethod
-from collections import Mapping
+from collections import Mapping, namedtuple
 import inspect
 import itertools
 import logging
@@ -13,7 +13,12 @@

 from hpcbench.api import Benchmark, ExecutionContext, MetricsExtractor
 from hpcbench.campaign import YAML_REPORT_FILE
-from hpcbench.driver.benchmark import MetricsDriver
+from hpcbench.driver.base import Top
+from hpcbench.driver.benchmark import (
+    MetricsDriver,
+    BenchmarkCategoryDriver,
+    BenchmarkDriver,
+)
 from hpcbench.toolbox.collections_ext import dict_merge
 from hpcbench.toolbox.contextlib_ext import mkdtemp, pushd
 from .. import FakeCluster
@@ -127,7 +132,20 @@ def check_category_metrics(self, category):
                 yaml.dump(
                     dict(category=category, metas=metas, executor='local'), ostr
                 )
-            md = MetricsDriver('test-category', benchmark)
+
+            md = MetricsDriver(
+                BenchmarkCategoryDriver(
+                    BenchmarkDriver(
+                        Top(
+                            logger=LOGGER, root=namedtuple('root', ['network'])
+                        ),
+                        benchmark,
+                        dict(),
+                    ),
+                    'test-category',
+                ),
+                benchmark,
+            )
             report = md()
             parsed_metrics = report['metrics'][0]['measurement']
             self.assertEqual(parsed_metrics, next(expected_metrics))