Merge pull request #2882 from vkarak/feat/combine-param-test-logs
[feat] Combine perflogs for parameterised tests
vkarak committed Jun 14, 2023
2 parents a128328 + 3be4031 commit fa813d0
Showing 5 changed files with 174 additions and 19 deletions.
18 changes: 17 additions & 1 deletion docs/config_reference.rst
@@ -1013,7 +1013,6 @@ All logging handlers share the following set of common attributes:
ReFrame accepts all log record attributes from Python's `logging <https://docs.python.org/3.8/library/logging.html#logrecord-attributes>`__ mechanism and adds the following attributes:

.. csv-table::
:header: "Log record attribute", "Description"

``%(check_build_locally)s``, The value of the :attr:`~reframe.core.pipeline.RegressionTest.build_locally` attribute.
``%(check_build_time_limit)s``, The value of the :attr:`~reframe.core.pipeline.RegressionTest.build_time_limit` attribute.
@@ -1073,6 +1072,13 @@ All logging handlers share the following set of common attributes:
ReFrame allows you to log any test variable, parameter or property if they are marked as "loggable".
The log record attribute will have the form ``%(check_NAME)s`` where ``NAME`` is the variable name, the parameter name or the property name that is marked as loggable.

There is also the special ``%(check_#ALL)s`` format specifier, which expands to all the loggable test attributes.
These include all the specifiers above as well as any additional loggable variables or parameters defined by the test.
When expanding this specifier, ReFrame tries to guess the delimiter for separating the individual attributes from the delimiters already used in the rest of the format.
If it cannot guess one, it defaults to ``|``.

Since this can lead to very long records, consider combining it with the :attr:`~logging.handlers_perflog..filelog..ignore_keys` parameter to filter out attributes that are not of interest.
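
For illustration, a perflog handler could use the specifier as in the following sketch, which mirrors the new default added to ``reframe/core/settings.py`` in this change:

.. code-block:: python

   {
       'type': 'filelog',
       'prefix': '%(check_system)s/%(check_partition)s',
       'level': 'info',
       # '|' is the only delimiter used after the other specifiers, so it is
       # also chosen for separating the attributes %(check_#ALL)s expands to
       'format': '%(check_result)s|%(check_job_completion_time)s|%(check_#ALL)s',
       'append': True
   }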

.. versionadded:: 3.3
Allow arbitrary test attributes to be logged.

@@ -1082,6 +1088,9 @@ All logging handlers share the following set of common attributes:
.. versionchanged:: 3.11.0
Limit the number of attributes that can be logged. User attributes or properties must be explicitly marked as "loggable" in order to be selectable for logging.

.. versionadded:: 4.3
The ``%(check_#ALL)s`` special specifier is added.


.. py:attribute:: logging.handlers.format_perfvars
.. py:attribute:: logging.handlers_perflog.format_perfvars
@@ -1185,6 +1194,13 @@ The additional properties for the ``filelog`` handler are the following:
The base directory of performance data log files.


.. py:attribute:: logging.handlers_perflog..filelog..ignore_keys
A list of log record `format specifiers <#config.logging.handlers.format>`__ that will be excluded from the expansion of the special ``%(check_#ALL)s`` specifier.

.. versionadded:: 4.3
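
A sketch of how this combines with the special specifier (the attribute selection here is arbitrary; the new default in ``reframe/core/settings.py`` ships a much longer list):

.. code-block:: python

   {
       'type': 'filelog',
       'prefix': '%(check_system)s/%(check_partition)s',
       'format': '%(check_result)s|%(check_#ALL)s',
       # dropped from the %(check_#ALL)s expansion
       'ignore_keys': ['check_display_name', 'check_executable_opts', 'check_tags']
   }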


.. py:attribute:: logging.handlers_perflog..filelog..prefix
:required: Yes
73 changes: 65 additions & 8 deletions reframe/core/logging.py
@@ -128,12 +128,35 @@ def ignore_brokenpipe(hdlr, l):
logging.Handler.handleError = handleError(logging.Handler.handleError)


def _expand_params(check):
cls = type(check)
return {
        name: getattr(check, name) for name, param in cls.param_space.items()
if param.is_loggable()
}


def _guess_delim(s):
'''Guess the delimiter in the given logging format string'''
delims = set()
for m in re.finditer(r'\%\((.*?)\)s(.)?', s):
d = m.group(2)
if d:
delims.add(d)

if len(delims) == 1:
return delims.pop()
else:
return '|'
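
# Usage sketch (illustration only, not part of this change):
#
#     _guess_delim('%(check_result)s|%(check_system)s|')   # -> '|'
#     _guess_delim('%(check_result)s %(check_system)s')    # -> ' ' (single space)
#     _guess_delim('%(check_result)s,%(check_system)s|')   # -> '|' (ambiguous; fall back)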


class MultiFileHandler(logging.FileHandler):
'''A file handler that allows writing on different log files based on
information from the log record.
'''

def __init__(self, prefix, mode='a', encoding=None, fmt=None, perffmt=None):
def __init__(self, prefix, mode='a', encoding=None, fmt=None,
perffmt=None, ignore_keys=None):
super().__init__(prefix, mode, encoding, delay=True)

# Reset FileHandler's filename
@@ -147,9 +170,21 @@ def __init__(self, prefix, mode='a', encoding=None, fmt=None, perffmt=None):
self.__fmt = fmt
self.__perffmt = perffmt
self.__attr_patt = re.compile(r'\%\((.*?)\)s(.)?')
self.__ignore_keys = set(ignore_keys) if ignore_keys else set()

def __generate_header(self, record):
# Generate the header from the record and fmt

# Expand the special check_#ALL specifier
if '%(check_#ALL)s' in self.__fmt:
delim = _guess_delim(self.__fmt)
self.__fmt = self.__fmt.replace(
'%(check_#ALL)s',
delim.join(f'%({x})s'
for x in sorted(record.__rfm_loggable_attrs__)
if x not in self.__ignore_keys)
)

header = ''
for m in self.__attr_patt.finditer(self.__fmt):
attr = m.group(1)
@@ -222,9 +257,8 @@ def emit(self, record):
except OSError as e:
raise LoggingError('logging failed') from e

self.baseFilename = os.path.join(
dirname, f'{record.__rfm_check__.short_name}.log'
)
check_basename = type(record.__rfm_check__).variant_name()
self.baseFilename = os.path.join(dirname, f'{check_basename}.log')
self._emit_header(record)
self.stream = self.__streams[self.baseFilename]
super().emit(record)
@@ -263,13 +297,28 @@ class CheckFieldFormatter(logging.Formatter):
regression test.'''

# NOTE: This formatter will work only for the '%' style
def __init__(self, fmt=None, datefmt=None, perffmt=None, style='%'):
def __init__(self, fmt=None, datefmt=None, perffmt=None,
ignore_keys=None, style='%'):
super().__init__(fmt, datefmt, style)

self.__fmt = fmt
self.__fmtperf = perffmt[:-1] if perffmt else ''
self.__specs = re.findall(r'\%\((\S+?)\)s', fmt)
self.__delim = perffmt[-1] if perffmt else ''
self.__expand_vars = '%(check_#ALL)s' in self.__fmt
self.__expanded = False
self.__ignore_keys = set(ignore_keys) if ignore_keys else set()

def _expand_fmt(self, attrs):
if not self.__expand_vars or self.__expanded:
return self.__fmt

delim = _guess_delim(self.__fmt)
self.__fmt = self.__fmt.replace(
'%(check_#ALL)s', delim.join(f'%({x})s' for x in attrs
if x not in self.__ignore_keys)
)
self.__expanded = True

def _format_perf(self, perfvars):
chunks = []
@@ -292,6 +341,7 @@ def _format_perf(self, perfvars):
return self.__delim.join(chunks)

def formatMessage(self, record):
self._expand_fmt(sorted(record.__rfm_loggable_attrs__))
for s in self.__specs:
if not hasattr(record, s):
setattr(record, s, None)
@@ -383,8 +433,10 @@ def _create_filelog_handler(site_config, config_prefix):
append = site_config.get(f'{config_prefix}/append')
format = site_config.get(f'{config_prefix}/format')
format_perf = site_config.get(f'{config_prefix}/format_perfvars')
ignore_keys = site_config.get(f'{config_prefix}/ignore_keys')
return MultiFileHandler(filename_patt, mode='a+' if append else 'w+',
fmt=format, perffmt=format_perf)
fmt=format, perffmt=format_perf,
ignore_keys=ignore_keys)


def _create_syslog_handler(site_config, config_prefix):
@@ -658,8 +710,10 @@ def _extract_handlers(site_config, handlers_group):
fmt = site_config.get(f'{handler_prefix}/{i}/format')
datefmt = site_config.get(f'{handler_prefix}/{i}/datefmt')
perffmt = site_config.get(f'{handler_prefix}/{i}/format_perfvars')
ignore_keys = site_config.get(f'{handler_prefix}/{i}/ignore_keys')
hdlr.setFormatter(RFC3339Formatter(fmt=fmt,
datefmt=datefmt, perffmt=perffmt))
datefmt=datefmt, perffmt=perffmt,
ignore_keys=ignore_keys))
hdlr.setLevel(_check_level(level))
hdlr._rfm_type = handler_type
handlers.append(hdlr)
@@ -733,6 +787,7 @@ def __init__(self, logger=None, check=None):
# Here we only set the format specifiers that do not
# correspond directly to check attributes
'__rfm_check__': check,
'__rfm_loggable_attrs__': [],
'check_name': 'reframe',
'check_jobid': None,
'check_job_completion_time': None,
@@ -786,7 +841,9 @@ def _update_check_extras(self):
# Attribute is parameter, so format it
val = check_type.raw_params[attr].format(val)

self.extra[f'check_{extra_name}'] = val
key = f'check_{extra_name}'
self.extra['__rfm_loggable_attrs__'].append(key)
self.extra[key] = val

# Add special extras
self.extra['check_info'] = self.check.info()
42 changes: 33 additions & 9 deletions reframe/core/settings.py
@@ -54,16 +54,40 @@
'type': 'filelog',
'prefix': '%(check_system)s/%(check_partition)s',
'level': 'info',
'format': (
'%(check_job_completion_time)s,%(version)s,'
'%(check_display_name)s,%(check_system)s,'
'%(check_partition)s,%(check_environ)s,'
'%(check_jobid)s,%(check_result)s,%(check_perfvalues)s'
),
'format': ('%(check_result)s|'
'%(check_job_completion_time)s|%(check_#ALL)s'),
'ignore_keys': [
'check_build_locally',
'check_build_time_limit',
'check_display_name',
'check_executable',
'check_executable_opts',
'check_hashcode',
'check_keep_files',
'check_local',
'check_maintainers',
'check_max_pending_time',
'check_outputdir',
'check_prebuild_cmds',
'check_prefix',
'check_prerun_cmds',
'check_postbuild_cmds',
'check_postrun_cmds',
'check_readonly_files',
'check_sourcepath',
'check_sourcesdir',
'check_stagedir',
'check_strict_check',
'check_tags',
'check_time_limit',
'check_valid_prog_environs',
'check_valid_systems',
'check_variables'
],
'format_perfvars': (
'%(check_perf_value)s,%(check_perf_unit)s,'
'%(check_perf_ref)s,%(check_perf_lower_thres)s,'
'%(check_perf_upper_thres)s,'
'%(check_perf_value)s|%(check_perf_unit)s|'
'%(check_perf_ref)s|%(check_perf_lower_thres)s|'
'%(check_perf_upper_thres)s|'
),
'append': True
}
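
# Illustration only (not part of settings.py): how the default format above is
# expanded for a test.  The attribute list below is hypothetical; the real one
# comprises the test's loggable variables, parameters and properties.
#
#     ignore = {'check_display_name', 'check_tags'}
#     loggable = ['check_display_name', 'check_p', 'check_perfvalues',
#                 'check_result', 'check_system', 'check_tags']
#     '|'.join(f'%({x})s' for x in sorted(loggable) if x not in ignore)
#     # -> '%(check_p)s|%(check_perfvalues)s|%(check_result)s|%(check_system)s'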
7 changes: 6 additions & 1 deletion reframe/schemas/config.json
@@ -84,7 +84,11 @@
"properties": {
"basedir": {"type": "string"},
"prefix": {"type": "string"},
"append": {"type": "boolean"}
"append": {"type": "boolean"},
"ignore_keys": {
"type": "array",
"items": {"type": "string"}
}
},
"required": ["prefix"]
}
@@ -577,6 +581,7 @@
"logging/handlers*/syslog_facility": "user",
"logging/handlers_perflog/filelog_append": true,
"logging/handlers_perflog/filelog_basedir": "./perflogs",
"logging/handlers_perflog/filelog_ignore_keys": [],
"logging/handlers_perflog/graylog_extras": {},
"logging/handlers_perflog/httpjson_extras": {},
"logging/handlers_perflog/httpjson_ignore_keys": [],
53 changes: 53 additions & 0 deletions unittests/test_policies.py
@@ -1021,6 +1021,29 @@ def perf1(self):
return _MyTest()


@pytest.fixture
def perf_param_tests():
class _MyTest(rfm.RunOnlyRegressionTest):
valid_systems = ['*']
valid_prog_environs = ['*']
executable = 'echo perf0=100 && echo perf1=50'
p = parameter([1, 2])

@sanity_function
def validate(self):
return sn.assert_found(r'perf0', self.stdout)

@performance_function('unit0')
def perf0(self):
return sn.extractsingle(r'perf0=(\S+)', self.stdout, 1, float)

@performance_function('unit1')
def perf1(self):
return sn.extractsingle(r'perf1=(\S+)', self.stdout, 1, float)

return [_MyTest(variant_num=v) for v in range(_MyTest.num_variants)]


@pytest.fixture
def failing_perf_test():
class _MyFailingTest(rfm.RunOnlyRegressionTest):
@@ -1321,3 +1344,33 @@ def test_perf_logging_lazy(make_runner, make_exec_ctx, lazy_perf_test,

logfile = tmp_path / 'perflogs' / 'generic' / 'default' / '_LazyPerfTest.log'
assert os.path.exists(logfile)


def test_perf_logging_all_attrs(make_runner, make_exec_ctx, perf_test,
config_perflog, tmp_path):
make_exec_ctx(config_perflog(fmt='%(check_result)s|%(check_#ALL)s'))
logging.configure_logging(rt.runtime().site_config)
runner = make_runner()
testcases = executors.generate_testcases([perf_test])
runner.runall(testcases)

logfile = tmp_path / 'perflogs' / 'generic' / 'default' / '_MyTest.log'
assert os.path.exists(logfile)
with open(logfile) as fp:
header = fp.readline()

loggable_attrs = type(perf_test).loggable_attrs()
assert len(header.split('|')) == len(loggable_attrs) + 1


def test_perf_logging_param_test(make_runner, make_exec_ctx, perf_param_tests,
config_perflog, tmp_path):
make_exec_ctx(config_perflog(fmt='%(check_result)s|%(check_#ALL)s'))
logging.configure_logging(rt.runtime().site_config)
runner = make_runner()
testcases = executors.generate_testcases(perf_param_tests)
runner.runall(testcases)

logfile = tmp_path / 'perflogs' / 'generic' / 'default' / '_MyTest.log'
assert os.path.exists(logfile)
    assert _count_lines(logfile) == 3  # 1 header line + 1 record per parameter variant
