
Commit 39e6272

Merge pull request #3189 from vkarak/feat/always-log-perftests

[feat] Add a perflog entry also when sanity fails

2 parents: 09ae029 + 036cad7

File tree: 8 files changed (+94, -37 lines)

docs/config_reference.rst (+4)

@@ -1112,6 +1112,7 @@ All logging handlers share the following set of common attributes:
 ``%(check_executable)s``, The value of the :attr:`~reframe.core.pipeline.RegressionTest.executable` attribute.
 ``%(check_executable_opts)s``, The value of the :attr:`~reframe.core.pipeline.RegressionTest.executable_opts` attribute.
 ``%(check_extra_resources)s``, The value of the :attr:`~reframe.core.pipeline.RegressionTest.extra_resources` attribute.
+``%(check_fail_reason)s``, The failure reason if the test has failed.
 ``%(check_hashcode)s``, The unique hash associated with this test.
 ``%(check_info)s``, Various information about this test; essentially the return value of the test's :func:`~reframe.core.pipeline.RegressionTest.info` function.
 ``%(check_job_completion_time)s``, Same as the ``(check_job_completion_time_unix)s`` but formatted according to ``datefmt``.

@@ -1184,6 +1185,9 @@ All logging handlers share the following set of common attributes:
 .. versionadded:: 4.3
    The ``%(check_#ALL)s`` special specifier is added.

+.. versionadded:: 4.7
+   The ``%(check_fail_reason)s`` specifier is added.
+

 .. py:attribute:: logging.handlers.format_perfvars
 .. py:attribute:: logging.handlers_perflog.format_perfvars
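
For context, the new specifier slots into a perflog handler's format string like any other ``%(check_*)s`` field. A minimal sketch of a ``handlers_perflog`` entry that would pick it up (the field layout here is illustrative, not part of this commit):

    'logging': [{
        'handlers_perflog': [{
            'type': 'filelog',
            'prefix': '%(check_system)s/%(check_partition)s',
            'level': 'info',
            'format': ('%(check_job_completion_time)s|%(check_result)s|'
                       '%(check_fail_reason)s|%(check_perfvalues)s'),
            'format_perfvars': '%(check_perf_value)s %(check_perf_unit)s|',
            'append': True
        }]
    }]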

reframe/core/logging.py (+3, -9)

@@ -19,7 +19,7 @@
 import reframe.utility.color as color
 import reframe.utility.jsonext as jsonext
 import reframe.utility.osext as osext
-from reframe.core.exceptions import ConfigError, LoggingError
+from reframe.core.exceptions import ConfigError, LoggingError, what
 from reframe.core.warnings import suppress_deprecations
 from reframe.utility import is_trivially_callable
 from reframe.utility.profile import TimeProfiler

@@ -128,14 +128,6 @@ def ignore_brokenpipe(hdlr, l):
 logging.Handler.handleError = handleError(logging.Handler.handleError)


-def _expand_params(check):
-    cls = type(check)
-    return {
-        name: getattr(check, name) for name, param in cls.param_space.items()
-        if param.is_loggable()
-    }
-
-
 def _guess_delim(s):
     '''Guess the delimiter in the given logging format string'''
     delims = set()

@@ -863,6 +855,8 @@ def log_performance(self, level, task, msg=None, multiline=False):
         self.extra['check_partition'] = task.testcase.partition.name
         self.extra['check_environ'] = task.testcase.environ.name
         self.extra['check_result'] = 'pass' if task.succeeded else 'fail'
+        fail_reason = what(*task.exc_info) if not task.succeeded else None
+        self.extra['check_fail_reason'] = fail_reason
         if msg is None:
             msg = 'sent by ' + self.extra['osuser']

reframe/core/pipeline.py (+23, -23)

@@ -2235,14 +2235,11 @@ def check_performance(self):
             self.perf_variables[var] = sn.make_performance_function(expr,
                                                                     unit)

-        if self.is_dry_run():
-            return
-
        # Evaluate the performance function and retrieve the metrics
        with osext.change_dir(self._stagedir):
            for tag, expr in self.perf_variables.items():
                try:
-                    value = expr.evaluate()
+                    value = expr.evaluate() if not self.is_dry_run() else None
                     unit = expr.unit
                 except Exception as e:
                     logging.getlogger().warning(

@@ -2282,27 +2279,30 @@
                 self._perfvalues[key] = (value, *ref, unit)

-        # Check the performance variables against their references.
-        for key, values in self._perfvalues.items():
-            val, ref, low_thres, high_thres, *_ = values
+        if self.is_dry_run():
+            return

-            # Verify that val is a number
-            if not isinstance(val, numbers.Number):
-                raise SanityError(
-                    f'the value extracted for performance variable '
-                    f'{key!r} is not a number: {val}'
-                )
+        # Check the performance variables against their references.
+        for key, values in self._perfvalues.items():
+            val, ref, low_thres, high_thres, *_ = values

-            tag = key.split(':')[-1]
-            try:
-                sn.evaluate(
-                    sn.assert_reference(
-                        val, ref, low_thres, high_thres,
-                        msg=('failed to meet reference: %s={0}, '
-                             'expected {1} (l={2}, u={3})' % tag))
-                )
-            except SanityError as e:
-                raise PerformanceError(e) from None
+            # Verify that val is a number
+            if not isinstance(val, numbers.Number):
+                raise SanityError(
+                    f'the value extracted for performance variable '
+                    f'{key!r} is not a number: {val}'
+                )
+
+            tag = key.split(':')[-1]
+            try:
+                sn.evaluate(
+                    sn.assert_reference(
+                        val, ref, low_thres, high_thres,
+                        msg=('failed to meet reference: %s={0}, '
+                             'expected {1} (l={2}, u={3})' % tag))
+                )
+            except SanityError as e:
+                raise PerformanceError(e) from None

     def _copy_job_files(self, job, dst):
         if job is None:
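
The net effect of this refactoring: in dry-run mode the value-collection loop still executes, recording None for every performance variable, and the method now bails out only before the reference checks. Schematically (names hypothetical, not the actual method body):

    # Collect values even in dry-run mode, so the perflog sees every key
    for tag, expr in perf_variables.items():
        perfvalues[tag] = expr.evaluate() if not dry_run else None

    if dry_run:
        return    # no real values, so skip the reference checks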

reframe/frontend/cli.py (+1, -1)

@@ -1432,7 +1432,7 @@ def module_unuse(*paths):
        printer, options.duration or options.reruns
    )

-    if options.performance_report:
+    if options.performance_report and not options.dry_run:
        printer.info(runner.stats.performance_report())

    # Generate the report for this session

reframe/frontend/executors/__init__.py (+28, -3)

@@ -4,6 +4,7 @@
 # SPDX-License-Identifier: BSD-3-Clause

 import abc
+import contextlib
 import copy
 import os
 import signal

@@ -328,6 +329,27 @@ def __exit__(this, exc_type, exc_value, traceback):
                 self.fail()
                 raise TaskExit from e

+    def _dry_run_call(self, fn, *args, **kwargs):
+        '''Call check's fn method in dry-run mode.'''
+
+        @contextlib.contextmanager
+        def temp_dry_run(check):
+            dry_run_save = check._rfm_dry_run
+            try:
+                check._rfm_dry_run = True
+                yield check
+            except ABORT_REASONS:
+                raise
+            except BaseException:
+                pass
+            finally:
+                check._rfm_dry_run = dry_run_save
+
+        with runtime.temp_config(self.testcase.partition.fullname):
+            with temp_dry_run(self.check):
+                return fn(*args, **kwargs)
+
     @logging.time_function
     def setup(self, *args, **kwargs):
         self.testcase.prepare()

@@ -372,12 +394,15 @@ def run_wait(self):

     @logging.time_function
     def sanity(self):
+        self._perflogger = logging.getperflogger(self.check)
         self._safe_call(self.check.sanity)

     @logging.time_function
-    def performance(self):
-        self._perflogger = logging.getperflogger(self.check)
-        self._safe_call(self.check.performance)
+    def performance(self, dry_run=False):
+        if dry_run:
+            self._dry_run_call(self.check.performance)
+        else:
+            self._safe_call(self.check.performance)

     @logging.time_function
     def finalize(self):
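
The nested temp_dry_run context manager is an instance of the usual save/override/restore pattern; a generic, self-contained sketch of the same idea (names hypothetical):

    import contextlib

    @contextlib.contextmanager
    def temp_attr(obj, name, value):
        '''Temporarily override an attribute, restoring it on exit.'''
        saved = getattr(obj, name)
        try:
            setattr(obj, name, value)
            yield obj
        finally:
            setattr(obj, name, saved)

Unlike this generic version, _dry_run_call also swallows any non-fatal exception raised inside the block, so a failing dry-run of performance() cannot mask the original sanity failure; only ABORT_REASONS propagate.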

reframe/frontend/executors/policies.py (+8)

@@ -199,6 +199,10 @@ def on_task_failure(self, task):
             self.printer.status('FAIL', msg, just='right')

         _print_perf(task)
+        if task.failed_stage == 'sanity':
+            # Dry-run the performance stage to trigger performance logging
+            task.performance(dry_run=True)
+
         timings = task.pipeline_timings(['setup',
                                          'compile_complete',
                                          'run_complete',

@@ -617,6 +621,10 @@ def on_task_failure(self, task):
             self.printer.status('FAIL', msg, just='right')

         _print_perf(task)
+        if task.failed_stage == 'sanity':
+            # Dry-run the performance stage to trigger performance logging
+            task.performance(dry_run=True)
+
         timings = task.pipeline_timings(['setup',
                                          'compile_complete',
                                          'run_complete',
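
Both the serial and the asynchronous policy hook the same spot: when the failed stage is 'sanity', on_task_failure() dry-runs the performance stage, so the perflogger attached in sanity() still emits a record (result 'fail', performance values None) instead of silently dropping the test from the perflog.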

unittests/test_cli.py (+2, -1)

@@ -423,7 +423,8 @@ def test_perflogdir_from_env(run_reframe, tmp_path, monkeypatch):
 def test_performance_report(run_reframe, run_action):
     returncode, stdout, _ = run_reframe(
         checkpath=['unittests/resources/checks/frontend_checks.py'],
-        more_options=['-n', 'PerformanceFailureCheck', '--performance-report'],
+        more_options=['-n', '^PerformanceFailureCheck',
+                      '--performance-report'],
         action=run_action
     )
     if run_action == 'run':
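
The -n option interprets its argument as a regular expression, so anchoring it with '^' presumably keeps the selection from also matching other checks whose names merely contain 'PerformanceFailureCheck'.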

unittests/test_policies.py (+25)

@@ -1400,3 +1400,28 @@ def test_perf_logging_param_test(make_runner, make_exec_ctx, perf_param_tests,
                               'default' / '_MyPerfParamTest.log')
     assert os.path.exists(logfile)
     assert _count_lines(logfile) == 3
+
+
+def test_perf_logging_sanity_failure(make_runner, make_exec_ctx,
+                                     config_perflog, tmp_path):
+    class _X(_MyPerfTest):
+        @sanity_function
+        def validate(self):
+            return sn.assert_true(0, msg='no way')
+
+    make_exec_ctx(config_perflog(
+        fmt='%(check_result)s|%(check_fail_reason)s|%(check_perfvalues)s',
+        perffmt='%(check_perf_value)s|'
+    ))
+    logging.configure_logging(rt.runtime().site_config)
+    runner = make_runner()
+    testcases = executors.generate_testcases([_X()])
+    _assert_no_logging_error(runner.runall, testcases)
+
+    logfile = tmp_path / 'perflogs' / 'generic' / 'default' / '_X.log'
+    assert os.path.exists(logfile)
+    with open(logfile) as fp:
+        lines = fp.readlines()
+
+    assert len(lines) == 2
+    assert lines[1] == 'fail|sanity error: no way|None|None\n'
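
The two expected lines are, apparently, the header that the filelog handler writes when it creates the log file and the single record for the failed test: check_result and check_fail_reason come first, and check_perfvalues expands to one None per performance variable of _MyPerfTest, hence the trailing 'None|None'.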
