Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

use mixin class for espresso #248

Open
wants to merge 6 commits into
base: main
Choose a base branch
from
Open
Show file tree
Hide file tree
Changes from 5 commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
13 changes: 11 additions & 2 deletions eessi/testsuite/eessi_mixin.py
Original file line number Diff line number Diff line change
Expand Up @@ -142,14 +142,23 @@ def measure_mem_usage(self):

@run_after('init', always_last=True)
def set_tag_ci(self):
    """
    Set the CI tag if bench_name_ci and bench_name are set and are equal.
    Also add a tag named after bench_name, if set.

    Raises:
        ReframeFatalError: if bench_name_ci is set but bench_name is not.
    """
    # Fixed: dropped the leftover one-line docstring that preceded the real
    # docstring, and the redundant log() call right after adding the CI tag
    # (the final log below already reports the full tag set once).
    tags_added = False
    if self.bench_name_ci:
        if not self.bench_name:
            msg = "Attribute bench_name_ci is set, but bench_name is not set"
            raise ReframeFatalError(msg)
        if self.bench_name == self.bench_name_ci:
            self.tags.add(TAGS['CI'])
            tags_added = True
    if self.bench_name:
        self.tags.add(self.bench_name)
        tags_added = True
    # Log only once, and only if anything actually changed
    if tags_added:
        log(f'tags set to {self.tags}')

@run_after('setup')
def validate_setup(self):
Expand Down
137 changes: 27 additions & 110 deletions eessi/testsuite/tests/apps/espresso/espresso.py
Original file line number Diff line number Diff line change
Expand Up @@ -10,11 +10,11 @@
import reframe as rfm
import reframe.utility.sanity as sn

from reframe.core.builtins import parameter, run_after # added only to make the linter happy
from reframe.core.builtins import deferrable, parameter, performance_function, run_after, sanity_function
from reframe.utility import reframe

from eessi.testsuite import hooks, utils
from eessi.testsuite.constants import *
from eessi.testsuite.constants import CPU, DEVICE_TYPES, SCALES, COMPUTE_UNIT
from eessi.testsuite.eessi_mixin import EESSI_Mixin
from eessi.testsuite.utils import find_modules, log


Expand All @@ -31,65 +31,21 @@ def filter_scales():
]


class EESSI_ESPRESSO(rfm.RunOnlyRegressionTest):
valid_prog_environs = ['default']
valid_systems = ['*']
# Need to check if QuantumESPRESSO also gets listed.
module_name = parameter(find_modules('ESPResSo'))
# device type is parameterized for an impending CUDA ESPResSo module.
device_type = parameter([DEVICE_TYPES[CPU]])
class EESSI_ESPRESSO_base(rfm.RunOnlyRegressionTest):
module_name = parameter(find_modules('^ESPResSo$'))
device_type = DEVICE_TYPES[CPU]
compute_unit = COMPUTE_UNIT[CPU]
time_limit = '300m'

@run_after('init')
def run_after_init(self):
    """Hooks to run after the init phase: filter systems/scales, set modules and scale tags."""
    # Filter on which scales are supported by the partitions defined in the ReFrame configuration
    hooks.filter_supported_scales(self)

    # Keep only systems that provide the required device type (CPU for this test)
    hooks.filter_valid_systems_by_device_type(self, required_device_type=self.device_type)

    hooks.set_modules(self)

    # Set scales as tags
    hooks.set_tag_scale(self)

@run_after('setup')
def set_num_tasks_per_node(self):
    """ Setting number of tasks per node and cpus per task in this function. This function sets num_cpus_per_task
    for 1 node and 2 node options where the request is for full nodes."""
    # Delegates the task/cpu layout to the shared hook, binding one task per CPU compute unit
    hooks.assign_tasks_per_compute_unit(self, COMPUTE_UNIT[CPU])

@run_after('setup')
def set_mem(self):
    """Request job memory: 0.9 GiB per task, assuming HPC nodes offer at least 1 GB per core."""
    # req_memory_per_node expects the amount in MiB
    hooks.req_memory_per_node(test=self, app_mem_req=(self.num_tasks_per_node * 0.9) * 1024)

@run_after('setup')
def set_binding(self):
    """Bind processes compactly (adjacent cores) via the shared hook."""
    hooks.set_compact_process_binding(self)

@deferrable
def assert_completion(self):
    '''Check completion: the benchmark must report "Algorithm executed." on stdout.'''
    if self.benchmark_info[0] in ['mpi.ionic_crystals.p3m']:
        # p3m additionally reports the tuned charge-assignment order (cao); require it to be present
        cao = sn.extractsingle(r'^resulting parameters:.*cao: (?P<cao>\S+),', self.stdout, 'cao', int)
        return (sn.assert_found(r'^Algorithm executed.', self.stdout) and cao)
    elif self.benchmark_info[0] in ['mpi.particles.lj']:
        return (sn.assert_found(r'^Algorithm executed.', self.stdout))
    # NOTE(review): falls through and returns None for any other benchmark name — confirm intended

@deferrable
def assert_convergence(self):
    '''Check convergence: tolerance message present and extracted energy non-zero.'''
    check_string = False
    energy = 0.0
    if self.benchmark_info[0] in ['mpi.ionic_crystals.p3m']:
        check_string = sn.assert_found(r'Final convergence met with tolerances:', self.stdout)
        energy = sn.extractsingle(r'^\s+energy:\s+(?P<energy>\S+)', self.stdout, 'energy', float)
    elif self.benchmark_info[0] in ['mpi.particles.lj']:
        # LJ reports a different label ("sim_energy") and tolerance message than p3m
        check_string = sn.assert_found(r'Final convergence met with relative tolerances:', self.stdout)
        energy = sn.extractsingle(r'^\s+sim_energy:\s+(?P<energy>\S+)', self.stdout, 'energy', float)
    # For unknown benchmark names the defaults (False, 0.0) make this fail
    return (check_string and (energy != 0.0))
def set_ci_tag(self):
    """Mark this test for CI on small scales by setting bench_name_ci.

    The test runs longer at larger scales due to mesh tuning, so only
    scales below 2 nodes are selected for CI to limit execution time.
    TODO: revisit this for more recent versions of ESPResSo
    (see also: https://github.com/EESSI/test-suite/issues/154)
    """
    if SCALES[self.scale]['num_nodes'] >= 2:
        return
    self.bench_name_ci = self.bench_name

@sanity_function
def assert_sanity(self):
Expand All @@ -105,41 +61,25 @@ def perf(self):


@rfm.simple_test
class EESSI_ESPRESSO_P3M_IONIC_CRYSTALS(EESSI_ESPRESSO):
class EESSI_ESPRESSO_P3M_IONIC_CRYSTALS(EESSI_ESPRESSO_base, EESSI_Mixin):
scale = parameter(filter_scales())
time_limit = '300m'

executable = 'python3 madelung.py'
sourcesdir = 'src/p3m'
readonly_files = ['madelung.py']
bench_name = 'ionic_crystals_p3m'

default_weak_scaling_system_size = 6

@run_after('init')
def set_tag_ci(self):
""" Setting tests under CI tag. """
if SCALES[self.scale]['num_nodes'] < 2:
self.tags.add('CI')
log(f'tags set to {self.tags}')

self.tags.add('ionic_crystals_p3m')
def required_mem_per_node(self):
    """Return the memory to request per node, in MiB: 0.9 GiB per task."""
    gib_per_task = 0.9
    return self.num_tasks_per_node * gib_per_task * 1024

@run_after('init')
def set_executable_opts(self):
    """Set executable opts based on device_type parameter"""
    num_default = 0  # If this test already has executable opts, they must have come from the command line
    hooks.check_custom_executable_opts(self, num_default=num_default)
    # By default we run weak scaling since the strong scaling sizes need to change based on max node size and a
    # corresponding min node size has to be chosen.
    # Weak scaling (Gustafson's law: constant work per core): size scales with number of cores
    self.executable_opts += ['--size', str(self.default_weak_scaling_system_size), '--weak-scaling']
    utils.log(f'executable_opts set to {self.executable_opts}')

@run_after('setup')
def set_mem(self):
    """Request job memory for p3m: 0.9 GiB per task, assuming nodes have at least 1 GB per core."""
    # req_memory_per_node expects the amount in MiB
    hooks.req_memory_per_node(test=self, app_mem_req=(self.num_tasks_per_node * 0.9) * 1024)
log(f'executable_opts set to {self.executable_opts}')

@deferrable
def assert_completion(self):
Expand All @@ -150,44 +90,23 @@ def assert_completion(self):
@deferrable
def assert_convergence(self):
    '''Check convergence: the tolerance message must appear and the extracted
    Madelung energy must be non-zero.'''
    # Fixed: removed the dead stores (check_string = False, energy = 0.0) that
    # were unconditionally overwritten on the next two lines.
    check_string = sn.assert_found(r'Final convergence met with tolerances:', self.stdout)
    energy = sn.extractsingle(r'^\s+energy:\s+(?P<energy>\S+)', self.stdout, 'energy', float)
    return (check_string and (energy != 0.0))


@rfm.simple_test
class EESSI_ESPRESSO_LJ_PARTICLES(EESSI_ESPRESSO):
class EESSI_ESPRESSO_LJ_PARTICLES(EESSI_ESPRESSO_base, EESSI_Mixin):
scale = parameter(filter_scales())
time_limit = '300m'

executable = 'python3 lj.py'
sourcesdir = 'src/lj'
readonly_files = ['lj.py']
bench_name = 'particles_lj'

@run_after('init')
def set_tag_ci(self):
    """ Setting tests under CI tag. """
    # Only scales below 2 nodes go into CI, to limit execution time
    if SCALES[self.scale]['num_nodes'] < 2:
        self.tags.add('CI')
        log(f'tags set to {self.tags}')

    # Always tag with the benchmark name so it can be selected directly
    self.tags.add('particles_lj')

@run_after('init')
def set_executable_opts(self):
    """Allow executable opts to be overwritten from command line"""
    num_default = 0  # If this test already has executable opts, they must have come from the command line
    hooks.check_custom_executable_opts(self, num_default=num_default)

@run_after('setup')
def set_mem(self):
    """Request job memory for LJ.

    LJ needs far less memory than P3M: measured usage is about 200 MB per
    core, so requesting 0.3 GiB per task leaves ample headroom.
    """
    # req_memory_per_node expects the amount in MiB
    hooks.req_memory_per_node(test=self, app_mem_req=(self.num_tasks_per_node * 0.3) * 1024)
def required_mem_per_node(self):
    """Return the memory to request per node, in MiB: 0.3 GiB per task.

    Measured LJ usage is roughly 200 MB per core, so this leaves headroom.
    """
    gib_per_task = 0.3
    return self.num_tasks_per_node * gib_per_task * 1024

@deferrable
def assert_completion(self):
Expand All @@ -197,8 +116,6 @@ def assert_completion(self):
@deferrable
def assert_convergence(self):
    '''Check convergence: the relative-tolerance message must appear and the
    extracted simulation energy must be non-zero.'''
    # Fixed: removed the dead stores (check_string = False, energy = 0.0) that
    # were unconditionally overwritten on the next two lines.
    check_string = sn.assert_found(r'Final convergence met with relative tolerances:', self.stdout)
    energy = sn.extractsingle(r'^\s+sim_energy:\s+(?P<energy>\S+)', self.stdout, 'energy', float)
    return (check_string and (energy != 0.0))