Skip to content

Commit 856636d

Browse files
author
Vasileios Karakasis
committed
Merge branch 'master' into feature/oar-scheduler
2 parents a7cd42e + aacf20f commit 856636d

File tree

100 files changed

+3897
-2310
lines changed

Some content is hidden

Large commits have some content hidden by default. Use the search box below to find content that may be hidden.

100 files changed

+3897
-2310
lines changed

.github/workflows/main.yml

Lines changed: 11 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -47,6 +47,17 @@ jobs:
4747
run: |
4848
docker run reframe:${{ matrix.modules-version }}
4949
50+
tutorialtest:
51+
runs-on: ubuntu-latest
52+
steps:
53+
- uses: actions/checkout@v2
54+
- name: Build Image for Tutorial Tests
55+
run: |
56+
docker build -f ci-scripts/dockerfiles/tutorials.dockerfile -t reframe:tutorials .
57+
- name: Run Tutorial Tests
58+
run: |
59+
docker run reframe:tutorials
60+
5061
unusedimports:
5162
runs-on: ubuntu-latest
5263
steps:

README.md

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1,4 +1,4 @@
1-
[![ReFrame Logo](https://github.com/eth-cscs/reframe/blob/master/docs/_static/img/reframe_logo-width400p.png)](https://github.com/eth-cscs/reframe)<br/>
1+
[![ReFrame Logo](https://raw.githubusercontent.com/eth-cscs/reframe/master/docs/_static/img/reframe_logo-width400p.png)](https://github.com/eth-cscs/reframe)<br/>
22
[![Build Status](https://github.com/eth-cscs/reframe/workflows/ReFrame%20CI/badge.svg)](https://github.com/eth-cscs/reframe/actions?query=workflow%3A%22ReFrame+CI%22)
33
[![Documentation Status](https://readthedocs.org/projects/reframe-hpc/badge/?version=latest)](https://reframe-hpc.readthedocs.io/en/latest/?badge=latest)
44
[![codecov.io](https://codecov.io/gh/eth-cscs/reframe/branch/master/graph/badge.svg)](https://codecov.io/github/eth-cscs/reframe)<br/>
Lines changed: 40 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,40 @@
1+
#
2+
# Execute this from the top-level ReFrame source directory
3+
#
4+
5+
6+
FROM reframehpc/rfm-ci-base:lmod
7+
8+
ENV _SPACK_VER=0.16
9+
ENV _EB_VER=4.4.1
10+
11+
# Required utilities
12+
RUN apt-get -y update && \
13+
apt-get -y install curl
14+
15+
# ReFrame user
16+
RUN useradd -ms /bin/bash rfmuser
17+
18+
USER rfmuser
19+
20+
# Install Spack
21+
RUN git clone https://github.com/spack/spack ~/spack && \
22+
cd ~/spack && \
23+
git checkout releases/v${_SPACK_VER}
24+
25+
RUN pip3 install easybuild==${_EB_VER}
26+
27+
ENV PATH="/home/rfmuser/.local/bin:${PATH}"
28+
29+
# Install ReFrame from the current directory
30+
COPY --chown=rfmuser . /home/rfmuser/reframe/
31+
32+
WORKDIR /home/rfmuser/reframe
33+
34+
RUN ./bootstrap.sh
35+
36+
RUN echo '. /usr/local/lmod/lmod/init/profile && . /home/rfmuser/spack/share/spack/setup-env.sh' > /home/rfmuser/setup.sh
37+
38+
ENV BASH_ENV /home/rfmuser/setup.sh
39+
40+
CMD ["/bin/bash", "-c", "./bin/reframe -r -C tutorials/config/settings.py -R -c tutorials/build_systems --system tutorials-docker"]

config/cscs.py

Lines changed: 10 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -194,7 +194,7 @@
194194
{
195195
'type': 'Singularity',
196196
'modules': [
197-
'singularity'
197+
'singularity/3.6.4-daint'
198198
]
199199
}
200200
],
@@ -242,7 +242,7 @@
242242
{
243243
'type': 'Singularity',
244244
'modules': [
245-
'singularity'
245+
'singularity/3.6.4-daint'
246246
]
247247
}
248248
],
@@ -362,7 +362,7 @@
362362
{
363363
'type': 'Singularity',
364364
'modules': [
365-
'singularity/3.5.3'
365+
'singularity/3.6.4-daint'
366366
]
367367
}
368368
],
@@ -405,7 +405,7 @@
405405
{
406406
'type': 'Singularity',
407407
'modules': [
408-
'singularity/3.5.3'
408+
'singularity/3.6.4-daint'
409409
]
410410
}
411411
],
@@ -703,6 +703,9 @@
703703
},
704704
{
705705
'type': 'Singularity',
706+
'modules': [
707+
'singularity/3.5.3-eiger'
708+
]
706709
}
707710
],
708711
'environs': [
@@ -788,6 +791,9 @@
788791
},
789792
{
790793
'type': 'Singularity',
794+
'modules': [
795+
'singularity/3.5.3-eiger'
796+
]
791797
}
792798
],
793799
'environs': [

cscs-checks/apps/greasy/greasy_check.py

Lines changed: 84 additions & 64 deletions
Original file line numberDiff line numberDiff line change
@@ -16,79 +16,76 @@ def to_seconds(str):
1616
datetime.strptime('00:00:00', '%H:%M:%S')).total_seconds()
1717

1818

19-
@rfm.parameterized_test(
20-
['serial', 'gpu', 24, 12, 1, 1],
21-
['serial', 'mc', 72, 36, 1, 1],
22-
['openmp', 'gpu', 24, 3, 1, 4],
23-
['openmp', 'mc', 72, 9, 1, 4],
24-
['mpi', 'gpu', 24, 4, 3, 1],
25-
['mpi', 'mc', 72, 12, 3, 1],
26-
['mpi+openmp', 'gpu', 24, 3, 2, 2],
27-
['mpi+openmp', 'mc', 72, 6, 3, 2]
28-
)
19+
@rfm.simple_test
2920
class GREASYCheck(rfm.RegressionTest):
30-
def __init__(self, variant, partition, num_greasy_tasks, nworkes_per_node,
31-
nranks_per_worker, ncpus_per_worker):
32-
self.valid_systems = ['daint:' + partition, 'dom:' + partition]
21+
configuration = parameter([('serial', 'gpu', 24, 12, 1, 1),
22+
('serial', 'mc', 72, 36, 1, 1),
23+
('openmp', 'gpu', 24, 3, 1, 4),
24+
('openmp', 'mc', 72, 9, 1, 4),
25+
('mpi', 'gpu', 24, 4, 3, 1),
26+
('mpi', 'mc', 72, 12, 3, 1),
27+
('mpi+openmp', 'gpu', 24, 3, 2, 2),
28+
('mpi+openmp', 'mc', 72, 6, 3, 2)])
29+
variant = variable(str)
30+
partition = variable(str)
31+
num_greasy_tasks = variable(int)
32+
workers_per_node = variable(int)
33+
ranks_per_worker = variable(int)
34+
cpus_per_worker = variable(int)
35+
valid_prog_environs = ['PrgEnv-gnu']
36+
sourcepath = 'tasks_mpi_openmp.c'
37+
build_system = 'SingleSource'
38+
executable = 'tasks_mpi_openmp.x'
39+
tasks_file = variable(str, value='tasks.txt')
40+
greasy_logfile = variable(str, value='greasy.log')
41+
nnodes = variable(int, value=2)
3342

34-
self.valid_prog_environs = ['PrgEnv-gnu']
35-
self.sourcepath = 'tasks_mpi_openmp.c'
36-
self.build_system = 'SingleSource'
43+
# sleep enough time to distinguish if the files are running in parallel
44+
# or not
45+
sleep_time = variable(int, value=60)
46+
use_multithreading = False
47+
modules = ['GREASY']
48+
maintainers = ['VH', 'SK']
49+
tags = {'production'}
3750

38-
# sleep enough time to distinguish if the files are running in parallel
39-
# or not
40-
self.sleep_time = 60
51+
@run_after('init')
52+
def unpack_configuration_parameter(self):
53+
self.variant, self.partition = self.configuration[0:2]
54+
self.num_greasy_tasks, self.workers_per_node = self.configuration[2:4]
55+
self.ranks_per_worker, self.cpus_per_worker = self.configuration[4:6]
56+
57+
@run_after('init')
58+
def set_valid_systems(self):
59+
self.valid_systems = [f'daint:{self.partition}',
60+
f'dom:{self.partition}']
61+
62+
@run_before('compile')
63+
def setup_build_system(self):
4164
self.build_system.cflags = [f'-DSLEEP_TIME={self.sleep_time:d}']
42-
self.variant = variant
43-
if variant == 'openmp':
65+
if self.variant == 'openmp':
4466
self.build_system.cflags += ['-fopenmp']
45-
elif variant == 'mpi':
67+
elif self.variant == 'mpi':
4668
self.build_system.cflags += ['-D_MPI']
47-
elif variant == 'mpi+openmp':
69+
elif self.variant == 'mpi+openmp':
4870
self.build_system.cflags += ['-fopenmp', '-D_MPI']
4971

50-
self.executable = 'tasks_mpi_openmp.x'
51-
self.tasks_file = 'tasks.txt'
72+
@run_before('run')
73+
def setup_greasy_run(self):
5274
self.executable_opts = [self.tasks_file]
53-
self.greasy_logfile = 'greasy.log'
5475
self.keep_files = [self.tasks_file, self.greasy_logfile]
55-
nnodes = 2
56-
self.use_multithreading = False
57-
self.num_greasy_tasks = num_greasy_tasks
58-
self.nworkes_per_node = nworkes_per_node
59-
self.nranks_per_worker = nranks_per_worker
60-
self.num_tasks_per_node = nranks_per_worker * nworkes_per_node
61-
self.num_tasks = self.num_tasks_per_node * nnodes
62-
self.num_cpus_per_task = ncpus_per_worker
63-
self.sanity_patterns = self.eval_sanity()
76+
self.num_tasks_per_node = self.ranks_per_worker * self.workers_per_node
77+
self.num_tasks = self.num_tasks_per_node * self.nnodes
78+
self.num_cpus_per_task = self.cpus_per_worker
6479

65-
# Reference value is system agnostic
66-
# Adding 10 secs of slowdown per greasy tasks
67-
# this is to compensate for whenever the systems are full and srun gets
68-
# slightly slower
69-
refperf = (
70-
(self.sleep_time+10)*num_greasy_tasks / nworkes_per_node / nnodes
71-
)
72-
self.reference = {
73-
'*': {
74-
'time': (refperf, None, 0.5, 's')
75-
}
76-
}
77-
self.perf_patterns = {
78-
'time': sn.extractsingle(r'Total time: (?P<perf>\S+)',
79-
self.greasy_logfile,
80-
'perf', to_seconds)
81-
}
80+
@run_before('run')
81+
def set_environment_variables(self):
8282
# On SLURM there is no need to set OMP_NUM_THREADS if one defines
8383
# num_cpus_per_task, but adding for completeness and portability
8484
self.variables = {
8585
'OMP_NUM_THREADS': str(self.num_cpus_per_task),
86-
'GREASY_NWORKERS_PER_NODE': str(nworkes_per_node),
86+
'GREASY_NWORKERS_PER_NODE': str(self.workers_per_node),
8787
'GREASY_LOGFILE': self.greasy_logfile
8888
}
89-
self.modules = ['GREASY']
90-
self.maintainers = ['VH', 'SK']
91-
self.tags = {'production'}
9289

9390
@run_before('run')
9491
def generate_tasks_file(self):
@@ -114,7 +111,7 @@ def daint_dom_gpu_specific_workaround(self):
114111
}
115112
}
116113
elif self.current_partition.fullname in ['daint:mc']:
117-
if self.variant != 'serial':
114+
if 'serial' not in self.variant:
118115
self.extra_resources = {
119116
'gres': {
120117
'gres': 'craynetwork:72'
@@ -133,17 +130,19 @@ def set_launcher(self):
133130
# make calls to srun
134131
self.job.launcher = getlauncher('local')()
135132

136-
@sn.sanity_function
137-
def eval_sanity(self):
133+
@sanity_function
134+
def assert_success(self):
138135
output_files = []
139136
output_files = [file for file in os.listdir(self.stagedir)
140137
if file.startswith('output-')]
141138
num_greasy_tasks = len(output_files)
142139
failure_msg = (f'Requested {self.num_greasy_tasks} task(s), but '
143140
f'executed only {num_greasy_tasks} tasks(s)')
144-
sn.evaluate(sn.assert_eq(num_greasy_tasks, self.num_greasy_tasks,
145-
msg=failure_msg))
146-
num_tasks = sn.getattr(self, 'nranks_per_worker')
141+
sn.evaluate(
142+
sn.assert_eq(num_greasy_tasks, self.num_greasy_tasks,
143+
msg=failure_msg)
144+
)
145+
num_tasks = sn.getattr(self, 'ranks_per_worker')
147146
num_cpus_per_task = sn.getattr(self, 'num_cpus_per_task')
148147

149148
def tid(match):
@@ -184,7 +183,7 @@ def num_ranks(match):
184183
lambda x: sn.assert_lt(
185184
rank(x), num_ranks(x),
186185
msg=(f'Rank id {rank(x)} is not lower than the '
187-
f'number of ranks {self.nranks_per_worker} '
186+
f'number of ranks {self.ranks_per_worker} '
188187
f'in output file')
189188
), result
190189
),
@@ -217,7 +216,7 @@ def num_ranks(match):
217216
lambda x: sn.assert_eq(
218217
num_ranks(x), num_tasks,
219218
msg=(f'Number of ranks {num_ranks(x)} is not '
220-
f'equal to {self.nranks_per_worker} in '
219+
f'equal to {self.ranks_per_worker} in '
221220
f'output file {output_file}')
222221
), result
223222
)
@@ -234,3 +233,24 @@ def num_ranks(match):
234233
))
235234

236235
return True
236+
237+
@run_before('performance')
238+
def set_reference(self):
239+
# Reference value is system agnostic
240+
# Adding 10 secs of slowdown per greasy tasks
241+
# this is to compensate for whenever the systems are full and srun gets
242+
# slightly slower
243+
refperf = (
244+
(self.sleep_time + 10) * self.num_greasy_tasks /
245+
self.workers_per_node / self.nnodes
246+
)
247+
self.reference = {
248+
'*': {
249+
'time': (refperf, None, 0.5, 's')
250+
}
251+
}
252+
253+
@performance_function('s')
254+
def time(self):
255+
return sn.extractsingle(r'Total time: (?P<perf>\S+)',
256+
self.greasy_logfile, 'perf', to_seconds)

0 commit comments

Comments
 (0)