import reframe.utility.sanity as sn


@rfm.simple_test
class VASPCheck(rfm.RunOnlyRegressionTest):
    """VASP application check, parameterized on the number of nodes.

    The job geometry (tasks per node, CPUs per task, OpenMP environment)
    is derived at run time from the auto-detected processor topology of
    the current partition; performance references are looked up by
    ``(num_nodes, architecture)``.
    """

    modules = ['VASP']
    executable = 'vasp_std'
    extra_resources = {
        'switches': {
            'num_switches': 1
        }
    }
    keep_files = ['OUTCAR']        # OUTCAR holds the elapsed-time figure
    strict_check = False
    use_multithreading = False
    tags = {'maintenance', 'production'}
    maintainers = ['LM']

    # The check runs at two scales: 6 and 16 nodes.
    num_nodes = parameter([6, 16], loggable=True)

    # Performance references keyed by
    # num_nodes -> processor architecture -> partition.
    references = {
        6: {
            'sm_60': {
                'dom:gpu': {'time': (56.0, None, 0.10, 's')},
                'daint:gpu': {'time': (65.0, None, 0.15, 's')},
            },
            'broadwell': {
                'dom:mc': {'time': (58.0, None, 0.10, 's')},
                'daint:mc': {'time': (65.0, None, 0.15, 's')},
            },
            'zen2': {
                'eiger:mc': {'time': (100.0, None, 0.10, 's')},
                'pilatus:mc': {'time': (100.0, None, 0.10, 's')},
            },
        },
        16: {
            'sm_60': {
                'daint:gpu': {'time': (55.0, None, 0.15, 's')},
            },
            'broadwell': {
                'daint:mc': {'time': (55.0, None, 0.15, 's')},
            },
            'zen2': {
                'eiger:mc': {'time': (100.0, None, 0.10, 's')},
                'pilatus:mc': {'time': (100.0, None, 0.10, 's')}
            }
        }
    }

    @performance_function('s')
    def elapsed_time(self):
        """Extract the elapsed run time in seconds from OUTCAR."""
        return sn.extractsingle(r'Elapsed time \(sec\):'
                                r'\s+(?P<time>\S+)', 'OUTCAR',
                                'time', float)

    @sanity_function
    def assert_reference(self):
        """Validate the computed total free energy against the reference."""
        force = sn.extractsingle(r'1 F=\s+(?P<result>\S+)',
                                 self.stdout, 'result', float)
        return sn.assert_reference(force, -.85026214E+03, -1e-5, 1e-5)

    @run_after('init')
    def setup_system_filtering(self):
        """Restrict valid systems and environments by node count."""
        self.descr = f'VASP check ({self.num_nodes} node(s))'

        # setup system filter
        valid_systems = {
            6: ['daint:gpu', 'daint:mc', 'dom:gpu', 'dom:mc',
                'eiger:mc', 'pilatus:mc'],
            16: ['daint:gpu', 'daint:mc', 'eiger:mc']
        }
        self.skip_if(self.num_nodes not in valid_systems,
                     f'No valid systems found for {self.num_nodes} node(s)')
        self.valid_systems = valid_systems[self.num_nodes]

        # setup programming environment filter
        if self.current_system.name in ['eiger', 'pilatus']:
            self.valid_prog_environs = ['cpeIntel']
        else:
            self.valid_prog_environs = ['builtin']

    @run_before('run')
    def setup_run(self):
        """Derive the job geometry from the partition's processor info."""
        # Requires auto-detected processor information; skip otherwise.
        self.skip_if_no_procinfo()
        proc = self.current_partition.processor
        arch = proc.arch

        # set architecture for GPU partitions (no auto-detection there)
        if self.current_partition.fullname in ['daint:gpu', 'dom:gpu']:
            arch = 'sm_60'

        # setup performance references; skip unsupported combinations
        try:
            self.reference = self.references[self.num_nodes][arch]
        except KeyError:
            self.skip(f'Configuration with {self.num_nodes} node(s) '
                      f'is not supported on {arch!r}')

        # common setup for every architecture: one rank per socket,
        # one OpenMP thread per remaining core
        self.job.launcher.options = ['--cpu-bind=cores']
        self.job.options = ['--distribution=block:block']
        self.num_tasks_per_node = proc.num_sockets
        self.num_cpus_per_task = proc.num_cores // self.num_tasks_per_node
        self.num_tasks = self.num_nodes * self.num_tasks_per_node
        self.variables = {
            'OMP_NUM_THREADS': str(self.num_cpus_per_task),
            'OMP_PLACES': 'cores',
            'OMP_PROC_BIND': 'close'
        }

        # custom settings for selected architectures
        if arch == 'zen2':
            self.variables.update({
                'MPICH_OFI_STARTUP_CONNECT': '1'
            })