44# SPDX-License-Identifier: BSD-3-Clause
55
66import contextlib
7+ import io
78import json
89import jsonschema
910import os
@@ -999,89 +1000,79 @@ def assert_git_timeout(self):
9991000 assert not runner .stats .failed ()
10001001
10011002
class _MyPerfTest(rfm.RunOnlyRegressionTest):
    '''Minimal run-only test that reports two performance variables.

    The executable simply echoes ``perf0=100`` and ``perf1=50``, which the
    performance functions below extract from stdout.
    '''

    valid_systems = ['*']
    valid_prog_environs = ['*']
    executable = 'echo perf0=100 && echo perf1=50'

    @sanity_function
    def validate(self):
        # Sanity passes as long as the first perf marker appears in stdout.
        return sn.assert_found(r'perf0', self.stdout)

    @performance_function('unit0')
    def perf0(self):
        return sn.extractsingle(r'perf0=(\S+)', self.stdout, 1, float)

    @performance_function('unit1')
    def perf1(self):
        return sn.extractsingle(r'perf1=(\S+)', self.stdout, 1, float)
class _MyPerfParamTest(_MyPerfTest):
    '''Parameterised flavour of :class:`_MyPerfTest` with two variants.'''

    p = parameter([1, 2])
class _MyFailingTest(rfm.RunOnlyRegressionTest):
    '''Test whose sanity check always fails, despite a valid perf variable.'''

    valid_systems = ['*']
    valid_prog_environs = ['*']
    executable = 'echo perf0=100'

    @sanity_function
    def validate(self):
        # Deliberately fail sanity so that perf logging of failed cases
        # can be exercised.
        return False

    @performance_function('unit0')
    def perf0(self):
        return sn.extractsingle(r'perf0=(\S+)', self.stdout, 1, float)
class _LazyPerfTest(rfm.RunOnlyRegressionTest):
    '''Test that defines its performance variables lazily.

    Instead of using the ``@performance_function`` decorator, the perf
    variables are injected into ``self.perf_variables`` right before the
    performance stage runs.
    '''

    valid_systems = ['*']
    valid_prog_environs = ['*']
    executable = 'echo perf0=100'

    @sanity_function
    def validate(self):
        return True

    @run_before('performance')
    def set_perf_vars(self):
        self.perf_variables = {
            'perf0': sn.make_performance_function(
                sn.extractsingle(r'perf0=(\S+)', self.stdout, 1, float),
                'unit0'
            )
        }
@pytest.fixture
def perf_test():
    '''A fresh instance of the basic performance test.'''
    return _MyPerfTest()
10641062
@pytest.fixture
def perf_param_tests():
    '''Instances of every variant of the parameterised performance test.'''
    num_variants = _MyPerfParamTest.num_variants
    return [_MyPerfParamTest(variant_num=i) for i in range(num_variants)]
@pytest.fixture
def failing_perf_test():
    '''A fresh instance of the always-failing performance test.'''
    return _MyFailingTest()
@pytest.fixture
def lazy_perf_test():
    '''A fresh instance of the lazily-defined performance test.'''
    return _LazyPerfTest()
10871078
@@ -1142,6 +1133,14 @@ def _assert_header(filepath, header):
11421133 assert fp .readline ().strip () == header
11431134
11441135
1136+ def _assert_no_logging_error (fn , * args , ** kwargs ):
1137+ captured_stderr = io .StringIO ()
1138+ with contextlib .redirect_stderr (captured_stderr ):
1139+ fn (* args , ** kwargs )
1140+
1141+ assert 'Logging error' not in captured_stderr .getvalue ()
1142+
1143+
11451144def test_perf_logging (make_runner , make_exec_ctx , perf_test ,
11461145 config_perflog , tmp_path ):
11471146 make_exec_ctx (
@@ -1164,14 +1163,14 @@ def test_perf_logging(make_runner, make_exec_ctx, perf_test,
11641163 testcases = executors .generate_testcases ([perf_test ])
11651164 runner .runall (testcases )
11661165
1167- logfile = tmp_path / 'perflogs' / 'generic' / 'default' / '_MyTest .log'
1166+ logfile = tmp_path / 'perflogs' / 'generic' / 'default' / '_MyPerfTest .log'
11681167 assert os .path .exists (logfile )
11691168 assert _count_lines (logfile ) == 2
11701169
11711170 # Rerun with the same configuration and check that new entry is appended
11721171 testcases = executors .generate_testcases ([perf_test ])
11731172 runner = make_runner ()
1174- runner .runall ( testcases )
1173+ _assert_no_logging_error ( runner .runall , testcases )
11751174 assert _count_lines (logfile ) == 3
11761175
11771176 # Change the configuration and rerun
@@ -1189,7 +1188,7 @@ def test_perf_logging(make_runner, make_exec_ctx, perf_test,
11891188 logging .configure_logging (rt .runtime ().site_config )
11901189 testcases = executors .generate_testcases ([perf_test ])
11911190 runner = make_runner ()
1192- runner .runall ( testcases )
1191+ _assert_no_logging_error ( runner .runall , testcases )
11931192 assert _count_lines (logfile ) == 2
11941193 _assert_header (logfile ,
11951194 'job_completion_time,version,display_name,system,partition,'
@@ -1209,7 +1208,7 @@ def test_perf_logging(make_runner, make_exec_ctx, perf_test,
12091208 logging .configure_logging (rt .runtime ().site_config )
12101209 testcases = executors .generate_testcases ([perf_test ])
12111210 runner = make_runner ()
1212- runner .runall ( testcases )
1211+ _assert_no_logging_error ( runner .runall , testcases )
12131212 assert _count_lines (logfile ) == 2
12141213 _assert_header (logfile ,
12151214 'job_completion_time,version,display_name,system,partition,'
@@ -1238,9 +1237,9 @@ def test_perf_logging_no_end_delim(make_runner, make_exec_ctx, perf_test,
12381237 logging .configure_logging (rt .runtime ().site_config )
12391238 runner = make_runner ()
12401239 testcases = executors .generate_testcases ([perf_test ])
1241- runner .runall ( testcases )
1240+ _assert_no_logging_error ( runner .runall , testcases )
12421241
1243- logfile = tmp_path / 'perflogs' / 'generic' / 'default' / '_MyTest .log'
1242+ logfile = tmp_path / 'perflogs' / 'generic' / 'default' / '_MyPerfTest .log'
12441243 assert os .path .exists (logfile )
12451244 assert _count_lines (logfile ) == 2
12461245
@@ -1270,9 +1269,9 @@ def test_perf_logging_no_perfvars(make_runner, make_exec_ctx, perf_test,
12701269 logging .configure_logging (rt .runtime ().site_config )
12711270 runner = make_runner ()
12721271 testcases = executors .generate_testcases ([perf_test ])
1273- runner .runall ( testcases )
1272+ _assert_no_logging_error ( runner .runall , testcases )
12741273
1275- logfile = tmp_path / 'perflogs' / 'generic' / 'default' / '_MyTest .log'
1274+ logfile = tmp_path / 'perflogs' / 'generic' / 'default' / '_MyPerfTest .log'
12761275 assert os .path .exists (logfile )
12771276 assert _count_lines (logfile ) == 2
12781277
@@ -1307,9 +1306,9 @@ def test_perf_logging_multiline(make_runner, make_exec_ctx, perf_test,
13071306 testcases = executors .generate_testcases (
13081307 [perf_test , simple_test , failing_perf_test ]
13091308 )
1310- runner .runall ( testcases )
1309+ _assert_no_logging_error ( runner .runall , testcases )
13111310
1312- logfile = tmp_path / 'perflogs' / 'generic' / 'default' / '_MyTest .log'
1311+ logfile = tmp_path / 'perflogs' / 'generic' / 'default' / '_MyPerfTest .log'
13131312 assert os .path .exists (logfile )
13141313 assert _count_lines (logfile ) == 3
13151314
@@ -1340,7 +1339,7 @@ def test_perf_logging_lazy(make_runner, make_exec_ctx, lazy_perf_test,
13401339 logging .configure_logging (rt .runtime ().site_config )
13411340 runner = make_runner ()
13421341 testcases = executors .generate_testcases ([lazy_perf_test ])
1343- runner .runall ( testcases )
1342+ _assert_no_logging_error ( runner .runall , testcases )
13441343
13451344 logfile = tmp_path / 'perflogs' / 'generic' / 'default' / '_LazyPerfTest.log'
13461345 assert os .path .exists (logfile )
@@ -1352,9 +1351,9 @@ def test_perf_logging_all_attrs(make_runner, make_exec_ctx, perf_test,
13521351 logging .configure_logging (rt .runtime ().site_config )
13531352 runner = make_runner ()
13541353 testcases = executors .generate_testcases ([perf_test ])
1355- runner .runall ( testcases )
1354+ _assert_no_logging_error ( runner .runall , testcases )
13561355
1357- logfile = tmp_path / 'perflogs' / 'generic' / 'default' / '_MyTest .log'
1356+ logfile = tmp_path / 'perflogs' / 'generic' / 'default' / '_MyPerfTest .log'
13581357 assert os .path .exists (logfile )
13591358 with open (logfile ) as fp :
13601359 header = fp .readline ()
@@ -1363,14 +1362,41 @@ def test_perf_logging_all_attrs(make_runner, make_exec_ctx, perf_test,
13631362 assert len (header .split ('|' )) == len (loggable_attrs ) + 1
13641363
13651364
def test_perf_logging_custom_vars(make_runner, make_exec_ctx,
                                  config_perflog, tmp_path):
    '''Each test class must log only its own loggable variables.'''

    # Two subclasses, each declaring a different loggable variable
    class _X(_MyPerfTest):
        x = variable(int, value=1, loggable=True)

    class _Y(_MyPerfTest):
        y = variable(int, value=2, loggable=True)

    make_exec_ctx(config_perflog(fmt='%(check_result)s|%(check_#ALL)s'))
    logging.configure_logging(rt.runtime().site_config)
    runner = make_runner()
    testcases = executors.generate_testcases([_X(), _Y()])
    _assert_no_logging_error(runner.runall, testcases)

    # Each perflog header must contain the variable of its own class only
    perflog_dir = tmp_path / 'perflogs' / 'generic' / 'default'
    for logname, expected_var in (('_X.log', 'x'), ('_Y.log', 'y')):
        with open(perflog_dir / logname) as fp:
            header_fields = fp.readline().strip().split('|')

        assert expected_var in header_fields
13661391def test_perf_logging_param_test (make_runner , make_exec_ctx , perf_param_tests ,
13671392 config_perflog , tmp_path ):
13681393 make_exec_ctx (config_perflog (fmt = '%(check_result)s|%(check_#ALL)s' ))
13691394 logging .configure_logging (rt .runtime ().site_config )
13701395 runner = make_runner ()
13711396 testcases = executors .generate_testcases (perf_param_tests )
1372- runner .runall ( testcases )
1397+ _assert_no_logging_error ( runner .runall , testcases )
13731398
1374- logfile = tmp_path / 'perflogs' / 'generic' / 'default' / '_MyTest.log'
1399+ logfile = (tmp_path / 'perflogs' / 'generic' /
1400+ 'default' / '_MyPerfParamTest.log' )
13751401 assert os .path .exists (logfile )
13761402 assert _count_lines (logfile ) == 3
0 commit comments