
Commit 28c9916

Add abs_tolerance
1 parent 992af12 commit 28c9916

File tree

3 files changed: +35 -11 lines changed

regtest.py

Lines changed: 10 additions & 9 deletions
@@ -232,8 +232,8 @@ def process_comparison_results(stdout, tvars, test):
     indices = filter(lambda i: words[i] in tvars, range(len(words)))

     for i in indices:
-        _, _, rel_err = words[i: i + 3]
-        if abs(test.tolerance) <= abs(float(rel_err)):
+        _, abs_err, rel_err = words[i: i + 3]
+        if abs(test.tolerance) < abs(float(rel_err)) and test.abs_tolerance < abs(float(abs_err)):
             return False

     return True
@@ -817,16 +817,17 @@ def test_suite(argv):

                 command = f"diff {bench_file} {output_file}"

-            elif test.tolerance is not None:
+            else:

-                command = "{} --abort_if_not_all_found -n 0 -r {} {} {}".format(suite.tools["fcompare"],
-                                                                                 test.tolerance,
-                                                                                 bench_file, output_file)
+                command = "{} --abort_if_not_all_found -n 0".format(suite.tools["fcompare"])

-            else:
+                if test.tolerance is not None:
+                    command += " --rel_tol {}".format(test.tolerance)
+
+                if test.abs_tolerance is not None:
+                    command += " --abs_tol {}".format(test.abs_tolerance)

-                command = "{} --abort_if_not_all_found -n 0 {} {}".format(suite.tools["fcompare"],
-                                                                          bench_file, output_file)
+                command += " {} {}".format(bench_file, output_file)

                 sout, _, ierr = test_util.run(command,
                                               outfile=test.comparison_outfile,
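The effect of the changed check in process_comparison_results is that a variable now passes the comparison when it is within either tolerance: it only fails when its relative error exceeds test.tolerance and its absolute error exceeds test.abs_tolerance. A minimal standalone sketch of that rule (the within_tolerances helper and the sample numbers are illustrative, not part of regtest.py):

# Minimal sketch of the pass/fail rule introduced by this commit: a value
# fails only when it exceeds BOTH the relative and the absolute tolerance.
# regtest.py parses the same numbers out of fcompare's stdout instead.

def within_tolerances(abs_err, rel_err, rel_tol, abs_tol):
    """Return True if the error is acceptable under either tolerance."""
    return abs(rel_err) <= abs(rel_tol) or abs(abs_err) <= abs_tol

# Example: a round-off-level absolute error on a field whose benchmark value
# is near zero can produce a huge relative error; the absolute tolerance
# rescues it, while a genuinely large error still fails.
print(within_tolerances(abs_err=1.0e-15, rel_err=0.5,
                        rel_tol=1.0e-9, abs_tol=1.0e-12))   # True
print(within_tolerances(abs_err=1.0e-3, rel_err=0.5,
                        rel_tol=1.0e-9, abs_tol=1.0e-12))   # False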

suite.py

Lines changed: 19 additions & 1 deletion
@@ -85,6 +85,7 @@ def __init__(self, name):

         self._doComparison = True
         self._tolerance = None
+        self._abs_tolerance = None
         self._particle_tolerance = None

         self.analysisRoutine = ""
@@ -294,6 +295,20 @@ def set_tolerance(self, value):

         self._tolerance = value

+    def get_abs_tolerance(self):
+        """ Returns the global absolute tolerance if one was set,
+        and the test-specific one otherwise.
+        """
+
+        if Test.global_abs_tolerance is None:
+            return self._abs_tolerance
+        return Test.global_abs_tolerance
+
+    def set_abs_tolerance(self, value):
+        """ Sets the test-specific absolute tolerance to the specified value. """
+
+        self._abs_tolerance = value
+
     def get_particle_tolerance(self):
         """ Returns the global particle tolerance if one was set,
         and the test-specific one otherwise.
@@ -346,6 +361,7 @@ def set_runs_to_average(self, value):
     compile_only = False
     skip_comparison = False
     global_tolerance = None
+    global_abs_tolerance = None
     global_particle_tolerance = None
     performance_params = []

@@ -354,6 +370,7 @@ def set_runs_to_average(self, value):
     compileTest = property(get_compile_test, set_compile_test)
     doComparison = property(get_do_comparison, set_do_comparison)
     tolerance = property(get_tolerance, set_tolerance)
+    abs_tolerance = property(get_abs_tolerance, set_abs_tolerance)
     particle_tolerance = property(get_particle_tolerance, set_particle_tolerance)
     check_performance = property(get_check_performance, set_check_performance)
     performance_threshold = property(get_performance_threshold, set_performance_threshold)
@@ -979,7 +996,7 @@ def build_tools(self, test_list):
         if ("fextract" in self.extra_tools): ftools.append("fextract")
         if ("fextrema" in self.extra_tools): ftools.append("fextrema")
         if ("ftime" in self.extra_tools): ftools.append("ftime")
-        if any([t for t in test_list if t.tolerance is not None]): ftools.append("fvarnames")
+        if any([t for t in test_list if t.tolerance is not None or t.abs_tolerance is not None]): ftools.append("fvarnames")

         for t in ftools:
             self.log.log(f"building {t}...")
@@ -1073,6 +1090,7 @@ def apply_args(self):
         Test.compile_only = args.compile_only
         Test.skip_comparison = args.skip_comparison
         Test.global_tolerance = args.tolerance
+        Test.global_abs_tolerance = args.abs_tolerance
         Test.global_particle_tolerance = args.particle_tolerance
         Test.performance_params = args.check_performance

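The new accessors mirror the existing tolerance pair: a class-level global_abs_tolerance, filled in from the command line in apply_args, overrides the per-test _abs_tolerance whenever it is set. A minimal standalone sketch of that override pattern (FakeTest is a stand-in, not the real Test class):

# Standalone sketch of the global-overrides-per-test pattern used for
# tolerance, abs_tolerance and particle_tolerance in suite.py.

class FakeTest:
    # Set once from the command line (apply_args); None means "not given".
    global_abs_tolerance = None

    def __init__(self):
        self._abs_tolerance = None      # per-test value from the test file

    def get_abs_tolerance(self):
        if FakeTest.global_abs_tolerance is None:
            return self._abs_tolerance
        return FakeTest.global_abs_tolerance

    def set_abs_tolerance(self, value):
        self._abs_tolerance = value

    abs_tolerance = property(get_abs_tolerance, set_abs_tolerance)

t = FakeTest()
t.abs_tolerance = 1.0e-12
print(t.abs_tolerance)                   # 1e-12 (per-test value)

FakeTest.global_abs_tolerance = 1.0e-9   # e.g. a global --abs_tolerance
print(t.abs_tolerance)                   # 1e-09 (global value wins)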

test_util.py

Lines changed: 6 additions & 1 deletion
@@ -144,7 +144,10 @@
   tolerance = < floating point number representing the largest relative error
                 permitted between the run output and the benchmark for mesh data,
                 default is 0.0 >
-  particle_tolerance = < same as the above, for particle comparisons
+  abs_tolerance = < floating point number representing the largest absolute
+                    error permitted between the run output and the benchmark for
+                    mesh data, default is 0.0 >
+  particle_tolerance = < same as tolerance, for particle comparisons >
   outputFile = < explicit output file to compare with -- exactly as it will
                  be written. No prefix of the test name will be done >

@@ -374,6 +377,8 @@ def get_args(arg_string=None):
                               help="run analysis for each test without comparison to benchmarks")
     comp_options.add_argument("--tolerance", type=float, default=None, metavar="value",
                               help="largest relative error permitted during mesh comparison")
+    comp_options.add_argument("--abs_tolerance", type=float, default=None, metavar="value",
+                              help="largest absolute error permitted during mesh comparison")
     comp_options.add_argument("--particle_tolerance", type=float, default=None, metavar="value",
                               help="largest relative error permitted during particle comparison")
     parser.add_argument("input_file", metavar="input-file", type=str, nargs=1,
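Putting the pieces together, the new --abs_tolerance option only reaches fcompare (as an --abs_tol flag) when it was actually given, exactly like the relative tolerance. A self-contained sketch of that plumbing (the argparse setup mirrors the options above; the plotfile names are made up):

# Sketch of how the two mesh-comparison tolerances end up on the fcompare
# command line; this mirrors the argparse options and the command-building
# logic in regtest.py but does not call into test_util itself.
import argparse

parser = argparse.ArgumentParser()
parser.add_argument("--tolerance", type=float, default=None, metavar="value",
                    help="largest relative error permitted during mesh comparison")
parser.add_argument("--abs_tolerance", type=float, default=None, metavar="value",
                    help="largest absolute error permitted during mesh comparison")

args = parser.parse_args(["--abs_tolerance", "1.e-12"])

command = "fcompare --abort_if_not_all_found -n 0"
if args.tolerance is not None:
    command += " --rel_tol {}".format(args.tolerance)
if args.abs_tolerance is not None:
    command += " --abs_tol {}".format(args.abs_tolerance)
command += " {} {}".format("bench_plt00010", "run_plt00010")   # illustrative names

print(command)
# fcompare --abort_if_not_all_found -n 0 --abs_tol 1e-12 bench_plt00010 run_plt00010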
