#
#===------------------------------------------------------------------------===#

# The test-suite expects an executable to be produced at build time and for
# that executable to be run at test time. The result (in the form of the
# return code or the output written to stdout/stderr) is used to determine
# whether the test has succeeded. The "compile" tests are intended to exercise
# the behavior of the compiler itself. There isn't a clean way of having the
# compiler be executed at test time. Instead, the compiler is run at
# build time and the diagnostics/errors saved to a file as needed. This file is
# compared to a reference output at test time to determine success/failure of
# the test. A dummy executable is also built. This does nothing, but provides
# something that the test suite can "run" at test time.

# Creates a test from each valid test file in the current source directory.
# Each argument is a list of test files. If a test file appears in any of the
# lists, a test will not be created from it (unless the corresponding
# TEST_SUITE_FORTRAN_FORCE_* option, or TEST_SUITE_FORTRAN_FORCE_ALL_TESTS,
# is enabled).
function(add_tests UnsupportedTests UnimplementedTests SkippedTests FailingTests)
  # This will just get all the Fortran source files in the directory. The tests
  # in this directory are all single-source, so this is enough to enumerate
  # every test.
  file(GLOB AllFiles CONFIGURE_DEPENDS LIST_DIRECTORIES false
    *.f*
    *.F*
  )

  set(TestsToSkip)

  # There is still a chance that some of the unsupported tests may need to be
  # enabled, for instance if the non-standard extensions that they exercise are
  # supported due to user demand.
  if (NOT TEST_SUITE_FORTRAN_FORCE_ALL_TESTS AND
      NOT TEST_SUITE_FORTRAN_FORCE_UNSUPPORTED_TESTS)
    list(APPEND TestsToSkip ${UnsupportedTests})
  endif()

  # For the remaining tests, there is cause to build and run the skipped,
  # failing and unimplemented tests since some could be enabled once some
  # feature is implemented. Eventually, all the TEST_SUITE_FORTRAN_FORCE_*
  # options (perhaps with the exception of
  # TEST_SUITE_FORTRAN_FORCE_UNSUPPORTED_TESTS) should become redundant and be
  # removed.
  if (NOT TEST_SUITE_FORTRAN_FORCE_ALL_TESTS AND
      NOT TEST_SUITE_FORTRAN_FORCE_UNIMPLEMENTED_TESTS)
    list(APPEND TestsToSkip ${UnimplementedTests})
  endif()

  if (NOT TEST_SUITE_FORTRAN_FORCE_ALL_TESTS AND
      NOT TEST_SUITE_FORTRAN_FORCE_FAILING_TESTS)
    list(APPEND TestsToSkip ${FailingTests})
  endif()

  if (NOT TEST_SUITE_FORTRAN_FORCE_ALL_TESTS AND
      NOT TEST_SUITE_FORTRAN_FORCE_SKIPPED_TESTS)
    list(APPEND TestsToSkip ${SkippedTests})
  endif()

  # list(REMOVE_ITEM) accepts any number of values, so the entire skip list can
  # be removed in one call instead of one call per file. The guard is needed
  # because REMOVE_ITEM requires at least one <value> argument.
  if (TestsToSkip)
    list(REMOVE_ITEM AllFiles ${TestsToSkip})
  endif()

  # The program to be used to verify the results. The programs here should take
  # two files as arguments, return 0 if the files are identical, non-zero
  # otherwise.
  if (WIN32)
    find_program(DIFFPROG
      NAMES fc.exe
      REQUIRED)
  else ()
    find_program(DIFFPROG
      NAMES diff cmp
      REQUIRED)
  endif ()

  # The file prefix is needed because there are several tests with the same
  # file name across the gfortran test suite. cmake prefers all targets to be
  # unique, so they get prefixed with this.
  set(FilePrefix "gfortran-torture-compile")

  # The test suite expects to be able to run something at testing time. For the
  # compile tests, there is nothing to be run. While a better solution will be
  # to modify the test suite to allow for cases like this, as a temporary
  # measure, just create an empty executable that will be run for each test.
  set(DummySrc ${CMAKE_CURRENT_BINARY_DIR}/dummy.f90)
  set(Dummy "dummy")

  file(WRITE ${DummySrc} "program test\nend program test")
  add_executable(${Dummy} ${DummySrc})
  # At some point, the -flang-experimental-exec flag will be removed.
  target_link_options(${Dummy} PUBLIC "-flang-experimental-exec")

  # All the "compile" tests in the gfortran torture tests are expected to
  # pass. Since diagnostics are only saved on failure, the diagnostics
  # file produced when compiling the test should be empty. An empty file can,
  # therefore, be used as reference output.
  set(Reference "${FilePrefix}-empty.reference.out")
  add_custom_command(
    OUTPUT ${Reference}
    COMMAND ${CMAKE_COMMAND} -E touch ${CMAKE_CURRENT_BINARY_DIR}/${Reference}
    VERBATIM
    USES_TERMINAL
    COMMENT "Creating reference output file"
  )

  # The compile script compiles the files and may save the diagnostics to file
  # as needed (see the options that the script accepts).
  set(COMPILE_SCRIPT
    ${CMAKE_SOURCE_DIR}/Fortran/gfortran/compile-save-diags.cmake)

  foreach(File ${AllFiles})
    # Quoted so that source paths containing spaces are handled correctly.
    get_filename_component(FileName "${File}" NAME)

    set(Out ${FileName}.out)
    set(Obj ${FileName}.o)

    # ${Exe} is just used as a custom target name. Nevertheless, it needs to be
    # unique. There are multiple files with the same name but different
    # extensions in this directory. Retain the extension, but replace the
    # final '.' with an '_'.
    string(REPLACE "." "_" Exe "${FilePrefix}_${FileName}")

    # Arguments forwarded to the compile script as -D definitions.
    set(Compiler -DCOMPILER=${CMAKE_Fortran_COMPILER})
    set(CompilerFlags -DCOMPILER_FLAGS=-c)
    set(InputFiles -DINPUT_FILES=${File})
    set(ObjFile -DOBJECT_FILE=${Obj})
    set(OutputFile -DOUTPUT_FILE=${Out})
    set(AlwaysSaveDiags -DALWAYS_SAVE_DIAGS=OFF)

    add_custom_command(
      OUTPUT ${Out}
      COMMAND ${CMAKE_COMMAND} ${Compiler} ${CompilerFlags} ${InputFiles} ${ObjFile} ${OutputFile} ${AlwaysSaveDiags} -P ${COMPILE_SCRIPT}
      VERBATIM
      USES_TERMINAL
      COMMENT "Compiling ${File}")

    # The custom target forces the compile command above to run at build time;
    # it also pulls in the reference file and the dummy executable.
    add_custom_target(${Exe}
      ALL
      DEPENDS ${Out} ${Reference} ${Dummy}
      SOURCES ${File})

    # "Run" the dummy executable at test time and verify the saved diagnostics
    # against the (empty) reference output.
    llvm_test_run(EXECUTABLE %S/${Dummy})
    llvm_test_verify(${DIFFPROG} %S/${Reference} %S/${Out})
    llvm_add_test(${Exe}.test %S/${Dummy})
  endforeach()
endfunction()

# These tests are disabled because they fail, when they should pass.
file(GLOB Failing CONFIGURE_DEPENDS
  # For this, gfortran issues a warning while flang fails to compile. This is
  # potentially an "unsupported" test if the difference arises because the
  # standard allows for implementation-dependent behavior here.
  pr37236.f
)

# No tests in this directory are currently unsupported, unimplemented or
# skipped.
set(UnsupportedTests)
set(UnimplementedTests)
set(SkippedTests)
set(FailingTests ${Failing})

add_tests(
  "${UnsupportedTests}"
  "${UnimplementedTests}"
  "${SkippedTests}"
  "${FailingTests}"
)