diff --git a/.github/workflows/dbg_smoke.yml b/.github/workflows/dbg_smoke.yml deleted file mode 100644 index 8fb2795ce..000000000 --- a/.github/workflows/dbg_smoke.yml +++ /dev/null @@ -1,42 +0,0 @@ - -name: debug-smoke-tests - -on: [push] - -env: - BUILD_TYPE: Debug - -jobs: - build: - runs-on: ubuntu-latest - - steps: - - uses: actions/checkout@v3 - - - name: Install required packages - run: sudo apt-get install -y libnuma-dev - - - name: Configure - run: mkdir build && cd build && ../bootstrap.sh --prefix=../install --debug-build - - - name: Build - working-directory: ${{github.workspace}}/build - run: make -j4 - - - name: Install - working-directory: ${{github.workspace}}/build - run: make -j4 install - - - name: Test - working-directory: ${{github.workspace}}/build - run: make -j4 smoketests &> smoketests.log - - - name: Check - working-directory: ${{github.workspace}}/build - run: ../tests/summarise.sh smoketests.log - - - name: DumpLogOnFailure - if: failure() - working-directory: ${{github.workspace}}/build - run: cat smoketests.log - diff --git a/.github/workflows/smoke.yml b/.github/workflows/smoke.yml index 884b2f74f..49ac153ec 100644 --- a/.github/workflows/smoke.yml +++ b/.github/workflows/smoke.yml @@ -1,5 +1,5 @@ -name: release-smoke-tests +name: smoke-tests on: [push] @@ -7,9 +7,14 @@ env: BUILD_TYPE: Release jobs: - build: + build-and-test: runs-on: ubuntu-latest + strategy: + matrix: + category: ['smoke'] + build-type: ['','--debug-build'] + steps: - uses: actions/checkout@v3 @@ -17,26 +22,20 @@ jobs: run: sudo apt-get install -y libnuma-dev - name: Configure - run: mkdir build && cd build && ../bootstrap.sh --prefix=../install + run: mkdir build && cd build && ../bootstrap.sh --prefix=../install ${{ matrix.build-type }} - name: Build working-directory: ${{github.workspace}}/build - run: make -j4 + run: make -j$(nproc) && make -j$(nproc) build_tests_category_${{ matrix.category }} - name: Install working-directory: ${{github.workspace}}/build - 
run: make -j4 install + run: make -j$(nproc) install - name: Test working-directory: ${{github.workspace}}/build - run: make -j4 smoketests &> smoketests.log + run: ctest -L "mode:${{ matrix.category }}" --output-junit ${{ matrix.category }}.xml --output-on-failure || true - name: Check working-directory: ${{github.workspace}}/build - run: ../tests/summarise.sh smoketests.log - - - name: DumpLogOnFailure - if: failure() - working-directory: ${{github.workspace}}/build - run: cat smoketests.log - + run: python3 ../tools/ctest-junit-parse.py --categories ${{ matrix.category }} --xmls-dir $(pwd) --remove-successful-logs-from $(pwd)/tests diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index 368bc4c26..a6d7df3f4 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -188,7 +188,7 @@ workflow: build_test: script: - mkdir -p install build && cd ./build && ../bootstrap.sh --prefix=../install --with-datasets=${ALP_DATASETS} - && make -j$(nproc) build_tests_all + - make -j$(nproc) && make -j$(nproc) build_tests_all - *strip_symbols artifacts: paths: @@ -201,10 +201,11 @@ build_test: expire_in: 2 hours -build_tests_buildtype_debug_sym_debug: +build_tests_build_type_debug_sym_debug: script: - mkdir -p install build && cd build && cmake -DCMAKE_INSTALL_PREFIX=../install -DCMAKE_CXX_FLAGS=-D_DEBUG - -DCMAKE_C_FLAGS=-D_DEBUG -DCMAKE_BUILD_TYPE=Debug ../ && make -j$(nproc) build_tests_all + -DCMAKE_C_FLAGS=-D_DEBUG -DCMAKE_BUILD_TYPE=Debug ../ + - make -j$(nproc) && make -j$(nproc) build_tests_all build_tests_sym_debug: @@ -212,23 +213,41 @@ build_tests_sym_debug: - if: $EXTRA_TESTS_ENABLED == "yes" script: - mkdir -p install build && cd build && cmake -DCMAKE_INSTALL_PREFIX=../install -DCMAKE_CXX_FLAGS=-D_DEBUG - -DCMAKE_C_FLAGS=-D_DEBUG -DLPF_INSTALL_PATH=${LPF_PATH} -DCMAKE_BUILD_TYPE=Release ../ && make -j$(nproc) build_tests_all + -DCMAKE_C_FLAGS=-D_DEBUG -DLPF_INSTALL_PATH=${LPF_PATH} -DCMAKE_BUILD_TYPE=Release ../ + - make -j$(nproc) && make -j$(nproc) build_tests_all 
-tests_unit: - needs: [build_test] +# must specify CTEST_CATEGORY and CTEST_BACKEND to filter +.ctests_run: script: - - cd ./build && make -j$(nproc) tests_unit &> unittests.log - - ../tests/summarise.sh unittests.log + - cd build + - cmake . # re-configure to update the available resources + - | + echo "CATEGORY: ${CTEST_CATEGORY}; BACKEND: ${CTEST_BACKEND}" + - ${CMAKE_RECENT}/ctest -L "mode:${CTEST_CATEGORY}" -L "backend:${CTEST_BACKEND}" --output-junit ${CTEST_CATEGORY}.xml + --output-on-failure || true + - python3 ${CI_PROJECT_DIR}/tools/ctest-junit-parse.py --categories ${CTEST_CATEGORY} --xmls-dir $(pwd) + --remove-successful-logs-from $(pwd)/tests artifacts: - paths: [ build/*.log ] - expire_in: 1 day + when: always + name: "${CTEST_CATEGORY}_failed_tests" + paths: + - build/tests/${CTEST_CATEGORY}/output/ + - build/${CTEST_CATEGORY}.xml + reports: + junit: build/${CTEST_CATEGORY}.xml + expire_in: 1 week -tests_smoke: + +default_tests_matrix: needs: [build_test] - script: - - cd ./build && make -j$(nproc) tests_smoke &> smoketests.log - - ../tests/summarise.sh smoketests.log + variables: + CTEST_BACKEND: ".*" # match all enabled backends + parallel: + matrix: + - CTEST_CATEGORY: [unit, smoke] + extends: .ctests_run + test_installation: needs: [build_test] @@ -236,7 +255,7 @@ test_installation: - cd ./build && make -j$(nproc) install -build_test_buildtype_debug: +build_test_build_type_debug: script: - mkdir -p install build && cd ./build && ../bootstrap.sh --prefix=../install --with-datasets=${ALP_DATASETS} --debug-build && make -j$(nproc) && make -j$(nproc) build_tests_all @@ -249,17 +268,21 @@ build_test_buildtype_debug: - build/**/*.o.d expire_in: 2 hours + test_smoke_build_type_debug: - needs: [build_test_buildtype_debug] - script: - - cd ./build && make -j$(nproc) smoketests &> smoketests.log - - ../tests/summarise.sh smoketests.log + needs: [build_test_build_type_debug] + variables: + CTEST_BACKEND: ".*" # match all enabled backends + CTEST_CATEGORY: 
smoke + extends: .ctests_run + test_installation_build_type_debug: - needs: [build_test_buildtype_debug] + needs: [build_test_build_type_debug] script: - cd ./build && make -j$(nproc) install + gitleaks: image: name: "zricethezav/gitleaks:v8.0.6" @@ -282,8 +305,9 @@ tests_performance_slurm: - slurm script: - *setup_and_build_ndebug_slurm - - make -j$(nproc) performancetests |& tee performancetests.log - - ../tests/summarise.sh performancetests.log tests/performance/output/benchmarks tests/performance/output/scaling + - make -j$(nproc) build_tests_category_performance |& tee performancetests.log + - ctest -L "mode:performance" --output-junit performance.xml + --output-on-failure &> performancetests.log artifacts: paths: [ build/*.log ] expire_in: 1 month @@ -295,20 +319,20 @@ tests_performance: rules: - if: $EXTRA_TESTS_ENABLED == "yes" needs: [build_test] - script: - - cd ./build && make -j$(nproc) performancetests &> performancetests.log - - ../tests/summarise.sh performancetests.log tests/performance/output/benchmarks tests/performance/output/scaling + variables: + CTEST_BACKEND: ".*" # match all enabled backends + CTEST_CATEGORY: "performance" + extends: .ctests_run + -tests_unit_buildtype_debug: +tests_unit_build_type_debug: rules: - if: $EXTRA_TESTS_ENABLED == "yes" - needs: [build_test_buildtype_debug] - script: - - cd ./build && make -j$(nproc) unittests &> unittests.log - - ../tests/summarise.sh unittests.log - artifacts: - paths: [ build/*.log ] - expire_in: 1 day + needs: [build_test_build_type_debug] + variables: + CTEST_BACKEND: ".*" # match all enabled backends + CTEST_CATEGORY: "unit" + extends: .ctests_run ## Additional tests for LPF (on main branches only) @@ -319,7 +343,8 @@ build_test_lpf: script: # build only LPF-related tests - mkdir -p install build && cd ./build && ../bootstrap.sh --with-lpf=${LPF_PATH} --no-nonblocking --no-reference - --no-hyperdags --prefix=../install --with-datasets=${ALP_DATASETS} && make -j$(nproc) build_tests_all + 
--no-hyperdags --prefix=../install --with-datasets=${ALP_DATASETS} + - make -j$(nproc) && make -j$(nproc) build_tests_all - *strip_symbols artifacts: paths: @@ -331,15 +356,12 @@ build_test_lpf: - build/**/*.dir expire_in: 2 hours -# common sections for LPF unit tests -.tests_unit_lpf: +# common sections for LPF unit tests: must specify CTEST_CATEGORY +.tests_category_lpf: needs: [build_test_lpf] - script: - - cd ./build && make -j$(nproc) tests_unit &> unittests.log - - ../tests/summarise.sh unittests.log - artifacts: - paths: [ build/*.log ] - expire_in: 1 day + variables: + CTEST_BACKEND: "bsp1d|hybrid" # match bsp1d and hybrid backends + extends: .ctests_run # this job triggers in internal CI, where LPF tests run better on runners # with a given tag $LPF_PREFERRED_RUNNERS_TAG @@ -349,22 +371,29 @@ tests_unit_lpf_preferred: tags: - docker - $LPF_PREFERRED_RUNNERS_TAG - extends: .tests_unit_lpf + variables: + CTEST_CATEGORY: "unit" + extends: .tests_category_lpf + # if runners with a specific tag are not present, run this job # attention: it may timeout tests_unit_lpf_generic: rules: - if: $LPF_TESTS_ENABLED == "yes" && $LPF_PREFERRED_RUNNERS != "yes" - extends: .tests_unit_lpf + variables: + CTEST_CATEGORY: "unit" + extends: .tests_category_lpf + tests_smoke_lpf: rules: - if: $LPF_TESTS_ENABLED == "yes" needs: [build_test_lpf] - script: - - cd ./build && make -j$(nproc) tests_smoke &> smoketests.log - - ../tests/summarise.sh smoketests.log + variables: + CTEST_CATEGORY: "smoke" + extends: .tests_category_lpf + test_installation_lpf: rules: @@ -373,6 +402,7 @@ test_installation_lpf: script: - cd ./build && make -j$(nproc) install + ## Additional jobs to build againt multiple compilers (on main branches only) build_test_gcc_versions: @@ -394,6 +424,7 @@ build_test_gcc_versions: --with-lpf=${LPF_BASE_PATH}/build_mpich_${CC_COMPILER}_${VER}/install && make -j$(nproc) build_tests_all + # Coverage build + tests for each backend coverage_matrix: @@ -422,8 +453,8 @@ 
coverage_matrix: -DWITH_REFERENCE_BACKEND=${backends_array[1]} -DWITH_OMP_BACKEND=${backends_array[2]} -DWITH_NONBLOCKING_BACKEND=${backends_array[3]} .. - - make -j$(nproc) - - make -j$(nproc) unittests + - make -j$(nproc) build_tests_category_unit + - ${CMAKE_RECENT}/ctest -L "mode:unit" --output-on-failure || true # ignore not run tests (failing ones are detected in standard jobs) # for each job (i.e., each backend), generate a separate JSON to me merged later # (gcovr merges only JSON files) - python3 -m gcovr --json @@ -436,6 +467,7 @@ coverage_matrix: - COVERAGE_${CI_JOB_ID}.json expire_in: 4 weeks + cobertura_coverage_report: rules: - if: $COVERAGE_ENABLED == "yes" || $GENERATE_COVERAGE_PAGES == "yes" @@ -456,6 +488,7 @@ cobertura_coverage_report: coverage_format: cobertura path: coverage.xml + html_coverage_report: rules: - if: $COVERAGE_ENABLED == "yes" || $GENERATE_COVERAGE_PAGES == "yes" @@ -475,6 +508,7 @@ html_coverage_report: paths: - public + ## GitLab Pages update job pages: diff --git a/CMakeLists.txt b/CMakeLists.txt index 02c49eb37..e4351ec0f 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -288,7 +288,7 @@ if( WITH_BSP1D_BACKEND OR WITH_HYBRID_BACKEND ) include( AddLPFTargets ) endif() -# Coverage flags +### COVERAGE REPORT GENERATION if( CMAKE_BUILD_TYPE STREQUAL "Coverage") include( Coverage ) endif() @@ -324,31 +324,22 @@ endif() add_subdirectory( include ) ### BACKEND IMPLEMENTATIONS + add_subdirectory( src ) + ### TESTS and EXAMPLES # specify test categories and the directory where ALL tests are stored -set( TESTS_EXE_OUTPUT_DIR "${PROJECT_BINARY_DIR}/tests" ) -include( AddGRBTests ) +include( AddGRBExecutables ) +enable_testing() add_subdirectory( tests ) add_subdirectory( examples ) -### COVERAGE REPORT GENERATION - -if( CMAKE_BUILD_TYPE STREQUAL "Coverage") - create_coverage_command( "coverage_json" "coverage.json" "--json-pretty" ) - create_coverage_command( "coverage_cobertura" "coverage.xml" "--xml-pretty" ) - 
create_coverage_command( "coverage_csv" "coverage.csv" "--csv" ) - create_coverage_command( "coverage_coveralls" "coveralls.json" "--coveralls" ) - create_coverage_command( "coverage_html" "index.html" "--html-details" ) -endif() - - ### DOXYGEN DOCUMENTATION GENERATION set( DOCS_DIR "${PROJECT_SOURCE_DIR}/docs/developer" ) diff --git a/README.md b/README.md index 54c891dd4..587246479 100644 --- a/README.md +++ b/README.md @@ -62,7 +62,11 @@ To compile ALP, you need the following tools: 2. LibNUMA development headers 3. POSIX threads development headers 4. [CMake](https://cmake.org/download/) version 3.13 or higher, with GNU Make -(CMake's default build tool on UNIX systems) or any other supported build tool. + (CMake's default build tool on UNIX systems) or any other supported build + tool +5. [Python3](https://www.python.org/) for the testing and (optionally) coverage + infrastructure; if your distribution does not provide it, you may download + [pre-built binaries](https://github.com/indygreg/python-build-standalone/releases) ## Linking and run-time The ALP libraries link against the following libraries: @@ -100,7 +104,8 @@ For generating the code documentations: ## Code coverage -For code/test coverage, a native implementation is available using the CMake infrastructure, using `gcovr` and `gcov`/`lcov`. +For code/test coverage, a native implementation is available using the CMake +infrastructure, using `gcovr` and `gcov`/`lcov`. # Very quick start @@ -230,6 +235,8 @@ and lists technical papers. 
- [Development in ALP](#development-in-alp) - [Acknowledgements](#acknowledgements) - [Citing ALP, ALP/GraphBLAS, and ALP/Pregel](#citing-alp-alpgraphblas-and-alppregel) + - [ALP and ALP/GraphBLAS](#alp-and-alpgraphblas) + - [ALP/Pregel](#alppregel) # Configuration diff --git a/cmake/AddGRBExecutables.cmake b/cmake/AddGRBExecutables.cmake new file mode 100644 index 000000000..cbc8da2da --- /dev/null +++ b/cmake/AddGRBExecutables.cmake @@ -0,0 +1,201 @@ +# +# Copyright 2023 Huawei Technologies Co., Ltd. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +# +# functions to add GraphBLAS tests linked against the given backend +# + +# protection against double inclusion +include_guard( GLOBAL ) + +assert_valid_variables( ALL_BACKENDS AVAILABLE_TEST_BACKENDS TEST_CATEGORIES + #TESTS_EXE_OUTPUT_DIR + ALP_UTILS_LIBRARY_OUTPUT_NAME ) + + +# create variables to store tests against each backend +foreach( b ${AVAILABLE_TEST_BACKENDS} ) + define_property( GLOBAL PROPERTY tests_backend_${b} BRIEF_DOCS "${b} tests" FULL_DOCS "tests for backend ${b}" ) +endforeach() + +foreach( c ${TEST_CATEGORIES} ) + define_property( GLOBAL PROPERTY tests_category_${c} BRIEF_DOCS "${c} tests" FULL_DOCS "tests for category ${c}" ) + assert_valid_variables( MODES_${c} ) + foreach( p ${MODES_${c}} ) + assert_defined_targets( test_${p}_flags ) + assert_defined_variables( MODES_${p}_suffix ) + endforeach() +endforeach() + +# append var to the list named listName with description description +macro( append_test_to_category category test ) + set_property( GLOBAL APPEND PROPERTY tests_category_${category} "${test}" ) +endmacro() + +macro( append_test_to_backend backend test ) + set_property( GLOBAL APPEND PROPERTY tests_backend_${backend} "${test}" ) +endmacro() + + +# +# [internal!] returns the CMake target name for a test executable and the associated file name, +# implementing the naming conventions of the test suite: all functionalities using these names +# should use this function. 
+# Arguments: +# +# target_name[out]: name of the variable to store the target name +# exe_name[out]: name of the variable to store the executable name +# test_name: user's name for the test executable +# mode: mode to generate the name for +# backend_name: name of backend (can be empty) +# +# The executable name exe_name is +# test___ +# where _ and _ may be skipped if the respective strings are empty; +# the corresponding target name is "test_${exe_name}" +# +function( __make_test_name target_name exe_name test_name mode backend ) + set( file "${test_name}${MODES_${mode}_suffix}" ) + if( backend ) + string( APPEND file "-" "${backend}" ) + endif() + set( ${exe_name} "${file}" PARENT_SCOPE ) + set( ${target_name} "test_${file}" PARENT_SCOPE ) +endfunction( __make_test_name ) + + +# +# [internal!] creates a test executable target from passed information, also querying +# the mode(s) defined for the given category +# Arguments: +# +# test_prefix: name of test from the user +# backend_name: name of backend (can be empty) +# sources: source files +# libs: libraries to link (including backend) +# defs: definitions +# +# For each mode in the given category, it generates a target as +# test___ +# where _ and _ may be skipped if the respective strings are empty. 
+# Similarly, the compiled file name is called as the target, without test_ at the beginning +# +function( __add_grb_executables_with_category test_prefix backend_name sources libs defs ) + + assert_valid_variables( TEST_CATEGORY ) + if( NOT "${TEST_CATEGORY}" IN_LIST TEST_CATEGORIES ) + message( FATAL_ERROR "the category ${TEST_CATEGORY} is not among TEST_CATEGORIES: ${TEST_CATEGORIES}" ) + endif() + set( category "${TEST_CATEGORY}") + + foreach( mode ${MODES_${category}} ) + + __make_test_name( full_target_name exe_name "${test_prefix}" "${mode}" "${backend_name}" ) + if( TARGET "${full_target_name}" ) + message( FATAL_ERROR "Target \"${full_target_name}\" already exists!") + endif() + add_executable( "${full_target_name}" EXCLUDE_FROM_ALL "${sources}" ) + + set_target_properties( "${full_target_name}" PROPERTIES + OUTPUT_NAME "${exe_name}" # use the bare test name, WITHOUT "test_" at the beginning + ) + target_link_libraries( "${full_target_name}" PRIVATE "${libs}" ) + target_compile_definitions( "${full_target_name}" PRIVATE "${defs}" ) + target_link_libraries( "${full_target_name}" PRIVATE test_${mode}_flags ) + append_test_to_category( "${category}" "${full_target_name}" ) + if( backend_name ) + append_test_to_backend( "${backend_name}" "${full_target_name}" ) + endif() + endforeach() +endfunction( __add_grb_executables_with_category ) + +# +# add a GraphBLAS executable to be compiled against one or more backends: for each backend, +# it generates an executable target name test___ +# +# Syntax: +# add_grb_executables( testName source1 [source2 ...] +# BACKENDS backend1 [backend2...] +# COMPILE_DEFINITIONS def1 [def2...] +# ADDITIONAL_LINK_LIBRARIES lib1 [lib2...] 
+# ) +# +# Arguments: +# +# testName: unique name, which is used to generate the test executable target +# source1 [source2 ...]: sources to compile (at least one) +# BACKENDS backend1 [backend2...]: backends to compile the executable against (at least one) +# COMPILE_DEFINITIONS: additional compile definitions +# ADDITIONAL_LINK_LIBRARIES: additional libraries to link to each target +# +# The generated test name is also added to the list of per-backend tests, +# namely tests_backend_ and to the per-category tests lists, +# namely tests_category_. +# +# The backend name must correspond to one of the backends available in ${ALL_BACKENDS}, +# otherwise an error occurs; since not all backends may be enabled, only targets +# to be built against backends stored in ${AVAILABLE_TEST_BACKENDS} are actually built. +# +function( add_grb_executables testName ) + if( NOT testName ) + message( FATAL_ERROR "no test name specified") + endif() + + set(options "" ) + set(oneValueArgs "" ) + set(multiValueArgs + "SOURCES" + "BACKENDS" + "COMPILE_DEFINITIONS" + "ADDITIONAL_LINK_LIBRARIES" + ) + + set( args "SOURCES" "${ARGN}" ) + cmake_parse_arguments( parsed "${options}" + "${oneValueArgs}" "${multiValueArgs}" ${args} + ) + + assert_valid_variables( parsed_SOURCES parsed_BACKENDS ) + + list( LENGTH parsed_BACKENDS num_backends ) + + set_valid_string( defs "${parsed_COMPILE_DEFINITIONS}" "" ) + + if( "${parsed_BACKENDS}" STREQUAL "none" ) + list( APPEND libs "alp_utils_static" "${parsed_ADDITIONAL_LINK_LIBRARIES}" ) + __add_grb_executables_with_category( "${testName}" "" + "${parsed_SOURCES}" "${libs}" "${defs}" + ) + return() + endif() + + foreach( back ${parsed_BACKENDS} ) + if( NOT ${back} IN_LIST AVAILABLE_TEST_BACKENDS ) + continue() + endif() + if( NOT ${back} IN_LIST ALL_BACKENDS ) + message( FATAL_ERROR "no backend named ${back}; existing backends are ${ALL_BACKENDS}") + endif() + assert_defined_targets( backend_${back} ) + + set( libs "backend_${back};alp_utils_static" ) + 
append_if_valid( libs "${parsed_ADDITIONAL_LINK_LIBRARIES}" ) + + __add_grb_executables_with_category( "${testName}" "${back}" + "${parsed_SOURCES}" "${libs}" "${defs}" + ) + endforeach() +endfunction( add_grb_executables ) diff --git a/cmake/AddGRBTests.cmake b/cmake/AddGRBTests.cmake index 2d6c7b2e6..e55317c7e 100644 --- a/cmake/AddGRBTests.cmake +++ b/cmake/AddGRBTests.cmake @@ -1,5 +1,5 @@ # -# Copyright 2021 Huawei Technologies Co., Ltd. +# Copyright 2023 Huawei Technologies Co., Ltd. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -15,263 +15,279 @@ # # -# functions to add GraphBLAs tests linked against the given backend +# LOGIC TO ADD TEST CASES based on CTest; this logic adds one test per backend/mode/ +# (= processes/threads) # -# protection against double inclusion -if( DEFINED __ADDGRBTESTS_CMAKE_ ) - message( FATAL_ERROR "Please, include this file only once in a project!" ) - set( __ADDGRBTESTS_CMAKE_ TRUE CACHE INTERNAL "once-only inclusion file checker" FORCE ) -endif() +include_guard( GLOBAL ) -assert_valid_variables( ALL_BACKENDS AVAILABLE_TEST_BACKENDS TEST_CATEGORIES - #TESTS_EXE_OUTPUT_DIR - ALP_UTILS_LIBRARY_OUTPUT_NAME ) +assert_valid_variables( DATASETS_DIR ALL_BACKENDS AVAILABLE_BACKENDS TEST_CATEGORIES + TEST_RUNNER Python3_EXECUTABLE MAX_THREADS +) +### GLOBAL CONFIGURATION -# create variables to store tests against each backend -foreach( b ${AVAILABLE_TEST_BACKENDS} ) - define_property( GLOBAL PROPERTY tests_backend_${b} BRIEF_DOCS "${b} tests" FULL_DOCS "tests for backend ${b}" ) -endforeach() +# set( CTEST_PARALLEL_LEVEL ${MAX_THREADS} ) -foreach( c ${TEST_CATEGORIES} ) - define_property( GLOBAL PROPERTY tests_category_${c} BRIEF_DOCS "${c} tests" FULL_DOCS "tests for category ${c}" ) - assert_valid_variables( MODES_${c} ) - foreach( p ${MODES_${c}} ) - assert_defined_targets( test_${p}_flags ) - assert_defined_variables( MODES_${p}_suffix ) - 
endforeach() -endforeach() +set( TEST_PASSED_REGEX "Test OK" ) + +### BACKEND-SPECIFIC CONFIGURATION + +macro( setup_grb_tests_environment ) + # === MANDATORY FIELDS + set( one_value_args "CATEGORY" ) + set( multi_value_args + "BSP1D_PROCESSES" "HYBRID_PROCESSES" + "BSP1D_EXEC_SLOTS" "HYBRID_EXEC_SLOTS" + "HYBRID_THREADS" "REFERENCE_OMP_THREADS" "NONBLOCKING_THREADS" + ) + cmake_parse_arguments( parsed "" "${one_value_args}" "${multi_value_args}" "${ARGN}" ) + # === MANDATORY FIELDS + + set_if_var_valid( TEST_CATEGORY "${parsed_CATEGORY}" ) + set( TEST_OUTPUT_DIR "${CMAKE_CURRENT_BINARY_DIR}/output" ) + file( MAKE_DIRECTORY "${TEST_OUTPUT_DIR}" ) + if( ${CMAKE_VERSION} VERSION_GREATER_EQUAL "3.15.0" ) + # recent versions of CMake allow cleaning custom files/directories + # as part of the 'make clean' command; the ${TEST_OUTPUT_DIR} directory may grow + # pretty big due to all the test logs, so it makes sense to clean it as well + get_directory_property( to_clean ADDITIONAL_CLEAN_FILES ) + list( APPEND to_clean "${TEST_OUTPUT_DIR}" ) + set_directory_properties( PROPERTIES ADDITIONAL_CLEAN_FILES "${to_clean}" ) + endif() + + if( NOT CMAKE_BUILD_TYPE STREQUAL Coverage ) + set_valid_string( bsp1d_processes "${parsed_BSP1D_PROCESSES}" "1" ) + set_valid_string( hybrid_processes "${parsed_HYBRID_PROCESSES}" "1" ) + + set_valid_string( bsp1d_exec_resource "${parsed_BSP1D_EXEC_SLOTS}" "1" ) + set_valid_string( hybrid_exec_resource "${parsed_HYBRID_EXEC_SLOTS}" "1" ) + + set_valid_string( hybrid_threads "${parsed_HYBRID_THREADS}" "1" ) + set_valid_string( reference_omp_threads "${parsed_REFERENCE_OMP_THREADS}" "1" ) + set_valid_string( nonblocking_threads "${parsed_NONBLOCKING_THREADS}" "1" ) + else() + # for coverage-built binaries, successive runs of the same binary overwrite + # previous coverage information; hence it is useless to run the same binary multiple times, + # we run it just once with maximum resources; this assumes the coverage does NOT depend + # on the 
number of resources (e.g., branches depending on the number of threads); + # code paths against this assumption are expected to be very rare, + # hence negligible w.r.t. coverage + set( reference_omp_threads "${MAX_THREADS}" ) + set( nonblocking_threads "${MAX_THREADS}" ) + set( bsp1d_processes "${MAX_THREADS}" ) + + set( hybrid_processes "7" ) + math( EXPR hybrid_threads "${MAX_THREADS}/${hybrid_processes}" OUTPUT_FORMAT DECIMAL ) + endif() +endmacro() -# append var to the list named listName with description description -macro( append_test_to_category category test ) - set_property( GLOBAL APPEND PROPERTY tests_category_${category} "${test}" ) + +set( RUNNER_COMMAND ${Python3_EXECUTABLE} ${TEST_RUNNER} ) + +# sets into var the first valid value between 2 (can also be a list) +macro( __set_valid_resource2 var res1 res2 ) + set( ${var} ${res1} ) + if( NOT ${var} ) + set( ${var} ${res2} ) + endif() endmacro() -macro( append_test_to_backend backend test ) - set_property( GLOBAL APPEND PROPERTY tests_backend_${backend} "${test}" ) +# sets into var the first valid value among 3 (can also be a list) +macro( __set_valid_resource3 var res1 res2 res3 ) + set( ${var} ${res1} ) + if( NOT ${var} ) + set( ${var} ${res2} ) + endif() + if( NOT ${var} ) + set( ${var} ${res3} ) + endif() endmacro() # -# [internal!] creates a test target from passed information, also querying the mode(s) -# defined for the given category +# creates a fixture test for the validation of a test output. 
# Arguments: # -# test_prefix name of test from the user -# backend_name name of backend (mandatory, even if not used for the file and target name) -# suffix file name and category name suffix, either empty or with _ -# sources source files -# libs libraries to link (including backend) -# defs definitions -# no_perf_opt whether to exclude performance optimizations -# -# For each mode in the given category, it generates a target as -# test___ -# where _ and _ may be skipped if the respective strings are empty. -# Similarly, the compiled file name is called as the target, without test_ at the beginning +# full_test_name: name of the test +# __command: the command to run (can be any Bash command, also with piping) +# outfile: absolute path to test output file +# mainTestFixtures: variable name with list of test fixtures # -macro( __add_test_with_category test_prefix backend_name suffix sources libs defs ) - - if( NOT TEST_CATEGORY ) - message( FATAL_ERROR "variable TEST_CATEGORY not specified" ) - endif() - if( NOT "${TEST_CATEGORY}" IN_LIST TEST_CATEGORIES ) - message( FATAL_ERROR "the category ${TEST_CATEGORY} is not among TEST_CATEGORIES: ${TEST_CATEGORIES}" ) - endif() - set( category "${TEST_CATEGORY}") - - foreach( mode ${MODES_${category}} ) +# macro( __set_validation_test mainTest mainTestFixtures outfile validationTest validationExe ) +# add_test( NAME ${validationTest} COMMAND ${BASH_RUNNER} ${validationExe} ${ARGN} +macro( __set_validation_test full_test_name __command outfile mainTestFixtures ) - set( __file_name "${test_prefix}${MODES_${mode}_suffix}" ) - set( __target_name "test_${__file_name}" ) - #set( __target_name "test_${test_prefix}_${category}${MODES_${mode}_suffix}" ) - set( _suffix "${suffix}" ) - if( _suffix ) - string( APPEND __file_name "_" "${_suffix}" ) - string( APPEND __target_name "_" "${_suffix}" ) - endif() + set( __validation_test_name "${full_test_name}-validate" ) + set( __validate_out 
"${TEST_OUTPUT_DIR}/${__validation_test_name}-output.log" ) - if( TARGET "${__target_name}" ) - message( FATAL_ERROR "Target \"${__target_name}\" already exists!") - endif() - add_executable( "${__target_name}" EXCLUDE_FROM_ALL "${sources}" ) - - set_target_properties( "${__target_name}" PROPERTIES - #RUNTIME_OUTPUT_DIRECTORY "${TESTS_EXE_OUTPUT_DIR}" - OUTPUT_NAME "${__file_name}" # use the bare test name, WITHOUT "test_" at the beginning - ) - target_link_libraries( "${__target_name}" PRIVATE "${libs}" ) - target_compile_definitions( "${__target_name}" PRIVATE "${defs}" ) - target_link_libraries( "${__target_name}" PRIVATE test_${mode}_flags ) - append_test_to_category( "${category}" "${__target_name}" ) - set( __b "${backend_name}" ) - if( __b ) - append_test_to_backend( "${__b}" "${__target_name}" ) - endif() - endforeach() -endmacro( __add_test_with_category ) + string( REPLACE "@@TEST_OUTPUT_FILE@@" "${outfile}" __validate_command "${__command}" ) + add_test( NAME ${__validation_test_name} COMMAND ${BASH_RUNNER} "${outfile}" "${__validate_out}" + ${__validate_command} + WORKING_DIRECTORY "${PROJECT_BINARY_DIR}" + ) + set_tests_properties( ${__validation_test_name} PROPERTIES REQUIRED_FILES ${outfile} ) + # __validation_test_name must run after the test, i.e., as a cleanup fixture + set( validate_fixture_name "${__validation_test_name}_fixture" ) + set_tests_properties( ${__validation_test_name} PROPERTIES FIXTURES_CLEANUP "${validate_fixture_name}" ) + list( APPEND ${mainTestFixtures} "${validate_fixture_name}" ) +endmacro( __set_validation_test ) # -# add a test target whose name is test_${testName}, with the basic details -# -# Syntax: -# add_custom_test_executable( testName source1 [sources1 ...] -# COMPILE_DEFINITIONS def1 [def2...] -# LINK_LIBRARIES lib1 [lib2...] -# CATEGORIES cat1 [cat2...] 
-# ) -# -# COMPILE_DEFINITIONS: compile definitions -# LINK_LIBRARIES: libraries to link to each target -# CATEGORY: test category (mandatory) +# adds a single test, configuring all of its features (resources, file dependencies, fixtures) +# Arguments: # -# The generated test name is also added to the list of per-category -# tests, namely tests_category_ +# full_test_name: final full name of the test (for CTest) +# full_target_name: name of the executable target to run +# runner_backend: backend the test should run against (cannot be empty) +# mode: mode of the test +# num_procs: number of processes (cannot be empty) +# num_threads: number of threads (cannot be empty) +# test_OK_SUCCESS: whether to look for `Test OK` in the test output (boolean) +# required_files: files required to run the test (typically datasets) (can be empty) +# output_validate_command: command to validate the output (can be empty) # -function( add_grb_executable_custom testName ) - if( NOT testName ) - message( FATAL_ERROR "no test name specified") +function( __do_add_single_test full_test_name full_target_name runner_backend mode num_procs num_threads + parallel_processes arguments test_OK_SUCCESS required_files output_validate_command +) + set( runner_config_cmdline "--backend" "${runner_backend}" "--processes" "${num_procs}" "--threads" "${num_threads}" ) + set( exe_file "$" ) + set( stdout_file "${TEST_OUTPUT_DIR}/${full_test_name}-output.log" ) + set( ok_success_test ) + if ( test_OK_SUCCESS ) + set( ok_success_test "--success-string" "${TEST_PASSED_REGEX}" ) endif() - set( options "" ) - set( oneValueArgs "CATEGORY" ) - set( multiValueArgs - "SOURCES" - "COMPILE_DEFINITIONS" - "LINK_LIBRARIES" - ) + if( parallel_processes ) + set( parallel_opts "--parallel-instance" "${parallel_processes}" ) + endif() + # if( arguments ) + # set( __args "--args" ${arguments} ) + # endif() - cmake_parse_arguments(parsed "${options}" - "${oneValueArgs}" "${multiValueArgs}" "SOURCES;${ARGN}" + add_test( 
NAME ${full_test_name} COMMAND ${RUNNER_COMMAND} ${runner_config_cmdline} ${ok_success_test} + "--output" ${stdout_file} ${parallel_opts} ${exe_file} ${arguments} + WORKING_DIRECTORY "${PROJECT_BINARY_DIR}" ) + set_tests_properties( ${full_test_name} PROPERTIES RESOURCE_LOCK "${${runner_backend}_exec_resource}" ) - if( NOT parsed_SOURCES ) - message( FATAL_ERROR "no sources specified") + if( required_files ) + set_tests_properties( ${full_test_name} PROPERTIES REQUIRED_FILES "${required_files}" ) endif() - if( DEFINED parsed_CATEGORY ) - message( AUTHOR_WARNING "the flag CATEGORY is deprecated and will be removed very soon; \ -specify the category of all tests in the file via the TEST_CATEGORY variable" ) + + if( output_validate_command ) + __set_validation_test( "${full_test_name}" "${output_validate_command}" ${stdout_file} fixtures ) endif() - set_valid( libs "${parsed_LINK_LIBRARIES}" "" ) - set_valid( defs "${parsed_COMPILE_DEFINITIONS}" "" ) - __add_test_with_category( "${testName}" "" "" - "${parsed_SOURCES}" "${libs}" "${defs}" - ) -endfunction( add_grb_executable_custom ) + set_tests_properties( ${full_test_name} PROPERTIES FIXTURES_REQUIRED "${fixtures}" ) + # compute total number of used threads, to run in parallel via "ctest -j" + math( EXPR total_threads "${num_procs}*${num_threads}" OUTPUT_FORMAT DECIMAL ) + if( total_threads GREATER "${MAX_THREADS}" ) + set( total_threads "${MAX_THREADS}" ) + endif() + set_tests_properties( ${full_test_name} PROPERTIES PROCESSORS "${total_threads}" ) + set_tests_properties( ${full_test_name} PROPERTIES LABELS "mode:${mode};backend:${runner_backend}" ) +endfunction() # -# add a GraphBLAS test to be compiled against one or more backends: for each backend, -# it generates an executable target name test__ +# adds multiple tests against one or more backends: for each backend/mode/resources it generates a test # # Syntax: -# add_grb_tests( testName source1 [source2 ...] -# BACKENDS backend1 [backend2...] 
[NO_BACKEND_NAME] -# COMPILE_DEFINITIONS def1 [def2...] -# ADDITIONAL_LINK_LIBRARIES lib1 [lib2...] -# CATEGORIES cat1 [cat2...] +# add_grb_tests( test_name target_name source1 [source2 ...] +# BACKENDS backend1 [backend2 ...] +# [Test_OK_SUCCESS] +# [ARGUMENTS] arg1 [arg2 ...] +# [REQUIRED_FILES] file1 [file2 ...] +# [PROCESSES] p1 [p2 ...] +# [THREADS] t1 [t2 ...] +# [OUTPUT_VALIDATE] # ) # -# NO_BACKEND_NAME: if one only backend is selected, do not put its name -# at the end of the test name -# COMPILE_DEFINITIONS: additional compile definitions -# ADDITIONAL_LINK_LIBRARIES: additional libraries to link to each target -# -# The generated test name is also added to the list of per-backend tests, -# namely tests_backend_ and is also added to the per-category -# tests lists, namely tests_category_. +# Arguments: # -# The backend name must correspond to one of the backends available in ${ALL_BACKENDS}, -# otherwise an error occurs; since not all backends may be enabled, only targets -# to be built against backends stored in ${AVAILABLE_TEST_BACKENDS} are actually built. 
+# BACKENDS backend1 [backend2 ...]: backends to run the test against, can also be "none" +# [Test_OK_SUCCESS]: (optional) whether to look for `Test OK` in the test output (boolean) +# [ARGUMENTS] arg1 [arg2 ...]: (optional) arguments to pass to the executable +# [REQUIRED_FILES] file1 [file2 ...]: (optional) files required to run the test (e.g., datasets) +# [PROCESSES] p1 [p2 ...]: (optional) number of processes, can also be a list; if none, the default for each backend applies +# [THREADS] t1 [t2 ...]: (optional) number of threads, can also be a list; if none, the default for each backend applies +# [OUTPUT_VALIDATE] : (optional) Bash command to validate the output; 0 return code means success # -function( add_grb_executables testName ) - if( NOT testName ) - message( FATAL_ERROR "no test name specified") - endif() - - set(options "NO_BACKEND_NAME" ) - set(oneValueArgs "" ) - set(multiValueArgs - "SOURCES" - "BACKENDS" - "COMPILE_DEFINITIONS" - "ADDITIONAL_LINK_LIBRARIES" +function( add_grb_tests test_name target_name ) + assert_valid_variables( test_name ) + assert_valid_variables( target_name ) + assert_in_list( TEST_CATEGORY TEST_CATEGORIES ) + assert_valid_variables( TEST_OUTPUT_DIR ) + + set( options "Test_OK_SUCCESS" ) + set( oneValueArgs "PARALLEL_PROCESSES" ) + set( multiValueArgs "ARGUMENTS" "BACKENDS" "REQUIRED_FILES" + "PROCESSES" "THREADS" "OUTPUT_VALIDATE" ) - - set( args "SOURCES" "${ARGN}" ) cmake_parse_arguments( parsed "${options}" - "${oneValueArgs}" "${multiValueArgs}" ${args} + "${oneValueArgs}" "${multiValueArgs}" "${ARGN}" ) - assert_valid_variables( parsed_SOURCES parsed_BACKENDS ) - - list( LENGTH parsed_BACKENDS num_backends ) - if( parsed_NO_BACKEND_NAME AND ( NOT num_backends EQUAL "1" ) ) - message( FATAL_ERROR "NO_BACKEND_NAME can be used only with one backend listed") + if( NOT __modes ) + set( __modes "${MODES_${TEST_CATEGORY}}" ) endif() + assert_valid_variables( parsed_BACKENDS ) - set_valid( defs "${parsed_COMPILE_DEFINITIONS}" 
"" ) - - foreach( back ${parsed_BACKENDS} ) - if( NOT ${back} IN_LIST AVAILABLE_TEST_BACKENDS ) - continue() - endif() - if( NOT ${back} IN_LIST ALL_BACKENDS ) - message( FATAL_ERROR "no backend named ${back}; existing backends are ${ALL_BACKENDS}") - endif() - assert_defined_targets( backend_${back} ) - - set( libs "backend_${back};alp_utils_static" ) - append_if_valid( libs "${parsed_ADDITIONAL_LINK_LIBRARIES}" ) - - if( NOT parsed_NO_BACKEND_NAME ) - set( suffix "${back}" ) - endif() - - __add_test_with_category( "${testName}" "${back}" "${suffix}" - "${parsed_SOURCES}" "${libs}" "${defs}" - ) - endforeach() -endfunction( add_grb_executables ) - -# force add executable even if the test backend is not enabled -# useful for tests that produce a "golden output" for other tests; -# for one backend only -function( force_add_grb_executable testName ) - if( NOT testName ) - message( FATAL_ERROR "no test name specified") + if( DEFINED parsed_OUTPUT_VALIDATE AND NOT parsed_OUTPUT_VALIDATE ) + message( FATAL_ERROR "OUTPUT_VALIDATE defined but empty" ) endif() - set(options "" ) - set(oneValueArgs "BACKEND" ) - set(multiValueArgs - "SOURCES" - "COMPILE_DEFINITIONS" - "ADDITIONAL_LINK_LIBRARIES" - ) - - set( args "SOURCES" "${ARGN}" ) - cmake_parse_arguments( parsed "${options}" - "${oneValueArgs}" "${multiValueArgs}" ${args} - ) - - assert_valid_variables( parsed_SOURCES parsed_BACKEND ) - - set_valid( defs "${parsed_COMPILE_DEFINITIONS}" "" ) - - if( NOT "${parsed_BACKEND}" IN_LIST ALL_BACKENDS ) + # special case for "none" backend: generate variables accordingly + if( parsed_BACKENDS STREQUAL "none" ) + foreach( mode ${__modes} ) + __make_test_name( full_target_name __file_name "${target_name}" "${mode}" "" ) + assert_defined_targets( ${__target_name} ) + __set_valid_resource2( num_procs "${parsed_PROCESSES}" "1" ) + __set_valid_resource2( num_threads "${parsed_THREADS}" "1" ) + + __do_add_single_test( + 
"${test_name}${MODES_${mode}_suffix}-processes:${num_procs},threads:${num_threads}" + "${full_target_name}" + "none" + "${mode}" + "${num_procs}" + "${num_threads}" + "${parsed_PARALLEL_PROCESSES}" + "${parsed_ARGUMENTS}" + "${parsed_Test_OK_SUCCESS}" + "${parsed_REQUIRED_FILES}" + "${parsed_OUTPUT_VALIDATE}" + ) + endforeach() return() endif() - assert_defined_targets( backend_${parsed_BACKEND} ) - set( libs "backend_${parsed_BACKEND};alp_utils_static" ) - append_if_valid( libs "${parsed_ADDITIONAL_LINK_LIBRARIES}" ) - - __add_test_with_category( "${testName}" "${parsed_BACKEND}" "${parsed_BACKEND}" - "${parsed_SOURCES}" "${libs}" "${defs}" - ) -endfunction() + # generic case: all possible backends, modes, resources + foreach( backend ${parsed_BACKENDS} ) + if( NOT ${backend} IN_LIST ALL_BACKENDS ) + message( FATAL_ERROR "no backend named ${backend}; existing backends are ${ALL_BACKENDS}") + endif() + if( NOT ${backend} IN_LIST AVAILABLE_TEST_BACKENDS ) + continue() + endif() + __set_valid_resource3( __procs "${parsed_PROCESSES}" "${${backend}_processes}" "1" ) + __set_valid_resource3( __threads "${parsed_THREADS}" "${${backend}_threads}" "1" ) + foreach( mode ${__modes} ) + __make_test_name( full_target_name __exe_name "${target_name}" "${mode}" "${backend}" ) + assert_defined_targets( ${full_target_name} ) + + foreach( num_procs ${__procs} ) + foreach( num_threads ${__threads} ) + __do_add_single_test( + "${test_name}${MODES_${mode}_suffix}-${backend}-processes:${num_procs},threads:${num_threads}" + "${full_target_name}" "${backend}" "${mode}" "${num_procs}" "${num_threads}" + "${parsed_PARALLEL_PROCESSES}" "${parsed_ARGUMENTS}" "${parsed_Test_OK_SUCCESS}" + "${parsed_REQUIRED_FILES}" "${parsed_OUTPUT_VALIDATE}" + ) + endforeach() + endforeach() + endforeach() + endforeach() +endfunction( add_grb_tests ) diff --git a/cmake/CompileFlags.cmake b/cmake/CompileFlags.cmake index 4c6c1d862..179f3f229 100644 --- a/cmake/CompileFlags.cmake +++ b/cmake/CompileFlags.cmake 
@@ -225,7 +225,7 @@ macro( add_category_flags category ) ) endif() list( APPEND MODES_${category} "${__prefix}" ) - set( MODES_${__prefix}_suffix "_${parsed_MODE}" ) + set( MODES_${__prefix}_suffix "-${parsed_MODE}" ) else() set( MODES_${__prefix}_suffix "" ) endif() diff --git a/cmake/Coverage.cmake b/cmake/Coverage.cmake index e649b3060..8cd60691e 100644 --- a/cmake/Coverage.cmake +++ b/cmake/Coverage.cmake @@ -18,7 +18,7 @@ find_package( GCov REQUIRED ) find_package( Gcovr REQUIRED ) set( COVERAGE_REPORT_DIR "${PROJECT_BINARY_DIR}/coverage" ) -string( JOIN + _COVERAGE_TITLE "GraphBLAS_${VERSION}" ${AVAILABLE_TEST_BACKENDS} ) +string( JOIN + _COVERAGE_TITLE "ALP_${VERSION}" ${AVAILABLE_TEST_BACKENDS} ) file( MAKE_DIRECTORY "${COVERAGE_REPORT_DIR}" ) message( STATUS "Directory of coverage reports: ${COVERAGE_REPORT_DIR}" ) @@ -41,6 +41,12 @@ function( create_coverage_command command_name output_file output_switch ) ) endfunction() +create_coverage_command( "coverage_json" "coverage.json" "--json-pretty" ) +create_coverage_command( "coverage_cobertura" "coverage.xml" "--xml-pretty" ) +create_coverage_command( "coverage_csv" "coverage.csv" "--csv" ) +create_coverage_command( "coverage_coveralls" "coveralls.json" "--coveralls" ) +create_coverage_command( "coverage_html" "index.html" "--html-details" ) + add_custom_target( coverage_clean COMMAND find "coverage" -mindepth 1 -delete COMMAND find . 
-name "*.gcno" -delete diff --git a/cmake/Utils.cmake b/cmake/Utils.cmake index a49d88b26..84aecd75e 100644 --- a/cmake/Utils.cmake +++ b/cmake/Utils.cmake @@ -19,6 +19,27 @@ # defines various utilities used across the entire build infrastructure # +include_guard( GLOBAL ) + + +function( set_if_else var_name if_value else_value ) + cmake_parse_arguments( parsed "" "" "CONDITION" "${ARGN}" ) + if ( "${parsed_CONDITION}" ) + set( ${var_name} ${if_value} PARENT_SCOPE ) + else() + set( ${var_name} ${else_value} PARENT_SCOPE ) + endif() +endfunction() + +# set to_assign if variable is defined; otherwise error +function( set_if_var_valid to_assign variable ) + if( NOT variable ) + message( FATAL_ERROR "${variable} is not valid" ) + endif() + set ( "${to_assign}" "${variable}" PARENT_SCOPE ) +endfunction() + + # asserts that the variable named dirPathVer is set: # if it is set to a valid directory path, set it to its absolute path, if not set it to defValue # if the given path does not exist or is not a directory, raise an error @@ -76,15 +97,6 @@ macro( append_if_valid out_name ) endforeach() endmacro( append_if_valid ) -# set first string if valid, otherwise second -function( set_valid out_name first second ) - if( first ) - set( ${out_name} "${first}" PARENT_SCOPE ) - else() - set( ${out_name} "${second}" PARENT_SCOPE ) - endif() -endfunction( set_valid ) - # set first string if valid, otherwise second function( set_valid_string out_name first second ) if( first ) @@ -93,3 +105,9 @@ function( set_valid_string out_name first second ) set( ${out_name} "${second}" PARENT_SCOPE ) endif() endfunction( set_valid_string ) + +macro( assert_in_list str_name list_name ) + if( NOT "${${str_name}}" IN_LIST "${list_name}" ) + message( FATAL_ERROR "The string \"${${str_name}}\" is not among \"${${list_name}}\"" ) + endif() +endmacro() diff --git a/docs/Build_and_test_infra.md b/docs/Build_and_test_infra.md index 8e28e47cb..060420e37 100644 --- a/docs/Build_and_test_infra.md +++ 
b/docs/Build_and_test_infra.md @@ -22,7 +22,7 @@ limitations under the License. - [Direct Generation via `cmake`](#direct-generation-via-cmake) - [CMake Build Options, Types and Flags](#cmake-build-options-types-and-flags) - [Naming conventions for targets](#naming-conventions-for-targets) - - [Adding a new test](#adding-a-new-test) + - [Adding a new test executable](#adding-a-new-test-executable) - [Adding a new backend](#adding-a-new-backend) - [1. Add the related project options](#1-add-the-related-project-options) - [2. Add the backend-specific variables](#2-add-the-backend-specific-variables) @@ -165,7 +165,7 @@ As from above, a convenient way to start even for a custom build is from the the building command and start from there with the custom options. For example: -```cmake +```bash mkdir build_release cd build_release cmake -DCMAKE_INSTALL_PREFIX=/path/to/install/dir -DCMAKE_BUILD_TYPE=Release \ @@ -350,7 +350,7 @@ Other targets: standard tools are available, they are compiled into a PDF found at `/docs/code/latex/refman.pdf`. -## Adding a new test +## Adding a new test executable Test sources are split in categories, whose purpose is explained in the [Testing Infrastructure](#the-testing-infrastructure) section. 
@@ -382,7 +382,8 @@ the test source files (at least one is required) * `BACKENDS reference reference_omp bsp1d hybrid` is the list of all backends the test should be compiled against (at least one is required); for each backend, an executable target is created following the naming conventions in -[Naming conventions for targets](#naming-conventions-for-targets) +[Naming conventions for targets](#naming-conventions-for-targets); also `none` +is possible as backend name, meaning no backend is actually linked * `ADDITIONAL_LINK_LIBRARIES test_utils` (optional) lists additional libraries to link (the backend library is linked by default) * `COMPILE_DEFINITIONS MY_TEST_KEY=VALUE ANOTHER_TEST_DEFINITION` (optional) @@ -407,14 +408,6 @@ Each script is sub-divided in several sections depending on the backend that is assumed to run and on relevant options: hence, you should place your test invocation in the relevant section. -Furthermore, you can achieve more control over the test target generation, i.e., -the building of tests, by using the function `add_grb_executable_custom`, also -defined in [cmake/AddGRBTests.cmake](../cmake/AddGRBTests.cmake), which requires -to specify dependencies manually (thus, building against multiple backends needs -correspondingly multiple calls of the same function) and is therefore used only -in special cases. 
- - ## Adding a new backend Adding a new backend requires multiple changes to the building infrastructure, @@ -887,7 +880,8 @@ The coverage infrastructure prescribes additional dependencies: -- though the first method is preferable as it provides a more up-to-date version) and clearly requires * [Python3](https://www.python.org/), available in most Linux distributions - (e.g., `apt-get install python3`) or as [pre-built binary](https://github.com/indygreg/python-build-standalone/releases) + (e.g., `apt-get install python3`) or as + [pre-built binary](https://github.com/indygreg/python-build-standalone/releases) for many OSs and architectures (e.g., https://github.com/indygreg/python-build-standalone/releases/download/20230116/cpython-3.11.1+20230116-x86_64_v4-unknown-linux-gnu-install_only.tar.gz) diff --git a/docs/README.md b/docs/README.md index e60d18d52..db54c93c7 100644 --- a/docs/README.md +++ b/docs/README.md @@ -39,3 +39,4 @@ external projects: * the [guide to use ALP/GraphBLAS in your own project](Use_ALPGraphBLAS_in_your_own_project.md) * the [guide to use ALP/GraphBLAS transition path](Transition_use.md) +* technical details for the [Nonblocking backend](Nonblocking_backend.md). diff --git a/docs/Run_tests.md b/docs/Run_tests.md index 3de076ffd..d539e151b 100644 --- a/docs/Run_tests.md +++ b/docs/Run_tests.md @@ -14,31 +14,306 @@ See the License for the specific language governing permissions and limitations under the License. -The current testing infrastructure is composed of several scripts invoking the -test binaries. -Test binaries can be generated via the testing infrastructure, -as from the [related guide](Build_and_test_infra.md#adding-a-new-test). -Tests should be added to the scripts manually, invoking the appropriate launcher -and passing the dedicated options. - -# Run ALP/GraphBLAS Tests - -Tests are run via dedicated scripts in the project root, which invoke the -specific test binaries. 
-This solution deals with the complexity of testing ALP/GraphBLAS, whose -different backends require different execution targets (shared-memory and a -distributed system with an MPI or LPF launcher). - -These scripts should be invoked via the corresponding `make` targets inside the -build directory (e.g., `make unittests`): this invocation takes care of passing -the scripts the relevant parameters (location of binaries, available backends, -datasets location, output paths, ...) and, as usual, shows their output in the -`stdout`. +The current testing infrastructure is based on +[CTest](https://cmake.org/cmake/help/latest/manual/ctest.1.html), a +program companion to CMake that is able to launch tests, filter them and +report the results in great detail. CTest is usually installed together +with CMake and can be invoked in a similar way from the command line. +Therefore, it integrates smoothly with a CMake configuration/compilation +infrastructure, and allows defining test cases in the same CMake files +where CMake targets are defined. Indeed, CTest is *not* a test suite, but +rather a test *launcher*: test programs are coded independently and +compiled as usual as executables and then described to CTest as +command-line programs, hence with command-line arguments if needed. +CTest simply runs this program as-is and reports the execution result. +Due to its various features, ALP/GraphBLAS developed several +facilities to generate tests, which are explained in the following. + +# How to use CTest +The following examples show the most common options for ctest. + +From the build directory, one can simply type + +```bash +ctest +``` + +to run **all** available tests (it may take some time).
+To filter certain tests, one needs the `-R` (regex) option, which runs any +test matching the given regex, e.g.: + +```bash +ctest -R mxv +``` + +runs all tests whose name contains `mxv`, while + +```bash +ctest -R "mxv.*processes:1" +``` + +runs all tests whose name contains `mxv` *and* after `processes:1`. +CMake/CTest supports a +[regex syntax](https://cmake.org/cmake/help/latest/command/string.html#regex-specification) +very close to the UNIX Simple Regular Expressions. +To avoid running tests and only list them, e.g. to check a passed regex, +one can pass the `-N` option, e.g.: + +```bash +ctest -R "mxv.*processes:1" -N +``` + +Tests also have *labels* in order to be grouped into categories; +for example, tests in ALP/GraphBLAS have labels corresponding to +the backend they run and the category they belong to. +One can list all the labels via + +```bash +ctest --print-labels +``` + +and filter via the `-L` option: for example, + +```bash +ctest -L reference +``` + +runs all tests whose label contains "reference" (here, tests for both +`reference` and `reference_omp` backend). +As usual, one can only list matched tests via the `-N` option. +Union and intersection of conditions on test names can be achieved via the +regular expression; for example: + +```bash +ctest -R "buildMatrix|mxv" -N +``` + +lists all tests that contain either `buildMatrix` *or* `mxv`, while + +```bash +ctest -R "buildMatrix.*processes:1" -N +``` + +lists all tests with `buildMatrix` *and* `processes:1` in the name, in this +specific order. +For labels, union can be achieved in the same way, while, starting from CMake +3.21, intersection can be achieved via repeated usage of the `-L` option: + +```bash +ctest -L 'mode:unit' -L 'backend:reference$' -N +``` + +lists all tests for the unit category *and* for the reference backend +(reference_omp is excluded -- notice the POSIX string terminator `$` at the +end). 
+The [official documentation](https://cmake.org/cmake/help/latest/manual/ctest.1.html#label-matching) +contains more information about this topic. + +Instead, if `-R` and `-L` are used simultaneously, the *intersection* is achieved; for example: + +```bash +ctest -L backend:reference_omp -R buildMatrix -N +``` + +lists all tests for the reference_omp backend whose name contains `buildMatrix`. +The `-U` flag instead achieves the *union* of results for this specific options +combination. + +A few noticeable options also control the output: + +* `-Q` suppresses any output +* `-O ` redirects the output to a file +* `-V` and `-VV` enable more (much more) output from tests run +* `--output-junit ` (from version 3.21) produces an XML output in JUnit + format, which can thus be interpreted by many common tools and platforms + (e.g., GitHub, GitLab, ...) + +The complete synopsis is available at the +[official website](https://cmake.org/cmake/help/latest/manual/ctest.1.html#id16). + +# Add ALP/GraphBLAS Tests +Dedicated facilities are present to add ALP/GraphBLAS tests easily, following +the same philosophy of the facilities to +[add new test executables](Build_and_test_infra.md#adding-a-new-test-executable). + +Since a test requires an executable to run, its directive must be added after +the `add_grb_executables()` directive to create the test executable; it can be +added in the same CMake file, and it is an encouraged practice to do so that +one can immediately see how tests are built and run, and changes to one +directive can immediately be applied to the other. 
+ +Using an example from +[the CMake file for unit tests](../tests/unit/CMakeLists.txt), the test +executable for various backends is created via + + +```cmake +add_grb_executables( clearMatrix clearMatrix.cpp + BACKENDS reference reference_omp bsp1d hybrid hyperdags nonblocking +) +``` + +where the first argument `clearMatrix` is the base name, from which the CMake +targets for each backend and mode are created and so are the corresponding +executable names (see +[Naming conventions for targets](Build_and_test_infra.md#naming-conventions-for-targets)). + +Starting from this base name, one can define tests for these executables as +follows: + +```cmake +add_grb_tests( clearMatrix clearMatrix + BACKENDS reference reference_omp bsp1d hybrid hyperdags nonblocking + ARGUMENTS 10000000 + Test_OK_SUCCESS +) +``` + +where: + +1. `add_grb_tests` is the command to define one or multiple CTest's +2. the first `clearMatrix` argument is the base name of the test, which is used + to generate one or more tests according to the + [Test generation and naming conventions](#test-generation-and-naming-conventions) +3. the second `clearMatrix` argument is the base name of the CMake target, i.e. + the first argument of the previous `add_grb_executables()` command generating + the compilation target(s); note that the first and second argument of + `add_grb_tests` are not mandated to match, and one may define multiple tests + (hence with different test base names - first argument) for the same CMake + target `clearMatrix` (second argument) +4. `BACKENDS reference ...` is the list of backends to generate the tests for; + much like for `add_grb_executables()`, this generates one test per backend + with appropriate naming and labels +5. `ARGUMENTS 10000000` is the list of arguments to pass to the executable for + testing; it can be omitted, in which case the executable is called with no + argument +6. 
`Test_OK_SUCCESS` instructs CTest to check the execution output for the + string `Test OK`, a common convention in ALP/GraphBLAS + +Further non-mandatory options, not exemplified above, are: + +* `PROCESSES p1 [p2 ...]`: lists the number of processes to run the test with + (if allowed by the backend) +* `THREADS t1 [t2 ...]`: lists the number of threads to run the test with (if + allowed by the backend) +* `OUTPUT_VALIDATE ` : is a Bash command to validate the output, + where a `0` return code means successful validation (failure otherwise); since + the output (stdout/stderr) of a test is stored in a file, this command can + access it via the `@@TEST_OUTPUT_FILE@@` placeholder, which is automatically + replaced with the absolute path of the file storing stdout/stderr + + +# Test generation and naming conventions +As for `add_grb_executables()`, also CTest's are generated according to certain +naming conventions. +The structure is + +```cmake +---processes:-threads: +``` + +which makes test runtime information explicit. +Note that each backend has a predefined list of `` and +`` to generate tests for, which the user can override via the +above-mentioned `PROCESSES` and `THREADS` options. +If any of them is not specified, the backend-dependent options are used and tests +for all possible configurations are generated, i.e., for the cartesian product +of `PROCESSES` and `THREADS` (wherever this information comes from). +These rules apply to all backends, hence the command + +```cmake +add_grb_tests( clearMatrix clearMatrix + BACKENDS reference reference_omp bsp1d hybrid hyperdags nonblocking + ARGUMENTS 10000000 + Test_OK_SUCCESS + PROCESSES 1 2 + THREADS 3 4 +) +``` + +generates 24 tests, corresponding to the cartesian products of `BACKENDS` x +`PROCESSES` x `THREADS`. +This means that, for example, the four different tests with the "reference" +(i.e., sequential) backend run exactly the same way, as this backend ignores any +number of `PROCESSES` or `THREADS`.
+Hence, care should be used when overriding default values for `PROCESSES` or +`THREADS`, and one should make sure that all listed backends allow specifying +those resources. +The above example could therefore be modified as: + +```cmake +add_grb_tests( clearMatrix clearMatrix + BACKENDS reference hyperdags + ARGUMENTS 10000000 + Test_OK_SUCCESS +) + +add_grb_tests( clearMatrix clearMatrix + BACKENDS reference_omp nonblocking + ARGUMENTS 10000000 + Test_OK_SUCCESS + THREADS 3 4 +) + +add_grb_tests( clearMatrix clearMatrix + BACKENDS bsp1d + ARGUMENTS 10000000 + Test_OK_SUCCESS + PROCESSES 1 2 +) + +add_grb_tests( clearMatrix clearMatrix + BACKENDS hybrid + ARGUMENTS 10000000 + Test_OK_SUCCESS + PROCESSES 1 2 + THREADS 3 4 +) +``` + + +# Internals As from the +[specification](https://cmake.org/cmake/help/latest/command/add_test.html#command:add_test), +CTest essentially runs a command-line program and checks certain configurable +conditions after the command returns (return code, output, ...); as such, it has +no notion of "backend" or "launcher". 
+ALP/GraphBLAS then internally generates a command that creates the proper +environment to run the command by using a Python3-based test launcher, which +needs options to run the executable, all coming directly from the CMake/CTest +infrastructure: + +* backend name; this can be any of ALP/GraphBLAS backends, or "none" for + standard executables +* number of processes (single value) +* number of threads (single value) +* absolute path of file to redirect stdout and stderr to +* whether to look for a success string +* path of test executable and (optionally) its arguments + +This launcher is generated during CMake configuration into +`/tests/grb_test_runner.py` and can also be used manually; +for more details about its options, it can be invoked as + +```bash +python3 /tests/grb_test_runner.py --help +``` + +Since this launcher acts as "intermediate" between CTest and the executable +being run, its messages are also visible in the CTest log, while the executable +output is forwarded to a file and thus not visible in the CTest log. +However, in case of test failure, the launcher also prints this output on +stdout, making it directly available on the CTest log. +In any case, the launcher prints the file path it is redirecting to. + + +# Testing environment + +For reproducibility of tests, the section [Reproducible Builds section](Build_and_test_infra.md#reproducible-builds), -Docker images can be built to have a reproducible environment for building and -testing. +describes how dedicated Docker images can be built to have a reproducible +environment for building and testing. These images store all tools, dependencies and input datasets to build and run all backends and tests; you may refer to the [section](Build_and_test_infra.md#reproducible-builds) for more details. 
diff --git a/include/graphblas/bsp1d/benchmark.hpp b/include/graphblas/bsp1d/benchmark.hpp index c68495cd0..0f363da12 100644 --- a/include/graphblas/bsp1d/benchmark.hpp +++ b/include/graphblas/bsp1d/benchmark.hpp @@ -235,12 +235,6 @@ namespace grb { const size_t inner, const size_t outer, const bool broadcast = false ) const { - static_assert( - mode != AUTOMATIC || - std::is_default_constructible< U >::value, - "The output type U should be default-constructible when using automatic " - "mode launchers." - ); // check input arguments if( in_size > 0 && data_in == nullptr ) { return grb::ILLEGAL; @@ -281,21 +275,6 @@ namespace grb { const size_t inner, const size_t outer, const bool broadcast = false ) const { - static_assert( - mode != AUTOMATIC || - std::is_default_constructible< U >::value, - "The output type U should be default-constructible when using automatic " - "mode launchers." - ); - if( - mode == AUTOMATIC && broadcast == false && - !std::is_default_constructible< T >::value - ) { - std::cerr << "Error: input type of an ALP function must be " - "default-constructible when using automatic mode benchmarkers without " - "broadcasting.\n"; - return grb::ILLEGAL; - } return pack_and_run< T, U, false >( reinterpret_cast< lpf_func_t >( alp_program ), &data_in, sizeof( T ), &data_out, diff --git a/include/graphblas/bsp1d/exec.hpp b/include/graphblas/bsp1d/exec.hpp index 0d415d636..1e8838294 100644 --- a/include/graphblas/bsp1d/exec.hpp +++ b/include/graphblas/bsp1d/exec.hpp @@ -415,14 +415,19 @@ namespace grb { P > 1 ) { std::cerr << "Error: cannot locally construct input type (typeid name \"" - << typeid(T).name() << "\"for an ALP program that is launched " + << typeid(T).name() << "\") for an ALP program that is launched " << "in automatic mode, with broadcasting, and using more than one user" << "one user process.\n" << "Additionally, this error should have been caught prior to the " << "attempted launch of the ALP program-- please submit a bug report." 
<< std::endl; - assert( false ); - return; + // if this condition triggers, we must abort execution and signal an error + // condition to the caller of Launcher.exec(), because her function could + // not be run; however, LPF has no principled way to propagate error + // conditions and we must resort to this exception, which LPF catches, + // then reporting to the caller that an error occurred; hence, + // DO NOT REMOVE THIS EXCEPTION! + throw std::logic_error( "Error: cannot locally construct input data" ); } lpf_coll_t coll; @@ -617,6 +622,12 @@ namespace grb { const size_t in_size, U * const data_out ) const { + static_assert( + mode != AUTOMATIC || + std::is_default_constructible< U >::value, + "The output type U should be default-constructible when using " + "automatic mode launchers." + ); // construct LPF I/O args lpf_args_t args = { data_in, in_size, @@ -737,24 +748,11 @@ namespace grb { U &data_out, const bool broadcast = false ) { - static_assert( - mode != AUTOMATIC || - std::is_default_constructible< U >::value, - "The output type U should be default-constructible when using automatic " - "mode launchers." + return pack_data_and_run< T, U, false >( + reinterpret_cast< lpf_func_t >( alp_program ), + &data_in, sizeof( T ), + &data_out, broadcast ); - if( - mode == AUTOMATIC && broadcast == false && - !std::is_default_constructible< T >::value - ) { - return grb::ILLEGAL; - } else { - return pack_data_and_run< T, U, false >( - reinterpret_cast< lpf_func_t >( alp_program ), - &data_in, sizeof( T ), - &data_out, broadcast - ); - } } /** @@ -786,12 +784,6 @@ namespace grb { U &data_out, const bool broadcast = false ) { - static_assert( - mode != AUTOMATIC || - std::is_default_constructible< U >::value, - "The output type U should be default-constructible when using automatic " - "mode launchers."
- ); return pack_data_and_run< void, U, true >( reinterpret_cast< lpf_func_t >( alp_program ), data_in, in_size, &data_out, broadcast diff --git a/tests/CMakeLists.txt b/tests/CMakeLists.txt index d90ca5cdb..e3ed8449d 100644 --- a/tests/CMakeLists.txt +++ b/tests/CMakeLists.txt @@ -17,9 +17,56 @@ # # Main tests, which do not use the Launcher mechanism # -assert_valid_variables( ALP_UTILS_SRC_PATH AVAILABLE_TEST_BACKENDS DATASETS_DIR TESTS_EXE_OUTPUT_DIR ) +assert_valid_variables( ALP_UTILS_SRC_PATH AVAILABLE_TEST_BACKENDS DATASETS_DIR ) assert_defined_variables( GNN_DATASET_PATH WITH_BSP1D_BACKEND WITH_HYBRID_BACKEND ) +set( TEST_RUNNER "${CMAKE_CURRENT_BINARY_DIR}/grb_test_runner.py" ) +set( BASH_RUNNER "${CMAKE_CURRENT_SOURCE_DIR}/validate_run.sh" ) + +set( TESTS_EXE_OUTPUT_DIR "${PROJECT_BINARY_DIR}/tests" ) +find_package( Python3 REQUIRED COMPONENTS Interpreter ) + +include( ProcessorCount ) +ProcessorCount( MAX_THREADS ) +include( AddGRBTests ) + +set( MPI_IMPLEMENTATION "unknown" ) + +if( MPI_FOUND ) + execute_process( + COMMAND ${MPIEXEC_EXECUTABLE} -h + COMMAND grep -o -E "openmpi|OMPI|mpich" + COMMAND tail -n 1 + COMMAND tr -d '\n' + RESULT_VARIABLE error_var OUTPUT_VARIABLE matched_mpi_impl + ) + if( NOT error_var EQUAL "0" ) + message( FATAL_ERROR "cannot get MPI implementation" ) + endif() + + + if( matched_mpi_impl STREQUAL "mpich" ) + set( MPI_IMPLEMENTATION "mpich" ) + elseif( matched_mpi_impl STREQUAL "OMPI" ) + set( MPI_IMPLEMENTATION "openmpi" ) + elseif( matched_mpi_impl STREQUAL "openmpi" ) + set( MPI_IMPLEMENTATION "openmpi" ) + else() + message( FATAL_ERROR "unknown MPI implementation" ) + endif() + +endif() + +configure_file( ${CMAKE_CURRENT_SOURCE_DIR}/grb_test_runner.py.in ${TEST_RUNNER} @ONLY ) +execute_process( COMMAND ${Python3_EXECUTABLE} -OO -m py_compile ${TEST_RUNNER} + OUTPUT_QUIET ERROR_QUIET RESULT_VARIABLE python_opt +) + +if ( NOT python_opt EQUAL "0" ) + message( FATAL_ERROR "cannot compile test runner" ) +else() + message( 
"compilation of test runner successful" ) +endif() ### CONSUMABLE TARGETS ### i.e. targets with the default backend already set in the compilation interface: @@ -190,7 +237,7 @@ foreach( b ${AVAILABLE_TEST_BACKENDS} ) ) endforeach() -# a test may not belong to a backend (because of add_grb_executable_custom) +# a test may not belong to a backend # but it must belong to a category: hence, get all tests from there foreach( cat ${TEST_CATEGORIES} ) get_property( ct GLOBAL PROPERTY tests_category_${cat} ) diff --git a/tests/grb_test_runner.py.in b/tests/grb_test_runner.py.in new file mode 100644 index 000000000..a24ba866e --- /dev/null +++ b/tests/grb_test_runner.py.in @@ -0,0 +1,248 @@ +# +# Copyright 2023 Huawei Technologies Co., Ltd. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +### launcher to run one or more commands in parallel and redirect the output +# to a given file; in case of error, it also informs the Gitlab CI by attaching the test log + +import os +import argparse +import sys +import shutil +import subprocess + +if sys.version_info.major < 3 or ( sys.version_info.major == 3 and sys.version_info.minor < 2 ): + print( "the Python version is too old" ) + sys.exit( 1 ) + +LPFRUN_cmake_list="@LPFRUN_CMD@" +BACKENDS_cmake_list="none;@AVAILABLE_BACKENDS@" +CMAKE_SEPARATOR=';' +LPFRUN=LPFRUN_cmake_list.split( CMAKE_SEPARATOR ) +BACKENDS=BACKENDS_cmake_list.split( CMAKE_SEPARATOR ) + +MPI_IMPLEMENTATION="@MPI_IMPLEMENTATION@" + +LPFRUN_PASSTHROUGH="-mpirun," + +def get_mpi_binding(): + return [ f"{LPFRUN_PASSTHROUGH}-bind-to", f"{LPFRUN_PASSTHROUGH}none" ] + +## the following functions return the runner command and the environment variables to set +## for each backend +def get_run_none( processes, threads ): + return ( [], None ) + +def get_run_reference( processes, threads ): + return get_run_none( processes, threads ) + +def get_run_reference_omp( processes, threads ): + return ( [], { "OMP_NUM_THREADS": str( threads ) } ) + +def get_run_nonblocking( processes, threads ): + return ( [], { "OMP_NUM_THREADS": str( threads ) } ) + +def get_run_bsp1d( processes, threads ): + return ( LPFRUN + [ "-np", str( processes ) ] + get_mpi_binding(), None ) + +def get_lpf_pass_down_var_definition( var_name, value ): + if "@MPI_IMPLEMENTATION@" == "mpich": + return [ f"{LPFRUN_PASSTHROUGH}-genv", LPFRUN_PASSTHROUGH + var_name, LPFRUN_PASSTHROUGH + value ] + else: + return [ f"{LPFRUN_PASSTHROUGH}-x", f"{LPFRUN_PASSTHROUGH}{var_name}={value}" ] + +def get_run_hybrid( processes, threads ): + proc, env = get_run_bsp1d( processes, threads ) + return ( proc + get_lpf_pass_down_var_definition( "OMP_NUM_THREADS", str( threads ) ), env ) + +def get_run_hyperdags( processes, threads ): + if "@WITH_HYPERDAGS_USING@" in BACKENDS: + f = getattr( 
sys.modules[__name__], "get_run_@WITH_HYPERDAGS_USING@" ) + return f( processes, threads ) + else: + raise Exception( "No Hyperdags backend available" ) + +get_run_command = { + "none" : get_run_none, + "reference" : get_run_none, + "reference_omp" : get_run_reference_omp, + "hyperdags" : get_run_hyperdags, + "nonblocking" : get_run_reference_omp, + "bsp1d" : get_run_bsp1d, + "hybrid" : get_run_hybrid +} + +# check whether the executable passed is an actual executable or a valid command +def is_path_exe( cmd ): + return ( not shutil.which( cmd ) is None ) + +# basic log facilities +def log( *largs ): + print( ">>", *largs ) + +def err_log( *largs ): + print( "-- ERROR:", *largs ) + sys.exit( 1 ) + +# terminate all processes in the list +def terminate_all( processes ): + for p in processes: + p.terminate() + +parser = argparse.ArgumentParser( description='ALP/GraphBLAS runner' ) +parser.add_argument( '--verbose', action='store_true' ) +parser.add_argument( '--processes', help='Number of processes, for LPF runs', type=int, default=1 ) +parser.add_argument( '--threads', help='Number of threads, for LPF and OpenMP runs', type=int, default=1 ) +parser.add_argument( '--backend', help='Backend', choices=BACKENDS, default="none" ) +parser.add_argument( '--output', help='Output file', required=True ) +parser.add_argument( '--success-string', help='Success string' ) +parser.add_argument( '--output-on-failure', action='store_true', help='Print test output on failure' ) +parser.add_argument( '--parallel-instances', help='Number of parallel instances to run', type=int, choices=range(1, 1000), required=False ) +parser.add_argument( 'cmd', nargs=argparse.REMAINDER ) + +def verb_log( *largs ): + if args.verbose: + print( ">-->", *largs ) + +args = parser.parse_args() +if len( args.cmd ) == 0: + print( "must provide a command" ) + sys.exit( 1 ) + +# get launcher and environment +grb_launcher, grb_env = get_run_command[ args.backend ]( args.processes, args.threads ) +verb_log( 
f"launcher for backend \"{args.backend}\":", grb_launcher ) +verb_log( "environment:", grb_env ) + +exe = args.cmd[ 0 ] +if not is_path_exe( exe ) : + err_log( exe, "is not an executable or a known command" ) + +__args = args.cmd[ 1: ] +arg_groups = [ __args ] if args.parallel_instances is None else [ __args + [ str( i ) ] for i in range( 0, args.parallel_instances ) ] + +runs = len( arg_groups ) +# with more commands to run in parallel, use intermediate output files (to be merged together) +output_files = [ args.output ] if runs == 1 else [ args.output + "." + str( i ) for i in range( 0, runs ) ] + +log( "current directory:", os.getcwd() ) +processes = [] +outputs = [] +# run test(s) +for i in range( 0, runs ): + output = output_files[ i ] + f = None + full_command = grb_launcher + [ exe ] + arg_groups[ i ] + # log basic information to debug and manually reproduce + log( "__environment__: ", grb_env ) + log( "__command__:", " ".join( full_command ) ) + log( "__output file__:", output ) + try: + # open output file for redirection + f = open( output, "w" ) + outputs.append( f ) + # run process asynchronously + process_handler = subprocess.Popen( full_command, stderr=subprocess.STDOUT, stdout=f, env=grb_env ) + processes.append( process_handler ) + except subprocess.SubprocessError as spe: + terminate_all( processes ) + err_log( "cannot run the command:", spe ) + except IOError as ioe: + terminate_all( processes ) + err_log( "IOError:", ioe ) + except BaseException as b: + terminate_all( processes ) + err_log( "unknown error:", b ) + +# if process tests are still running, wait for the end +# then check error code and close output file +success = True +for i in range( 0, runs ): + try: + p = processes[ i ] + p.wait() + success = success and ( p.returncode == 0 ) + if p.returncode != 0: + full_command = " ".join( grb_launcher + [ exe ] + arg_groups[ i ] ) + log( f"ERROR - command: \"{full_command}\", return code:", p.returncode ) + outputs[ i ].close() + except 
subprocess.SubprocessError as spe: + terminate_all( processes ) + err_log( "cannot wait for the command:", spe ) + except IOError as ioe: + terminate_all( processes ) + err_log( "IOError:", ioe ) + except BaseException as b: + terminate_all( processes ) + err_log( "unknown error:", b ) + +# if test(s) run successfully, check the output +if success and args.success_string is not None: + for out in output_files: + grep_command = [ "grep", args.success_string, out ] + verb_log( "running:", grep_command ) + try: + grep = subprocess.run( grep_command, stderr=subprocess.DEVNULL, stdout=subprocess.DEVNULL ) + grep_retcode = grep.returncode + except BaseException as b: + err_log( "error running grep on output file", out, ":", b) + + if grep_retcode != 0: + log( "ERROR - success string", f"\"{args.success_string}\"", "not present in", + out, "error code", grep_retcode ) + success = False + else: + log( "output check successful for", out ) + +# consolidate outputs from multiple tests into a single file +if runs > 1: + try: + log( "consolidating outputs into", args.output ) + single_out = open( args.output, 'w' ) + for out in output_files: + fout = open( out, 'r' ) + single_out.write( f"=== content of file {out} ===\n" ) + shutil.copyfileobj( fout, single_out ) + single_out.write( "\n" ) + fout.close() + log( f"deleting {out}" ) + os.remove( out ) + single_out.close() + except IOError as ioe: + err_log( "IOError:", ioe ) + except BaseException as b: + err_log( "unknown error:", b ) + +# print the log on the terminal (if selected) and print the ATTACHMENT for Gitlab CI +if not success: + log( "error running the command(s)" ) + try: + if args.output_on_failure: + f = open( args.output, 'r' ) + log( "=== content of the logfile:", args.output, '\n' ) + shutil.copyfileobj( f, sys.stdout ) + log( "=== end content of the logfile:", args.output, '\n' ) + f.close() + except BaseException as b: + err_log( "output file does not exist or an I/O error occurred:", b ) + # if we are running 
in the CI, attach test output in case of error + ci_path=os.getenv( "CI_PROJECT_DIR" ) + if ci_path is not None and len( ci_path ) > 0: + attach_path = os.path.relpath( args.output, ci_path ) + # https://docs.gitlab.com/ee/ci/testing/unit_test_reports.html#view-junit-screenshots-on-gitlab + log( f"[[ATTACHMENT|{attach_path}]]" ) + sys.exit( 1 ) + +log( "run successful" ) diff --git a/tests/parse_env.sh b/tests/parse_env.sh deleted file mode 100644 index faf9899b7..000000000 --- a/tests/parse_env.sh +++ /dev/null @@ -1,277 +0,0 @@ -# do NOT run, just source - -if [[ $_ = $0 ]]; then - echo -e "you are running me, while you should just source me" - exit -1 -fi - -SCRIPT_NAME="$0" - -# Default modes -MODES="ndebug debug" - -function print_synopsis() { - echo "SYNOPSIS: ${SCRIPT_NAME} [OPTIONS] " - echo " OPTIONS:" - echo " --help prints this help" - echo " --backends (space-separated) list of backends to run the tests against" - echo " --test-bin-dir directory storing the tests" - echo " --test-out-dir directory to store the output of tests" - echo " --input-dir directory for input datasets" - echo " --lpfexe path to LPF runner" - echo " --lpf-engine LPF engine to run against" - echo " --lpf-args additional arguments for the LPF engine" - echo " --gnn-dataset-path " - echo " --manual-run-args LPF engine arguments for manual run" - echo " --output-verification-dir directory with golden output for test verification" - echo " --test-data-dir directory with original data used for tests" - echo " --modes (space-separated) modes for the unit-tests, default: $MODES" -} - -function check_dir() { - if [[ !
-d "$1" ]]; then - echo -e "'$1' is not a directory" - exit -1 - fi -} - -cmd_args="$@" - -while test -n "$1" -do - case "$1" in - --help|-h) - print_synopsis - exit 0 - ;; - --backends) - BACKENDS=("$2") - shift 2 - ;; - --test-bin-dir) - TEST_BIN_DIR="$2" - check_dir "${TEST_BIN_DIR}" - TEST_BIN_DIR="$( cd "$2" &> /dev/null && pwd )" - shift 2 - ;; - --test-out-dir) - TEST_OUT_DIR="$2" - shift 2 - ;; - --input-dir) - INPUT_DIR="$( cd "$2" &> /dev/null && pwd )" - if [[ "$?" != "0" ]]; then - # print in yellow - echo -e "\033[1;33m>> '$2' is an invalid path: tests depending on a dataset will be skipped\033[0m" - INPUT_DIR='' - fi - shift 2 - ;; - --gnn-dataset-path) - GNN_DATASET_PATH="$2" - if [[ ! -d "${GNN_DATASET_PATH}" ]]; then - echo -e "${GNN_DATASET_PATH} is not a valid directory" - exit -1 - fi - GNN_DATASET_PATH="$(realpath -e "$2")" - shift 2 - ;; - --lpfexe) - LPFEXE="$2" - if [[ ! -x "${LPFEXE}" ]]; then - echo -e "${LPEXE} is not executable or does not exist" - exit -1 - fi - LPFEXE="$(realpath "$2")" - shift 2 - ;; - --lpf-engine) - LPF_ENGINE="$2" - shift 2 - ;; - --lpf-args) - LPF_ARGS="$2" - if [[ -z "${LPF_ARGS}" ]]; then - echo -e "--lpf-args cannot be empty" - exit -1 - fi - shift 2 - ;; - --manual-run-args) - MANUAL_RUN_ARGS="$2" - if [[ -z "${MANUAL_RUN_ARGS}" ]]; then - echo -e "--manual-run-args cannot be empty" - exit -1 - fi - shift 2 - ;; - --test-data-dir) - TEST_DATA_DIR="$(realpath "$2")" - shift 2 - ;; - --output-verification-dir) - OUTPUT_VERIFICATION_DIR="$(realpath "$2")" - shift 2 - ;; - --modes) - MODES=("$2") - shift 2 - ;; - --*) - echo -e "unknown option '$1' inside" - echo "---" - echo "${cmd_args}" - echo "---" - print_synopsis - exit -1 - ;; - *) - break - ;; - esac -done - -# some parameters are mandatory -if [[ -z "${TEST_BIN_DIR}" ]]; then - echo "no argument for --test-bin-dir" - exit 1 -fi -if [[ -z "${TEST_OUT_DIR}" ]]; then - echo "no argument for --test-out-dir" - exit 1 -fi -if [[ -z "${BACKENDS}" ]]; then - echo 
"no argument for --backends" - exit 1 -fi -if [[ -z "${MODES}" ]]; then - echo "no argument for --modes" - exit 1 -fi - -# print in green -function print_green() { - local text="$@" - echo -e "\033[1;32m"${text}"\033[0m" -} -print_green ">>> RUNNING TESTS IN ${SCRIPT_NAME} <<<" - -mkdir ${TEST_OUT_DIR} || true -TEST_OUT_DIR=$(realpath "${TEST_OUT_DIR}") - -# define LPFRUN only if it is not already defined AND LPFEXE is defined -# if none of these variables is defined, it means we are not going to run LPF benchmarks anyway -if [[ -z "${LPFRUN}" && ! -z "${LPFEXE}" ]]; then - - # command to run LPF programs - if [[ -z "${LPF_ENGINE}" ]]; then - LPFRUN="${LPFEXE} ${LPF_ARGS}" - else - LPFRUN="${LPFEXE} ${LPF_ARGS} -engine ${LPF_ENGINE}" - fi - -fi - -# similar strategy for MANUALRUN -if [[ -z "${MANUALRUN}" && ! -z "${LPFEXE}" ]]; then - MANUALRUN="${LPFRUN} ${MANUAL_RUN_ARGS}" -fi - -# in case LPF is enabled, define some additional environment variables -if [[ ! -z "${LPFRUN}" ]]; then - - # switch to pass arguments to any underlying runner - if [ -z "${LPFRUN_PASSTHROUGH}" ]; then - LPFRUN_PASSTHROUGH="-mpirun," - echo "Warning: LPFRUN_PASSTHROUGH was not set. I assumed the following: -mpirun," - fi - - # switch to pass environment variables to the underlying MPI layer - if [ -z "${MPI_PASS_ENV}" ]; then - # MPICH / Intel MPI: - MPI_PASS_ENV=${LPFRUN_PASSTHROUGH}-genv - # OpenMPI - #MPI_PASS_ENV=${LPFRUN_PASSTHROUGH}-x - # IBM Platform MPI - #MPI_PASS_ENV=${LPFRUN_PASSTHROUGH}-e - echo "Warning: MPI_PASS_ENV was not set. 
I assumed the following: ${MPI_PASS_ENV}" - fi - - # the following two variables are used by both unit and smoke tests, and hence defined here - if [[ -z ${BIND_PROCESSES_TO_HW_THREADS} ]]; then - # MPICH and OpenMPI - BIND_PROCESSES_TO_HW_THREADS="${LPFRUN_PASSTHROUGH}-bind-to ${LPFRUN_PASSTHROUGH}hwthread" - # Intel MPI - #BIND_PROCESSES_TO_HW_THREADS="${LPFRUN_PASSTHROUGH}-genv ${LPFRUN_PASSTHROUGH}I_MPI_PIN=1 ${LPFRUN_PASSTHROUGH}-genv ${LPFRUN_PASSTHROUGH}I_MPI_PIN_DOMAIN=core" - # IBM Platform MPI - #BIND_PROCESSES_TO_HW_THREADS="${LPFRUN_PASSTHROUGH}-affcycle=numa ${LPFRUN_PASSTHROUGH}-affwidth=core" - printf "Warning: BIND_PROCESSES_TO_HW_THREADS environment variable was not set. " - printf "I assumed the following: ${BIND_PROCESSES_TO_HW_THREADS}\n" - fi - if [[ -z ${BIND_PROCESSES_TO_MULTIPLE_HW_THREADS} ]]; then - # MPICH - BIND_PROCESSES_TO_MULTIPLE_HW_THREADS="${LPFRUN_PASSTHROUGH}-bind-to ${LPFRUN_PASSTHROUGH}hwthread:" - # OpenMPI - #BIND_PROCESSES_TO_MULTIPLE_HW_THREADS="${LPFRUN_PASSTHROUGH}--map-by ${LPFRUN_PASSTHROUGH}socket ${LPFRUN_PASSTHROUGH}--bind-to ${LPFRUN_PASSTHROUGH}socket ${MPI_PASS_ENV} ${LPFRUN_PASSTHROUGH}IGNORE_NUMBER_OF_THREADS=" - # Intel MPI - #BIND_PROCESSES_TO_HW_THREADS="${MPI_PASS_ENV} ${LPFRUN_PASSTHROUGH}I_MPI_PIN=1 ${MPI_PASS_ENV} ${LPFRUN_PASSTHROUGH}I_MPI_PIN_DOMAIN=" - # IBM Platform MPI - #BIND_PROCESSES_TO_HW_THREADS="${LPFRUN_PASSTHROUGH}-affcycle=numa ${LPFRUN_PASSTHROUGH}-affwidth=core ${LPFRUN_PASSTHROUGH}-affblock=" - printf "Warning: BIND_PROCESSES_TO_MULTIPLE_HW_THREADS environment variable " - printf "was not set. I assumed the following: " - echo "${BIND_PROCESSES_TO_MULTIPLE_HW_THREADS}" - fi - -fi - -if [[ -z ${MAX_THREADS} ]]; then - if ! command -v nproc &> /dev/null; then - echo "Error: nproc command does not exist while MAX_THREADS was not set." - echo "Please set MAX_THREADS explicitly and try again." 
- exit 255; - else - MAX_THREADS=`nproc --all` - echo "Info: detected ${MAX_THREADS} threads" - fi -fi - - -echo -echo "*** ENVIRONMENT ***" -echo " TEST_BIN_DIR=${TEST_BIN_DIR}" -echo " TEST_OUT_DIR=${TEST_OUT_DIR}" -echo " INPUT_DIR=${INPUT_DIR}" -echo " BACKENDS=${BACKENDS}" -echo " GNN_DATASET_PATH=${GNN_DATASET_PATH}" -echo " OUTPUT_VERIFICATION_DIR=${OUTPUT_VERIFICATION_DIR}" -echo " TEST_DATA_DIR=${TEST_DATA_DIR}" -if [[ ! -z "${LPFRUN}" ]]; then - echo " LPFRUN=${LPFRUN}" - echo " MANUALRUN=${MANUALRUN}" - echo " LPFRUN_PASSTHROUGH=${LPFRUN_PASSTHROUGH}" - echo " MPI_PASS_ENV=${MPI_PASS_ENV}" - echo " BIND_PROCESSES_TO_HW_THREADS=${BIND_PROCESSES_TO_HW_THREADS}" - echo " BIND_PROCESSES_TO_MULTIPLE_HW_THREADS=${BIND_PROCESSES_TO_MULTIPLE_HW_THREADS}" -fi -echo "*******************" -echo - -# common parameters to check - -if [[ -z "${BACKENDS}" ]]; then - echo "BACKENDS is not set!" - exit 255; -fi - -# even if some tests do not require it, just warn about this important -# environment variable if it was pre-set -if [[ ! -z ${OMP_NUM_THREADS} ]]; then - echo "Warning: OMP_NUM_THREADS was set (value was \`${OMP_NUM_THREADS}');" - echo " this value may be overwritten during testing." -fi - -if [[ ! -d ${INPUT_DIR} ]]; then - printf "Warning: INPUT_DIR does not exist. " - printf "Some tests will not run without input datasets.\n" -fi - diff --git a/tests/performance/CMakeLists.txt b/tests/performance/CMakeLists.txt index 732431691..948e3a3af 100644 --- a/tests/performance/CMakeLists.txt +++ b/tests/performance/CMakeLists.txt @@ -13,14 +13,65 @@ # See the License for the specific language governing permissions and # limitations under the License. 
# +assert_valid_variables( DATASETS_DIR MAX_THREADS ) -# write here the name of the category -# add_grb_executables and add_grb_executable_custom need this information -set( TEST_CATEGORY "performance" ) +# ======= CONFIGURATION ======= +# get number of sockets from lscpu command +execute_process( COMMAND lscpu + COMMAND grep -i "socket(s)" + COMMAND awk "{print \$2}" + RESULT_VARIABLE result + OUTPUT_VARIABLE num_sockets + OUTPUT_STRIP_TRAILING_WHITESPACE +) +# check the result makes sense +if ( NOT result STREQUAL "0" OR NOT num_sockets MATCHES "^[0-9]+$") + message( FATAL_ERROR "cannot parse number of sockets" ) +endif () + +math( EXPR _hybrid_max_threads "${MAX_THREADS}/${num_sockets}" OUTPUT_FORMAT DECIMAL ) +set( hybrid_threads "1" ) +if( "${_hybrid_max_threads}" GREATER "1" ) + set( hybrid_threads "${_hybrid_max_threads}" ) +endif() + +# Setup the environment for the tests in this directory +setup_grb_tests_environment( + CATEGORY "performance" + BSP1D_PROCESSES "1" + HYBRID_PROCESSES "${num_sockets}" + + HYBRID_THREADS "${hybrid_threads}" + REFERENCE_OMP_THREADS "${MAX_THREADS}" + NONBLOCKING_THREADS "${MAX_THREADS}" +) + +# Datasets to use & common variables +set( DATASETS + west0497.mtx facebook_combined.txt cit-HepTh.txt com-amazon.ungraph.txt + com-youtube.ungraph.txt cit-Patents.txt com-orkut.ungraph.txt +) +set( DATASET_MODES direct direct indirect indirect indirect indirect indirect ) +# set( DATASET_SIZES 97 4039 27770 334863 1134890 3774768 3072441 ) +list( LENGTH DATASETS __num_datasets ) +math( EXPR num_datasets "${__num_datasets}-1" ) + +set( MULTIPLICATION_DATASETS + west0497.mtx fidap037.mtx cavity17.mtx s3rmt3m3.mtx bloweybq.mtx + bcsstk17.mtx Pres_Poisson.mtx gyro_m.mtx memplus.mtx lhr34.mtx + bcsstk32.mtx vanbody.mtx s3dkt3m2.mtx G2_circuit.mtx Stanford.mtx + coPapersCiteseer.mtx bundle_adj.mtx Stanford_Berkeley.mtx apache2.mtx + Emilia_923.mtx ldoor.mtx ecology2.mtx Serena.mtx cage14.mtx G3_circuit.mtx + wikipedia-20051105.mtx 
wikipedia-20061104.mtx Freescale1.mtx + wikipedia-20070206.mtx Queen_4147.mtx cage15.mtx adaptive.mtx + rgg_n_2_24_s0.mtx uk-2002.mtx road_usa.mtx MOLIERE_2016.mtx europe_osm.mtx + twitter.mtx com-Friendster.mtx +) +set( KNN4SOLS 59 421 1138 1 32 1 609122 ) +set( KNN6SOLS 238 526 4189 1 181 1 1268035 ) -# list tests, without the CATEROR[IES,Y] keyword (it's now passed via TEST_CATEGORY) ### Kernels tests add_library( bench_kernels OBJECT bench_kernels.c bench_kernels.h ) @@ -28,71 +79,224 @@ add_library( bench_kernels_omp OBJECT bench_kernels.c bench_kernels.h ) target_compile_definitions( bench_kernels_omp PRIVATE BENCH_KERNELS_OPENMP ) add_grb_executables( fma fma.cpp $ - BACKENDS reference NO_BACKEND_NAME + BACKENDS reference ADDITIONAL_LINK_LIBRARIES "rt" ) +add_grb_tests( fma fma + ARGUMENTS 10000000 0 + Test_OK_SUCCESS + BACKENDS reference +) add_grb_executables( fma-openmp fma.cpp $ - BACKENDS reference_omp NO_BACKEND_NAME + BACKENDS reference_omp ADDITIONAL_LINK_LIBRARIES OpenMP::OpenMP_CXX "rt" ) +add_grb_tests( fma fma-openmp + ARGUMENTS 10000000 0 + Test_OK_SUCCESS + BACKENDS reference_omp +) add_grb_executables( reduce reduce.cpp $ - BACKENDS reference NO_BACKEND_NAME + BACKENDS reference +) +add_grb_tests( reduce reduce + ARGUMENTS 10000000 0 + Test_OK_SUCCESS + BACKENDS reference ) add_grb_executables( reduce-openmp reduce.cpp $ - BACKENDS reference_omp NO_BACKEND_NAME + BACKENDS reference_omp ADDITIONAL_LINK_LIBRARIES OpenMP::OpenMP_CXX ) +add_grb_tests( reduce reduce-openmp + ARGUMENTS 10000000 0 + Test_OK_SUCCESS + BACKENDS reference_omp +) add_grb_executables( dot dot.cpp $ - BACKENDS reference NO_BACKEND_NAME + BACKENDS reference ADDITIONAL_LINK_LIBRARIES backend_headers_nodefs ) +add_grb_tests( dot dot + ARGUMENTS 10000000 0 + Test_OK_SUCCESS + BACKENDS reference +) add_grb_executables( dot-openmp dot.cpp $ - BACKENDS reference_omp NO_BACKEND_NAME + BACKENDS reference_omp ADDITIONAL_LINK_LIBRARIES backend_headers_nodefs OpenMP::OpenMP_CXX ) 
+add_grb_tests( dot dot-openmp + ARGUMENTS 10000000 0 + Test_OK_SUCCESS + BACKENDS reference_omp +) -add_grb_executables( scaling scaling.cpp - ../unit/parser.cpp - BACKENDS reference reference_omp bsp1d hybrid hyperdags nonblocking +# Scaling tests +add_grb_executables( scaling scaling.cpp ../unit/parser.cpp + BACKENDS reference reference_omp bsp1d hybrid nonblocking hyperdags ) +foreach( t 1 2 3 4 ) + foreach( d 1000 1000000 10000000 ) + add_grb_tests( "scaling_${d}_${t}" scaling + ARGUMENTS ${d} ${t} 0 + Test_OK_SUCCESS + BACKENDS reference reference_omp bsp1d hybrid nonblocking #hyperdags + ) + endforeach() + + # Hyperdags only + set( d 1000 ) + add_grb_tests( "scaling_${d}_${t}" scaling + ARGUMENTS ${d} ${t} 0 + Test_OK_SUCCESS + BACKENDS hyperdags + ) +endforeach() +# KNN tests add_grb_executables( driver_knn ../smoke/knn.cpp ../unit/parser.cpp BACKENDS reference reference_omp bsp1d hybrid hyperdags nonblocking ) +foreach( i RANGE 0 ${num_datasets} ) + list( GET DATASETS ${i} dataSet ) + list( GET DATASET_MODES ${i} parseMode ) + list( GET KNN4SOLS ${i} size ) + add_grb_tests( "driver_knn.4_${dataSet}_${parseMode}" driver_knn + ARGUMENTS 4 "${DATASETS_DIR}/${dataSet}" ${parseMode} + REQUIRED_FILES "${DATASETS_DIR}/${dataSet}" + Test_OK_SUCCESS + BACKENDS reference reference_omp bsp1d hybrid hyperdags nonblocking + OUTPUT_VALIDATE grep -q "\"Neighbourhood size is ${size}\"" @@TEST_OUTPUT_FILE@@ + ) +endforeach() +# Hyperdags is skipped for kValue == 6 +foreach( i RANGE 0 ${num_datasets} ) + list( GET DATASETS ${i} dataSet ) + list( GET DATASET_MODES ${i} parseMode ) + list( GET KNN6SOLS ${i} size ) + add_grb_tests( "driver_knn.6.${dataSet}.${parseMode}" driver_knn + ARGUMENTS 6 "${DATASETS_DIR}/${dataSet}" ${parseMode} + REQUIRED_FILES "${DATASETS_DIR}/${dataSet}" + Test_OK_SUCCESS + BACKENDS reference reference_omp bsp1d hybrid nonblocking + OUTPUT_VALIDATE grep -q "Neighbourhood size is ${size}" @@TEST_OUTPUT_FILE@@ + ) +endforeach() + +# Other tests 
(simple_pagerank, label) add_grb_executables( driver_simple_pagerank ../smoke/simple_pagerank.cpp BACKENDS reference reference_omp bsp1d hybrid hyperdags nonblocking ADDITIONAL_LINK_LIBRARIES test_utils_headers ) - add_grb_executables( driver_label label.cpp ../unit/parser.cpp BACKENDS reference reference_omp bsp1d hybrid hyperdags nonblocking ADDITIONAL_LINK_LIBRARIES test_utils_headers ) +# for hyperdags backend, only smallest input for simple_pagerank and label tests +add_grb_tests( "driver_simple_pagerank_${dataSet}_${parseMode}" driver_simple_pagerank + ARGUMENTS ${DATASETS_DIR}/west0497.mtx direct + REQUIRED_FILES ${DATASETS_DIR}/west0497.mtx + Test_OK_SUCCESS + BACKENDS hyperdags +) +add_grb_tests( "driver_label_${dataSet}_${parseMode}" driver_label + ARGUMENTS ${DATASETS_DIR}/west0497.mtx direct + REQUIRED_FILES ${DATASETS_DIR}/west0497.mtx + Test_OK_SUCCESS + BACKENDS hyperdags +) +# for all other backends, test for all inputs +foreach( i RANGE 0 ${num_datasets} ) + list( GET DATASETS ${i} dataSet ) + list( GET DATASET_MODES ${i} parseMode ) + add_grb_tests( "driver_simple_pagerank_${dataSet}_${parseMode}" driver_simple_pagerank + ARGUMENTS "${DATASETS_DIR}/${dataSet}" ${parseMode} + REQUIRED_FILES "${DATASETS_DIR}/${dataSet}" + Test_OK_SUCCESS + BACKENDS reference reference_omp bsp1d hybrid nonblocking + ) + add_grb_tests( "driver_label_${dataSet}_${parseMode}" driver_label + ARGUMENTS "${DATASETS_DIR}/${dataSet}" ${parseMode} + REQUIRED_FILES "${DATASETS_DIR}/${dataSet}" + Test_OK_SUCCESS + BACKENDS reference reference_omp bsp1d hybrid nonblocking + ) +endforeach() + +# Multiplication kernels tests add_grb_executables( driver_spmv spmv.cpp BACKENDS reference reference_omp bsp1d hybrid hyperdags nonblocking ADDITIONAL_LINK_LIBRARIES test_utils_headers ) - add_grb_executables( driver_spmspv spmspv.cpp BACKENDS reference reference_omp bsp1d hybrid hyperdags nonblocking ADDITIONAL_LINK_LIBRARIES test_utils_headers ) - add_grb_executables( driver_spmspm 
spmspm.cpp - BACKENDS reference reference_omp bsp1d hybrid hyperdags nonblocking + BACKENDS reference reference_omp hyperdags nonblocking ADDITIONAL_LINK_LIBRARIES test_utils_headers ) +add_grb_tests( driver_spmv_west0497_direct driver_spmv + ARGUMENTS ${kValue} ${DATASETS_DIR}/west0497.mtx direct + REQUIRED_FILES ${DATASETS_DIR}/west0497.mtx + Test_OK_SUCCESS + BACKENDS hyperdags +) +add_grb_tests( driver_spmspv_west0497_direct driver_spmspv + ARGUMENTS ${kValue} ${DATASETS_DIR}/west0497.mtx direct + REQUIRED_FILES ${DATASETS_DIR}/west0497.mtx + Test_OK_SUCCESS + BACKENDS hyperdags +) +add_grb_tests( driver_spmspm_west0497_direct driver_spmspm + ARGUMENTS ${kValue} ${DATASETS_DIR}/west0497.mtx ${DATASETS_DIR}/west0497.mtx direct + REQUIRED_FILES ${DATASETS_DIR}/west0497.mtx + Test_OK_SUCCESS + BACKENDS hyperdags +) + +set( _index 0 ) +foreach( dataSet ${MULTIPLICATION_DATASETS} ) + add_grb_tests( "driver_spmv_${dataSet}_direct" driver_spmv + ARGUMENTS ${kValue} "${DATASETS_DIR}/${dataSet}" direct + REQUIRED_FILES "${DATASETS_DIR}/${dataSet}" + Test_OK_SUCCESS + BACKENDS reference reference_omp bsp1d hybrid nonblocking + ) + + add_grb_tests( "driver_spmspv_${dataSet}_direct" driver_spmspv + ARGUMENTS ${kValue} "${DATASETS_DIR}/${dataSet}" direct + REQUIRED_FILES "${DATASETS_DIR}/${dataSet}" + Test_OK_SUCCESS + BACKENDS reference reference_omp bsp1d hybrid nonblocking + ) + + # long-running sparse matrix--sparse matrix multiplications are disabled + if( _index LESS_EQUAL "14" ) + add_grb_tests( "driver_spmspm_${dataSet}_direct" driver_spmspm + ARGUMENTS ${kValue} ${DATASETS_DIR}/${dataSet} ${DATASETS_DIR}/${dataSet} direct + REQUIRED_FILES "${DATASETS_DIR}/${dataSet}" + Test_OK_SUCCESS + BACKENDS reference reference_omp nonblocking + ) + endif() + + # Increment the index + math( EXPR _index "${_index} + 1" ) +endforeach() + # targets to list and build the test for this category get_property( performance_tests_list GLOBAL PROPERTY tests_category_performance ) 
add_custom_target( "list_tests_category_performance" @@ -105,19 +309,10 @@ add_custom_target( "build_tests_category_performance" ) # target to run the tests in this category by calling the appropriate runner -add_custom_target( tests_performance - COMMAND ${CMAKE_CURRENT_SOURCE_DIR}/performancetests.sh - ${SCRIPTS_COMMON_ARGS} - "--test-bin-dir" "\"${CMAKE_CURRENT_BINARY_DIR}\"" - "--test-out-dir" "\"${CMAKE_CURRENT_BINARY_DIR}/output\"" - - WORKING_DIRECTORY "${PROJECT_SOURCE_DIR}" - DEPENDS build_tests_category_performance # add dependency on this target - # to automatically build before running - COMMAND_EXPAND_LISTS - USES_TERMINAL +add_custom_target( performancetests + COMMAND ${CMAKE_CTEST_COMMAND} -L "mode:performance" --output-on-failure + WORKING_DIRECTORY "${PROJECT_BINARY_DIR}" + DEPENDS build_tests_category_performance ) - -add_custom_target( performancetests DEPENDS tests_performance ) -add_custom_target( perftests DEPENDS tests_performance ) +add_custom_target( perftests DEPENDS performancetests ) diff --git a/tests/performance/performancetests.sh b/tests/performance/performancetests.sh deleted file mode 100755 index f8f198b6a..000000000 --- a/tests/performance/performancetests.sh +++ /dev/null @@ -1,519 +0,0 @@ -#!/bin/bash - -# -# Copyright 2021 Huawei Technologies Co., Ltd. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# - -#Usage: $0 (DATASET) (EXPERIMENT) -#Note that all arguments are optional. They select a subset of performance tests only. 
-#EXPERIMENT can be one of PAGERANK, KNN, LABEL, KERNEL, or SCALING. -#DATASET can be one of facebook_combined cit-HepTh com-amazon.ungraph com-youtube.ungraph cit-Patents com-orkut.ungraph - -#Example (run everything): $0 -#Example (run kernels only): $0 KERNEL -#Example (run scaling tests only): $0 SCALING -#Example (run pagerank experiment on facebook_combined): $0 facebook_combined PAGERANK -#Example (run k-NN experiment on facebook_combined): $0 facebook_combined KNN -#Example (run all non-kernel experiments on given dataset): $0 facebook_combined - -TESTS_ROOT_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )"/../ &> /dev/null && pwd )" -source ${TESTS_ROOT_DIR}/parse_env.sh - -DATASETTORUN=$1 -EXPTYPE=$2 -echo "Info: script called with the following arguments: ${DATASETTORUN} ${EXPTYPE}" - -#number of sockets of machine -if [[ -z ${NUM_SOCKETS} ]]; then - NUM_SOCKETS=`grep -i "physical id" /proc/cpuinfo | sort -u | wc -l` - echo "Info: number of sockets detected is ${NUM_SOCKETS}" -fi -if [ "${NUM_SOCKETS}" -eq "0" ]; then - echo "Warning: failed to auto-detect the number of sockets, assuming 1;" - echo " if incorrect, please set NUM_SOCKETS manually." - NUM_SOCKETS=1 -fi -echo "Info: selected number of sockets is ${NUM_SOCKETS}" - -if [[ -z ${MAX_PROCESSES} ]]; then - echo "Info: MAX_PROCESSES was not set. Will set it equal to the number of sockets." 
- MAX_PROCESSES=${NUM_SOCKETS} -fi -echo "Info: maximum number of processes is ${MAX_PROCESSES}" - -if [[ "${DATASETTORUN}" == "KERNEL" ]]; then - EXPTYPE=${DATASETTORUN} - unset DATASETTORUN -elif [[ "${DATASETTORUN}" == "SCALING" ]]; then - EXPTYPE=${DATASETTORUN} - unset DATASETTORUN -else - echo "Info: selected dataset ${DATASETTORUN}" -fi - -if [ "x${EXPTYPE}" != "x" ]; then - echo "Info: selected experiment: ${EXPTYPE}" -fi - -DATASETS=(west0497.mtx facebook_combined.txt cit-HepTh.txt com-amazon.ungraph.txt com-youtube.ungraph.txt cit-Patents.txt com-orkut.ungraph.txt) -DATASET_MODES=(direct direct indirect indirect indirect indirect indirect) -DATASET_SIZES=(497 4039 27770 334863 1134890 3774768 3072441) -KNN4SOLS=(59 421 1138 1 32 1 609122) -KNN6SOLS=(238 526 4189 1 181 1 1268035) - -#the following datasets are used for benchmarking SpMV, SpMSpV, and SpMSpM -MULTIPLICATION_DATASETS=(west0497.mtx fidap037.mtx cavity17.mtx s3rmt3m3.mtx bloweybq.mtx bcsstk17.mtx Pres_Poisson.mtx gyro_m.mtx memplus.mtx lhr34.mtx bcsstk32.mtx vanbody.mtx s3dkt3m2.mtx G2_circuit.mtx Stanford.mtx coPapersCiteseer.mtx bundle_adj.mtx Stanford_Berkeley.mtx apache2.mtx Emilia_923.mtx ldoor.mtx ecology2.mtx Serena.mtx cage14.mtx G3_circuit.mtx wikipedia-20051105.mtx wikipedia-20061104.mtx Freescale1.mtx wikipedia-20070206.mtx Queen_4147.mtx cage15.mtx adaptive.mtx rgg_n_2_24_s0.mtx uk-2002.mtx road_usa.mtx MOLIERE_2016.mtx europe_osm.mtx twitter.mtx com-Friendster.mtx) - -#which command to use to run a GraphBLAS program -LPF=yes -if [ -z "${LPFRUN}" ]; then - echo "LPFRUN is not set; corresponding performance tests will be disabled" - LPF=no -else - if [ -z "${LPFRUN_PASSTHROUGH}" ]; then - echo "Error: LPFRUN was set, but LPFRUN_PASSTHROUGH was not" - exit 255; - fi - if [ -z "${MPI_PASS_ENV}" ]; then - echo "Error: LPFRUN was set, but MPI_PASS_ENV was not" - exit 255; - fi -fi - -#binding arguments to underlying MPI layer when spawning a number of processes less than or equal to the 
number of sockets -if [ -z "${MPI_BINDING_ARGS}" ]; then - #assume MPICH-style syntax - MPI_BINDING_ARGS="${LPFRUN_PASSTHROUGH}-bind-to ${LPFRUN_PASSTHROUGH}socket" - #NOTE: OpenMPI - #MPI_BINDING_ARGS="${LPFRUN_PASSTHROUGH}--map-by ${LPFRUN_PASSTHROUGH}socket ${LPFRUN_PASSTHROUGH}--bind-to ${LPFRUN_PASSTHROUGH}socket" - #NOTE: Intel MPI - #MPI_BINDING_ARGS="${LPFRUN_PASSTHROUGH}-genv ${LPFRUN_PASSTHROUGH}I_MPI_PIN=1 ${LPFRUN_PASSTHROUGH}-genv ${LPFRUN_PASSTHROUGH}I_MPI_PIN_DOMAIN=socket ${LPFRUN_PASSTHROUGH}-genv ${LPFRUN_PASSTHROUGH}I_MPI_PIN_ORDER=spread" - #NOTE: IBM Platform MPI - #MPI_BINDING_ARGS="${LPFRUN_PASSTHROUGH}-affcycle=socket ${LPFRUN_PASSTHROUGH}-affwidth=socket" -fi -echo "Info: Using MPI_BINDING_ARGS \`\`${MPI_BINDING_ARGS}''" - -#binding arguments to underlying MPI layer when spawning multiple processes per socket -if [ -z "${MPI_OVERBINDING_ARGS}" ]; then - #Assume equal to MPI_BINDING_ARGS - MPI_OVERBINDING_ARGS=${MPI_BINDING_ARGS} - #NOTE: Intel MPI (tested) - #MPI_OVERBINDING_ARGS="${LPFRUN_PASSTHROUGH}-genv ${LPFRUN_PASSTHROUGH}I_MPI_PIN=1 ${LPFRUN_PASSTHROUGH}-genv ${LPFRUN_PASSTHROUGH}I_MPI_PIN_ORDER=spread" -fi -printf "Info: Using MPI_OVERBINDING_ARGS \`\`${MPI_OVERBINDING_ARGS}''. " -printf "The use of these bindings over MPI_BINDING_ARGS is triggered manually by defining USE_MPI_OVERBINDING, " -if [ -z ${USE_MPI_OVERBINDING+x} ]; then - printf "which is currently NOT defined.\n" -else - printf "which IS currently defined.\n" -fi - -if [ ! -z "${DATASETTORUN}" ]; then - echo "Info: dataset called is ${DATASETTORUN}" -fi -if [ ! 
-z ${EXPTYPE} ]; then - echo "Info: experiment requested is ${EXPTYPE}" -fi - -if [ -f "${TEST_OUT_DIR}/benchmarks" ]; then - echo "Warning: old benchmark summaries are deleted" - rm -f ${TEST_OUT_DIR}/benchmarks || true -fi - -if [ -f "${TEST_OUT_DIR}/scaling" ]; then - echo "Warning: old scaling summaries are deleted" - rm -f ${TEST_OUT_DIR}/scaling || true -fi - -echo " " -echo "*****************************************************************************************************" -echo " FUNCTIONAL PERFORMANCE DESCRIPTION " -echo "-----------------------------------------------------------------------------------------------------" -echo " " - -# kernel performance tests - -if [[ -z $DATASETTORUN && ( -z "$EXPTYPE" || "$EXPTYPE" == "KERNEL" ) ]]; then - - echo ">>> [ ] [x] Testing semiring axpy versus hardcoded axpy over" - echo " 10 000 000 doubles" - echo " " - ${TEST_BIN_DIR}/fma &> ${TEST_OUT_DIR}/fma 10000000 0 - head -1 ${TEST_OUT_DIR}/fma - tail -2 ${TEST_OUT_DIR}/fma - egrep 'label|Overall timings|0,' ${TEST_OUT_DIR}/fma | grep -v Outer >> ${TEST_OUT_DIR}/benchmarks - - echo ">>> [ ] [x] Testing monoid reduce versus hardcoded reduce over" - echo " 10 000 000 doubles" - echo " " - ${TEST_BIN_DIR}/reduce &> ${TEST_OUT_DIR}/reduce 10000000 0 - head -1 ${TEST_OUT_DIR}/reduce - tail -2 ${TEST_OUT_DIR}/reduce - egrep 'label|Overall timings|0,' ${TEST_OUT_DIR}/reduce | grep -v Outer >> ${TEST_OUT_DIR}/benchmarks - - echo ">>> [ ] [x] Testing semiring dot product versus its hardcoded variant" - echo " over 10 000 000 doubles" - echo " " - ${TEST_BIN_DIR}/dot &> ${TEST_OUT_DIR}/dot 10000000 0 - head -1 ${TEST_OUT_DIR}/dot - tail -2 ${TEST_OUT_DIR}/dot - egrep 'label|Overall timings|0,' ${TEST_OUT_DIR}/dot | grep -v Outer >> ${TEST_OUT_DIR}/benchmarks - - echo ">>> [ ] [x] Testing semiring axpy versus hardcoded axpy over" - echo " 10 000 000 doubles, using the OpenMP reference backend" - echo " " - ${TEST_BIN_DIR}/fma-openmp &> ${TEST_OUT_DIR}/fma-openmp 10000000 
0 - head -1 ${TEST_OUT_DIR}/fma-openmp - tail -2 ${TEST_OUT_DIR}/fma-openmp - egrep 'label|Overall timings|0,' ${TEST_OUT_DIR}/fma-openmp | grep -v Outer >> ${TEST_OUT_DIR}/benchmarks - - echo ">>> [ ] [x] Testing monoid reduce versus hardcoded reduce over" - echo " 10 000 000 doubles, using the OpenMP reference backend" - echo " " - ${TEST_BIN_DIR}/reduce-openmp &> ${TEST_OUT_DIR}/reduce-openmp 10000000 0 - head -1 ${TEST_OUT_DIR}/reduce-openmp - tail -2 ${TEST_OUT_DIR}/reduce-openmp - egrep 'label|Overall timings|0,' ${TEST_OUT_DIR}/reduce-openmp | grep -v Outer >> ${TEST_OUT_DIR}/benchmarks - - - echo ">>> [ ] [x] Testing semiring dot product versus its hardcoded variant" - echo " over 10 000 000 doubles, using the OpenMP reference backend" - echo " " - ${TEST_BIN_DIR}/dot-openmp &> ${TEST_OUT_DIR}/dot-openmp 10000000 0 - head -1 ${TEST_OUT_DIR}/dot-openmp - tail -2 ${TEST_OUT_DIR}/dot-openmp - egrep 'label|Overall timings|0,' ${TEST_OUT_DIR}/dot-openmp | grep -v Outer >> ${TEST_OUT_DIR}/benchmarks - -fi - -# start definition of helper functions for remainder performance tests - -function runScalingTest() -{ - local runner=$1 - local backend=$2 - if [ "${backend}" = "hyperdags" ]; then - local DATASETS=( 1000 ) - else - local DATASETS=( 1000 1000000 10000000 ) - fi - local TESTS=(1 2 3 4) - - for ((d=0;d<${#DATASETS[@]};++d)); - do - local DATASET=${DATASETS[d]} - for ((t=0;t<${#TESTS[@]};++t)); - do - local TEST=${TESTS[t]} - - echo ">>> [ ] [x] Benchmark level-2 kernel ${TEST} on matrices of size ${DATASET}" - echo " to gauge the weak scaling behaviour of the ${backend} backend." 
- echo - $runner ${TEST_BIN_DIR}/scaling_${backend} ${DATASET} ${TEST} 0 &> ${TEST_OUT_DIR}/scaling_${backend}_${DATASET}_${TEST}.log - head -1 ${TEST_OUT_DIR}/scaling_${backend}_${DATASET}_${TEST}.log - echo "$backend scaling on size $DATASET and test ${TEST}" >> ${TEST_OUT_DIR}/scaling - grep -A4 'Overall timings' ${TEST_OUT_DIR}/scaling_${backend}_${DATASET}_${TEST}.log >> ${TEST_OUT_DIR}/scaling - tail -2 ${TEST_OUT_DIR}/scaling_${backend}_${DATASET}_${TEST}.log | tee -a ${TEST_OUT_DIR}/scaling - done - done -} - -function runKNNBenchMarkTests() -{ - local runner=$1 - local backend=$2 - local kValue=$3 - local dataSet=$4 - local parseMode=$5 - local parseSize=$6 - local nbhSize=$7 - - echo ">>> [x] [x] Testing k-NN using ${dataSet} dataset, k=$kValue," - echo " $backend backend. Also verifies the neighbourhood" - echo " size with a ground truth value." - echo - $runner ${TEST_BIN_DIR}/driver_knn_${backend} $kValue ${INPUT_DIR}/${dataSet} ${parseMode} &> ${TEST_OUT_DIR}/driver_${kValue}nn_${backend}_${dataSet}.log - head -1 ${TEST_OUT_DIR}/driver_${kValue}nn_${backend}_${dataSet}.log - if grep -q "Neighbourhood size is ${nbhSize}" ${TEST_OUT_DIR}/driver_${kValue}nn_${backend}_${dataSet}.log; then - printf "Test OK\n\n" - echo "$backend k-hop computation for k=$kValue using the ${dataSet} dataset" >> ${TEST_OUT_DIR}/benchmarks - egrep 'Avg|Std' ${TEST_OUT_DIR}/driver_${kValue}nn_${backend}_${dataSet}.log >> ${TEST_OUT_DIR}/benchmarks - echo >> ${TEST_OUT_DIR}/benchmarks - else - printf "Test FAILED\n\n" - fi -} - -runOtherBenchMarkTests() -{ - local runner=$1 - local backend=$2 - local dataSet=$3 - local parseMode=$4 - local parseSize=$5 - local alg=$6 - - - echo ">>> [ ] [x] Testing $alg using ${dataSet} dataset, $backend backend." 
- echo - $runner ${TEST_BIN_DIR}/driver_${alg}_${backend} ${INPUT_DIR}/${dataSet} ${parseMode} &> ${TEST_OUT_DIR}/driver_${alg}_${backend}_${dataSet} - head -1 ${TEST_OUT_DIR}/driver_${alg}_${backend}_${dataSet} - if grep -q "Test OK" ${TEST_OUT_DIR}/driver_${alg}_${backend}_${dataSet}; then - printf "Test OK\n\n" - else - printf "Test FAILED\n\n" - fi - echo "$backend $alg using the ${dataSet} dataset" >> ${TEST_OUT_DIR}/benchmarks - egrep 'Avg|Std' ${TEST_OUT_DIR}/driver_${alg}_${backend}_${dataSet} >> ${TEST_OUT_DIR}/benchmarks - echo >> ${TEST_OUT_DIR}/benchmarks -} - -runMultiplicationKernels() -{ - local runner=$1 - local backend=$2 - local dataSet=$3 - local parseMode=$4 - local i=$5 - - # the check for the matrices existence is assumed to have already passed - - if [ -z "$EXPTYPE" ] || [ "$EXPTYPE" == "SPMV" ]; then - - # --------------------------------------------------------------------- - # spmv - echo ">>> [ ] [x] Testing spmv using ${dataSet} dataset, $backend backend." - echo - $runner ${TEST_BIN_DIR}/driver_spmv_${backend} ${INPUT_DIR}/${dataSet} ${parseMode} &> ${TEST_OUT_DIR}/driver_spmv_${backend}_${dataSet} - head -1 ${TEST_OUT_DIR}/driver_spmv_${backend}_${dataSet} - if grep -q "Test OK" ${TEST_OUT_DIR}/driver_spmv_${backend}_${dataSet}; then - printf "Test OK\n\n" - else - printf "Test FAILED\n\n" - fi - echo "$backend spmv using the ${dataSet} dataset" >> ${TEST_OUT_DIR}/benchmarks - egrep 'Avg|Std' ${TEST_OUT_DIR}/driver_spmv_${backend}_${dataSet} >> ${TEST_OUT_DIR}/benchmarks - echo >> ${TEST_OUT_DIR}/benchmarks - - fi - - if [ -z "$EXPTYPE" ] || [ "$EXPTYPE" == "SPMSPV" ]; then - - # --------------------------------------------------------------------- - # spmspv - echo ">>> [ ] [x] Testing spmspv using ${dataSet} dataset, $backend backend." 
- echo - $runner ${TEST_BIN_DIR}/driver_spmspv_${backend} ${INPUT_DIR}/${dataSet} ${parseMode} &> ${TEST_OUT_DIR}/driver_spmspv_${backend}_${dataSet} - head -1 ${TEST_OUT_DIR}/driver_spmspv_${backend}_${dataSet} - if grep -q "Test OK" ${TEST_OUT_DIR}/driver_spmspv_${backend}_${dataSet}; then - printf "Test OK\n\n" - else - printf "Test FAILED\n\n" - fi - echo "$backend spmspv using the ${dataSet} dataset" >> ${TEST_OUT_DIR}/benchmarks - egrep 'Avg|Std' ${TEST_OUT_DIR}/driver_spmspv_${backend}_${dataSet} >> ${TEST_OUT_DIR}/benchmarks - echo >> ${TEST_OUT_DIR}/benchmarks - - fi - - if [ -z "$EXPTYPE" ] || [ "$EXPTYPE" == "SPMSPM" ]; then - - # --------------------------------------------------------------------- - # spmspm - echo ">>> [ ] [x] Testing spmspm using ${dataSet} dataset, $backend backend." - echo - if [ "$BACKEND" = "bsp1d" ] || [ "$BACKEND" = "hybrid" ]; then - echo "Test DISABLED: no sparse level-3 operations recommended for 1D distributions." - echo " " - elif [ "$i" -gt "14" ]; then - echo "Test DISABLED: by default, long-running sparse matrix--sparse matrix multiplications are disabled (skipping dataset ${dataSet})." - echo " " - else - $runner ${TEST_BIN_DIR}/driver_spmspm_${backend} ${INPUT_DIR}/${dataSet} ${INPUT_DIR}/${dataSet} ${parseMode} &> ${TEST_OUT_DIR}/driver_spmspm_${backend}_${dataSet} - head -1 ${TEST_OUT_DIR}/driver_spmspm_${backend}_${dataSet} - if grep -q "Test OK" ${TEST_OUT_DIR}/driver_spmspm_${backend}_${dataSet}; then - printf "Test OK\n\n" - else - printf "Test FAILED\n\n" - fi - echo "$backend spmspm using the ${dataSet} dataset" >> ${TEST_OUT_DIR}/benchmarks - egrep 'Avg|Std' ${TEST_OUT_DIR}/driver_spmspm_${backend}_${dataSet} >> ${TEST_OUT_DIR}/benchmarks - echo >> ${TEST_OUT_DIR}/benchmarks - fi - fi -} - -# end helper functions - -if [ -z "$EXPTYPE" ] || ! 
[ "$EXPTYPE" == "KERNEL" ]; then - - for BACKEND in ${BACKENDS[@]}; do - - if [ "$BACKEND" = "hybrid" ]; then - P=${MAX_PROCESSES} - fi - if [ "$BACKEND" = "bsp1d" ] || [ "$BACKEND" = "hybrid" ]; then - if [ -z "${LPFRUN}" ]; then - echo "LPFRUN is not set!" - exit 255; - fi - else - P=1 # note, also for BSP1D to check for performance loss vs. reference - # BSP1D otherwise is never used for a performance test; hybrid(1D) - # should be used instead. - fi - if [ "$BACKEND" = "reference_omp" ]; then - T=${MAX_THREADS} - elif [ "$BACKEND" = "nonblocking" ]; then - T=${MAX_THREADS} - elif [ "$BACKEND" = "hybrid" ]; then - T=$((MAX_THREADS/NUM_SOCKETS)) - echo "Warning: assuming each socket will run its own user process." - echo " MAX_PROCESSES = ${MAX_PROCESSES}, NUM_SOCKETS = ${NUM_SOCKETS}" - echo " MAX_THREADS = ${MAX_THREADS}, P = ${P}, T = ${T}" - else - T=1 - fi - if [ "$BACKEND" = "bsp1d" ] || [ "$BACKEND" = "hybrid" ]; then - runner="${LPFRUN} -n ${P}" - else - runner= - fi - if [ "$BACKEND" = "hybrid" ]; then - if [ -z ${USE_MPI_OVERBINDING} ]; then - runner="${runner} ${MPI_PASS_ENV} ${LPFRUN_PASSTHROUGH}OMP_NUM_THREADS=${T} ${MPI_OVERBINDING_ARGS}" - else - runner="${runner} ${MPI_PASS_ENV} ${LPFRUN_PASSTHROUGH}OMP_NUM_THREADS=${T} ${MPI_BINDING_ARGS}" - fi - fi - if [ "$BACKEND" = "reference_omp" ] || [ "$BACKEND" = "nonblocking" ]; then - export OMP_NUM_THREADS=${T} - fi - - echo "#######################################################################" - echo "# Starting standardised performance tests for the ${BACKEND} backend" - echo "# using ${P} user processes" - echo "# using ${T} threads" - if [ "$BACKEND" = "bsp1d" ] || [ "$BACKEND" = "hybrid" ]; then - echo "# using \`\`${LPFRUN}'' for automatic launchers" - fi - if [ "x${runner}" != "x" ]; then - echo "# using runner \`\`$runner''" - fi - echo "#######################################################################" - echo " " - - # scaling performance tests - if [[ -z $DATASETTORUN && ( -z 
"$EXPTYPE" || "$EXPTYPE" == "SCALING" ) ]]; then - runScalingTest "$runner" "${BACKEND}" - fi - - for ((i=0;i<${#DATASETS[@]};++i)); - do - if [ "$BACKEND" = "hyperdags" ] && [ "$i" -gt "0" ]; then - echo "Info: hyperdags performance tests run only on the smallest dataset" - echo " " - break - fi - if [ ! -z "$DATASETTORUN" ] && [ "$DATASETTORUN" != "${DATASETS[i]}" ]; then - continue - fi - - # initialise parameters - DATASET=${DATASETS[i]} - PARSE_MODE=${DATASET_MODES[i]} - PARSE_SIZE=${DATASET_SIZES[i]} - KNN4SOL=${KNN4SOLS[i]} - KNN6SOL=${KNN6SOLS[i]} - - # test for file - if [ ! -f ${INPUT_DIR}/${DATASET} ]; then - echo ">>> [x] [x] Test algorithms using ${DATASET} dataset, ${BACKEND} backend." - echo "Tests DISABLED: dataset/${DATASET} not found. Provide the dataset to enable performance tests with it." - echo " " - continue - fi - - if [ -z "$EXPTYPE" ] || [ "$EXPTYPE" == "KNN" ]; then - - # --------------------------------------------------------------------- - # k-NN k=4 - runKNNBenchMarkTests "$runner" "$BACKEND" 4 "$DATASET" "$PARSE_MODE" "$PARSE_SIZE" "$KNN4SOL" - - if [ "$BACKEND" = "hyperdags" ]; then - echo "Info: 6-NN is skipped for the hyperdags backend" - echo " " - else - # --------------------------------------------------------------------- - # k-NN k=6 - runKNNBenchMarkTests "$runner" "$BACKEND" 6 "$DATASET" "$PARSE_MODE" "$PARSE_SIZE" "$KNN6SOL" - fi - fi - if [ -z "$EXPTYPE" ] || [ "$EXPTYPE" == "LABEL" ]; then - - # --------------------------------------------------------------------- - # label propagation - runOtherBenchMarkTests "$runner" "$BACKEND" "$DATASET" "$PARSE_MODE" "$PARSE_SIZE" "label" - - fi - if [ -z "$EXPTYPE" ] || [ "$EXPTYPE" == "PAGERANK" ]; then - - # --------------------------------------------------------------------- - # pagerank - runOtherBenchMarkTests "$runner" "$BACKEND" "$DATASET" "$PARSE_MODE" 0 "simple_pagerank" - - fi - done - - for ((i=0;i<${#MULTIPLICATION_DATASETS[@]};++i)); - do - if [ ! 
-z "$DATASETTORUN" ] && [ "$DATASETTORUN" != "${MULTIPLICATION_DATASETS[i]}" ]; then - continue - fi - - if [ "$BACKEND" = "hyperdags" ] && [ "$i" -gt "0" ]; then - echo "Info: hyperdags performance tests run only on the smallest dataset" - echo " " - break - fi - - # initialise parameters - DATASET=${MULTIPLICATION_DATASETS[i]} - PARSE_MODE=direct - - # test for file - if [ ! -f ${INPUT_DIR}/${DATASET} ]; then - echo ">>> [ ] [x] Test multiplication kernels using ${DATASET} dataset," - echo " ${BACKEND} backend." - echo "Tests DISABLED: dataset/${DATASET} not found. Provide the dataset to enable performance tests with it." - echo " " - continue - fi - - runMultiplicationKernels "$runner" "$BACKEND" "$DATASET" "$PARSE_MODE" "$i" - - done - - done - -fi - -echo "*****************************************************************************************" -echo "All benchmark tests done; see ${TEST_OUT_DIR}/benchmarks." -if [[ -z $DATASETTORUN && ( -z "$EXPTYPE" || "$EXPTYPE" == "SCALING" ) ]]; then - echo "All scaling tests done; see ${TEST_OUT_DIR}/scaling." -fi -echo " " - diff --git a/tests/smoke/CMakeLists.txt b/tests/smoke/CMakeLists.txt index 1f99446ee..d48f4325d 100644 --- a/tests/smoke/CMakeLists.txt +++ b/tests/smoke/CMakeLists.txt @@ -14,123 +14,334 @@ # limitations under the License. 
# +assert_defined_variables( GNN_DATASET_PATH ) +assert_valid_variables( DATASETS_DIR MAX_THREADS ) -# write here the name of the category -# add_grb_executables and add_grb_executable_custom need this information -set( TEST_CATEGORY "smoke" ) +# ======= CONFIGURATION ======= -add_grb_executables( manual_hook_hw manual_launcher.cpp - hook/hello_world.cpp - BACKENDS bsp1d NO_BACKEND_NAME +# Compute the number of threads for the Hybrid backend +math( EXPR _HALF_MAX_THREADS "${MAX_THREADS} / 2" OUTPUT_FORMAT DECIMAL ) +set_if_else( _hybrid_threads "2" "${_HALF_MAX_THREADS}" "${_HALF_MAX_THREADS}" LESS_EQUAL "2" ) + +# Setup the environment for the tests in this directory +setup_grb_tests_environment( + CATEGORY "smoke" + + BSP1D_PROCESSES 5 + HYBRID_PROCESSES 2 + + HYBRID_THREADS "${_hybrid_threads}" + REFERENCE_OMP_THREADS "${MAX_THREADS}" + NONBLOCKING_THREADS "${MAX_THREADS}" ) -add_grb_executables( manual_hook_grb_set manual_launcher.cpp - hook/grb_launcher.cpp hook/setvector.cpp - BACKENDS bsp1d NO_BACKEND_NAME + +# Verification files +set( OUTPUT_VERIFICATION_DIR "${CMAKE_CURRENT_SOURCE_DIR}/output_verification" ) +set( TEST_DATA_DIR "${CMAKE_CURRENT_SOURCE_DIR}/test_data" ) + +add_grb_executables( small_knn ../unit/auto_launcher.cpp + hook/knn.cpp + BACKENDS reference reference_omp bsp1d hybrid hyperdags nonblocking ) +# TODO: Remove the first argument of this test in order to fit te add_grb_tests signature +foreach( P 1 2 3 4 ) + add_grb_tests( small_knn small_knn + ARGUMENTS ${P} + Test_OK_SUCCESS + PROCESSES ${P} + BACKENDS bsp1d hybrid + ) +endforeach() -add_grb_executables( manual_hook_grb_reduce manual_launcher.cpp - hook/grb_launcher.cpp ../unit/launcher/reduce.cpp - BACKENDS bsp1d NO_BACKEND_NAME +add_grb_executables( small_pagerank ../unit/auto_launcher.cpp + hook/small_simple_pagerank.cpp + BACKENDS reference reference_omp bsp1d hybrid hyperdags nonblocking ) +add_grb_tests( small_pagerank small_pagerank + ARGUMENTS 1 + Test_OK_SUCCESS + BACKENDS 
reference reference_omp hyperdags nonblocking + OUTPUT_VALIDATE grep 'Pagerank vector local to PID 0 on exit is \( 0.106896 0.105862 0.104983 0.104235 0.1036 0.10306 0.102601 0.102211 0.0584396 0.108113 \)' @@TEST_OUTPUT_FILE@@ +) +# TODO: Remove the first parameter from the test +foreach( P 1 2 3 4 ) + add_grb_tests( small_pagerank small_pagerank + ARGUMENTS ${P} + Test_OK_SUCCESS + PROCESSES ${P} + BACKENDS bsp1d hybrid + OUTPUT_VALIDATE grep 'Pagerank vector local to PID 0 on exit is \( 0.106896 0.105862 0.104983 0.104235 0.1036 0.10306 0.102601 0.102211 0.0584396 0.108113 \)' @@TEST_OUTPUT_FILE@@ + ) +endforeach() -add_grb_executables( manual_hook_grb_dot manual_launcher.cpp - hook/grb_launcher.cpp hook/dot.cpp - BACKENDS bsp1d NO_BACKEND_NAME +add_grb_executables( simple_pagerank simple_pagerank.cpp + BACKENDS reference reference_omp bsp1d hybrid hyperdags nonblocking + ADDITIONAL_LINK_LIBRARIES test_utils_headers +) +add_grb_tests( simple_pagerank_west0497 simple_pagerank + ARGUMENTS ${DATASETS_DIR}/west0497.mtx direct 1 1 1000 verification ${OUTPUT_VERIFICATION_DIR}/pagerank_out_west0497_ref + REQUIRED_FILES ${DATASETS_DIR}/west0497.mtx ${OUTPUT_VERIFICATION_DIR}/pagerank_out_west0497_ref + Test_OK_SUCCESS + BACKENDS reference reference_omp bsp1d hybrid hyperdags nonblocking +) +add_grb_tests( simple_pagerank_facebook simple_pagerank + ARGUMENTS ${DATASETS_DIR}/facebook_combined.txt direct 1 1 + REQUIRED_FILES ${DATASETS_DIR}/facebook_combined.txt + Test_OK_SUCCESS + BACKENDS reference reference_omp bsp1d hybrid hyperdags nonblocking ) -add_grb_executables( manual_hook_grb_collectives_blas0 manual_launcher.cpp - hook/grb_launcher.cpp ../unit/launcher/collectives_blas0.cpp - BACKENDS bsp1d NO_BACKEND_NAME +add_grb_executables( kcore_decomposition kcore_decomposition.cpp + ADDITIONAL_LINK_LIBRARIES test_utils_headers + BACKENDS reference reference_omp hyperdags nonblocking bsp1d hybrid +) +add_grb_tests( kcore_decomposition kcore_decomposition + ARGUMENTS 
${DATASETS_DIR}/EPA.mtx direct 1 1 verification ${OUTPUT_VERIFICATION_DIR}/kcore_decomposition_eda_ref + REQUIRED_FILES ${DATASETS_DIR}/EPA.mtx ${OUTPUT_VERIFICATION_DIR}/kcore_decomposition_eda_ref + Test_OK_SUCCESS + BACKENDS reference reference_omp hyperdags nonblocking bsp1d hybrid ) -add_grb_executables( manual_hook_grb_collectives_blas1 manual_launcher.cpp - hook/grb_launcher.cpp hook/collectives_blas1.cpp - BACKENDS bsp1d NO_BACKEND_NAME +add_grb_executables( bicgstab bicgstab.cpp + BACKENDS reference reference_omp bsp1d hybrid hyperdags nonblocking + ADDITIONAL_LINK_LIBRARIES test_utils_headers +) +add_grb_tests( bicgstab bicgstab + ARGUMENTS ${DATASETS_DIR}/gyro_m.mtx direct 1 1 10000 verification ${OUTPUT_VERIFICATION_DIR}/conjugate_gradient_out_gyro_m_ref + REQUIRED_FILES ${DATASETS_DIR}/gyro_m.mtx ${OUTPUT_VERIFICATION_DIR}/conjugate_gradient_out_gyro_m_ref + Test_OK_SUCCESS + BACKENDS reference reference_omp bsp1d hybrid hyperdags nonblocking ) -add_grb_executables( manual_hook_grb_collectives_blas1_raw manual_launcher.cpp - hook/grb_launcher.cpp hook/collectives_blas1_raw.cpp - BACKENDS bsp1d NO_BACKEND_NAME +add_grb_executables( graphchallenge_nn_single_inference graphchallenge_nn_single_inference.cpp + BACKENDS reference reference_omp bsp1d hybrid hyperdags nonblocking + ADDITIONAL_LINK_LIBRARIES test_utils_headers ) -add_grb_executables( small_knn ../unit/auto_launcher.cpp - hook/knn.cpp +if( GNN_DATASET_PATH ) + add_grb_tests( graphchallenge_nn_single_inference graphchallenge_nn_single_inference + ARGUMENTS ${GNN_DATASET_PATH} 1024 120 294 1 32 indirect 1 1 verification ${OUTPUT_VERIFICATION_DIR}/graphchallenge_nn_out_1024_120_294_32_threshold_ref + Test_OK_SUCCESS + REQUIRED_FILES ${GNN_DATASET_PATH} ${OUTPUT_VERIFICATION_DIR}/graphchallenge_nn_out_1024_120_294_32_threshold_ref + BACKENDS reference reference_omp bsp1d hybrid hyperdags nonblocking + ) + add_grb_tests( graphchallenge_nn_single_inference.threshold graphchallenge_nn_single_inference 
+ ARGUMENTS ${GNN_DATASET_PATH} 1024 120 294 1 32 indirect 1 1 verification ${OUTPUT_VERIFICATION_DIR}/graphchallenge_nn_out_1024_120_294_32_threshold_ref + Test_OK_SUCCESS + REQUIRED_FILES ${GNN_DATASET_PATH} ${OUTPUT_VERIFICATION_DIR}/graphchallenge_nn_out_1024_120_294_32_threshold_ref + BACKENDS reference reference_omp bsp1d hybrid hyperdags nonblocking + ) + add_grb_tests( graphchallenge_nn_single_inference.nothreshold graphchallenge_nn_single_inference + ARGUMENTS ${GNN_DATASET_PATH} 1024 120 294 0 0 indirect 1 1 verification ${OUTPUT_VERIFICATION_DIR}/graphchallenge_nn_out_1024_120_294_no_threshold_ref + Test_OK_SUCCESS + REQUIRED_FILES ${GNN_DATASET_PATH} ${OUTPUT_VERIFICATION_DIR}/graphchallenge_nn_out_1024_120_294_no_threshold_ref + BACKENDS reference reference_omp bsp1d hybrid hyperdags nonblocking + ) +endif() + +add_grb_executables( labeltest label_test.cpp +../unit/parser.cpp +BACKENDS reference reference_omp bsp1d hybrid hyperdags nonblocking +) + +list( APPEND _LABELTEST_SIZES 8 256 4096 ) +list( APPEND _LABELTEST_RESULTS 4 9 13 ) +foreach( i RANGE 0 2 ) + list( GET _LABELTEST_SIZES ${i} __SIZE ) + list( GET _LABELTEST_RESULTS ${i} __RESULT ) + add_grb_tests( "labeltest.${__SIZE}" labeltest + ARGUMENTS ${__SIZE} + Test_OK_SUCCESS + BACKENDS reference reference_omp bsp1d hybrid hyperdags nonblocking + OUTPUT_VALIDATE grep 'converged in ${__RESULT} iterations' @@TEST_OUTPUT_FILE@@ + ) +endforeach() + +add_grb_executables( pregel_pagerank_global pregel_pagerank.cpp + BACKENDS reference reference_omp bsp1d hybrid hyperdags nonblocking + ADDITIONAL_LINK_LIBRARIES test_utils_headers + COMPILE_DEFINITIONS PR_CONVERGENCE_MODE=false +) +add_grb_tests( pregel_pagerank_global pregel_pagerank_global + ARGUMENTS ${DATASETS_DIR}/west0497.mtx direct 1 1 + REQUIRED_FILES ${DATASETS_DIR}/west0497.mtx + Test_OK_SUCCESS + BACKENDS reference reference_omp bsp1d hybrid hyperdags nonblocking + OUTPUT_VALIDATE grep '56 iterations to converge' @@TEST_OUTPUT_FILE@@ +) + 
+add_grb_executables( pregel_pagerank_local pregel_pagerank.cpp + BACKENDS reference reference_omp bsp1d hybrid hyperdags nonblocking + ADDITIONAL_LINK_LIBRARIES test_utils_headers + COMPILE_DEFINITIONS PR_CONVERGENCE_MODE=true +) +add_grb_tests( pregel_pagerank_local pregel_pagerank_local + ARGUMENTS ${DATASETS_DIR}/west0497.mtx direct 1 1 + REQUIRED_FILES ${DATASETS_DIR}/west0497.mtx + Test_OK_SUCCESS BACKENDS reference reference_omp bsp1d hybrid hyperdags nonblocking + OUTPUT_VALIDATE grep '47 iterations to converge' @@TEST_OUTPUT_FILE@@ +) + +add_grb_executables( kmeans kmeans.cpp + BACKENDS reference reference_omp hyperdags nonblocking +) +add_grb_tests( kmeans kmeans + ARGUMENTS + Test_OK_SUCCESS + BACKENDS reference reference_omp hyperdags nonblocking +) +add_grb_executables( kcore_decomposition_critical kcore_decomposition.cpp + ADDITIONAL_LINK_LIBRARIES test_utils_headers + BACKENDS reference reference_omp + COMPILE_DEFINITIONS KCORE_VARIANT=true +) +add_grb_tests( kcore_decomposition_critical kcore_decomposition_critical + ARGUMENTS ${DATASETS_DIR}/EPA.mtx direct 1 1 verification ${OUTPUT_VERIFICATION_DIR}/kcore_decomposition_eda_ref + REQUIRED_FILES ${DATASETS_DIR}/EPA.mtx ${OUTPUT_VERIFICATION_DIR}/kcore_decomposition_eda_ref + Test_OK_SUCCESS + BACKENDS reference reference_omp +) + +function( generate_hook_test test_name port num_procs src1 ) + add_grb_executables( "${test_name}" manual_launcher.cpp ${src1} ${ARGN} BACKENDS bsp1d ) + + add_grb_tests( "${test_name}" "${test_name}" + ARGUMENTS localhost ${port} ${num_procs} + Test_OK_SUCCESS + PROCESSES 1 PARALLEL_PROCESSES ${num_procs} + BACKENDS bsp1d + ) +endfunction() + +generate_hook_test( manual_hook_hw 7770 4 hook/hello_world.cpp ) + +generate_hook_test( manual_hook_grb_set 7771 3 hook/grb_launcher.cpp hook/setvector.cpp ) + +generate_hook_test( manual_hook_grb_reduce 7772 4 hook/grb_launcher.cpp ../unit/launcher/reduce.cpp ) + +generate_hook_test( manual_hook_grb_dot 7773 4 hook/grb_launcher.cpp 
hook/dot.cpp ) + +generate_hook_test( manual_hook_grb_collectives_blas0 7774 4 + hook/grb_launcher.cpp ../unit/launcher/collectives_blas0.cpp +) + +generate_hook_test( manual_hook_grb_collectives_blas1 7775 4 + hook/grb_launcher.cpp hook/collectives_blas1.cpp ) -add_grb_executables( manual_hook_small_knn manual_launcher.cpp - hook/grb_launcher.cpp hook/knn.cpp - BACKENDS bsp1d NO_BACKEND_NAME +generate_hook_test( manual_hook_grb_collectives_blas1_raw 7776 4 + hook/grb_launcher.cpp hook/collectives_blas1_raw.cpp ) +generate_hook_test( manual_hook_small_knn 7777 4 hook/grb_launcher.cpp hook/knn.cpp ) + add_grb_executables( from_mpi_launch_simple_pagerank simple_pagerank_from_mpi.cpp - BACKENDS bsp1d NO_BACKEND_NAME + BACKENDS bsp1d ADDITIONAL_LINK_LIBRARIES MPI::MPI_CXX ) +add_grb_tests( from_mpi_launch_simple_pagerank from_mpi_launch_simple_pagerank + ARGUMENTS + PROCESSES "4" + BACKENDS bsp1d +) -add_grb_executables( from_mpi_launch_simple_pagerank_multiple_entry simple_pagerank_from_mpi.cpp - BACKENDS bsp1d NO_BACKEND_NAME +add_grb_executables( from_mpi_launch_simple_pagerank_multiple_entry + simple_pagerank_from_mpi.cpp + BACKENDS bsp1d ADDITIONAL_LINK_LIBRARIES MPI::MPI_CXX COMPILE_DEFINITIONS MULTIPLE_ENTRY ) +add_grb_tests( from_mpi_launch_simple_pagerank_multiple_entry + from_mpi_launch_simple_pagerank_multiple_entry + ARGUMENTS + PROCESSES "5" + BACKENDS bsp1d +) -add_grb_executables( from_mpi_launch_simple_pagerank_broadcast_pinning_multiple_entry simple_pagerank_broadcast.cpp - BACKENDS bsp1d NO_BACKEND_NAME +add_grb_executables( from_mpi_launch_simple_pagerank_broadcast_pinning_multiple_entry + simple_pagerank_broadcast.cpp + BACKENDS bsp1d ADDITIONAL_LINK_LIBRARIES MPI::MPI_CXX COMPILE_DEFINITIONS MULTIPLE_ENTRY PINNED_OUTPUT ) +add_grb_tests( from_mpi_launch_simple_pagerank_broadcast_pinning_multiple_entry + from_mpi_launch_simple_pagerank_broadcast_pinning_multiple_entry + ARGUMENTS + PROCESSES "3" + BACKENDS bsp1d +) add_grb_executables( 
from_mpi_launch_simple_pagerank_broadcast_multiple_entry simple_pagerank_broadcast.cpp - BACKENDS bsp1d NO_BACKEND_NAME + BACKENDS bsp1d ADDITIONAL_LINK_LIBRARIES MPI::MPI_CXX COMPILE_DEFINITIONS MULTIPLE_ENTRY ) +add_grb_tests( from_mpi_launch_simple_pagerank_broadcast_multiple_entry from_mpi_launch_simple_pagerank_broadcast_multiple_entry + ARGUMENTS + PROCESSES "7" + BACKENDS bsp1d +) add_grb_executables( from_mpi_launch_simple_pagerank_broadcast simple_pagerank_broadcast.cpp - BACKENDS bsp1d NO_BACKEND_NAME + BACKENDS bsp1d ADDITIONAL_LINK_LIBRARIES MPI::MPI_CXX ) +add_grb_tests( from_mpi_launch_simple_pagerank_broadcast from_mpi_launch_simple_pagerank_broadcast + ARGUMENTS + PROCESSES "6" + BACKENDS bsp1d +) add_grb_executables( knn knn.cpp ../unit/parser.cpp BACKENDS reference reference_omp bsp1d hybrid hyperdags nonblocking ) +add_grb_tests( knn knn + ARGUMENTS 4 ${DATASETS_DIR}/facebook_combined.txt direct 1 1 + REQUIRED_FILES ${DATASETS_DIR}/facebook_combined.txt + Test_OK_SUCCESS + BACKENDS reference reference_omp bsp1d hybrid hyperdags nonblocking + OUTPUT_VALIDATE grep 'Neighbourhood size is 421' @@TEST_OUTPUT_FILE@@ +) add_grb_executables( hpcg hpcg.cpp BACKENDS reference reference_omp bsp1d hybrid hyperdags nonblocking ADDITIONAL_LINK_LIBRARIES test_utils ) - -add_grb_executables( graphchallenge_nn_single_inference graphchallenge_nn_single_inference.cpp +add_grb_tests( hpcg hpcg + ARGUMENTS + Test_OK_SUCCESS BACKENDS reference reference_omp bsp1d hybrid hyperdags nonblocking - ADDITIONAL_LINK_LIBRARIES test_utils_headers ) -add_grb_executables( simple_pagerank simple_pagerank.cpp +add_grb_executables( pregel_connected_components pregel_connected_components.cpp BACKENDS reference reference_omp bsp1d hybrid hyperdags nonblocking - ADDITIONAL_LINK_LIBRARIES test_utils_headers ) - -add_grb_executables( pregel_pagerank_local pregel_pagerank.cpp +add_grb_tests( pregel_connected_components pregel_connected_components + ARGUMENTS ${DATASETS_DIR}/west0497.mtx 
direct 1 1 + REQUIRED_FILES ${DATASETS_DIR}/west0497.mtx + Test_OK_SUCCESS BACKENDS reference reference_omp bsp1d hybrid hyperdags nonblocking - ADDITIONAL_LINK_LIBRARIES test_utils_headers - COMPILE_DEFINITIONS PR_CONVERGENCE_MODE=true + OUTPUT_VALIDATE grep '11 iterations to converge' @@TEST_OUTPUT_FILE@@ ) -add_grb_executables( pregel_pagerank_global pregel_pagerank.cpp +add_grb_executables( conjugate_gradient conjugate_gradient.cpp BACKENDS reference reference_omp bsp1d hybrid hyperdags nonblocking ADDITIONAL_LINK_LIBRARIES test_utils_headers - COMPILE_DEFINITIONS PR_CONVERGENCE_MODE=false ) - -add_grb_executables( pregel_connected_components pregel_connected_components.cpp +add_grb_tests( conjugate_gradient conjugate_gradient + ARGUMENTS ${DATASETS_DIR}/gyro_m.mtx direct 1 1 10000 false verification ${OUTPUT_VERIFICATION_DIR}/conjugate_gradient_out_gyro_m_ref + REQUIRED_FILES ${DATASETS_DIR}/gyro_m.mtx ${OUTPUT_VERIFICATION_DIR}/conjugate_gradient_out_gyro_m_ref + Test_OK_SUCCESS BACKENDS reference reference_omp bsp1d hybrid hyperdags nonblocking ) - -add_grb_executables( conjugate_gradient conjugate_gradient.cpp +add_grb_tests( preconditioned_conjugate_gradient conjugate_gradient + ARGUMENTS ${DATASETS_DIR}/gyro_m.mtx direct 1 1 10000 true verification ${OUTPUT_VERIFICATION_DIR}/conjugate_gradient_out_gyro_m_ref + REQUIRED_FILES ${DATASETS_DIR}/gyro_m.mtx ${OUTPUT_VERIFICATION_DIR}/conjugate_gradient_out_gyro_m_ref + Test_OK_SUCCESS BACKENDS reference reference_omp bsp1d hybrid hyperdags nonblocking - ADDITIONAL_LINK_LIBRARIES test_utils_headers ) add_grb_executables( conjugate_gradient_complex conjugate_gradient.cpp @@ -138,6 +349,12 @@ add_grb_executables( conjugate_gradient_complex conjugate_gradient.cpp ADDITIONAL_LINK_LIBRARIES test_utils_headers COMPILE_DEFINITIONS _CG_COMPLEX ) +add_grb_tests( conjugate_gradient_complex conjugate_gradient_complex + ARGUMENTS ${TEST_DATA_DIR}/rndHermit256.mtx direct 1 1 1000 false verification 
${OUTPUT_VERIFICATION_DIR}/complex_conjugate_conjugate_gradient_out_rndHermit256_ref + REQUIRED_FILES ${TEST_DATA_DIR}/rndHermit256.mtx ${OUTPUT_VERIFICATION_DIR}/complex_conjugate_conjugate_gradient_out_rndHermit256_ref + Test_OK_SUCCESS + BACKENDS reference reference_omp bsp1d hybrid hyperdags nonblocking +) add_grb_executables( gmres gmres.cpp BACKENDS reference reference_omp bsp1d hybrid hyperdags nonblocking @@ -150,35 +367,6 @@ add_grb_executables( gmres_complex gmres.cpp COMPILE_DEFINITIONS _GMRES_COMPLEX ) -add_grb_executables( bicgstab bicgstab.cpp - BACKENDS reference reference_omp bsp1d hybrid hyperdags nonblocking - ADDITIONAL_LINK_LIBRARIES test_utils_headers -) - -add_grb_executables( kmeans kmeans.cpp - BACKENDS reference reference_omp hyperdags nonblocking -) - -add_grb_executables( labeltest label_test.cpp - ../unit/parser.cpp - BACKENDS reference reference_omp bsp1d hybrid hyperdags nonblocking -) - -add_grb_executables( small_pagerank ../unit/auto_launcher.cpp - hook/small_simple_pagerank.cpp - BACKENDS reference reference_omp bsp1d hybrid hyperdags nonblocking -) - -add_grb_executables( kcore_decomposition_critical kcore_decomposition.cpp - ADDITIONAL_LINK_LIBRARIES test_utils_headers - BACKENDS reference reference_omp - COMPILE_DEFINITIONS KCORE_VARIANT=true -) - -add_grb_executables( kcore_decomposition kcore_decomposition.cpp - ADDITIONAL_LINK_LIBRARIES test_utils_headers - BACKENDS reference reference_omp hyperdags nonblocking bsp1d hybrid -) # targets to list and build the test for this category get_property( smoke_tests_list GLOBAL PROPERTY tests_category_smoke ) @@ -192,21 +380,8 @@ add_custom_target( "build_tests_category_smoke" ) # target to run the tests in this category by calling the appropriate runner -add_custom_target( tests_smoke - COMMAND ${CMAKE_CURRENT_SOURCE_DIR}/smoketests.sh - ${SCRIPTS_COMMON_ARGS} - "--test-bin-dir" "\"${CMAKE_CURRENT_BINARY_DIR}\"" - "--test-out-dir" "\"${CMAKE_CURRENT_BINARY_DIR}/output\"" - 
"--output-verification-dir" "\"${CMAKE_CURRENT_SOURCE_DIR}/output_verification\"" - "--test-data-dir" "\"${CMAKE_CURRENT_SOURCE_DIR}/test_data\"" - - WORKING_DIRECTORY "${PROJECT_SOURCE_DIR}" - DEPENDS build_tests_category_smoke # add dependency on this target - # to automatically build before running - COMMAND_EXPAND_LISTS - USES_TERMINAL +add_custom_target( smoketests + COMMAND ${CMAKE_CTEST_COMMAND} -L "mode:smoke" --output-on-failure + WORKING_DIRECTORY "${PROJECT_BINARY_DIR}" + DEPENDS build_tests_category_smoke ) - -# custom target "smoketests" just to keep old naming -add_custom_target( smoketests DEPENDS tests_smoke ) - diff --git a/tests/smoke/manual_launcher.cpp b/tests/smoke/manual_launcher.cpp index 9db84baec..9dbfcc294 100644 --- a/tests/smoke/manual_launcher.cpp +++ b/tests/smoke/manual_launcher.cpp @@ -57,15 +57,15 @@ int main( int argc, char ** argv ) { std::cout << "Functional test executable: " << argv[ 0 ] << "\n"; if( argc != 5 ) { - USE1 USE2 USE3 USE4 USE5 - return 0; + USE1 USE2 USE5 USE3 USE4 + return -1; } // read command-line args char * const host = argv[ 1 ]; - lpf_pid_t s = static_cast< lpf_pid_t >( atoi( argv[ 2 ] ) ); + char * const port = argv[ 2 ]; lpf_pid_t P = static_cast< lpf_pid_t >( atoi( argv[ 3 ] ) ); - char * const port = argv[ 4 ]; + lpf_pid_t s = static_cast< lpf_pid_t >( atoi( argv[ 4 ] ) ); // input sanity checks if( host == NULL || host[ 0 ] == '\0' ) { @@ -73,6 +73,12 @@ int main( int argc, char ** argv ) { USE2 return 100; } + if( port == NULL || port[ 0 ] == '\0' ) { + std::cerr << "Invalid value for port name or number: " + << argv[ 2 ] << "." << std::endl; + USE5 + return 400; + } if( !grb::utils::is_geq_zero( P ) ) { std::cerr << "Invalid value for #processes: " << argv[ 3 ] << ", " "parsed as " << static_cast< size_t >(P) << "." 
<< std::endl; @@ -80,18 +86,12 @@ int main( int argc, char ** argv ) { return 200; } if( !grb::utils::is_in_normalized_range( s, P ) ) { - std::cerr << "Invalid value for PID: " << argv[ 2 ] << ", " + std::cerr << "Invalid value for PID: " << argv[ 4 ] << ", " << "parsed as " << static_cast< size_t >(s) << "." << std::endl; USE4 return 300; } - if( port == NULL || port[ 0 ] == '\0' ) { - std::cerr << "Invalid value for port name or number: " - << argv[ 4 ] << "." << std::endl; - USE5 - return 400; - } // initialise MPI if( MPI_Init( NULL, NULL ) != MPI_SUCCESS ) { diff --git a/tests/smoke/smoketests.sh b/tests/smoke/smoketests.sh deleted file mode 100755 index 45793fb42..000000000 --- a/tests/smoke/smoketests.sh +++ /dev/null @@ -1,606 +0,0 @@ -#!/bin/bash - -# -# Copyright 2021 Huawei Technologies Co., Ltd. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# - -TESTS_ROOT_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )"/../ &> /dev/null && pwd )" -source ${TESTS_ROOT_DIR}/parse_env.sh - -if [ -z "${GNN_DATASET_PATH}" ]; then - echo "Info: GNN_DATASET_PATH was undefined or empty; trying ${INPUT_DIR}/GraphChallengeDataset" - export GNN_DATASET_PATH=${INPUT_DIR}/GraphChallengeDataset -fi - -if [ ! -d "${GNN_DATASET_PATH}" ]; then - echo "Warning: GNN_DATASET_PATH does not exist. Some tests will not run without input GNN data." -fi - - -LABELTEST_SIZES=(8 256 4096) # for size 32, the ground-truth number of iterations is 6. 
This size is -LABELTEST_RESULTS=(4 9 13) # disabled as there is no reason why this should behave differently - # from size 8 (both will map to the same single thread and process). - -echo " " -echo " " -echo "****************************************************************************************" -echo " FUNCTIONAL PERFORMANCE DESCRIPTION " -echo "----------------------------------------------------------------------------------------" -echo " " -for BACKEND in ${BACKENDS[@]}; do - if [ "$BACKEND" = "bsp1d" ]; then - if [ -z "${LPFRUN}" ]; then - echo "LPFRUN is not set!" - exit 255; - fi - if [ -z "${MANUALRUN}" ]; then - echo "MANUALRUN is not set!" - exit 255; - fi - fi - - if [ "$BACKEND" = "bsp1d" ]; then - Ps=( 5 ) - fi - if [ "$BACKEND" = "hybrid" ]; then - Ps=( 2 ) - fi - if [ "$BACKEND" = "bsp1d" ] || [ "$BACKEND" = "hybrid" ]; then - if [ -z "${LPFRUN}" ]; then - echo "LPFRUN is not set!" - exit 255; - fi - if [ -z "${MANUALRUN}" ]; then - echo "MANUALRUN is not set!" - exit 255; - fi - else - Ps=( 1 ) - fi - if [ "$BACKEND" = "reference_omp" ] || [ "$BACKEND" = "nonblocking" ]; then - Pt=( ${MAX_THREADS} ) - elif [ "$BACKEND" = "hybrid" ]; then - MTDS=$((MAX_THREADS/2)) - if [ "$MTDS" -le "2" ]; then - Pt=( 2 ) - else - Pt=( ${MTDS} ) - fi - else - Pt=( 1 ) - fi - - for P in ${Ps[@]}; do - for T in ${Pt[@]}; do - - runner= - if [ "$BACKEND" = "bsp1d" ] || [ "$BACKEND" = "hybrid" ]; then - runner="${LPFRUN} -n ${P}" - fi - if [ "${BACKEND}" = "bsp1d" ]; then - runner="${runner} ${BIND_PROCESSES_TO_HW_THREADS}" - elif [ "${BACKEND}" = "hybrid" ]; then - runner="${runner} ${MPI_PASS_ENV} ${LPFRUN_PASSTHROUGH}OMP_NUM_THREADS=${T}" - runner="${runner} ${BIND_PROCESSES_TO_MULTIPLE_HW_THREADS}${T}" - elif [ "$BACKEND" = "reference_omp" ] || [ "$BACKEND" = "nonblocking" ]; then - export OMP_NUM_THREADS=${T} - fi - - echo "#################################################################" - echo "# Starting standardised smoke tests for the ${BACKEND} backend" - 
echo "# using ${P} user processes" - echo "# using ${T} threads" - if [ "$BACKEND" = "bsp1d" ] || [ "$BACKEND" = "hybrid" ]; then - echo "# using \`\`${LPFRUN}'' for automatic launchers" - echo "# using \`\`${MANUALRUN}'' for manual launchers" - fi - if [ "x${runner}" != "x" ]; then - echo "# using runner \`\`$runner''" - fi - echo "#################################################################" - echo " " - - echo ">>> [x] [ ] Tests k-nearest-neighbourhood (k-NN) calculation through" - echo " breadth-first search on a tiny graph." - bash -c "$runner ${TEST_BIN_DIR}/small_knn_${BACKEND} ${P} &> ${TEST_OUT_DIR}/small_knn_${BACKEND}_${P}_${T}.log" - head -1 ${TEST_OUT_DIR}/small_knn_${BACKEND}_${P}_${T}.log - grep 'Test OK' ${TEST_OUT_DIR}/small_knn_${BACKEND}_${P}_${T}.log || echo "Test FAILED" - echo " " - - echo ">>> [x] [x] Tests an automatically launching version of the k-NN on" - echo " the facebook_combined dataset for k=4 in serial mode." - echo " Uses file IO in direct mode." - if [ -f ${INPUT_DIR}/facebook_combined.txt ]; then - $runner ${TEST_BIN_DIR}/knn_${BACKEND} 4 ${INPUT_DIR}/facebook_combined.txt direct 1 1 &> ${TEST_OUT_DIR}/knn_${BACKEND}_${P}_${T}_facebook.log - head -1 ${TEST_OUT_DIR}/knn_${BACKEND}_${P}_${T}_facebook.log - if grep -q "Test OK" ${TEST_OUT_DIR}/knn_${BACKEND}_${P}_${T}_facebook.log; then - (grep -q "Neighbourhood size is 421" ${TEST_OUT_DIR}/knn_${BACKEND}_${P}_${T}_facebook.log && printf "Test OK\n\n") || (printf "Test FAILED (verification error)\n") - else - printf "Test FAILED\n" - fi - else - echo "Test DISABLED; dataset not found. Provide facebook_combined.txt in the ${INPUT_DIR} directory to enable." 
- fi - echo " " - - if [ "${GITHUB_ACTIONS}" = true ] && [ "${BACKEND}" = "hyperdags" ]; then - echo "Test DISABLED; GitHub runner does not have enough memory for this test" - else - echo ">>> [x] [ ] Tests HPCG on a small matrix" - echo "Functional test executable: ${TEST_BIN_DIR}/hpcg_${BACKEND}" - bash -c "$runner ${TEST_BIN_DIR}/hpcg_${BACKEND} 2>&1 | sed -e '1p' -e '/===/!d' > ${TEST_OUT_DIR}/hpcg_${BACKEND}_${P}_${T}.log" - grep 'Test OK' ${TEST_OUT_DIR}/hpcg_${BACKEND}_${P}_${T}.log || echo "Test FAILED" - fi - echo " " - - echo ">>> [x] [ ] Tests an automatically launching version of the simple pagerank" - echo " algorithm for a small 10 x 10 problem. Verifies against known" - echo " output." - echo "Functional test executable: ${TEST_BIN_DIR}/small_pagerank_${BACKEND}" - bash -c "$runner ${TEST_BIN_DIR}/small_pagerank_${BACKEND} ${P} &> ${TEST_OUT_DIR}/small_pagerank_${BACKEND}_${P}_${T}.log" - if grep -q 'Test OK' ${TEST_OUT_DIR}/small_pagerank_${BACKEND}_${P}_${T}.log; then - (grep -q 'Pagerank vector local to PID 0 on exit is ( 0.106896 0.105862 0.104983 0.104235 0.1036 0.10306 0.102601 0.102211 0.0584396 0.108113 )' ${TEST_OUT_DIR}/small_pagerank_${BACKEND}_${P}_${T}.log && printf "Test OK\n\n") || printf "Test FAILED (verification error)\n\n" - else - printf "Test FAILED\n\n" - fi - - echo ">>> [x] [ ] Testing the pagerank algorithm for the 497 by 497 matrix" - echo " west0497.mtx. This test verifies against a ground-truth" - echo " PageRank vector for this dataset. The test employs the" - echo " grb::Launcher in automatic mode with statically sized IO," - echo " and uses sequential file IO in direct mode." 
- if [ -f ${INPUT_DIR}/west0497.mtx ]; then - $runner ${TEST_BIN_DIR}/simple_pagerank_${BACKEND} ${INPUT_DIR}/west0497.mtx direct 1 1 1000 verification ${OUTPUT_VERIFICATION_DIR}/pagerank_out_west0497_ref &> ${TEST_OUT_DIR}/simple_pagerank_${BACKEND}_west0497_${P}_${T}.log - head -1 ${TEST_OUT_DIR}/simple_pagerank_${BACKEND}_west0497_${P}_${T}.log - grep 'Test OK' ${TEST_OUT_DIR}/simple_pagerank_${BACKEND}_west0497_${P}_${T}.log || echo "Test FAILED" - else - echo "Test DISABLED: west0497.mtx was not found. To enable, please provide ${INPUT_DIR}/west0497.mtx" - fi - echo " " - - echo ">>> [x] [ ] Tests grb::Launcher on a PageRank on the SNAP dataset" - echo " facebook_combined. The launcher is used in automatic" - echo " mode and the IO mode is sequential in direct mode." - echo " Launcher::exec is used with statically sized input and" - echo " statically sized output." - echo "Functional test executable: ${TEST_BIN_DIR}/simple_pagerank_${BACKEND}" - if [ -f ${INPUT_DIR}/facebook_combined.txt ]; then - $runner ${TEST_BIN_DIR}/simple_pagerank_${BACKEND} ${INPUT_DIR}/facebook_combined.txt direct 1 1 &> ${TEST_OUT_DIR}/simple_pagerank_${BACKEND}_facebook_${P}_${T}.log - grep 'Test OK' ${TEST_OUT_DIR}/simple_pagerank_${BACKEND}_facebook_${P}_${T}.log || printf 'Test FAILED.\n' - else - echo "Test DISABLED; dataset not found. Provide facebook_combined.txt in the ./datasets/ directory to enable." - fi - echo " " - - echo ">>> [x] [ ] Testing the conjugate gradient algorithm for the input" - echo " matrix (17361x17361) taken from gyro_m.mtx. This test" - echo " verifies against a ground-truth solution vector. The test" - echo " employs the grb::Launcher in automatic mode. It uses" - echo " direct-mode file IO." 
- if [ -f ${INPUT_DIR}/gyro_m.mtx ]; then - $runner ${TEST_BIN_DIR}/conjugate_gradient_${BACKEND} ${INPUT_DIR}/gyro_m.mtx direct 1 1 10000 false verification ${OUTPUT_VERIFICATION_DIR}/conjugate_gradient_out_gyro_m_ref &> ${TEST_OUT_DIR}/conjugate_gradient_${BACKEND}_${P}_${T}.log - head -1 ${TEST_OUT_DIR}/conjugate_gradient_${BACKEND}_${P}_${T}.log - grep 'Test OK' ${TEST_OUT_DIR}/conjugate_gradient_${BACKEND}_${P}_${T}.log || echo "Test FAILED" - else - echo "Test DISABLED: gyro_m.mtx was not found. To enable, please provide ${INPUT_DIR}/gyro_m.mtx" - fi - echo " " - - echo ">>> [x] [ ] Testing the Jacobi-preconditioned conjugate gradient algorithm" - echo " for the input matrix (17361x17361) taken from gyro_m.mtx. This" - echo " test verifies against a ground-truth solution vector. The test" - echo " employs the grb::Launcher in automatic mode. It uses" - echo " direct-mode file IO." - if [ -f ${INPUT_DIR}/gyro_m.mtx ]; then - $runner ${TEST_BIN_DIR}/conjugate_gradient_${BACKEND} ${INPUT_DIR}/gyro_m.mtx direct 1 1 10000 true verification ${OUTPUT_VERIFICATION_DIR}/conjugate_gradient_out_gyro_m_ref &> ${TEST_OUT_DIR}/preconditioned_conjugate_gradient_${BACKEND}_${P}_${T}.log - head -1 ${TEST_OUT_DIR}/preconditioned_conjugate_gradient_${BACKEND}_${P}_${T}.log - grep 'Test OK' ${TEST_OUT_DIR}/preconditioned_conjugate_gradient_${BACKEND}_${P}_${T}.log || echo "Test FAILED" - else - echo "Test DISABLED: gyro_m.mtx was not found. To enable, please provide ${INPUT_DIR}/gyro_m.mtx" - fi - echo " " - - echo ">>> [x] [ ] Tests grb::Launcher on a K-core decomposition on the dataset" - echo " EPA.mtx. The launcher is used in automatic mode and the I/O" - echo " mode is sequential. The Launcher::exec called is with struct" - echo " I/O with broadcast true. This launches the default k-core" - echo " variant." 
- echo "Functional test executable: ${TEST_BIN_DIR}/kcore_decomposition_${BACKEND}" - if [ -f ${INPUT_DIR}/EPA.mtx ]; then - $runner ${TEST_BIN_DIR}/kcore_decomposition_${BACKEND} ${INPUT_DIR}/EPA.mtx direct 1 1 verification ${OUTPUT_VERIFICATION_DIR}/kcore_decomposition_eda_ref &> ${TEST_OUT_DIR}/kcore_decomposition_${BACKEND}_EPA_${P}_${T}.log - grep 'Test OK' ${TEST_OUT_DIR}/kcore_decomposition_${BACKEND}_EPA_${P}_${T}.log || printf 'Test FAILED.\n' - else - echo "Test DISABLED; dataset not found. Provide EPA.mtx in the ./datasets/ directory to enable." - fi - echo " " - - TESTNAME=rndHermit256 - if [ -f ${TEST_DATA_DIR}/${TESTNAME}.mtx ]; then - n=$(grep -v '^%' ${TEST_DATA_DIR}/${TESTNAME}.mtx | head -1 | awk '{print $1}' ) - m=$(grep -v '^%' ${TEST_DATA_DIR}/${TESTNAME}.mtx | head -1 | awk '{print $2}' ) - echo ">>> [x] [ ] Testing the conjugate gradient complex algorithm for the input" - echo " matrix (${n}x${m}) taken from ${TESTNAME}.mtx. This test" - echo " verifies against a ground-truth solution vector. The test" - echo " employs the grb::Launcher in automatic mode. It uses" - echo " direct-mode file IO." - $runner ${TEST_BIN_DIR}/conjugate_gradient_complex_${BACKEND} ${TEST_DATA_DIR}/${TESTNAME}.mtx direct 1 1 1000 false verification ${OUTPUT_VERIFICATION_DIR}/complex_conjugate_conjugate_gradient_out_${TESTNAME}_ref &> ${TEST_OUT_DIR}/conjugate_gradient_complex_${BACKEND}_${P}_${T}.log - head -1 ${TEST_OUT_DIR}/conjugate_gradient_complex_${BACKEND}_${P}_${T}.log - grep 'Test OK' ${TEST_OUT_DIR}/conjugate_gradient_complex_${BACKEND}_${P}_${T}.log || echo "Test FAILED" - else - echo "Test DISABLED: ${TESTNAME}.mtx was not found. 
To enable, please provide ${TEST_DATA_DIR}/${TESTNAME}.mtx" - fi - echo " " - - # note the below test relies on the bash variables defined in the previous one - if [ -f ${TEST_DATA_DIR}/${TESTNAME}.mtx ]; then - echo ">>> [x] [ ] Testing the Jacobi-preconditioned conjugate gradient complex" - echo " algorithm for the input matrix (${n}x${m}) taken from" - echo " ${TESTNAME}.mtx. This test verifies against a ground-truth" - echo " solution vector. The test employs the grb::Launcher in automatic" - echo " mode. It uses direct-mode file IO." - $runner ${TEST_BIN_DIR}/conjugate_gradient_complex_${BACKEND} ${TEST_DATA_DIR}/${TESTNAME}.mtx direct 1 1 10000 true verification ${OUTPUT_VERIFICATION_DIR}/complex_conjugate_conjugate_gradient_out_${TESTNAME}_ref &> ${TEST_OUT_DIR}/preconditioned_conjugate_gradient_complex_${BACKEND}_${P}_${T}.log - head -1 ${TEST_OUT_DIR}/preconditioned_conjugate_gradient_complex_${BACKEND}_${P}_${T}.log - grep 'Test OK' ${TEST_OUT_DIR}/preconditioned_conjugate_gradient_complex_${BACKEND}_${P}_${T}.log || echo "Test FAILED" - else - echo "Test DISABLED: ${TESTNAME}.mtx was not found. To enable, please provide ${TEST_DATA_DIR}/${TESTNAME}.mtx" - fi - echo " " - - NTEST=256 - if [ -f "${TEST_BIN_DIR}/gmres_${BACKEND}" ] - then - echo ">>> [x] [ ] Testing the GMRES real algorithm for the random generated" - echo " matrix (${NTEST}x${NTEST}) with preconditioner. This test" - echo " verifies against a predefined solution vector. The test" - echo " employs the grb::Launcher in automatic mode. It uses" - echo " direct-mode file IO." 
- $runner ${TEST_BIN_DIR}/gmres_${BACKEND} --n ${NTEST} &> ${TEST_OUT_DIR}/gmres_${BACKEND}_${P}_${T}.log - head -1 ${TEST_OUT_DIR}/gmres_${BACKEND}_${P}_${T}.log - grep 'Test OK' ${TEST_OUT_DIR}/gmres_${BACKEND}_${P}_${T}.log || echo "Test FAILED" - echo " " - fi - - NTEST=50000 - if [ -f "${TEST_BIN_DIR}/gmres_complex_${BACKEND}" ] - then - echo ">>> [x] [ ] Testing the GMRES complex algorithm for the random generated" - echo " matrix (${NTEST}x${NTEST}) with preconditioner. This test" - echo " verifies against a predefined solution vector. The test" - echo " employs the grb::Launcher in automatic mode. It uses" - echo " direct-mode file IO." - $runner ${TEST_BIN_DIR}/gmres_complex_${BACKEND} --n ${NTEST} &> ${TEST_OUT_DIR}/gmres_complex_${BACKEND}_${P}_${T}.log - head -1 ${TEST_OUT_DIR}/gmres_complex_${BACKEND}_${P}_${T}.log - grep 'Test OK' ${TEST_OUT_DIR}/gmres_complex_${BACKEND}_${P}_${T}.log || echo "Test FAILED" - echo " " - fi - - echo ">>> [x] [ ] Testing the BiCGstab algorithm for the 17361 x 17361 input" - echo " matrix gyro_m.mtx. This test verifies against a ground-" - echo " truth solution vector, the same as used for the earlier" - echo " conjugate gradient test. Likewise to that one, this test" - echo " employs the grb::Launcher in automatic mode. It uses" - echo " direct-mode file IO." - if [ -f ${INPUT_DIR}/gyro_m.mtx ]; then - $runner ${TEST_BIN_DIR}/bicgstab_${BACKEND} ${INPUT_DIR}/gyro_m.mtx direct 1 1 10000 verification ${OUTPUT_VERIFICATION_DIR}/conjugate_gradient_out_gyro_m_ref &> ${TEST_OUT_DIR}/bicgstab_${BACKEND}_${P}_${T}.log - head -1 ${TEST_OUT_DIR}/bicgstab_${BACKEND}_${P}_${T}.log - grep 'Test OK' ${TEST_OUT_DIR}/bicgstab_${BACKEND}_${P}_${T}.log || echo "Test FAILED" - else - echo "Test DISABLED: gyro_m.mtx was not found. 
To enable, please provide ${INPUT_DIR}/gyro_m.mtx" - fi - echo " " - - echo ">>> [x] [ ] Testing the Sparse Neural Network algorithm for the GraphChallenge" - echo " dataset (neurons=1024, layers=120, offset=294) taken from" - echo " ${GNN_DATASET_PATH} and using thresholding 32." - if [ -d ${GNN_DATASET_PATH} ]; then - $runner ${TEST_BIN_DIR}/graphchallenge_nn_single_inference_${BACKEND} ${GNN_DATASET_PATH} 1024 120 294 1 32 indirect 1 1 verification ${OUTPUT_VERIFICATION_DIR}/graphchallenge_nn_out_1024_120_294_32_threshold_ref &> ${TEST_OUT_DIR}/graphchallenge_nn_single_inference_${BACKEND}_${P}_${T}.log - head -1 ${TEST_OUT_DIR}/graphchallenge_nn_single_inference_${BACKEND}_${P}_${T}.log - grep 'Test OK' ${TEST_OUT_DIR}/graphchallenge_nn_single_inference_${BACKEND}_${P}_${T}.log || echo "Test FAILED" - else - echo "Test DISABLED: ${GNN_DATASET_PATH} was not found. To enable, please provide the dataset." - fi - echo " " - - echo ">>> [x] [ ] Testing the Sparse Neural Network algorithm for the GraphChallenge" - echo " dataset (neurons=1024, layers=120, offset=294) taken from" - echo " ${GNN_DATASET_PATH} and without using thresholding." - if [ -d ${GNN_DATASET_PATH} ]; then - $runner ${TEST_BIN_DIR}/graphchallenge_nn_single_inference_${BACKEND} ${GNN_DATASET_PATH} 1024 120 294 0 0 indirect 1 1 verification ${OUTPUT_VERIFICATION_DIR}/graphchallenge_nn_out_1024_120_294_no_threshold_ref &> ${TEST_OUT_DIR}/graphchallenge_nn_single_inference_${BACKEND}_${P}_${T}.log - head -1 ${TEST_OUT_DIR}/graphchallenge_nn_single_inference_${BACKEND}_${P}_${T}.log - grep 'Test OK' ${TEST_OUT_DIR}/graphchallenge_nn_single_inference_${BACKEND}_${P}_${T}.log || echo "Test FAILED" - else - echo "Test DISABLED: ${GNN_DATASET_PATH} was not found. To enable, please provide the dataset." 
- fi - echo " " - - for ((i=0;i<${#LABELTEST_SIZES[@]};++i)); - do - LABELTEST_SIZE=${LABELTEST_SIZES[i]} - LABELTEST_EXPECTED_RESULT=${LABELTEST_RESULTS[i]} - echo ">>> [x] [ ] Testing label propagation using a a generated dataset" - echo " of size ${LABELTEST_SIZE} using the ${BACKEND} backend." - echo " This test verifies the number of iterations required" - echo " to convergence against the ground-truth value of ${LABELTEST_EXPECTED_RESULT}" - $runner ${TEST_BIN_DIR}/labeltest_${BACKEND} ${LABELTEST_SIZE} &> ${TEST_OUT_DIR}/labeltest_${BACKEND}_${LABELTEST_SIZE}.log - head -1 ${TEST_OUT_DIR}/labeltest_${BACKEND}_${LABELTEST_SIZE}.log - (grep -q "converged in ${LABELTEST_EXPECTED_RESULT} iterations" ${TEST_OUT_DIR}/labeltest_${BACKEND}_${LABELTEST_SIZE}.log && grep -i 'test ok' ${TEST_OUT_DIR}/labeltest_${BACKEND}_${LABELTEST_SIZE}.log) || echo "Test FAILED" - echo " " - done - - echo ">>> [x] [ ] Testing the Pregel PageRank-like algorithm using a global" - echo " stopping criterion. Verifies via a simple regression test in" - echo " number of rounds required." - if [ -f ${INPUT_DIR}/west0497.mtx ]; then - $runner ${TEST_BIN_DIR}/pregel_pagerank_global_${BACKEND} ${INPUT_DIR}/west0497.mtx direct 1 1 &> ${TEST_OUT_DIR}/pregel_pagerank_global_west0497_${BACKEND}_${P}_${T}.log - head -1 ${TEST_OUT_DIR}/pregel_pagerank_global_west0497_${BACKEND}_${P}_${T}.log - if ! grep -q 'Test OK' ${TEST_OUT_DIR}/pregel_pagerank_global_west0497_${BACKEND}_${P}_${T}.log; then - echo "Test FAILED" - elif ! grep -q '56 iterations to converge' ${TEST_OUT_DIR}/pregel_pagerank_global_west0497_${BACKEND}_${P}_${T}.log; then - echo "Verification FAILED" - echo "Test FAILED" - else - echo "Test OK" - fi - else - echo "Test DISABLED: west0497.mtx was not found. To enable, please provide ${INPUT_DIR}/west0497.mtx" - fi - echo " " - - echo ">>> [x] [ ] Testing the Pregel PageRank-like algorithm using a vertex-local" - echo " stopping criterion. 
Verifies via a simple regression test in" - echo " number of rounds required." - if [ -f ${INPUT_DIR}/west0497.mtx ]; then - $runner ${TEST_BIN_DIR}/pregel_pagerank_local_${BACKEND} ${INPUT_DIR}/west0497.mtx direct 1 1 &> ${TEST_OUT_DIR}/pregel_pagerank_local_west0497_${BACKEND}_${P}_${T}.log - head -1 ${TEST_OUT_DIR}/pregel_pagerank_local_west0497_${BACKEND}_${P}_${T}.log - if ! grep -q 'Test OK' ${TEST_OUT_DIR}/pregel_pagerank_local_west0497_${BACKEND}_${P}_${T}.log; then - echo "Test FAILED" - elif ! grep -q '47 iterations to converge' ${TEST_OUT_DIR}/pregel_pagerank_local_west0497_${BACKEND}_${P}_${T}.log; then - echo "Verification FAILED" - echo "Test FAILED" - else - echo "Test OK" - fi - else - echo "Test DISABLED: west0497.mtx was not found. To enable, please provide ${INPUT_DIR}/west0497.mtx" - fi - echo " " - - echo ">>> [x] [ ] Testing the Pregel connected components algorithm. Verifies" - echo " using a simple regression test in number of rounds required." - if [ -f ${INPUT_DIR}/west0497.mtx ]; then - $runner ${TEST_BIN_DIR}/pregel_connected_components_${BACKEND} ${INPUT_DIR}/west0497.mtx direct 1 1 &> ${TEST_OUT_DIR}/pregel_connected_components_west0497_${BACKEND}_${P}_${T}.log - head -1 ${TEST_OUT_DIR}/pregel_connected_components_west0497_${BACKEND}_${P}_${T}.log - if ! grep -q 'Test OK' ${TEST_OUT_DIR}/pregel_connected_components_west0497_${BACKEND}_${P}_${T}.log; then - echo "Test FAILED" - elif ! grep -q '11 iterations to converge' ${TEST_OUT_DIR}/pregel_connected_components_west0497_${BACKEND}_${P}_${T}.log; then - echo "Verification FAILED" - echo "Test FAILED" - else - echo "Test OK" - fi - else - echo "Test DISABLED: west0497.mtx was not found. 
To enable, please provide ${INPUT_DIR}/west0497.mtx" - fi - echo " " - - if [ "$BACKEND" = "bsp1d" ] || [ "$BACKEND" = "hybrid" ]; then - echo "Additional standardised smoke tests not yet supported for the ${BACKEND} backend" - echo - continue - fi - - echo ">>> [x] [ ] Testing the k-means algorithm" - $runner ${TEST_BIN_DIR}/kmeans_${BACKEND} &> ${TEST_OUT_DIR}/kmeans_${BACKEND}_${P}_${T}.log - head -1 ${TEST_OUT_DIR}/kmeans_${BACKEND}_${P}_${T}.log - tail -1 ${TEST_OUT_DIR}/kmeans_${BACKEND}_${P}_${T}.log - echo " " - - if [ "$BACKEND" = "reference_omp" ] || [ "$BACKEND" = "reference" ]; then - echo "Non-standard reference- and reference-omp specific smoke tests:" - echo " " - echo ">>> [x] [ ] Tests grb::Launcher on a K-core decomposition on the dataset" - echo " EPA.mtx. The launcher is used in automatic mode and the I/O" - echo " mode is sequential. The Launcher::exec called is with struct" - echo " I/O with broadcast true. This launches the k-core variant" - echo " that employs critical sections. This is a non-ALP-compliant" - echo " implementation that furthermore assumes an OpenMP-based" - echo " backend." - echo "Functional test executable: ${TEST_BIN_DIR}/kcore_decomposition_critical_${BACKEND}" - if [ -f ${INPUT_DIR}/EPA.mtx ]; then - $runner ${TEST_BIN_DIR}/kcore_decomposition_critical_${BACKEND} ${INPUT_DIR}/EPA.mtx direct 1 1 verification ${OUTPUT_VERIFICATION_DIR}/kcore_decomposition_eda_ref &> ${TEST_OUT_DIR}/kcore_decomposition_critical_${BACKEND}_EPA_${P}_${T}.log - grep 'Test OK' ${TEST_OUT_DIR}/kcore_decomposition_critical_${BACKEND}_EPA_${P}_${T}.log || printf 'Test FAILED.\n' - else - echo "Test DISABLED; dataset not found. Provide EPA.mtx in the ./datasets/ directory to enable." - fi - echo " " - fi - - done - done - - if [ "$BACKEND" = "bsp1d" ]; then - echo "Non-standard BSP1D-specific smoke tests:" - echo " " - - echo ">>> [x] [ ] Tests a manual call to bsp_hook via LPF. 
This is a smoke" - echo " test that makes sure the manual launcher is operational" - echo " via a simple \`\`hello world' test." - echo "Functional test executable: ${TEST_BIN_DIR}/manual_hook_hw. Script hardcodes test for four" - echo "separate processes running on and connecting to localhost on port 77770." - bash -c "${MANUALRUN} ${TEST_BIN_DIR}/manual_hook_hw localhost 0 4 77770 &> ${TEST_OUT_DIR}/manual_hook_hw.0 & \ - ${MANUALRUN} ${TEST_BIN_DIR}/manual_hook_hw localhost 3 4 77770 &> ${TEST_OUT_DIR}/manual_hook_hw.3 & \ - ${MANUALRUN} ${TEST_BIN_DIR}/manual_hook_hw localhost 1 4 77770 &> ${TEST_OUT_DIR}/manual_hook_hw.1 & \ - ${MANUALRUN} ${TEST_BIN_DIR}/manual_hook_hw localhost 2 4 77770 &> ${TEST_OUT_DIR}/manual_hook_hw.2 & \ - wait" - (grep -q 'Test OK' ${TEST_OUT_DIR}/manual_hook_hw.1 && grep -q 'Test OK' ${TEST_OUT_DIR}/manual_hook_hw.2 && grep -q 'Test OK' ${TEST_OUT_DIR}/manual_hook_hw.3 && grep -q 'Test OK' ${TEST_OUT_DIR}/manual_hook_hw.0 && printf "Test OK.\n\n") || (printf "Test FAILED.\n\n") - - echo ">>> [x] [ ] Uses the same infrastructure to initialise the BSP1D" - echo " implementation of the GraphBLAS and test the grb::set" - echo " function over an array of doubles of 100 elements" - echo " " - echo "Functional test executable: ${TEST_BIN_DIR}/manual_hook_grb_set. Script hardcodes test for" - echo "three separate processes running on and connecting to localhost on port 77770." 
- bash -c "${MANUALRUN} ${TEST_BIN_DIR}/manual_hook_grb_set localhost 0 3 77770 &> ${TEST_OUT_DIR}/manual_hook_grb_set.0 & \ - ${MANUALRUN} ${TEST_BIN_DIR}/manual_hook_grb_set localhost 1 3 77770 &> ${TEST_OUT_DIR}/manual_hook_grb_set.1 & \ - ${MANUALRUN} ${TEST_BIN_DIR}/manual_hook_grb_set localhost 2 3 77770 &> ${TEST_OUT_DIR}/manual_hook_grb_set.2 & \ - wait" - (grep -q 'Test OK' ${TEST_OUT_DIR}/manual_hook_grb_set.1 && grep -q 'Test OK' ${TEST_OUT_DIR}/manual_hook_grb_set.2 && grep -q 'Test OK' ${TEST_OUT_DIR}/manual_hook_grb_set.0 && printf "Test OK.\n\n" ) || (printf "Test FAILED.\n\n") - - echo ">>> [x] [ ] Uses the same infrastructure to initialise the BSP1D" - echo " implementation of the GraphBLAS and test the grb::reduce" - echo " function over an array of doubles" - echo " " - echo "Functional test executable: ${TEST_BIN_DIR}/manual_hook_grb_reduce. Script hardcodes test for" - echo "four separate processes running on and connecting to localhost on port 77770." - bash -c "${MANUALRUN} ${TEST_BIN_DIR}/manual_hook_grb_reduce localhost 0 4 77770 &> ${TEST_OUT_DIR}/manual_hook_grb_reduce.0 & \ - ${MANUALRUN} ${TEST_BIN_DIR}/manual_hook_grb_reduce localhost 3 4 77770 &> ${TEST_OUT_DIR}/manual_hook_grb_reduce.3 & \ - ${MANUALRUN} ${TEST_BIN_DIR}/manual_hook_grb_reduce localhost 1 4 77770 &> ${TEST_OUT_DIR}/manual_hook_grb_reduce.1 & \ - ${MANUALRUN} ${TEST_BIN_DIR}/manual_hook_grb_reduce localhost 2 4 77770 &> ${TEST_OUT_DIR}/manual_hook_grb_reduce.2 & \ - wait" - (grep -q 'Test OK' ${TEST_OUT_DIR}/manual_hook_grb_reduce.1 && grep -q 'Test OK' ${TEST_OUT_DIR}/manual_hook_grb_reduce.2 && grep -q 'Test OK' ${TEST_OUT_DIR}/manual_hook_grb_reduce.3 && grep -q 'Test OK' ${TEST_OUT_DIR}/manual_hook_grb_reduce.0 && printf "Test OK.\n\n") || (printf "Test FAILED.\n\n") - - echo ">>> [x] [ ] Uses the same infrastructure to initialise the BSP1D" - echo " implementation of the GraphBLAS and test the grb::set" - echo " function over an array of ints of 100 000 elements" 
- echo " " - echo "Functional test executable: ${TEST_BIN_DIR}/manual_hook_grb_dot. Script hardcodes test for" - echo "four separate processes running on and connecting to localhost on port 77770." - bash -c "${MANUALRUN} ${TEST_BIN_DIR}/manual_hook_grb_dot localhost 0 4 77770 &> ${TEST_OUT_DIR}/manual_hook_grb_dot.0 & \ - ${MANUALRUN} ${TEST_BIN_DIR}/manual_hook_grb_dot localhost 3 4 77770 &> ${TEST_OUT_DIR}/manual_hook_grb_dot.3 & \ - ${MANUALRUN} ${TEST_BIN_DIR}/manual_hook_grb_dot localhost 1 4 77770 &> ${TEST_OUT_DIR}/manual_hook_grb_dot.1 & \ - ${MANUALRUN} ${TEST_BIN_DIR}/manual_hook_grb_dot localhost 2 4 77770 &> ${TEST_OUT_DIR}/manual_hook_grb_dot.2 & \ - wait" - (grep -q 'Test OK' ${TEST_OUT_DIR}/manual_hook_grb_dot.1 && grep -q 'Test OK' ${TEST_OUT_DIR}/manual_hook_grb_dot.2 && grep -q 'Test OK' ${TEST_OUT_DIR}/manual_hook_grb_dot.3 && grep -q 'Test OK' ${TEST_OUT_DIR}/manual_hook_grb_dot.0 && printf "Test OK.\n\n") || (printf "Test FAILED.\n\n") - - echo ">>> [x] [ ] Uses the same infrastructure to initialise the BSP1D" - echo " implementation of the GraphBLAS and test blas0 grb::collectives" - echo " " - echo "Functional test executable: ${TEST_BIN_DIR}/manual_hook_grb_collectives_blas0. Script hardcodes test for" - echo "four separate processes running on and connecting to localhost on port 77770." 
- bash -c "${MANUALRUN} ${TEST_BIN_DIR}/manual_hook_grb_collectives_blas0 localhost 0 4 77770 &> ${TEST_OUT_DIR}/manual_hook_grb_collectives_blas0.0 & \ - ${MANUALRUN} ${TEST_BIN_DIR}/manual_hook_grb_collectives_blas0 localhost 3 4 77770 &> ${TEST_OUT_DIR}/manual_hook_grb_collectives_blas0.3 & \ - ${MANUALRUN} ${TEST_BIN_DIR}/manual_hook_grb_collectives_blas0 localhost 1 4 77770 &> ${TEST_OUT_DIR}/manual_hook_grb_collectives_blas0.1 & \ - ${MANUALRUN} ${TEST_BIN_DIR}/manual_hook_grb_collectives_blas0 localhost 2 4 77770 &> ${TEST_OUT_DIR}/manual_hook_grb_collectives_blas0.2 & \ - wait" - (grep -q 'Test OK' ${TEST_OUT_DIR}/manual_hook_grb_collectives_blas0.1 && grep -q 'Test OK' ${TEST_OUT_DIR}/manual_hook_grb_collectives_blas0.2 && grep -q 'Test OK' ${TEST_OUT_DIR}/manual_hook_grb_collectives_blas0.3 && grep -q 'Test OK' ${TEST_OUT_DIR}/manual_hook_grb_collectives_blas0.0 && printf "Test OK.\n\n") || (printf "Test FAILED.\n\n") - - echo ">>> [x] [ ] Uses the same infrastructure to initialise the BSP1D" - echo " implementation of the GraphBLAS and test blas1 grb::collectives" - echo " " - echo "Functional test executable: ${TEST_BIN_DIR}/manual_hook_grb_collectives_blas1. Script hardcodes test for" - echo "four separate processes running on and connecting to localhost on port 77770." 
- bash -c "${MANUALRUN} ${TEST_BIN_DIR}/manual_hook_grb_collectives_blas1 localhost 0 4 77770 &> ${TEST_OUT_DIR}/manual_hook_grb_collectives_blas1.0 & \ - ${MANUALRUN} ${TEST_BIN_DIR}/manual_hook_grb_collectives_blas1 localhost 3 4 77770 &> ${TEST_OUT_DIR}/manual_hook_grb_collectives_blas1.3 & \ - ${MANUALRUN} ${TEST_BIN_DIR}/manual_hook_grb_collectives_blas1 localhost 1 4 77770 &> ${TEST_OUT_DIR}/manual_hook_grb_collectives_blas1.1 & \ - ${MANUALRUN} ${TEST_BIN_DIR}/manual_hook_grb_collectives_blas1 localhost 2 4 77770 &> ${TEST_OUT_DIR}/manual_hook_grb_collectives_blas1.2 & \ - wait" - (grep -q 'Test OK' ${TEST_OUT_DIR}/manual_hook_grb_collectives_blas1.1 && grep -q 'Test OK' ${TEST_OUT_DIR}/manual_hook_grb_collectives_blas1.2 && grep -q 'Test OK' ${TEST_OUT_DIR}/manual_hook_grb_collectives_blas1.3 && grep -q 'Test OK' ${TEST_OUT_DIR}/manual_hook_grb_collectives_blas1.0 && printf "Test OK.\n\n") || (printf "Test FAILED.\n\n") - - echo ">>> [x] [ ] Uses the same infrastructure to initialise the BSP1D" - echo " implementation of the GraphBLAS and test blas1 grb::collectives" - echo " " - echo "Functional test executable: ${TEST_BIN_DIR}/manual_hook_grb_collectives_blas1_raw. Script hardcodes test for" - echo "four separate processes running on and connecting to localhost on port 77770." 
- bash -c "${MANUALRUN} ${TEST_BIN_DIR}/manual_hook_grb_collectives_blas1_raw localhost 0 4 77770 &> ${TEST_OUT_DIR}/manual_hook_grb_collectives_blas1_raw.0 & \ - ${MANUALRUN} ${TEST_BIN_DIR}/manual_hook_grb_collectives_blas1_raw localhost 3 4 77770 &> ${TEST_OUT_DIR}/manual_hook_grb_collectives_blas1_raw.3 & \ - ${MANUALRUN} ${TEST_BIN_DIR}/manual_hook_grb_collectives_blas1_raw localhost 1 4 77770 &> ${TEST_OUT_DIR}/manual_hook_grb_collectives_blas1_raw.1 & \ - ${MANUALRUN} ${TEST_BIN_DIR}/manual_hook_grb_collectives_blas1_raw localhost 2 4 77770 &> ${TEST_OUT_DIR}/manual_hook_grb_collectives_blas1_raw.2 & \ - wait" - (grep -q 'Test OK' ${TEST_OUT_DIR}/manual_hook_grb_collectives_blas1_raw.1 && grep -q 'Test OK' ${TEST_OUT_DIR}/manual_hook_grb_collectives_blas1_raw.2 && grep -q 'Test OK' ${TEST_OUT_DIR}/manual_hook_grb_collectives_blas1_raw.3 && grep -q 'Test OK' ${TEST_OUT_DIR}/manual_hook_grb_collectives_blas1_raw.0 && printf "Test OK.\n\n") || (printf "Test FAILED.\n\n") - - echo ">>> [x] [ ] Tests manually hooked k-nearest-neighbourhood" - echo " calculation on a tiny graph, using 4 processes" - echo "Functional test executable: ${TEST_BIN_DIR}/manual_hook_small_knn. Script hardcodes test for four" - echo "separate processes running on and connecting to localhost on port 77770." 
- bash -c "${MANUALRUN} ${TEST_BIN_DIR}/manual_hook_small_knn localhost 0 4 77770 &> ${TEST_OUT_DIR}/manual_hook_small_knn.0 & \ - ${MANUALRUN} ${TEST_BIN_DIR}/manual_hook_small_knn localhost 3 4 77770 &> ${TEST_OUT_DIR}/manual_hook_small_knn.3 & \ - ${MANUALRUN} ${TEST_BIN_DIR}/manual_hook_small_knn localhost 1 4 77770 &> ${TEST_OUT_DIR}/manual_hook_small_knn.1 & \ - ${MANUALRUN} ${TEST_BIN_DIR}/manual_hook_small_knn localhost 2 4 77770 &> ${TEST_OUT_DIR}/manual_hook_small_knn.2 & \ - wait" - (grep -q 'Test OK' ${TEST_OUT_DIR}/manual_hook_small_knn.1 && grep -q 'Test OK' ${TEST_OUT_DIR}/manual_hook_small_knn.2 && grep -q 'Test OK' ${TEST_OUT_DIR}/manual_hook_small_knn.3 && grep -q 'Test OK' ${TEST_OUT_DIR}/manual_hook_small_knn.0 && printf "Test OK.\n\n") || (printf "Test FAILED.\n\n") - - echo ">>> [x] [ ] Tests grb::Launcher on a PageRank on a 1M x 1M matrix" - echo " with 1M+1 nonzeroes. The matrix corresponds to a cycle" - echo " path through all 1M vertices, plus one edge from vertex" - echo " 1M-3 to vertex 1M-1. The launcher is used in FROM_MPI" - echo " mode, IO is sequential, number of processes is 4, and" - echo " the backend implementation is BSP1D. Launcher::exec is" - echo " used with statically sized input and statically sized" - echo " output." - echo "Functional test executable: ${TEST_BIN_DIR}/from_mpi_launch_simple_pagerank" - bash -c "(set -o pipefail && ${LPFRUN} -np 4 ${TEST_BIN_DIR}/from_mpi_launch_simple_pagerank &> ${TEST_OUT_DIR}/from_mpi_launch_simple_pagerank && printf 'Test OK.\n\n') || (printf 'Test FAILED.\n\n')" - - echo ">>> [x] [ ] Tests grb::Launcher on a PageRank on a 1M x 1M matrix" - echo " with 1M+1 nonzeroes. The matrix corresponds to a cycle" - echo " path through all 1M vertices, plus one edge from vertex" - echo " 1M-3 to vertex 1M-1. The launcher is used in FROM_MPI" - echo " mode, IO is sequential, number of processes is 5, and" - echo " the backend implementation is BSP1D. 
Launcher::exec is" - echo " used with statically sized input and statically sized" - echo " output. The entire test is repeated three times, to" - echo " test re-entrance capabilities of the 1) Launcher" - echo " constructor, 2) Launcher destructor, and 3) exec" - echo " function." - echo "Functional test executable: ${TEST_BIN_DIR}/from_mpi_launch_simple_pagerank_multiple_entry" - bash -c "(set -o pipefail && ${LPFRUN} -np 5 ${TEST_BIN_DIR}/from_mpi_launch_simple_pagerank_multiple_entry &> ${TEST_OUT_DIR}/from_mpi_launch_simple_pagerank_multiple_entry && printf 'Test OK.\n\n') || (printf 'Test FAILED.\n\n')" - - echo ">>> [x] [ ] Tests grb::Launcher on a PageRank on a 1M x 1M matrix" - echo " with 1M+1 nonzeroes. The matrix corresponds to a cycle" - echo " path through all 1M vertices, plus one edge from vertex" - echo " 1M-3 to vertex 1M-1. The launcher is used in FROM_MPI" - echo " mode, IO is sequential, number of processes is 3, and" - echo " the backend implementation is BSP1D. Launcher::exec is" - echo " used with variably sized input and statically sized" - echo " output containing a PinnedVector instance. The input" - echo " at PID 0 is broadcasted to all other processes. The" - echo " entire test is repeated three times, to test re-" - echo " entrance capabilities of the 1) Launcher constructor," - echo " 2) Launcher destructor, and 3) exec function." - echo "Functional test executable: ${TEST_BIN_DIR}/from_mpi_launch_simple_pagerank_broadcast_pinning_multiple_entry" - bash -c "(set -o pipefail && ${LPFRUN} -np 3 ${TEST_BIN_DIR}/from_mpi_launch_simple_pagerank_broadcast_pinning_multiple_entry &> ${TEST_OUT_DIR}/from_mpi_launch_simple_pagerank_broadcast_pinning_multiple_entry && printf 'Test OK.\n\n') || (printf 'Test FAILED.\n\n')" - - echo ">>> [x] [ ] Tests grb::Launcher on a PageRank on a 1M x 1M matrix" - echo " with 1M+1 nonzeroes. 
The matrix corresponds to a cycle" - echo " path through all 1M vertices, plus one edge from vertex" - echo " 1M-3 to vertex 1M-1. The launcher is used in FROM_MPI" - echo " mode, IO is sequential, number of processes is 7, and" - echo " the backend implementation is BSP1D. Launcher::exec is" - echo " used with variably sized input and statically sized" - echo " output. The input at PID 0 is broadcasted to all other" - echo " processes. The entire test is repeated three times, to" - echo " test re-entrance capabilities of the 1) Launcher" - echo " constructor, 2) Launcher destructor, and 3) exec" - echo " function." - echo "Functional test executable: ${TEST_BIN_DIR}/from_mpi_launch_simple_pagerank_broadcast_multiple_entry" - bash -c "(set -o pipefail && ${LPFRUN} -np 7 ${TEST_BIN_DIR}/from_mpi_launch_simple_pagerank_broadcast_multiple_entry &> ${TEST_OUT_DIR}/from_mpi_launch_simple_pagerank_broadcast_multiple_entry && printf 'Test OK.\n\n') || (printf 'Test FAILED.\n\n')" - - echo ">>> [x] [ ] Tests grb::Launcher on a PageRank on a 1M x 1M matrix" - echo " with 1M+1 nonzeroes. The matrix corresponds to a cycle" - echo " path through all 1M vertices, plus one edge from vertex" - echo " 1M-3 to vertex 1M-1. The launcher is used in FROM_MPI" - echo " mode, IO is sequential, number of processes is 6, and" - echo " the backend implementation is BSP1D. Launcher::exec is" - echo " used with variably sized input and statically sized" - echo " output." - echo "Functional test executable: ${TEST_BIN_DIR}/from_mpi_launch_simple_pagerank_broadcast" - bash -c "(set -o pipefail && ${LPFRUN} -np 6 ${TEST_BIN_DIR}/from_mpi_launch_simple_pagerank_broadcast &> ${TEST_OUT_DIR}/from_mpi_launch_simple_pagerank_broadcast && printf 'Test OK.\n\n') || (printf 'Test FAILED.\n\n')" - - fi -done - -echo "*****************************************************************************************" -echo "All smoke tests done." 
-echo " " - diff --git a/tests/summarise.sh b/tests/summarise.sh deleted file mode 100755 index fa7d7b942..000000000 --- a/tests/summarise.sh +++ /dev/null @@ -1,54 +0,0 @@ -#!/bin/bash - -if [ $# -lt 1 ]; then - echo "Usage: $0 ..." - echo " - test log: mandatory log of unittests, smoketests, or perftests output" - echo " - output files: mandatory output files to be reported if log was OK" - echo "The number of mandatory output files is optional and can be zero" - exit 1 -fi - -if [ ! -f "$1" ]; then - echo "Given log file (${1}) was not found" - exit 50 -fi - -NUM_OK=`grep -i "Test OK" "$1" | wc -l` -NUM_DISABLED=`grep -i "Test DISABLED" "$1" | wc -l` -NUM_CDISABLED=`grep -i "Tests DISABLED" "$1" | wc -l` -NUM_FAILED=`grep -i "Test FAILED" "$1" | wc -l` - -echo "Summary of $1:" -printf " %4s PASSED\n" ${NUM_OK} -printf " %4s SKIPPED\n" ${NUM_DISABLED} -printf " %4s FAILED\n" ${NUM_FAILED} -printf " %4s TEST CATEGORIES SKIPPED\n" ${NUM_CDISABLED} - -if [ ${NUM_FAILED} -gt 0 ]; then - printf "\nOne or more failures detected. Log contents:\n\n" - cat "$1" - exit 100 -fi - -if [ ${NUM_OK} -eq 0 ]; then - printf "\nZero tests succeeded. Log contents:\n\n" - cat "$1" - exit 200 -fi - -shift - -for OUTPUT in "$@" -do - if [ ! -f "${OUTPUT}" ]; then - echo "Mandatory output file (${OUTPUT}) was not found" - cat "$1" - exit 250 - fi - printf "\nContents of ${OUTPUT}:" - printf "\n------------------------------------\n\n" - cat ${OUTPUT} -done - -exit 0 - diff --git a/tests/unit/CMakeLists.txt b/tests/unit/CMakeLists.txt index fe6ddee06..d4cc5bbd2 100644 --- a/tests/unit/CMakeLists.txt +++ b/tests/unit/CMakeLists.txt @@ -14,121 +14,250 @@ # limitations under the License. 
# +assert_valid_variables( DATASETS_DIR MAX_THREADS ) # write here the name of the category -# add_grb_executables and add_grb_executable_custom need this information -set( TEST_CATEGORY "unit" ) +# add_grb_executables and friends need this information + +set( shmem_test_threads "1;2" ) +if( "${MAX_THREADS}" GREATER 2 ) + list( APPEND test_threads "${MAX_THREADS}" ) +endif() + +set( hybrid_threads 2 ) +math( EXPR _hybrid_max_threads "${MAX_THREADS}/7" OUTPUT_FORMAT DECIMAL ) +if( "${_hybrid_max_threads}" GREATER 2 ) + list( APPEND hybrid_threads "${_hybrid_max_threads}" ) +endif() + +# Setup the environment for the tests in this directory +setup_grb_tests_environment( + CATEGORY "unit" + + BSP1D_PROCESSES "1;2;16" + HYBRID_PROCESSES "2;7" + + REFERENCE_OMP_THREADS "${shmem_test_threads}" + NONBLOCKING_THREADS "${shmem_test_threads}" + HYBRID_THREADS "${hybrid_threads}" +) + +set( OUTPUT_VERIFICATION_DIR "${CMAKE_CURRENT_SOURCE_DIR}/output_verification" ) # list tests, without the CATEROR[IES,Y] keyword (it's now passed via TEST_CATEGORY) add_grb_executables( equals equals.cpp - BACKENDS reference NO_BACKEND_NAME + BACKENDS reference ) +add_grb_tests( equals equals BACKENDS reference Test_OK_SUCCESS ) add_grb_executables( add15d add15d.cpp - BACKENDS reference NO_BACKEND_NAME + BACKENDS reference ) +add_grb_tests( add15d add15d BACKENDS reference Test_OK_SUCCESS ) add_grb_executables( add15m add15m.cpp - BACKENDS reference NO_BACKEND_NAME + BACKENDS reference ) +add_grb_tests( add15m add15m BACKENDS reference Test_OK_SUCCESS ) add_grb_executables( argmax argmax.cpp BACKENDS reference reference_omp bsp1d hybrid hyperdags nonblocking ) +add_grb_tests( argmax argmax Test_OK_SUCCESS + BACKENDS reference reference_omp bsp1d hybrid hyperdags nonblocking +) add_grb_executables( argmin argmin.cpp BACKENDS reference reference_omp bsp1d hybrid hyperdags nonblocking ) +add_grb_tests( argmin argmin Test_OK_SUCCESS + BACKENDS reference reference_omp bsp1d hybrid hyperdags 
nonblocking +) add_grb_executables( buildVector buildVector.cpp BACKENDS reference reference_omp bsp1d hybrid hyperdags nonblocking ) +add_grb_tests( buildVector buildVector Test_OK_SUCCESS + BACKENDS reference reference_omp bsp1d hybrid hyperdags nonblocking +) add_grb_executables( clearMatrix clearMatrix.cpp BACKENDS reference reference_omp bsp1d hybrid hyperdags nonblocking ) +add_grb_tests( clearMatrix clearMatrix ARGUMENTS 10000000 Test_OK_SUCCESS + BACKENDS reference reference_omp bsp1d hybrid hyperdags nonblocking +) add_grb_executables( compareParserTest parser.cpp - BACKENDS reference NO_BACKEND_NAME + BACKENDS reference COMPILE_DEFINITIONS COMPARE ) +add_grb_tests( compareParserTest compareParserTest BACKENDS reference Test_OK_SUCCESS + ARGUMENTS ${DATASETS_DIR}/cit-HepTh.txt + REQUIRED_FILES ${DATASETS_DIR}/cit-HepTh.txt +) add_grb_executables( copyAndAssignVectorIterator copyAndAssignVectorIterator.cpp BACKENDS reference reference_omp bsp1d hybrid hyperdags nonblocking ) +add_grb_tests( copyAndAssignVectorIterator copyAndAssignVectorIterator + ARGUMENTS 10000000 Test_OK_SUCCESS + BACKENDS reference reference_omp bsp1d hybrid hyperdags nonblocking +) add_grb_executables( copyVector copyVector.cpp BACKENDS reference reference_omp bsp1d hybrid hyperdags nonblocking ) +add_grb_tests( copyVector copyVector ARGUMENTS 100 Test_OK_SUCCESS + BACKENDS reference reference_omp bsp1d hybrid hyperdags nonblocking +) +add_grb_tests( copyVector_large copyVector ARGUMENTS 10000000 Test_OK_SUCCESS + BACKENDS reference reference_omp bsp1d hybrid hyperdags nonblocking +) -add_grb_executables( distribution_bsp1d distribution_bsp1d.cpp - BACKENDS reference NO_BACKEND_NAME +add_grb_executables( distribution_indices distribution_bsp1d.cpp + BACKENDS bsp1d +) +add_grb_tests( distribution_indices distribution_indices + BACKENDS bsp1d Test_OK_SUCCESS ) add_grb_executables( distribution_matrix_bsp1d distribution_matrix_bsp1d.cpp - BACKENDS bsp1d NO_BACKEND_NAME + BACKENDS bsp1d 
ADDITIONAL_LINK_LIBRARIES test_utils ) add_grb_executables( distribution distribution.cpp - BACKENDS bsp1d NO_BACKEND_NAME + BACKENDS bsp1d +) + +add_grb_executables( distribution_length distribution.cpp + BACKENDS bsp1d +) +add_grb_tests( distribution_length distribution_length + BACKENDS bsp1d Test_OK_SUCCESS PROCESSES 1 ) add_grb_executables( id id.cpp BACKENDS reference reference_omp bsp1d hybrid hyperdags nonblocking ) +add_grb_tests( id id Test_OK_SUCCESS + BACKENDS reference reference_omp bsp1d hybrid hyperdags nonblocking +) add_grb_executables( id_distributed id_distributed.cpp BACKENDS bsp1d hybrid ) +add_grb_tests( id_distributed id_distributed Test_OK_SUCCESS + BACKENDS bsp1d hybrid +) add_grb_executables( dot dot.cpp BACKENDS reference reference_omp bsp1d hybrid hyperdags nonblocking ) +add_grb_tests( dot dot ARGUMENTS 1874 Test_OK_SUCCESS + BACKENDS reference reference_omp bsp1d hybrid hyperdags nonblocking +) +add_grb_tests( dot_large dot ARGUMENTS 10000000 Test_OK_SUCCESS + BACKENDS reference reference_omp bsp1d hybrid hyperdags nonblocking +) add_grb_executables( emptyVector emptyVector.cpp BACKENDS reference reference_omp bsp1d hybrid hyperdags nonblocking ) +add_grb_tests( emptyVector emptyVector Test_OK_SUCCESS + BACKENDS reference reference_omp bsp1d hybrid hyperdags nonblocking +) add_grb_executables( ewiseapply ewiseapply.cpp BACKENDS reference reference_omp bsp1d hybrid hyperdags nonblocking ) +add_grb_tests( ewiseapply_small ewiseapply ARGUMENTS 14 Test_OK_SUCCESS + BACKENDS reference reference_omp bsp1d hybrid hyperdags nonblocking +) +add_grb_tests( ewiseapply ewiseapply ARGUMENTS 100 Test_OK_SUCCESS + BACKENDS reference reference_omp bsp1d hybrid hyperdags nonblocking +) +add_grb_tests( ewiseapply_large ewiseapply ARGUMENTS 10000000 Test_OK_SUCCESS + BACKENDS reference reference_omp bsp1d hybrid hyperdags nonblocking +) add_grb_executables( eWiseMatrix eWiseMatrix.cpp BACKENDS reference reference_omp bsp1d hybrid hyperdags nonblocking ) 
+add_grb_tests( eWiseMatrix eWiseMatrix Test_OK_SUCCESS + BACKENDS reference reference_omp bsp1d hybrid hyperdags nonblocking +) add_grb_executables( collectives_blas0 auto_launcher.cpp launcher/collectives_blas0.cpp BACKENDS reference reference_omp bsp1d hybrid hyperdags nonblocking ) +add_grb_tests( collectives_blas0 collectives_blas0 Test_OK_SUCCESS + BACKENDS reference reference_omp hyperdags nonblocking ARGUMENTS 1 +) +foreach( P 1 2 16 ) + add_grb_tests( collectives_blas0 collectives_blas0 Test_OK_SUCCESS + BACKENDS bsp1d hybrid + PROCESSES ${P} ARGUMENTS ${P} + ) +endforeach() + add_grb_executables( fold_to_scalar auto_launcher.cpp launcher/reduce.cpp BACKENDS reference reference_omp bsp1d hybrid hyperdags nonblocking ) +add_grb_tests( fold_to_scalar fold_to_scalar Test_OK_SUCCESS + BACKENDS reference reference_omp hyperdags nonblocking ARGUMENTS 1 +) +foreach( P 1 2 16 ) + add_grb_tests( fold_to_scalar fold_to_scalar Test_OK_SUCCESS + BACKENDS bsp1d hybrid + PROCESSES ${P} ARGUMENTS ${P} + ) +endforeach() add_grb_executables( fork_launcher fork_launcher.cpp - BACKENDS bsp1d NO_BACKEND_NAME + BACKENDS bsp1d ) -add_grb_executable_custom( hpparser ${ALP_UTILS_SRC_PATH}/hpparser.c - LINK_LIBRARIES backend_headers_nodefs OpenMP::OpenMP_C +add_grb_executables( hpparser ${ALP_UTILS_SRC_PATH}/hpparser.c + BACKENDS none + ADDITIONAL_LINK_LIBRARIES backend_headers_nodefs OpenMP::OpenMP_C COMPILE_DEFINITIONS TEST_HPPARSER _GNU_SOURCE _DEBUG ) +set( hpparser_golden_out "[ 0, *] nrow = 59, ncol = 59, nnnz = 163 +[ 0, *] offb = 564, fsiz = 1494, offe = 1493 +[ *, *] ntot = 163" ) +add_grb_tests( hpparser hpparser + BACKENDS none + REQUIRED_FILES ${DATASETS_DIR}/dwt_59.mtx + ARGUMENTS 1 ${MAX_THREADS} 131072 8388608 ${DATASETS_DIR}/dwt_59.mtx 1 + OUTPUT_VALIDATE diff "<(echo \"${hpparser_golden_out}\")" @@TEST_OUTPUT_FILE@@ +) add_grb_executables( masked_mxv masked_mxv.cpp BACKENDS reference reference_omp hyperdags nonblocking ) +add_grb_tests( masked_mxv masked_mxv 
Test_OK_SUCCESS + BACKENDS reference reference_omp hyperdags nonblocking +) add_grb_executables( masked_vxm masked_vxm.cpp BACKENDS reference reference_omp hyperdags nonblocking ) +add_grb_tests( masked_vxm masked_vxm Test_OK_SUCCESS + BACKENDS reference reference_omp hyperdags nonblocking +) add_grb_executables( matrixIterator matrixIterator.cpp BACKENDS reference reference_omp bsp1d hybrid hyperdags nonblocking ADDITIONAL_LINK_LIBRARIES test_utils_headers ) +add_grb_tests( matrixIterator matrixIterator ARGUMENTS 10000000 Test_OK_SUCCESS + BACKENDS reference reference_omp bsp1d hybrid hyperdags nonblocking +) add_grb_executables( fold_matrix_to_scalar fold_matrix_to_scalar.cpp BACKENDS reference reference_omp bsp1d hybrid hyperdags nonblocking @@ -137,126 +266,277 @@ add_grb_executables( fold_matrix_to_scalar fold_matrix_to_scalar.cpp add_grb_executables( doubleAssign doubleAssign.cpp BACKENDS reference reference_omp bsp1d hybrid hyperdags nonblocking ) +add_grb_tests( doubleAssign doubleAssign ARGUMENTS 1337 Test_OK_SUCCESS + BACKENDS reference reference_omp bsp1d hybrid hyperdags nonblocking +) add_grb_executables( matrixSet matrixSet.cpp BACKENDS reference reference_omp bsp1d hybrid hyperdags nonblocking ) +add_grb_tests( matrixSet matrixSet Test_OK_SUCCESS + BACKENDS reference reference_omp bsp1d hybrid hyperdags nonblocking +) add_grb_executables( moveVector moveVector.cpp BACKENDS reference reference_omp hyperdags bsp1d hybrid nonblocking ) +add_grb_tests( moveVector moveVector ARGUMENTS 100 Test_OK_SUCCESS + BACKENDS reference reference_omp bsp1d hybrid hyperdags nonblocking +) add_grb_executables( mul15i mul15i.cpp - BACKENDS reference NO_BACKEND_NAME + BACKENDS reference ) +add_grb_tests( mul15i mul15i BACKENDS reference Test_OK_SUCCESS ) add_grb_executables( mul15m mul15m.cpp - BACKENDS reference NO_BACKEND_NAME + BACKENDS reference ) +add_grb_tests( mul15m mul15m BACKENDS reference Test_OK_SUCCESS ) add_grb_executables( eWiseMul eWiseMul.cpp BACKENDS 
reference reference_omp bsp1d hybrid hyperdags nonblocking ) +add_grb_tests( eWiseMul eWiseMul ARGUMENTS 100 Test_OK_SUCCESS + BACKENDS reference reference_omp bsp1d hybrid hyperdags nonblocking +) +add_grb_tests( eWiseMul_large eWiseMul ARGUMENTS 100002 Test_OK_SUCCESS + BACKENDS reference reference_omp bsp1d hybrid hyperdags nonblocking +) add_grb_executables( factories factories.cpp BACKENDS reference reference_omp bsp1d hybrid hyperdags nonblocking ) +add_grb_tests( factories factories Test_OK_SUCCESS + BACKENDS reference reference_omp bsp1d hybrid hyperdags nonblocking +) add_grb_executables( muladd muladd.cpp BACKENDS reference reference_omp bsp1d hybrid hyperdags nonblocking ) +add_grb_tests( muladd_large muladd ARGUMENTS 10000000 Test_OK_SUCCESS + BACKENDS reference reference_omp bsp1d hybrid hyperdags nonblocking +) add_grb_executables( mxm mxm.cpp BACKENDS reference reference_omp hyperdags nonblocking #bsp1d hybrid ADDITIONAL_LINK_LIBRARIES test_utils ) +add_grb_tests( mxm mxm Test_OK_SUCCESS + BACKENDS reference reference_omp hyperdags nonblocking +) add_grb_executables( parserTest utilParserTest.cpp - BACKENDS reference NO_BACKEND_NAME + BACKENDS reference COMPILE_DEFINITIONS COMPARE ) +add_grb_tests( parserTest parserTest + BACKENDS reference Test_OK_SUCCESS + REQUIRED_FILES ${DATASETS_DIR}/west0497.mtx + ARGUMENTS ${DATASETS_DIR}/west0497.mtx +) add_grb_executables( iteratorFilter iteratorFilter.cpp - BACKENDS reference NO_BACKEND_NAME + BACKENDS reference +) +add_grb_tests( iteratorFilter_tiny iteratorFilter + BACKENDS reference ARGUMENTS 3 Test_OK_SUCCESS +) +add_grb_tests( iteratorFilter_default iteratorFilter + BACKENDS reference Test_OK_SUCCESS +) +add_grb_tests( iteratorFilter_large iteratorFilter + BACKENDS reference ARGUMENTS 7013 Test_OK_SUCCESS ) add_grb_executables( RBGaussSeidel RBGaussSeidel.cpp BACKENDS reference reference_omp bsp1d hybrid hyperdags nonblocking ) +add_grb_tests( RBGaussSeidel RBGaussSeidel + BACKENDS reference 
reference_omp bsp1d hybrid hyperdags nonblocking + REQUIRED_FILES ${DATASETS_DIR}/west0497.mtx + ARGUMENTS ${DATASETS_DIR}/west0497.mtx Test_OK_SUCCESS +) add_grb_executables( selectMatrix selectMatrix.cpp BACKENDS reference reference_omp bsp1d hybrid hyperdags nonblocking ADDITIONAL_LINK_LIBRARIES test_utils_headers ) +add_grb_tests( selectMatrix_small selectMatrix + BACKENDS reference reference_omp bsp1d hybrid hyperdags nonblocking + ARGUMENTS 3 Test_OK_SUCCESS +) +add_grb_tests( selectMatrix_big selectMatrix + BACKENDS reference reference_omp bsp1d hybrid hyperdags nonblocking + ARGUMENTS 5000 Test_OK_SUCCESS +) add_grb_executables( set set.cpp BACKENDS reference reference_omp bsp1d hybrid hyperdags nonblocking ) +add_grb_tests( set set ARGUMENTS 1000000 Test_OK_SUCCESS + BACKENDS reference reference_omp bsp1d hybrid hyperdags nonblocking +) add_grb_executables( sparse_mxv sparse_mxv.cpp BACKENDS reference reference_omp bsp1d hybrid hyperdags nonblocking ) +add_grb_tests( sparse_mxv sparse_mxv Test_OK_SUCCESS + BACKENDS reference reference_omp bsp1d hybrid hyperdags nonblocking +) add_grb_executables( sparse_vxm sparse_vxm.cpp BACKENDS reference reference_omp bsp1d hybrid hyperdags nonblocking ) +add_grb_tests( sparse_vxm_10_1 sparse_vxm Test_OK_SUCCESS + BACKENDS reference reference_omp bsp1d hybrid hyperdags nonblocking + ARGUMENTS 10 1 1 1 +) +add_grb_tests( sparse_vxm_10_2 sparse_vxm Test_OK_SUCCESS + BACKENDS reference reference_omp bsp1d hybrid hyperdags nonblocking + ARGUMENTS 10 2 1 1 +) +add_grb_tests( sparse_vxm_10_3 sparse_vxm Test_OK_SUCCESS + BACKENDS reference reference_omp bsp1d hybrid hyperdags nonblocking + ARGUMENTS 10 3 1 1 +) +add_grb_tests( sparse_vxm_10_4 sparse_vxm Test_OK_SUCCESS + BACKENDS reference reference_omp bsp1d hybrid hyperdags nonblocking + ARGUMENTS 10 4 1 1 +) +add_grb_tests( sparse_vxm_1000_1 sparse_vxm Test_OK_SUCCESS + BACKENDS reference reference_omp bsp1d hybrid hyperdags nonblocking + ARGUMENTS 1000 1 1 1 +) 
+add_grb_tests( sparse_vxm_1000_2 sparse_vxm Test_OK_SUCCESS + BACKENDS reference reference_omp bsp1d hybrid hyperdags nonblocking + ARGUMENTS 1000 2 1 1 +) +add_grb_tests( sparse_vxm_1000_3 sparse_vxm Test_OK_SUCCESS + BACKENDS reference reference_omp bsp1d hybrid hyperdags nonblocking + ARGUMENTS 1000 3 1 1 +) +add_grb_tests( sparse_vxm_1000_4 sparse_vxm Test_OK_SUCCESS + BACKENDS reference reference_omp bsp1d hybrid hyperdags nonblocking + ARGUMENTS 1000 4 1 1 +) add_grb_executables( stdVector stdVector.cpp BACKENDS reference reference_omp bsp1d hybrid hyperdags nonblocking ) +add_grb_tests( stdVector stdVector ARGUMENTS 100 Test_OK_SUCCESS + BACKENDS reference reference_omp bsp1d hybrid hyperdags nonblocking +) add_grb_executables( swapVector swapVector.cpp BACKENDS reference reference_omp bsp1d hybrid hyperdags nonblocking ) +add_grb_tests( swapVector swapVector ARGUMENTS 100 Test_OK_SUCCESS + BACKENDS reference reference_omp bsp1d hybrid hyperdags nonblocking +) add_grb_executables( thread_local_storage thread_local_storage.cpp - BACKENDS reference NO_BACKEND_NAME + BACKENDS reference ADDITIONAL_LINK_LIBRARIES Threads::Threads ) +add_grb_tests( thread_local_storage thread_local_storage BACKENDS reference ) add_grb_executables( vectorToMatrix vectorToMatrix.cpp BACKENDS reference reference_omp bsp1d hybrid hyperdags nonblocking ) +add_grb_tests( vectorToMatrix vectorToMatrix Test_OK_SUCCESS + BACKENDS reference reference_omp bsp1d hybrid hyperdags nonblocking +) add_grb_executables( vmxa vmxa.cpp - BACKENDS reference reference_omp bsp1d hyperdags nonblocking + BACKENDS reference reference_omp hyperdags nonblocking +) +add_grb_tests( vmxa vmxa Test_OK_SUCCESS + BACKENDS reference reference_omp hyperdags nonblocking ) add_grb_executables( vmx vmx.cpp - BACKENDS reference reference_omp bsp1d hyperdags nonblocking + BACKENDS reference reference_omp hyperdags nonblocking +) +add_grb_tests( vmx vmx Test_OK_SUCCESS + BACKENDS reference reference_omp hyperdags 
nonblocking ) add_grb_executables( zip zip.cpp BACKENDS reference reference_omp bsp1d hybrid hyperdags nonblocking ) +add_grb_tests( zip_large zip ARGUMENTS 10000000 Test_OK_SUCCESS + BACKENDS reference reference_omp bsp1d hybrid hyperdags nonblocking +) add_grb_executables( copyVoidMatrices copyVoidMatrices.cpp BACKENDS reference reference_omp bsp1d hybrid hyperdags nonblocking ) +add_grb_tests( copyVoidMatrices copyVoidMatrices ARGUMENTS 1003 Test_OK_SUCCESS + BACKENDS reference reference_omp bsp1d hybrid hyperdags nonblocking +) add_grb_executables( masked_muladd masked_muladd.cpp BACKENDS reference reference_omp bsp1d hybrid hyperdags nonblocking ) +add_grb_tests( masked_muladd_large masked_muladd ARGUMENTS 7000000 Test_OK_SUCCESS + BACKENDS reference reference_omp bsp1d hybrid hyperdags nonblocking +) add_grb_executables( spy spy.cpp BACKENDS reference reference_omp hyperdags nonblocking ) +add_grb_tests( spy spy + BACKENDS reference reference_omp hyperdags nonblocking + REQUIRED_FILES ${DATASETS_DIR}/west0497.mtx + ARGUMENTS ${DATASETS_DIR}/west0497.mtx + OUTPUT_VALIDATE grep 'Spy matrix' @@TEST_OUTPUT_FILE@@ | cut -d' ' -f9 | grep -q 315 +) add_grb_executables( dense_spmv dense_spmv.cpp BACKENDS reference reference_omp bsp1d hybrid hyperdags nonblocking ) +add_grb_tests( dense_spmv_Ax dense_spmv Test_OK_SUCCESS + BACKENDS reference reference_omp bsp1d hybrid hyperdags nonblocking + REQUIRED_FILES ${DATASETS_DIR}/west0497.mtx + ARGUMENTS ${DATASETS_DIR}/west0497.mtx direct 1 1 1 +) +add_grb_tests( dense_spmv_ATx dense_spmv Test_OK_SUCCESS + BACKENDS reference reference_omp bsp1d hybrid hyperdags nonblocking + REQUIRED_FILES ${DATASETS_DIR}/west0497.mtx + ARGUMENTS ${DATASETS_DIR}/west0497.mtx direct 2 1 1 +) +add_grb_tests( dense_spmv_xA dense_spmv Test_OK_SUCCESS + BACKENDS reference reference_omp bsp1d hybrid hyperdags nonblocking + REQUIRED_FILES ${DATASETS_DIR}/west0497.mtx + ARGUMENTS ${DATASETS_DIR}/west0497.mtx direct 3 1 1 +) +add_grb_tests( 
dense_spmv_xAT dense_spmv Test_OK_SUCCESS + BACKENDS reference reference_omp bsp1d hybrid hyperdags nonblocking + REQUIRED_FILES ${DATASETS_DIR}/west0497.mtx + ARGUMENTS ${DATASETS_DIR}/west0497.mtx direct 4 1 1 +) add_grb_executables( moveMatrix moveMatrix.cpp BACKENDS reference reference_omp bsp1d hybrid hyperdags nonblocking ) +add_grb_tests( moveMatrix moveMatrix ARGUMENTS 100 Test_OK_SUCCESS + BACKENDS reference reference_omp bsp1d hybrid hyperdags nonblocking +) add_grb_executables( stdMatrix stdMatrix.cpp BACKENDS reference reference_omp bsp1d hybrid hyperdags nonblocking ) +add_grb_tests( stdMatrix stdMatrix ARGUMENTS 100 Test_OK_SUCCESS + BACKENDS reference reference_omp bsp1d hybrid hyperdags nonblocking +) add_grb_executables( eWiseApply_matrix eWiseApply_matrix.cpp BACKENDS reference reference_omp bsp1d hybrid hyperdags nonblocking ) +add_grb_tests( eWiseApply_matrix eWiseApply_matrix Test_OK_SUCCESS + BACKENDS reference reference_omp bsp1d hybrid hyperdags nonblocking +) # in the below, test_utils_headers is retained in case CMake is configured to # include _DEBUG flags @@ -264,59 +544,108 @@ add_grb_executables( eWiseApplyMatrixReference eWiseApplyMatrixReference.cpp BACKENDS reference reference_omp hyperdags nonblocking ADDITIONAL_LINK_LIBRARIES test_utils_headers ) +add_grb_tests( eWiseApplyMatrixReference_tiny eWiseApplyMatrixReference + BACKENDS reference reference_omp hyperdags nonblocking Test_OK_SUCCESS +) add_grb_executables( eWiseLambda eWiseLambda.cpp BACKENDS reference reference_omp hyperdags nonblocking ) +add_grb_tests( eWiseLambda eWiseLambda + BACKENDS reference reference_omp hyperdags nonblocking Test_OK_SUCCESS +) add_grb_executables( outer outer.cpp BACKENDS reference reference_omp hyperdags nonblocking ) - -# must generate the golden output for other tests -force_add_grb_executable( mxv mxv.cpp - BACKEND reference +add_grb_tests( outer outer Test_OK_SUCCESS + BACKENDS reference reference_omp hyperdags nonblocking ) + 
add_grb_executables( mxv mxv.cpp - BACKENDS reference_omp bsp1d hybrid hyperdags nonblocking + BACKENDS reference reference_omp bsp1d hybrid hyperdags nonblocking + ADDITIONAL_LINK_LIBRARIES test_utils_headers ) - -# must generate the golden output for other tests -force_add_grb_executable( vxm vxm.cpp - BACKEND reference +add_grb_tests( mxv_west0497 mxv Test_OK_SUCCESS + BACKENDS reference reference_omp bsp1d hybrid hyperdags nonblocking + REQUIRED_FILES ${DATASETS_DIR}/west0497.mtx + ARGUMENTS ${DATASETS_DIR}/west0497.mtx ${OUTPUT_VERIFICATION_DIR}/mxv_reference_golden.log ) +# extra tests for bsp1d backend +add_grb_tests( mxv_west0497_extra mxv Test_OK_SUCCESS + BACKENDS bsp1d PROCESSES "1;2;3;4" + REQUIRED_FILES ${DATASETS_DIR}/west0497.mtx + ARGUMENTS ${DATASETS_DIR}/west0497.mtx ${OUTPUT_VERIFICATION_DIR}/mxv_reference_golden.log +) + add_grb_executables( vxm vxm.cpp - BACKENDS reference_omp bsp1d hybrid hyperdags nonblocking + BACKENDS reference reference_omp bsp1d hybrid hyperdags nonblocking + ADDITIONAL_LINK_LIBRARIES test_utils_headers +) +add_grb_tests( vxm_west0497 vxm Test_OK_SUCCESS + BACKENDS reference reference_omp bsp1d hybrid hyperdags nonblocking + REQUIRED_FILES ${DATASETS_DIR}/west0497.mtx + ARGUMENTS ${DATASETS_DIR}/west0497.mtx ${OUTPUT_VERIFICATION_DIR}/vxm_reference_golden.log +) +# extra tests for bsp1d backend +add_grb_tests( vxm_west0497_extra vxm Test_OK_SUCCESS + BACKENDS bsp1d PROCESSES "1;2;3;4" + REQUIRED_FILES ${DATASETS_DIR}/west0497.mtx + ARGUMENTS ${DATASETS_DIR}/west0497.mtx ${OUTPUT_VERIFICATION_DIR}/vxm_reference_golden.log ) add_grb_executables( capacity capacity.cpp BACKENDS reference reference_omp bsp1d hybrid hyperdags nonblocking ) +add_grb_tests( capacity capacity ARGUMENTS 5230 Test_OK_SUCCESS + BACKENDS reference reference_omp hyperdags nonblocking +) add_grb_executables( wait wait.cpp BACKENDS reference reference_omp bsp1d hybrid hyperdags nonblocking ) +add_grb_tests( wait wait Test_OK_SUCCESS + BACKENDS reference 
reference_omp bsp1d hybrid hyperdags nonblocking +) +add_grb_tests( wait_large wait ARGUMENTS 11733 Test_OK_SUCCESS + BACKENDS reference reference_omp bsp1d hybrid hyperdags nonblocking +) add_grb_executables( buildMatrixUnique buildMatrixUnique.cpp BACKENDS reference reference_omp bsp1d hybrid hyperdags nonblocking ADDITIONAL_LINK_LIBRARIES test_utils ) +add_grb_tests( buildMatrixUnique buildMatrixUnique Test_OK_SUCCESS + BACKENDS reference reference_omp bsp1d hybrid hyperdags nonblocking +) add_grb_executables( pinnedVector pinnedVector.cpp BACKENDS reference reference_omp bsp1d hybrid hyperdags nonblocking ) +add_grb_tests( pinnedVector pinnedVector Test_OK_SUCCESS + BACKENDS reference reference_omp bsp1d hybrid hyperdags nonblocking +) add_grb_executables( parallelRegularIterators parallelRegularIterators.cpp BACKENDS reference reference_omp hyperdags nonblocking bsp1d hybrid ) +add_grb_tests( parallelRegularIterators parallelRegularIterators Test_OK_SUCCESS + BACKENDS reference reference_omp bsp1d hybrid hyperdags nonblocking +) add_grb_executables( adapterIterator adapterIterator.cpp BACKENDS reference reference_omp hyperdags nonblocking bsp1d hybrid ) +add_grb_tests( adapterIterator adapterIterator Test_OK_SUCCESS + BACKENDS reference reference_omp bsp1d hybrid hyperdags nonblocking +) add_grb_executables( vectorFromListConstructor vectorFromListConstructor.cpp BACKENDS reference reference_omp bsp1d hybrid hyperdags nonblocking ) +add_grb_tests( vectorFromListConstructor vectorFromListConstructor Test_OK_SUCCESS + BACKENDS reference reference_omp bsp1d hybrid hyperdags nonblocking +) # the below targets test successfully when they compile -- they do not need to # be executed successfully as part of the unit test suite. 
@@ -324,29 +653,47 @@ add_grb_executables( vectorFromListConstructor vectorFromListConstructor.cpp add_grb_executables( properties static_asserts/properties.cpp BACKENDS reference reference_omp bsp1d hybrid hyperdags nonblocking ) - add_grb_executables( matrix_type static_asserts/matrix.cpp BACKENDS reference reference_omp bsp1d hybrid hyperdags nonblocking ) + add_grb_executables( launch_benchmark_auto launcherAndBenchmarker.cpp BACKENDS bsp1d hybrid COMPILE_DEFINITIONS DISTRIBUTED_EXECUTION ) - add_grb_executables( launch_benchmark_auto launcherAndBenchmarker.cpp BACKENDS reference reference_omp hyperdags nonblocking ) +# AUTOMATIC mode +add_grb_tests( launch_benchmark_auto launch_benchmark_auto Test_OK_SUCCESS + BACKENDS reference reference_omp hyperdags nonblocking bsp1d hybrid +) add_grb_executables( launch_benchmark_frommpi_manual launcherAndBenchmarker.cpp BACKENDS bsp1d hybrid COMPILE_DEFINITIONS DISTRIBUTED_EXECUTION NO_LPF_AUTO_INIT ) +# FROM_MPI mode for distributed backends +add_grb_tests( launch_benchmark_frommpi launch_benchmark_frommpi_manual Test_OK_SUCCESS + BACKENDS bsp1d hybrid +) +# MANUAL mode for distributed backends +add_grb_tests( launch_benchmark_manual launch_benchmark_frommpi_manual Test_OK_SUCCESS + BACKENDS bsp1d hybrid + PROCESSES 1 PARALLEL_PROCESSES 4 + ARGUMENTS localhost 77770 4 +) add_grb_executables( launch_benchmark_frommpi_manual launcherAndBenchmarker.cpp BACKENDS reference reference_omp hyperdags nonblocking COMPILE_DEFINITIONS NO_LPF_AUTO_INIT ) +# MANUAL mode for shared-memory backends +add_grb_tests( launch_benchmark_manual launch_benchmark_frommpi_manual Test_OK_SUCCESS + BACKENDS reference reference_omp hyperdags nonblocking + ARGUMENTS localhost 77770 1 0 +) # targets to list and build the test for this category get_property( unit_tests_list GLOBAL PROPERTY tests_category_unit ) @@ -359,30 +706,9 @@ add_custom_target( "build_tests_category_unit" DEPENDS "${unit_tests_list}" ) - -foreach( mode ${MODES_unit} ) - # 
Removing the prefix - string( SUBSTRING "${MODES_${mode}_suffix}" 1 -1 __mode ) - list( APPEND __MODES "${__mode}" ) -endforeach() - # target to run the tests in this category by calling the appropriate runner -add_custom_target( tests_unit - COMMAND ${CMAKE_CURRENT_SOURCE_DIR}/unittests.sh - ${SCRIPTS_COMMON_ARGS} - "--test-bin-dir" "\"${CMAKE_CURRENT_BINARY_DIR}\"" - "--test-out-dir" "\"${CMAKE_CURRENT_BINARY_DIR}/output\"" - "--output-verification-dir" "\"${CMAKE_CURRENT_SOURCE_DIR}/output_verification\"" - "--test-data-dir" "\"${CMAKE_CURRENT_SOURCE_DIR}/test_data\"" - "--modes" "\"${__MODES}\"" - - WORKING_DIRECTORY "${PROJECT_SOURCE_DIR}" - DEPENDS build_tests_category_unit # add dependency on this target - # to automatically build before running - COMMAND_EXPAND_LISTS - USES_TERMINAL +add_custom_target( unittests + COMMAND ${CMAKE_CTEST_COMMAND} -L "mode:unit" --output-on-failure + WORKING_DIRECTORY "${PROJECT_BINARY_DIR}" + DEPENDS build_tests_category_unit ) - -# custom target "unittests" just to keep old naming -add_custom_target( unittests DEPENDS tests_unit ) - diff --git a/tests/unit/mxv.cpp b/tests/unit/mxv.cpp index c35270376..6d121b221 100644 --- a/tests/unit/mxv.cpp +++ b/tests/unit/mxv.cpp @@ -18,16 +18,18 @@ #include #include +#include #include "graphblas/utils/parser.hpp" #include "graphblas.hpp" - +#include using namespace grb; struct output { int exit_code; + std::unique_ptr< PinnedVector< int > > pinnedVector; }; struct input { @@ -74,33 +76,15 @@ void grbProgram( const struct input &in, struct output &out ) { return; } - const size_t P = grb::spmd<>::nprocs(); - const size_t s = grb::spmd<>::pid(); - if( s == 0 ) { - std::cout << "%%MatrixMarket vector coordinate double general\n"; - std::cout << "%Global index \tValue\n"; - std::cout << grb::size( y ) << "\n"; - } - for( size_t k = 0; k < P; ++k ) { - if( k == s ) { - for( const auto pair : y ) { - const size_t index = pair.first; - std::cout << index << " " << pair.second << "\n"; - } - } 
- grb::spmd<>::barrier(); - } - + out.pinnedVector = std::unique_ptr< PinnedVector< int > >( new PinnedVector< int >( y, SEQUENTIAL ) ); out.exit_code = 0; - std::cout << std::flush; - std::cerr << std::flush; } int main( int argc, char ** argv ) { std::cout << "Functional test executable: " << argv[ 0 ] << "\n"; - if( argc != 2 ) { - std::cout << "Usage: " << argv[ 0 ] << " \n"; + if( argc != 3 ) { + std::cout << "Usage: " << argv[ 0 ] << " \n"; return EXIT_SUCCESS; } @@ -117,7 +101,16 @@ int main( int argc, char ** argv ) { return EXIT_FAILURE; } - if( out.exit_code != 0 ) { + int rc = 0; + if( not out.pinnedVector ) { + std::cout << "no pinned vector" << std::endl; + rc = 1; + } else { + const char * truth_filename = argv[ 2 ]; + rc = vector_verification( *out.pinnedVector, truth_filename, 1e-5, 1e-6 ); + } + + if( out.exit_code != 0 || rc != 0 ) { std::cout << "Test FAILED (program returned non-zero exit code " << out.exit_code << ")\n" << std::endl; } else { diff --git a/tests/unit/output_verification/mxv_reference_golden.log b/tests/unit/output_verification/mxv_reference_golden.log new file mode 100644 index 000000000..96a658a83 --- /dev/null +++ b/tests/unit/output_verification/mxv_reference_golden.log @@ -0,0 +1,497 @@ +3 +132 +246 +360 +473 +457 +441 +266 +496 +727 +957 +924 +891 +4 +4 +4 +4 +4 +4 +3 +3 +3 +3 +3 +3 +3 +3 +3 +3 +3 +3 +3 +3 +3 +3 +3 +3 +3 +3 +3 +0 +1 +10 +-686321 +2 +-253 +-12810 +1 +3 +3 +3 +3 +3 +3 +360 +0 +-15602 +1 +0 +1 +1 +1 +1 +1 +2 +1 +0 +2 +3 +2 +3 +3 +-252 +254 +11 +3 +1 +1 +1 +0 +1 +1 +3 +2 +137 +-109 +-9 +0 +1 +1 +1 +1 +2 +1 +2 +2 +1 +1 +1 +2 +-5 +-4 +-5 +-5 +-4 +-5 +1 +2 +1 +2 +1 +2 +7 +10 +7 +9 +8 +9 +1 +1 +1 +2 +2 +1 +1 +2 +2 +-3 +-3 +-3 +-2 +-3 +-2 +-1896 +-801 +-73 +350 +250 +114 +2 +1 +1 +1 +1 +1 +218 +153 +99 +35 +44 +42 +17 +15 +14 +12 +12 +13 +2 +2 +3 +3 +3 +3 +2 +1 +1 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +8 +8 +2 +-4 +2 +2 +2 +2 +2 +2 +8 +105 +194 +283 +371 +359 +346 +209 +389 +569 +749 +723 +698 +4 +4 +4 
+4 +4 +4 +3 +3 +3 +3 +3 +3 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +-7 +1 +7 +-686972 +2 +-381 +-12659 +1 +-12 +-11 +-11 +-11 +-11 +-11 +232 +0 +-24266 +6 +1 +2 +2 +2 +2 +2 +2 +2 +-12 +-12 +-12 +-12 +-12 +-12 +-379 +375 +12 +6 +8 +104 +193 +281 +369 +356 +344 +208 +387 +566 +745 +719 +693 +4 +4 +4 +4 +4 +4 +3 +3 +3 +3 +3 +3 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +-7 +1 +20 +-170006 +1 +-9058 +-12348 +1 +-10 +-10 +-10 +-10 +-10 +-10 +230 +0 +-20934 +5 +2 +2 +2 +2 +2 +2 +2 +2 +-11 +-11 +-10 +-10 +-10 +-10 +-9174 +2357 +3 +5 +8 +103 +192 +280 +368 +355 +343 +207 +386 +565 +744 +718 +692 +4 +4 +4 +4 +4 +4 +3 +3 +3 +3 +3 +3 +3 +3 +3 +3 +3 +3 +3 +3 +3 +3 +3 +3 +3 +3 +3 +-7 +1 +21 +-170006 +2 +-74 +-13224 +1 +3 +3 +3 +3 +3 +3 +280 +0 +-4572 +6 +1 +2 +2 +2 +2 +2 +2 +1 +-4 +1 +3 +1 +3 +3 +-75 +80 +10 +7 +8 +104 +193 +281 +369 +356 +344 +208 +387 +566 +744 +719 +693 +4 +4 +4 +4 +4 +4 +3 +3 +3 +3 +3 +3 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +-7 +1 +7 +-686987 +1 +-9177 +-12348 +1 +-10 +-10 +-10 +-10 +-10 +-10 +230 +0 +-20867 +5 +1 +2 +2 +2 +2 +2 +2 +2 +-11 +-10 +-10 +-10 +-10 +-10 +-9202 +2343 +3 +5 diff --git a/tests/unit/output_verification/vxm_reference_golden.log b/tests/unit/output_verification/vxm_reference_golden.log new file mode 100644 index 000000000..15e823f62 --- /dev/null +++ b/tests/unit/output_verification/vxm_reference_golden.log @@ -0,0 +1,497 @@ +3 +3 +3 +3 +3 +3 +357 +2 +3 +0 +2 +2 +2 +1 +2 +2 +-2 +3 +3 +3 +3 +3 +3 +-5 +-1 +11 +132 +246 +359 +473 +456 +440 +269 +499 +730 +960 +927 +894 +3 +3 +3 +3 +3 +3 +3 +3 +3 +2 +3 +3 +3 +3 +3 +3 +3 +3 +3 +3 +3 +3 +3 +3 +3 +3 +3 +3 +-717495 +12 +2977 +-1 +-517 +3 +55 +138 +3 +1 +-87 +-130 +-59 +91 +3 +3 +3 +3 +3 +2 +2 +223 +-149 +-52 +-220 +3 +2 +2 +2 +3 +1 +-1 +3 +3 +3 +3 +3 +3 +279 +1 +4 +0 +2 +2 +2 +1 +2 +2 +-9 +3 +3 +3 +3 +3 +3 +-3 +-0 +9 +104 +192 +281 +370 +357 +344 +211 +391 +571 +751 +725 +700 +3 +3 +3 +3 +3 +3 +3 +3 +3 +2 +3 +3 +3 +3 +3 +3 +3 +3 +3 +3 +3 +3 +3 +3 +3 +3 +3 +4 +-725882 +8 +2327 +1 
+-778 +3 +56 +3 +3 +3 +4 +2 +1 +-1 +-2 +-4 +5 +-1107 +-5 +4 +5 +-107 +-10 +3 +4 +1 +1 +3 +3 +3 +3 +3 +3 +2 +-0 +-0 +-0 +0 +0 +-0 +2 +2 +2 +2 +2 +2 +0 +0 +0 +0 +0 +0 +0 +0 +0 +5 +0 +1 +2 +2 +2 +2 +2 +2 +4 +4 +4 +4 +4 +4 +2 +163 +996 +5 +5 +5 +5 +5 +5 +49 +36 +26 +21 +22 +24 +290 +278 +268 +257 +259 +258 +3 +3 +3 +3 +3 +3 +277 +1 +4 +3 +3 +3 +3 +3 +3 +277 +3 +2 +1 +2 +2 +2 +1 +2 +2 +-8 +3 +3 +3 +3 +3 +3 +6 +2 +4 +1 +2 +2 +2 +1 +2 +2 +-8 +3 +3 +3 +3 +3 +3 +0 +0 +4 +103 +191 +279 +368 +355 +342 +210 +389 +568 +747 +721 +695 +3 +3 +3 +2 +3 +3 +3 +3 +3 +2 +3 +3 +3 +3 +3 +3 +3 +3 +3 +3 +3 +3 +3 +3 +3 +3 +3 +4 +-196425 +20 +2313 +125 +-25192 +3 +6 +103 +191 +279 +368 +355 +342 +210 +389 +568 +747 +721 +695 +3 +3 +3 +2 +3 +3 +-3667 +-2631 +-1873 +-1429 +-1541 +-1699 +3 +3 +3 +2 +2 +2 +3 +3 +3 +2 +3 +3 +3 +2 +3 +-4 +-180481 +24 +2312 +-119 +-154 +-3 +52 +2 +1 +2 +2 +1 +4 +4 +4 +4 +4 +3 +1 +1 +1 +1 +1 +4 +4 +4 +4 +4 +3 +-1 +3 +3 +3 +3 +3 +3 +277 +1 +3 +0 +2 +2 +2 +1 +2 +2 +-8 +3 +3 +3 +3 +3 +3 +6 +2 +4 +103 +191 +279 +367 +355 +342 +210 +389 +568 +746 +721 +695 +3 +3 +3 +2 +3 +3 +3 +3 +3 +2 +3 +3 +3 +3 +3 +3 +3 +3 +3 +3 +3 +3 +3 +3 +3 +3 +3 +4 +-713312 +7 +2312 +33 +-25287 +3 +6 \ No newline at end of file diff --git a/tests/unit/unittests.sh b/tests/unit/unittests.sh deleted file mode 100755 index f70d486a2..000000000 --- a/tests/unit/unittests.sh +++ /dev/null @@ -1,864 +0,0 @@ -#!/bin/bash - -# -# Copyright 2021 Huawei Technologies Co., Ltd. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-# See the License for the specific language governing permissions and -# limitations under the License. -# -#set -e - -TESTS_ROOT_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )"/../ &> /dev/null && pwd )" -source ${TESTS_ROOT_DIR}/parse_env.sh - -REFERENCE_COUNT=$(echo ${BACKENDS[@]} | grep -o "reference" | wc -l) - -for MODE in ${MODES}; do - - echo "****************************************************************************************" - echo " FUNCTIONAL PERFORMANCE DESCRIPTION " - echo "----------------------------------------------------------------------------------------" - echo " " - - # run only if the reference backend is present - if [[ "${REFERENCE_COUNT}" -gt "0" ]]; then - echo ">>> [x] [ ] Testing grb::utils::equals over floats and doubles" - ${TEST_BIN_DIR}/equals_${MODE} &> ${TEST_OUT_DIR}/equals_${MODE}.log - head -1 ${TEST_OUT_DIR}/equals_${MODE}.log - grep 'Test OK' ${TEST_OUT_DIR}/equals_${MODE}.log || echo "Test FAILED" - echo " " - - echo ">>> [x] [ ] Testing numerical addition operator over doubles" - ${TEST_BIN_DIR}/add15d_${MODE} - - echo ">>> [x] [ ] Testing numerical addition operator over a mixed field" - echo " (double, integers, and floats)" - ${TEST_BIN_DIR}/add15m_${MODE} - - echo ">>> [x] [ ] Testing numerical multiplication operator over integers" - ${TEST_BIN_DIR}/mul15i_${MODE} - - echo ">>> [x] [ ] Testing numerical multiplication operator over a mixed" - echo " field (double, integers, and floats)" - ${TEST_BIN_DIR}/mul15m_${MODE} - - echo ">>> [x] [ ] Tests the built-in parser on the west0497 MatrixMarket file" - if [ -f ${INPUT_DIR}/west0497.mtx ]; then - ${TEST_BIN_DIR}/parserTest_${MODE} ${INPUT_DIR}/west0497.mtx 2> ${TEST_OUT_DIR}/parserTest_${MODE}.err 1> ${TEST_OUT_DIR}/parserTest_${MODE}.out - head -1 ${TEST_OUT_DIR}/parserTest_${MODE}.out - grep 'Test OK' ${TEST_OUT_DIR}/parserTest_${MODE}.out || echo "Test FAILED" - else - echo "Test DISABLED: west0497.mtx was not found. 
To enable, please provide ${INPUT_DIR}/west0497.mtx" - fi - echo " " - - echo ">>> [x] [ ] Tests the IteratorFilter utility on tiny inputs" - ${TEST_BIN_DIR}/iteratorFilter_${MODE} 3 &> ${TEST_OUT_DIR}/iteratorFilter_${MODE}_tiny.log - head -1 ${TEST_OUT_DIR}/iteratorFilter_${MODE}_tiny.log - grep 'Test OK' ${TEST_OUT_DIR}/iteratorFilter_${MODE}_tiny.log || echo "Test FAILED" - echo " " - - echo ">>> [x] [ ] Tests the IteratorFilter utility on default input" - ${TEST_BIN_DIR}/iteratorFilter_${MODE} &> ${TEST_OUT_DIR}/iteratorFilter_${MODE}.log - head -1 ${TEST_OUT_DIR}/iteratorFilter_${MODE}.log - grep 'Test OK' ${TEST_OUT_DIR}/iteratorFilter_${MODE}.log || echo "Test FAILED" - echo " " - - echo ">>> [x] [ ] Tests the IteratorFilter utility on large inputs" - ${TEST_BIN_DIR}/iteratorFilter_${MODE} 7013 &> ${TEST_OUT_DIR}/iteratorFilter_${MODE}_large.log - head -1 ${TEST_OUT_DIR}/iteratorFilter_${MODE}_large.log - grep 'Test OK' ${TEST_OUT_DIR}/iteratorFilter_${MODE}_large.log || echo "Test FAILED" - echo " " - - echo ">>> [x] [ ] Tests the built-in parser (in graphblas/utils/parser.hpp)" - echo " versus the parser in tests/parser.cpp on cit-HepTh.txt." - if [ -f ${INPUT_DIR}/cit-HepTh.txt ]; then - ${TEST_BIN_DIR}/compareParserTest_${MODE} ${INPUT_DIR}/cit-HepTh.txt &> ${TEST_OUT_DIR}/compareParserTest_${MODE} - head -1 ${TEST_OUT_DIR}/compareParserTest_${MODE} - tail -2 ${TEST_OUT_DIR}/compareParserTest_${MODE} - else - echo "Test DISABLED: cit-HepTh was not found. 
To enable, please provide the dataset within ${INPUT_DIR}/cit-HepTh.txt" - echo " " - fi - - echo ">>> [x] [ ] Tests the built-in high-performance parser (in" - echo " include/graphblas/utils/parser.h &" - echo " src/graphblas/utils/parser.c) on dwt_59.mtx" - echo " Parameters: P=1, no hyperthreads (half the available threads)," - echo " block size = 128k, buffer size = 8M" - if [ -f ${INPUT_DIR}/dwt_59.mtx ]; then - echo "Functional test executable: ${TEST_BIN_DIR}/hpparser_${MODE}" - ${TEST_BIN_DIR}/hpparser_${MODE} 1 ${MAX_THREADS} 131072 8388608 ${INPUT_DIR}/dwt_59.mtx 1 &> ${TEST_OUT_DIR}/hpparser_${MODE} - echo "[ 0, *] nrow = 59, ncol = 59, nnnz = 163 -[ 0, *] offb = 564, fsiz = 1494, offe = 1493 -[ *, *] ntot = 163" > ${TEST_OUT_DIR}/hpparser.chk - (diff ${TEST_OUT_DIR}/hpparser_${MODE} ${TEST_OUT_DIR}/hpparser.chk && printf "Test OK.\n\n") || printf "Test FAILED.\n\n" - else - echo "Test DISABLED: dwt_59.mtx was not found. To enable, please provide ${INPUT_DIR}/dwt_59.mtx" - echo " " - fi - fi - - for BACKEND in ${BACKENDS[@]}; do - if [ "$BACKEND" = "bsp1d" ]; then - Ps=( 1 2 16 ) - fi - if [ "$BACKEND" = "hybrid" ]; then - Ps=( 2 7 ) - fi - if [ "$BACKEND" = "bsp1d" ] || [ "$BACKEND" = "hybrid" ]; then - if [ -z "${LPFRUN}" ]; then - echo "LPFRUN is not set!" - exit 255; - fi - if [ -z "${MANUALRUN}" ]; then - echo "MANUALRUN is not set!" 
- exit 255; - fi - else - Ps=( 1 ) - fi - if [ "$BACKEND" = "reference_omp" ]; then - Pt=( 1 2 ${MAX_THREADS} ) - elif [ "$BACKEND" = "nonblocking" ]; then - Pt=( 1 2 ${MAX_THREADS} ) - elif [ "$BACKEND" = "hybrid" ]; then - MTDS=$((MAX_THREADS/7)) - if [ "$MTDS" -le "2" ]; then - Pt=( 2 ) - else - Pt=( 2 $((MAX_THREADS/7)) ) - fi - else - Pt=( 1 ) - fi - for P in ${Ps[@]}; do - for T in ${Pt[@]}; do - - runner= - if [ "$BACKEND" = "bsp1d" ] || [ "$BACKEND" = "hybrid" ]; then - runner="${LPFRUN} -n ${P}" - fi - if [ "${BACKEND}" = "bsp1d" ]; then - runner="${runner} ${BIND_PROCESSES_TO_HW_THREADS}" - elif [ "${BACKEND}" = "hybrid" ]; then - runner="${runner} ${MPI_PASS_ENV} ${LPFRUN_PASSTHROUGH}OMP_NUM_THREADS=${T}" - runner="${runner} ${BIND_PROCESSES_TO_MULTIPLE_HW_THREADS}${T}" - elif [ "$BACKEND" = "reference_omp" ]; then - export OMP_NUM_THREADS=${T} - fi - - if [ "$BACKEND" = "reference" ] || [ "${BACKEND}" = "reference_omp" ]; then - echo "#################################################################" - echo "# Starting unit tests specific to the ${BACKEND} backend" - echo "# using ${MODE} mode" - echo "# using ${T} threads" - echo "#################################################################" - echo " " - - if [ "x${runner}" != "x" ]; then - echo "# using runner \`\`$runner''" - fi - - echo ">>> [x] [ ] Testing grb::eWiseApply on tiny matrices whilst" - echo " checking the resulting internal data structures" - $runner ${TEST_BIN_DIR}/eWiseApplyMatrixReference_${MODE}_${BACKEND} &> ${TEST_OUT_DIR}/eWiseApplyMatrixReference_${MODE}_${BACKEND}_${T}.log - head -1 ${TEST_OUT_DIR}/eWiseApplyMatrixReference_${MODE}_${BACKEND}_${T}.log - grep 'Test OK' ${TEST_OUT_DIR}/eWiseApplyMatrixReference_${MODE}_${BACKEND}_${T}.log || echo "Test FAILED" - echo " " - fi - - echo "#################################################################" - echo "# Starting standardised unit tests for the ${BACKEND} backend" - echo "# using ${MODE} mode" - echo "# using ${P} 
user processes" - echo "# using ${T} threads" - if [ "$BACKEND" = "bsp1d" ] || [ "$BACKEND" = "hybrid" ]; then - echo "# using \`\`${LPFRUN}'' for automatic launchers" - fi - if [ "x${runner}" != "x" ]; then - echo "# using runner \`\`$runner''" - fi - echo "#################################################################" - echo " " - - # test utilities first, as some other unit tests depend on them - - echo ">>> [x] [ ] Testing parallel iterators of the grb::utils Range and" - echo " ConstantVector containers" - $runner ${TEST_BIN_DIR}/parallelRegularIterators_${MODE}_${BACKEND} &> ${TEST_OUT_DIR}/parallelRegularIterators_${MODE}_${BACKEND}_${P}_${T}.log - head -1 ${TEST_OUT_DIR}/parallelRegularIterators_${MODE}_${BACKEND}_${P}_${T}.log - grep 'Test OK' ${TEST_OUT_DIR}/parallelRegularIterators_${MODE}_${BACKEND}_${P}_${T}.log || echo "Test FAILED" - echo " " - - echo ">>> [x] [ ] Testing the adapter iterator from grb::utils::iterators" - $runner ${TEST_BIN_DIR}/adapterIterator_${MODE}_${BACKEND} &> ${TEST_OUT_DIR}/adapterIterator_${MODE}_${BACKEND}_${P}_${T}.log - head -1 ${TEST_OUT_DIR}/adapterIterator_${MODE}_${BACKEND}_${P}_${T}.log - grep 'Test OK' ${TEST_OUT_DIR}/adapterIterator_${MODE}_${BACKEND}_${P}_${T}.log || echo "Test FAILED" - echo " " - - # test buildMatrix and factory first, as other unit tests depend on them - - echo ">>> [x] [ ] Testing building a matrix via input iterators" - echo " both sequentially and in parallel" - $runner ${TEST_BIN_DIR}/buildMatrixUnique_${MODE}_${BACKEND} &> ${TEST_OUT_DIR}/buildMatrixUnique_${MODE}_${BACKEND}_${P}_${T}.log - head -1 ${TEST_OUT_DIR}/buildMatrixUnique_${MODE}_${BACKEND}_${P}_${T}.log - grep 'Test OK' ${TEST_OUT_DIR}/buildMatrixUnique_${MODE}_${BACKEND}_${P}_${T}.log || echo "Test FAILED" - echo " " - - echo ">>> [x] [ ] Testing grb::factories." 
- $runner ${TEST_BIN_DIR}/factories_${MODE}_${BACKEND} &> ${TEST_OUT_DIR}/factories_${MODE}_${BACKEND}_${P}_${T} - head -1 ${TEST_OUT_DIR}/factories_${MODE}_${BACKEND}_${P}_${T} - grep 'Test OK' ${TEST_OUT_DIR}/factories_${MODE}_${BACKEND}_${P}_${T} || echo "Test FAILED" - echo " " - - # order of unit tests below here should not matter / matter less - - echo ">>> [x] [ ] Testing grb::id on vectors and matrices" - $runner ${TEST_BIN_DIR}/id_${MODE}_${BACKEND} &> ${TEST_OUT_DIR}/id_${MODE}_${BACKEND}_${P}_${T}.log - head -1 ${TEST_OUT_DIR}/id_${MODE}_${BACKEND}_${P}_${T}.log - grep 'Test OK' ${TEST_OUT_DIR}/id_${MODE}_${BACKEND}_${P}_${T}.log || echo "Test FAILED" - echo " " - - if [ "$BACKEND" = "bsp1d" ] || [ "$BACKEND" = "hybrid" ]; then - echo ">>> [x] [ ] Testing grb::id on distributed vectors and matrices" - $runner ${TEST_BIN_DIR}/id_distributed_${MODE}_${BACKEND} &> ${TEST_OUT_DIR}/id_distributed_${MODE}_${BACKEND}_${P}_${T}.log - head -1 ${TEST_OUT_DIR}/id_distributed_${MODE}_${BACKEND}_${P}_${T}.log - grep 'Test OK' ${TEST_OUT_DIR}/id_distributed_${MODE}_${BACKEND}_${P}_${T}.log || echo "Test FAILED" - echo " " - fi - - echo ">>> [x] [ ] Testing grb::capacity, grb::resize, and default" - echo " and explicit capacities set during container" - echo " construction" - $runner ${TEST_BIN_DIR}/capacity_${MODE}_${BACKEND} 5230 &> ${TEST_OUT_DIR}/capacity_${MODE}_${BACKEND}_${P}_${T}.log - head -1 ${TEST_OUT_DIR}/capacity_${MODE}_${BACKEND}_${P}_${T}.log - grep 'Test OK' ${TEST_OUT_DIR}/capacity_${MODE}_${BACKEND}_${P}_${T}.log || echo "Test FAILED" - echo " " - - echo ">>> [x] [ ] Testing grb::set on vectors of doubles of size" - echo " 1 000 000." 
- $runner ${TEST_BIN_DIR}/set_${MODE}_${BACKEND} 1000000 &> ${TEST_OUT_DIR}/set_${MODE}_${BACKEND}_${P}_${T}.log - head -1 ${TEST_OUT_DIR}/set_${MODE}_${BACKEND}_${P}_${T}.log - grep 'Test OK' ${TEST_OUT_DIR}/set_${MODE}_${BACKEND}_${P}_${T}.log || echo "Test FAILED" - echo " " - - echo ">>> [x] [ ] Testing the grb::pinnedVector on fundamental and" - echo " non-fundamental value types." - $runner ${TEST_BIN_DIR}/pinnedVector_${MODE}_${BACKEND} &> ${TEST_OUT_DIR}/pinnedVector_${MODE}_${BACKEND}_${P}_${T}.log - head -1 ${TEST_OUT_DIR}/pinnedVector_${MODE}_${BACKEND}_${P}_${T}.log - grep 'Test OK' ${TEST_OUT_DIR}/pinnedVector_${MODE}_${BACKEND}_${P}_${T}.log || echo "Test FAILED" - echo " " - - echo ">>> [x] [ ] Testing grb::eWiseApply using (+,0) on vectors" - echo " of doubles of size 14." - $runner ${TEST_BIN_DIR}/ewiseapply_${MODE}_${BACKEND} 14 &> ${TEST_OUT_DIR}/ewiseapply_small_${MODE}_${BACKEND}_${P}_${T} - head -1 ${TEST_OUT_DIR}/ewiseapply_small_${MODE}_${BACKEND}_${P}_${T} - grep 'Test OK' ${TEST_OUT_DIR}/ewiseapply_small_${MODE}_${BACKEND}_${P}_${T} || echo "Test FAILED" - echo " " - - echo ">>> [x] [ ] Testing grb::eWiseApply using (+,0) on vectors" - echo " of doubles of size 100." - $runner ${TEST_BIN_DIR}/ewiseapply_${MODE}_${BACKEND} 100 &> ${TEST_OUT_DIR}/ewiseapply_${MODE}_${BACKEND}_${P}_${T} - head -1 ${TEST_OUT_DIR}/ewiseapply_${MODE}_${BACKEND}_${P}_${T} - grep 'Test OK' ${TEST_OUT_DIR}/ewiseapply_${MODE}_${BACKEND}_${P}_${T} || echo "Test FAILED" - echo " " - - echo ">>> [x] [ ] Testing grb::eWiseApply using (+,0) on vectors" - echo " of doubles of size 10 000 000." 
- $runner ${TEST_BIN_DIR}/ewiseapply_${MODE}_${BACKEND} 10000000 &> ${TEST_OUT_DIR}/ewiseapply_large_${MODE}_${BACKEND}_${P}_${T} - head -1 ${TEST_OUT_DIR}/ewiseapply_large_${MODE}_${BACKEND}_${P}_${T} - grep 'Test OK' ${TEST_OUT_DIR}/ewiseapply_large_${MODE}_${BACKEND}_${P}_${T} || echo "Test FAILED" - echo " " - - echo ">>> [x] [x] Testing grb::foldl and grb::foldr reducing dense" - echo " vectors into scalars using operators and monoids." - $runner ${TEST_BIN_DIR}/fold_to_scalar_${MODE}_${BACKEND} ${P} &> ${TEST_OUT_DIR}/fold_to_scalar_${MODE}_${BACKEND}_${P}_${T}.log - head -1 ${TEST_OUT_DIR}/fold_to_scalar_${MODE}_${BACKEND}_${P}_${T}.log - grep 'Test OK' ${TEST_OUT_DIR}/fold_to_scalar_${MODE}_${BACKEND}_${P}_${T}.log || echo "Test FAILED" - echo " " - - echo ">>> [x] [ ] Testing grb::dot on two vectors of doubles and" - echo " ints of size 1874." - $runner ${TEST_BIN_DIR}/dot_${MODE}_${BACKEND} 1874 &> ${TEST_OUT_DIR}/dot_${MODE}_${BACKEND}_${P}_${T} - head -1 ${TEST_OUT_DIR}/dot_${MODE}_${BACKEND}_${P}_${T} - grep 'Test OK' ${TEST_OUT_DIR}/dot_${MODE}_${BACKEND}_${P}_${T} || echo "Test FAILED" - echo " " - - echo ">>> [x] [ ] Testing grb::dot on two vectors of doubles and" - echo " ints of size 10 000 000." - $runner ${TEST_BIN_DIR}/dot_${MODE}_${BACKEND} 10000000 &> ${TEST_OUT_DIR}/dot_large_${MODE}_${BACKEND}_${P}_${T} - head -1 ${TEST_OUT_DIR}/dot_large_${MODE}_${BACKEND}_${P}_${T} - grep 'Test OK' ${TEST_OUT_DIR}/dot_large_${MODE}_${BACKEND}_${P}_${T} || echo "Test FAILED" - echo " " - - echo ">>> [x] [ ] Testing std::swap on two vectors of doubles of" - echo " size 100." - $runner ${TEST_BIN_DIR}/swapVector_${MODE}_${BACKEND} 100 &> ${TEST_OUT_DIR}/swapVector_${MODE}_${BACKEND}_${P}_${T} - head -1 ${TEST_OUT_DIR}/swapVector_${MODE}_${BACKEND}_${P}_${T} - grep 'Test OK' ${TEST_OUT_DIR}/swapVector_${MODE}_${BACKEND}_${P}_${T} || echo "Test FAILED" - echo " " - - echo ">>> [x] [ ] Testing std::move on two vectors of doubles of" - echo " size 100." 
- $runner ${TEST_BIN_DIR}/moveVector_${MODE}_${BACKEND} 100 &> ${TEST_OUT_DIR}/moveVector_${MODE}_${BACKEND}_${P}_${T} - head -1 ${TEST_OUT_DIR}/moveVector_${MODE}_${BACKEND}_${P}_${T} - grep 'Test OK' ${TEST_OUT_DIR}/moveVector_${MODE}_${BACKEND}_${P}_${T} || echo "Test FAILED" - echo " " - - echo ">>> [x] [ ] Testing std::move on two vectors of doubles of" - echo " size 100." - $runner ${TEST_BIN_DIR}/moveMatrix_${MODE}_${BACKEND} 100 &> ${TEST_OUT_DIR}/moveMatrix_${MODE}_${BACKEND}_${P}_${T} - head -1 ${TEST_OUT_DIR}/moveMatrix_${MODE}_${BACKEND}_${P}_${T} - grep 'Test OK' ${TEST_OUT_DIR}/moveMatrix_${MODE}_${BACKEND}_${P}_${T} || echo "Test FAILED" - echo " " - - echo ">>> [x] [ ] Testing std::vector of thirteen GraphBLAS" - echo " vectors of unsigned chars of sizes 100 and 50." - $runner ${TEST_BIN_DIR}/stdVector_${MODE}_${BACKEND} 100 &> ${TEST_OUT_DIR}/stdVector_${MODE}_${BACKEND}_${P}_${T} - head -1 ${TEST_OUT_DIR}/stdVector_${MODE}_${BACKEND}_${P}_${T} - grep 'Test OK' ${TEST_OUT_DIR}/stdVector_${MODE}_${BACKEND}_${P}_${T} || echo "Test FAILED" - echo " " - - echo ">>> [x] [ ] Testing std::vector of thirteen GraphBLAS" - echo " matrices of unsigned chars of various sizes." - $runner ${TEST_BIN_DIR}/stdMatrix_${MODE}_${BACKEND} 100 &> ${TEST_OUT_DIR}/stdMatrix_${MODE}_${BACKEND}_${P}_${T} - head -1 ${TEST_OUT_DIR}/stdMatrix_${MODE}_${BACKEND}_${P}_${T} - grep 'Test OK' ${TEST_OUT_DIR}/stdMatrix_${MODE}_${BACKEND}_${P}_${T} || echo "Test FAILED" - echo " " - - echo ">>> [x] [ ] Testing grb::Vector's copy-constructor on a" - echo " vector of doubles of size 100." 
- $runner ${TEST_BIN_DIR}/copyVector_${MODE}_${BACKEND} 100 &> ${TEST_OUT_DIR}/copyVector_${MODE}_${BACKEND}_${P}_${T} - head -1 ${TEST_OUT_DIR}/copyVector_${MODE}_${BACKEND}_${P}_${T} - grep 'Test OK' ${TEST_OUT_DIR}/copyVector_${MODE}_${BACKEND}_${P}_${T} || echo "Test FAILED" - echo " " - - echo ">>> [x] [ ] Testing grb::Vector's copy-constructor on a" - echo " vector of doubles of size 10 000 000." - $runner ${TEST_BIN_DIR}/copyVector_${MODE}_${BACKEND} 10000000 &> ${TEST_OUT_DIR}/copyVector_large_${MODE}_${BACKEND}_${P}_${T} - head -1 ${TEST_OUT_DIR}/copyVector_large_${MODE}_${BACKEND}_${P}_${T} - grep 'Test OK' ${TEST_OUT_DIR}/copyVector_large_${MODE}_${BACKEND}_${P}_${T} || echo "Test FAILED" - echo " " - - echo ">>> [x] [ ] Testing grb::Matrix's output-iterators on" - echo " square matrices of size 15 and 1 000 000." - $runner ${TEST_BIN_DIR}/matrixIterator_${MODE}_${BACKEND} 1000000 2> ${TEST_OUT_DIR}/matrixIterator_${MODE}_${BACKEND}_${P}_${T}.err 1> ${TEST_OUT_DIR}/matrixIterator_${MODE}_${BACKEND}_${P}_${T}.log - head -1 ${TEST_OUT_DIR}/matrixIterator_${MODE}_${BACKEND}_${P}_${T}.log - grep 'Test OK' ${TEST_OUT_DIR}/matrixIterator_${MODE}_${BACKEND}_${P}_${T}.log || echo "Test FAILED" - echo " " - - echo ">>> [x] [ ] Testing double-assignment of ALP/GraphBLAS containers, i.e.," - echo " assigning one container another one (a=b), twice in a row." - $runner ${TEST_BIN_DIR}/doubleAssign_${MODE}_${BACKEND} 1337 &> ${TEST_OUT_DIR}/doubleAssign_${MODE}_${BACKEND}_${P}_${T}.log - head -1 ${TEST_OUT_DIR}/doubleAssign_${MODE}_${BACKEND}_${P}_${T}.log - grep -i 'test ok' ${TEST_OUT_DIR}/doubleAssign_${MODE}_${BACKEND}_${P}_${T}.log || echo "Test FAILED" - echo " " - - echo ">>> [x] [ ] Testing copy and move constructors and assignment" - echo " of the const_iterator of grb::Vector< double > of" - echo " length 10 000 000." 
- $runner ${TEST_BIN_DIR}/copyAndAssignVectorIterator_${MODE}_${BACKEND} 10000000 &> ${TEST_OUT_DIR}/copyAndAssignVectorIterator_${MODE}_${BACKEND}_${P}_${T}.log - head -1 ${TEST_OUT_DIR}/copyAndAssignVectorIterator_${MODE}_${BACKEND}_${P}_${T}.log - grep 'Test OK' ${TEST_OUT_DIR}/copyAndAssignVectorIterator_${MODE}_${BACKEND}_${P}_${T}.log || echo "Test FAILED" - echo " " - - echo ">>> [x] [ ] Testing grb::eWiseMul on a vector of" - echo " doubles of size 100." - $runner ${TEST_BIN_DIR}/eWiseMul_${MODE}_${BACKEND} 100 &> ${TEST_OUT_DIR}/eWiseMul_${MODE}_${BACKEND}_${P}_${T} - head -1 ${TEST_OUT_DIR}/eWiseMul_${MODE}_${BACKEND}_${P}_${T} - grep 'Test OK' ${TEST_OUT_DIR}/eWiseMul_${MODE}_${BACKEND}_${P}_${T} || echo "Test FAILED" - echo " " - - echo ">>> [x] [ ] Testing grb::eWiseMul on a vector of" - echo " doubles of size 100002." - $runner ${TEST_BIN_DIR}/eWiseMul_${MODE}_${BACKEND} 100002 &> ${TEST_OUT_DIR}/eWiseMul_large_${MODE}_${BACKEND}_${P}_${T} - head -1 ${TEST_OUT_DIR}/eWiseMul_large_${MODE}_${BACKEND}_${P}_${T} - grep 'Test OK' ${TEST_OUT_DIR}/eWiseMul_large_${MODE}_${BACKEND}_${P}_${T} || echo "Test FAILED" - echo " " - - echo ">>> [x] [ ] Testing grb::eWiseMulAdd on a vector of" - echo " doubles of size 7 000 000." - $runner ${TEST_BIN_DIR}/masked_muladd_${MODE}_${BACKEND} 7000000 &> ${TEST_OUT_DIR}/masked_muladd_large_${MODE}_${BACKEND}_${P}_${T} - head -1 ${TEST_OUT_DIR}/masked_muladd_large_${MODE}_${BACKEND}_${P}_${T} - grep 'Test OK' ${TEST_OUT_DIR}/masked_muladd_large_${MODE}_${BACKEND}_${P}_${T} || echo "Test FAILED" - echo " " - - echo ">>> [x] [ ] Testing grb::eWiseMulAdd on a vector of" - echo " doubles of size 10 000 000." 
- $runner ${TEST_BIN_DIR}/muladd_${MODE}_${BACKEND} 10000000 &> ${TEST_OUT_DIR}/muladd_large_${MODE}_${BACKEND}_${P}_${T} - head -1 ${TEST_OUT_DIR}/muladd_large_${MODE}_${BACKEND}_${P}_${T} - grep 'Test OK' ${TEST_OUT_DIR}/muladd_large_${MODE}_${BACKEND}_${P}_${T} || echo "Test FAILED" - echo " " - - echo ">>> [x] [ ] Testing grb::buildVector and" - echo " grb::buildVectorUnique" - $runner ${TEST_BIN_DIR}/buildVector_${MODE}_${BACKEND} &> ${TEST_OUT_DIR}/buildVector_${MODE}_${BACKEND}_${P}_${T}.log - head -1 ${TEST_OUT_DIR}/buildVector_${MODE}_${BACKEND}_${P}_${T}.log - grep 'Test OK' ${TEST_OUT_DIR}/buildVector_${MODE}_${BACKEND}_${P}_${T}.log || echo "Test FAILED" - echo " " - - echo ">>> [x] [ ] Testing grb::Vector( initializer_list ) constructor" - $runner ${TEST_BIN_DIR}/vectorFromListConstructor_${MODE}_${BACKEND} &> ${TEST_OUT_DIR}/vectorFromListConstructor_${MODE}_${BACKEND}_${P}_${T}.log - head -1 ${TEST_OUT_DIR}/vectorFromListConstructor_${MODE}_${BACKEND}_${P}_${T}.log - grep 'Test OK' ${TEST_OUT_DIR}/vectorFromListConstructor_${MODE}_${BACKEND}_${P}_${T}.log || echo "Test FAILED" - echo " " - - echo ">>> [x] [ ] Testing grb::vectorToMatrixConverter" - $runner ${TEST_BIN_DIR}/vectorToMatrix_${MODE}_${BACKEND} &> ${TEST_OUT_DIR}/vectorToMatrix_${MODE}_${BACKEND}_${P}_${T}.log - head -1 ${TEST_OUT_DIR}/vectorToMatrix_${MODE}_${BACKEND}_${P}_${T}.log - grep 'Test OK' ${TEST_OUT_DIR}/vectorToMatrix_${MODE}_${BACKEND}_${P}_${T}.log || echo "Test FAILED" - echo " " - - echo ">>> [x] [ ] Testing grb::clear on a 1M by 1M matrix of" - echo " doubles" - $runner ${TEST_BIN_DIR}/clearMatrix_${MODE}_${BACKEND} 10000000 &> ${TEST_OUT_DIR}/clearMatrix_${MODE}_${BACKEND}_${P}_${T}.log - head -1 ${TEST_OUT_DIR}/clearMatrix_${MODE}_${BACKEND}_${P}_${T}.log - grep 'Test OK' ${TEST_OUT_DIR}/clearMatrix_${MODE}_${BACKEND}_${P}_${T}.log || echo "Test FAILED" - echo " " - - echo ">>> [x] [ ] Testing double-masked grb::vxm and grb::mxv" - echo " on a mock red-black Gauss Seidel 
on west0497." - if [ -f ${INPUT_DIR}/west0497.mtx ]; then - $runner ${TEST_BIN_DIR}/RBGaussSeidel_${MODE}_${BACKEND} ${INPUT_DIR}/west0497.mtx &> ${TEST_OUT_DIR}/RBGaussSeidel_${MODE}_${BACKEND}_${P}_${T} - head -1 ${TEST_OUT_DIR}/RBGaussSeidel_${MODE}_${BACKEND}_${P}_${T} - grep 'Test OK' ${TEST_OUT_DIR}/RBGaussSeidel_${MODE}_${BACKEND}_${P}_${T} || echo "Test FAILED" - else - echo "Test DISABLED: west0497.mtx was not found. To enable, please provide ${INPUT_DIR}/west0497.mtx" - fi - echo " " - - echo ">>> [x] [ ] Testing grb::argmin" - $runner ${TEST_BIN_DIR}/argmin_${MODE}_${BACKEND} 2> ${TEST_OUT_DIR}/argmin_${MODE}_${BACKEND}_${P}_${T}.err 1> ${TEST_OUT_DIR}/argmin_${MODE}_${BACKEND}_${P}_${T}.log - head -1 ${TEST_OUT_DIR}/argmin_${MODE}_${BACKEND}_${P}_${T}.log - grep "Test OK" ${TEST_OUT_DIR}/argmin_${MODE}_${BACKEND}_${P}_${T}.log || echo "Test FAILED" - echo " " - - echo ">>> [x] [ ] Testing grb::argmax" - $runner ${TEST_BIN_DIR}/argmax_${MODE}_${BACKEND} 2> ${TEST_OUT_DIR}/argmax_${MODE}_${BACKEND}_${P}_${T}.err 1> ${TEST_OUT_DIR}/argmax_${MODE}_${BACKEND}_${P}_${T}.log - head -1 ${TEST_OUT_DIR}/argmax_${MODE}_${BACKEND}_${P}_${T}.log - grep "Test OK" ${TEST_OUT_DIR}/argmax_${MODE}_${BACKEND}_${P}_${T}.log || echo "Test FAILED" - echo " " - - echo ">>> [x] [ ] Testing grb::set (matrices)" - $runner ${TEST_BIN_DIR}/matrixSet_${MODE}_${BACKEND} 2> ${TEST_OUT_DIR}/matrixSet_${MODE}_${BACKEND}_${P}_${T}.err 1> ${TEST_OUT_DIR}/matrixSet_${MODE}_${BACKEND}_${P}_${T}.log - head -1 ${TEST_OUT_DIR}/matrixSet_${MODE}_${BACKEND}_${P}_${T}.log - echo "Test OK" ${TEST_OUT_DIR}/matrixSet_${MODE}_${BACKEND}_${P}_${T}.log || echo "Test FAILED" - echo " " - - echo ">>> [x] [ ] Testing grb::foldl+r (scalar, matrix, [mask], monoid)" - $runner ${TEST_BIN_DIR}/fold_matrix_to_scalar_${MODE}_${BACKEND} 2> ${TEST_OUT_DIR}/fold_matrix_to_scalar_${MODE}_${BACKEND}_${P}_${T}.err 1> ${TEST_OUT_DIR}/fold_matrix_to_scalar_${MODE}_${BACKEND}_${P}_${T}.log - head -1 
${TEST_OUT_DIR}/fold_matrix_to_scalar_${MODE}_${BACKEND}_${P}_${T}.log - echo "Test OK" ${TEST_OUT_DIR}/fold_matrix_to_scalar_${MODE}_${BACKEND}_${P}_${T}.log || echo "Test FAILED" - echo " " - - echo ">>> [x] [ ] Tests the \`level-0' grb::collectives" - echo "Functional test executable: ${TEST_BIN_DIR}/collectives_blas0_${MODE}_${BACKEND}" - $runner ${TEST_BIN_DIR}/collectives_blas0_${MODE}_${BACKEND} ${P} &> ${TEST_OUT_DIR}/collectives_blas0_${MODE}_${BACKEND}_${P}_${T}.log - grep 'Test OK' ${TEST_OUT_DIR}/collectives_blas0_${MODE}_${BACKEND}_${P}_${T}.log || echo "Test FAILED" - echo " " - - if [ -f ${INPUT_DIR}/west0497.mtx ]; then - echo ">>> [x] [ ] Testing the spmv (y=Ax) on west0497, using dense vectors" - $runner ${TEST_BIN_DIR}/dense_spmv_${MODE}_${BACKEND} ${INPUT_DIR}/west0497.mtx direct 1 1 1 &> ${TEST_OUT_DIR}/dense_spmv_${MODE}_${BACKEND}_Ax_${P}_${T}.log - head -1 ${TEST_OUT_DIR}/dense_spmv_${MODE}_${BACKEND}_Ax_${P}_${T}.log - if grep -q 'Test OK' ${TEST_OUT_DIR}/dense_spmv_${MODE}_${BACKEND}_Ax_${P}_${T}.log; then - echo 'Test OK' - else - echo 'Test FAILED' - fi - echo " " - - echo ">>> [x] [ ] Testing the spmv (y=A^Tx) on west0497, using dense vectors" - $runner ${TEST_BIN_DIR}/dense_spmv_${MODE}_${BACKEND} ${INPUT_DIR}/west0497.mtx direct 2 1 1 &> ${TEST_OUT_DIR}/dense_spmv_${MODE}_${BACKEND}_ATx_${P}_${T}.log - head -1 ${TEST_OUT_DIR}/dense_spmv_${MODE}_${BACKEND}_ATx_${P}_${T}.log - if grep -q 'Test OK' ${TEST_OUT_DIR}/dense_spmv_${MODE}_${BACKEND}_ATx_${P}_${T}.log; then - echo 'Test OK' - else - echo 'Test FAILED' - fi - echo " " - - echo ">>> [x] [ ] Testing the spmv (y=xA) on west0497, using dense vectors" - $runner ${TEST_BIN_DIR}/dense_spmv_${MODE}_${BACKEND} ${INPUT_DIR}/west0497.mtx direct 3 1 1 &> ${TEST_OUT_DIR}/dense_spmv_${MODE}_${BACKEND}_xA_${P}_${T}.log - head -1 ${TEST_OUT_DIR}/dense_spmv_${MODE}_${BACKEND}_xA_${P}_${T}.log - if grep -q 'Test OK' ${TEST_OUT_DIR}/dense_spmv_${MODE}_${BACKEND}_xA_${P}_${T}.log; then - echo 'Test 
OK' - else - echo 'Test FAILED' - fi - echo " " - - echo ">>> [x] [ ] Testing the spmv (y=xA^T) on west0497, using dense vectors" - $runner ${TEST_BIN_DIR}/dense_spmv_${MODE}_${BACKEND} ${INPUT_DIR}/west0497.mtx direct 4 1 1 &> ${TEST_OUT_DIR}/dense_spmv_${MODE}_${BACKEND}_xAT_${P}_${T}.log - head -1 ${TEST_OUT_DIR}/dense_spmv_${MODE}_${BACKEND}_xAT_${P}_${T}.log - if grep -q 'Test OK' ${TEST_OUT_DIR}/dense_spmv_${MODE}_${BACKEND}_xAT_${P}_${T}.log; then - echo 'Test OK' - else - echo 'Test FAILED' - fi - echo " " - else - echo "Test DISABLED: ${INPUT_DIR}/west0497.mtx was not found. To enabled, please provide the dataset." - fi - - echo ">>> [x] [ ] Testing BLAS1 functions on empty vectors" - $runner ${TEST_BIN_DIR}/emptyVector_${MODE}_${BACKEND} &> ${TEST_OUT_DIR}/emptyVector_${MODE}_${BACKEND}_${P}_${T}.log - head -1 ${TEST_OUT_DIR}/emptyVector_${MODE}_${BACKEND}_${P}_${T}.log - grep -i "test ok" ${TEST_OUT_DIR}/emptyVector_${MODE}_${BACKEND}_${P}_${T}.log || echo "Test FAILED" - echo " " - - echo ">>> [x] [ ] Testing matrix times vector using the number (+,*)" - echo " semiring over integers on a diagonal 15x15 matrix. Each" - echo " of the 15 output elements are computed element-by-element" - echo " using masked operations. The implementation should keep" - echo " both the mask and the output vector sparse. In this test," - echo " the in_place variant is also tested-- there, also the" - echo " input vector shall be sparse." - $runner ${TEST_BIN_DIR}/sparse_mxv_${MODE}_${BACKEND} &> ${TEST_OUT_DIR}/sparse_mxv_${MODE}_${BACKEND}_${P}_${T}.log - head -1 ${TEST_OUT_DIR}/sparse_mxv_${MODE}_${BACKEND}_${P}_${T}.log - grep -i 'test ok' ${TEST_OUT_DIR}/sparse_mxv_${MODE}_${BACKEND}_${P}_${T}.log || echo "Test FAILED" - echo " " - - echo ">>> [x] [ ] Testing matrix times vector using the number (+,*)" - echo " semiring over integers on a 10x10 matrix. The input vector" - echo " is sparse. Each of y=Ax, y=A^Tx, y=xA, and y=xA^T is" - echo " tested in turn. 
The implementation should result in a" - echo " sparse output vector." - echo "Functional test executable: ${TEST_BIN_DIR}/sparse_vxm_${MODE}_${BACKEND}" - $runner ${TEST_BIN_DIR}/sparse_vxm_${MODE}_${BACKEND} 10 1 1 1 &> ${TEST_OUT_DIR}/sparse_vxm_${MODE}_${BACKEND}_${P}_${T}_10_1.log - $runner ${TEST_BIN_DIR}/sparse_vxm_${MODE}_${BACKEND} 10 2 1 1 &> ${TEST_OUT_DIR}/sparse_vxm_${MODE}_${BACKEND}_${P}_${T}_10_2.log - $runner ${TEST_BIN_DIR}/sparse_vxm_${MODE}_${BACKEND} 10 3 1 1 &> ${TEST_OUT_DIR}/sparse_vxm_${MODE}_${BACKEND}_${P}_${T}_10_3.log - $runner ${TEST_BIN_DIR}/sparse_vxm_${MODE}_${BACKEND} 10 4 1 1 &> ${TEST_OUT_DIR}/sparse_vxm_${MODE}_${BACKEND}_${P}_${T}_10_4.log - (grep -i "Test failed" ${TEST_OUT_DIR}/sparse_vxm_${MODE}_${BACKEND}_${P}_${T}_10_?.log) || - (grep -i "Test OK" ${TEST_OUT_DIR}/sparse_vxm_${MODE}_${BACKEND}_${P}_${T}_10_?.log) || - echo "Test FAILED" - echo " " - - echo ">>> [x] [ ] Testing matrix times vector using the number (+,*)" - echo " semiring over integers on a 1000x1000 matrix. The" - echo " input vector is sparse. Each of y=Ax, y=A^Tx, y=xA," - echo " and y=xA^T is tested in turn. The implementation" - echo " should result in a sparse output vector." 
- echo "Functional test executable: ${TEST_BIN_DIR}/sparse_vxm_${MODE}_${BACKEND}" - $runner ${TEST_BIN_DIR}/sparse_vxm_${MODE}_${BACKEND} 1000 1 1 1 &> ${TEST_OUT_DIR}/sparse_vxm_${MODE}_${BACKEND}_${P}_${T}_1000_1.log - $runner ${TEST_BIN_DIR}/sparse_vxm_${MODE}_${BACKEND} 1000 2 1 1 &> ${TEST_OUT_DIR}/sparse_vxm_${MODE}_${BACKEND}_${P}_${T}_1000_2.log - $runner ${TEST_BIN_DIR}/sparse_vxm_${MODE}_${BACKEND} 1000 3 1 1 &> ${TEST_OUT_DIR}/sparse_vxm_${MODE}_${BACKEND}_${P}_${T}_1000_3.log - $runner ${TEST_BIN_DIR}/sparse_vxm_${MODE}_${BACKEND} 1000 4 1 1 &> ${TEST_OUT_DIR}/sparse_vxm_${MODE}_${BACKEND}_${P}_${T}_1000_4.log - (grep -i "Test failed" ${TEST_OUT_DIR}/sparse_vxm_${MODE}_${BACKEND}_${P}_${T}_1000_?.log) || - (grep -i "Test OK" ${TEST_OUT_DIR}/sparse_vxm_${MODE}_${BACKEND}_${P}_${T}_1000_?.log) || - echo "Test FAILED" - echo " " - - echo ">>> [x] [ ] Testing dense vector times matrix using the double (+,*)" - echo " semiring where matrix elements are doubles and vector" - echo " elements ints. The input matrix is taken from west0497." - echo " " - if [ -f ${INPUT_DIR}/west0497.mtx ]; then - $runner ${TEST_BIN_DIR}/vxm_${MODE}_${BACKEND} ${INPUT_DIR}/west0497.mtx &> ${TEST_OUT_DIR}/vxm_${MODE}_${BACKEND}_${P}_${T}.west0497 - head -1 ${TEST_OUT_DIR}/vxm_${MODE}_${BACKEND}_${P}_${T}.west0497 - grep 'Test OK' ${TEST_OUT_DIR}/vxm_${MODE}_${BACKEND}_${P}_${T}.west0497 || echo "Test FAILED" - else - echo "Test DISABLED: west0497.mtx was not found. To enable, please provide ${INPUT_DIR}/west0497.mtx" - fi - echo " " - - echo ">>> [x] [ ] Testing matrix times dense vector using the double (+,*)" - echo " semiring where matrix elements are doubles and vector" - echo " elements ints. The input matrix is taken from west0497." 
- echo " " - if [ -f ${INPUT_DIR}/west0497.mtx ]; then - $runner ${TEST_BIN_DIR}/mxv_${MODE}_${BACKEND} ${INPUT_DIR}/west0497.mtx &> ${TEST_OUT_DIR}/mxv_${MODE}_${BACKEND}_${P}_${T}.west0497 - head -1 ${TEST_OUT_DIR}/mxv_${MODE}_${BACKEND}_${P}_${T}.west0497 - grep 'Test OK' ${TEST_OUT_DIR}/mxv_${MODE}_${BACKEND}_${P}_${T}.west0497 || echo "Test FAILED" - else - echo "Test DISABLED: west0497.mtx was not found. To enable, please provide ${INPUT_DIR}/west0497.mtx" - fi - echo " " - - echo ">>> [x] [ ] Testing grb::wait on small inputs" - $runner ${TEST_BIN_DIR}/wait_${MODE}_${BACKEND} &> ${TEST_OUT_DIR}/wait_${MODE}_${BACKEND}_${P}_${T}.log - head -1 ${TEST_OUT_DIR}/wait_${MODE}_${BACKEND}_${P}_${T}.log - grep 'Test OK' ${TEST_OUT_DIR}/wait_${MODE}_${BACKEND}_${P}_${T}.log || echo "Test FAILED" - echo " " - - echo ">>> [x] [ ] Testing grb::wait on large inputs" - $runner ${TEST_BIN_DIR}/wait_${MODE}_${BACKEND} 11733 &> ${TEST_OUT_DIR}/wait_large_${MODE}_${BACKEND}_${P}_${T}.log - head -1 ${TEST_OUT_DIR}/wait_large_${MODE}_${BACKEND}_${P}_${T}.log - grep 'Test OK' ${TEST_OUT_DIR}/wait_large_${MODE}_${BACKEND}_${P}_${T}.log || echo "Test FAILED" - echo " " - - echo ">>> [x] [ ] Testing grb::eWiseApply using + on matrices" - $runner ${TEST_BIN_DIR}/eWiseApply_matrix_${MODE}_${BACKEND} &> ${TEST_OUT_DIR}/eWiseApply_matrix_${MODE}_${BACKEND}_${P}_${T} - head -1 ${TEST_OUT_DIR}/eWiseApply_matrix_${MODE}_${BACKEND}_${P}_${T} - grep 'Test OK' ${TEST_OUT_DIR}/eWiseApply_matrix_${MODE}_${BACKEND}_${P}_${T} || echo "Test FAILED" - echo " " - - echo ">>> [x] [ ] Testing grb::eWiseLambda (matrices)" - $runner ${TEST_BIN_DIR}/eWiseMatrix_${MODE}_${BACKEND} &> ${TEST_OUT_DIR}/eWiseMatrix_${MODE}_${BACKEND}_${P}_${T}.log - head -1 ${TEST_OUT_DIR}/eWiseMatrix_${MODE}_${BACKEND}_${P}_${T}.log - grep 'Test OK' ${TEST_OUT_DIR}/eWiseMatrix_${MODE}_${BACKEND}_${P}_${T}.log || echo "Test FAILED" - echo " " - - echo ">>> [x] [ ] Testing grb::zip on two vectors of doubles and" - echo " ints 
of size 10 000 000." - $runner ${TEST_BIN_DIR}/zip_${MODE}_${BACKEND} 10000000 &> ${TEST_OUT_DIR}/zip_large_${MODE}_${BACKEND}_${P}_${T} - head -1 ${TEST_OUT_DIR}/zip_large_${MODE}_${BACKEND}_${P}_${T} - grep 'Test OK' ${TEST_OUT_DIR}/zip_large_${MODE}_${BACKEND}_${P}_${T} || echo "Test FAILED" - echo " " - - echo ">>> [x] [ ] Testing grb::select on matrices of integers and of size 3" - $runner ${TEST_BIN_DIR}/selectMatrix_${MODE}_${BACKEND} 3 &> ${TEST_OUT_DIR}/selectMatrix_${MODE}_${BACKEND}_${P}_${T}_3 - head -1 ${TEST_OUT_DIR}/selectMatrix_${MODE}_${BACKEND}_${P}_${T}_3 - grep 'Test OK' ${TEST_OUT_DIR}/selectMatrix_${MODE}_${BACKEND}_${P}_${T}_3 || echo "Test FAILED" - echo " " - - echo ">>> [x] [ ] Testing grb::select on matrices of integers and of size 5'000" - $runner ${TEST_BIN_DIR}/selectMatrix_${MODE}_${BACKEND} 5000 &> ${TEST_OUT_DIR}/selectMatrix_${MODE}_${BACKEND}_${P}_${T} - head -1 ${TEST_OUT_DIR}/selectMatrix_${MODE}_${BACKEND}_${P}_${T} - grep 'Test OK' ${TEST_OUT_DIR}/selectMatrix_${MODE}_${BACKEND}_${P}_${T} || echo "Test FAILED" - echo " " - - echo ">>> [x] [ ] Testing copy-constructor of square pattern matrices" - echo " of size 1003." - $runner ${TEST_BIN_DIR}/copyVoidMatrices_${MODE}_${BACKEND} 1003 &> ${TEST_OUT_DIR}/copyVoidMatrices_${MODE}_${BACKEND}_${P}_${T} - head -1 ${TEST_OUT_DIR}/copyVoidMatrices_${MODE}_${BACKEND}_${P}_${T} - grep 'Test OK' ${TEST_OUT_DIR}/copyVoidMatrices_${MODE}_${BACKEND}_${P}_${T} || echo "Test FAILED" - echo " " - - if [ "$BACKEND" = "bsp1d" ] || [ "$BACKEND" = "hybrid" ]; then - echo "Additional standardised unit tests not yet supported for the ${BACKEND} backend." 
- echo - continue - fi - - echo ">>> [x] [ ] Testing BLAS3 grb::mxm (unmasked) on simple matrices" - echo " of size 100 x 100 using the (+,*) semiring over" - echo " doubles" - $runner ${TEST_BIN_DIR}/mxm_${MODE}_${BACKEND} &> ${TEST_OUT_DIR}/mxm_${MODE}_${BACKEND}_${P}_${T}.log - head -1 ${TEST_OUT_DIR}/mxm_${MODE}_${BACKEND}_${P}_${T}.log - grep 'Test OK' ${TEST_OUT_DIR}/mxm_${MODE}_${BACKEND}_${P}_${T}.log || echo "Test FAILED" - echo " " - - echo ">>> [x] [ ] Testing grb::eWiseLambda on a small matrix" - $runner ${TEST_BIN_DIR}/eWiseLambda_${MODE}_${BACKEND} &> ${TEST_OUT_DIR}/eWiseLambda_${MODE}_${BACKEND}_${P}_${T}.log - head -1 ${TEST_OUT_DIR}/eWiseLambda_${MODE}_${BACKEND}_${P}_${T}.log - grep 'Test OK' ${TEST_OUT_DIR}/eWiseLambda_${MODE}_${BACKEND}_${P}_${T}.log || echo "Test FAILED" - echo " " - - echo ">>> [x] [ ] Testing grb::outer on a small matrix" - $runner ${TEST_BIN_DIR}/outer_${MODE}_${BACKEND} &> ${TEST_OUT_DIR}/outer_${MODE}_${BACKEND}_${P}_${T}.log - head -1 ${TEST_OUT_DIR}/outer_${MODE}_${BACKEND}_${P}_${T}.log - grep 'Test OK' ${TEST_OUT_DIR}/outer_${MODE}_${BACKEND}_${P}_${T}.log || echo "Test FAILED" - echo " " - - echo ">>> [x] [ ] Testing vector times matrix using the normal (+,*)" - echo " semiring over integers on a diagonal matrix" - echo " " - $runner ${TEST_BIN_DIR}/vmx_${MODE}_${BACKEND} 2> ${TEST_OUT_DIR}/vmx_${MODE}_${BACKEND}_${P}_${T}.err 1> ${TEST_OUT_DIR}/vmx_${MODE}_${BACKEND}_${P}_${T}.log - head -1 ${TEST_OUT_DIR}/vmx_${MODE}_${BACKEND}_${P}_${T}.log - grep 'Test OK' ${TEST_OUT_DIR}/vmx_${MODE}_${BACKEND}_${P}_${T}.log || echo "Test FAILED" - echo " " - - echo ">>> [x] [ ] Testing vector times matrix using a (*,+) semiring over" - echo " doubles on a diagonal matrix" - echo " " - $runner ${TEST_BIN_DIR}/vmxa_${MODE}_${BACKEND} 2> ${TEST_OUT_DIR}/vmxa_${MODE}_${BACKEND}_${P}_${T}.err 1> ${TEST_OUT_DIR}/vmxa_${MODE}_${BACKEND}_${P}_${T}.log - head -1 ${TEST_OUT_DIR}/vmxa_${MODE}_${BACKEND}_${P}_${T}.log - grep 'Test OK' 
${TEST_OUT_DIR}/vmxa_${MODE}_${BACKEND}_${P}_${T}.log || echo "Test FAILED" - echo " " - - echo ">>> [x] [ ] Testing vector times matrix using the number (+,*)" - echo " semiring over integers on a diagonal 15x15 matrix. Each" - echo " of the 15 output elements are computed, like the above" - echo " test, by masked operations. Instead of one element per" - echo " mask, this mask will have two elements. One element is" - echo " fixed to 3. All 14 combinations are tested. The in_place" - echo " specifier is tested as well." - $runner ${TEST_BIN_DIR}/masked_vxm_${MODE}_${BACKEND} &> ${TEST_OUT_DIR}/masked_vxm_${MODE}_${BACKEND}_${P}_${T}.log - head -1 ${TEST_OUT_DIR}/masked_vxm_${MODE}_${BACKEND}_${P}_${T}.log - grep -i 'test ok' ${TEST_OUT_DIR}/masked_vxm_${MODE}_${BACKEND}_${P}_${T}.log || echo "Test FAILED" - echo " " - - echo ">>> [x] [ ] Testing matrix times vector using the number (+,*)" - echo " semiring over integers on a diagonal 15x15 matrix--" - echo " apart from mxv instead of vxm, this is the same test" - echo " as the above." - $runner ${TEST_BIN_DIR}/masked_mxv_${MODE}_${BACKEND} &> ${TEST_OUT_DIR}/masked_mxv_${MODE}_${BACKEND}_${P}_${T}.log - head -1 ${TEST_OUT_DIR}/masked_mxv_${MODE}_${BACKEND}_${P}_${T}.log - grep -i 'test ok' ${TEST_OUT_DIR}/masked_mxv_${MODE}_${BACKEND}_${P}_${T}.log || echo "Test FAILED" - echo " " - - echo ">>> [x] [ ] Testing the spy algorithm on west0497. While perhaps not a pure unit" - echo " test, this test naturally hits uncommon border cases and hence is" - echo " retained within the unit test suite." 
- if [ -f ${INPUT_DIR}/west0497.mtx ]; then - $runner ${TEST_BIN_DIR}/spy_${MODE}_${BACKEND} ${INPUT_DIR}/west0497.mtx &> ${TEST_OUT_DIR}/spy_${MODE}_${BACKEND}_${P}_${T}.log - head -1 ${TEST_OUT_DIR}/spy_${MODE}_${BACKEND}_${P}_${T}.log - if grep -q 'Test OK' ${TEST_OUT_DIR}/spy_${MODE}_${BACKEND}_${P}_${T}.log; then - if grep 'Spy matrix' ${TEST_OUT_DIR}/spy_${MODE}_${BACKEND}_${P}_${T}.log | cut -d' ' -f9 | grep -q 315; then - echo 'Test OK' - else - echo 'Verification FAILED' - echo 'Test FAILED' - fi - else - echo 'Test FAILED' - fi - else - echo "Test DISABLED: west0497.mtx was not found. To enable, please provide ${INPUT_DIR}/west0497.mtx" - fi - echo " " - - echo ">>> [x] [ ] Testing Launcher and Benchmarker, AUTOMATIC mode." - test_name=launch_benchmark_auto_${MODE}_${BACKEND} - test_log=${TEST_OUT_DIR}/${test_name}_${P}_${T}.log - $runner ${TEST_BIN_DIR}/${test_name} &> ${test_log} - head -1 ${test_log} - grep -i 'Test OK' ${test_log} || echo "Test FAILED" - echo " " - - #if [ "$BACKEND" = "reference_omp" ]; then - # echo "Additional standardised unit tests not yet supported for the ${BACKEND} backend" - # echo - # continue - #fi - - #none here: all unit tests are operational for reference_omp - - if [ "$BACKEND" = "bsp1d" ] || [ "$BACKEND" = "hybrid" ]; then - echo ">>> [x] [ ] Testing Launcher and Benchmarker, FROM_MPI mode for distributed backends." - test_name=launch_benchmark_frommpi_manual_${MODE}_${BACKEND} - test_log=${TEST_OUT_DIR}/launch_benchmark_frommpi_${MODE}_${BACKEND}_${P}_${T}.log - $runner ${TEST_BIN_DIR}/${test_name} &> ${test_log} - head -1 ${test_log} - grep -i 'Test OK' ${test_log} || echo "Test FAILED" - echo " " - else - echo ">>> [x] [ ] Testing Launcher and Benchmarker, MANUAL mode for shared-memory backends." 
- test_log=${TEST_OUT_DIR}/launch_benchmark_manual_${MODE}_${BACKEND}_${P}_${T}.log - $runner ${TEST_BIN_DIR}/launch_benchmark_frommpi_manual_${MODE}_${BACKEND} localhost 77770 1 0 &> ${test_log} - head -1 ${test_log} - grep -i 'Test OK' ${test_log} || echo "Test FAILED" - echo " " - fi - - done - - done - - if [ "$BACKEND" = "bsp1d" ] || [ "$BACKEND" = "hybrid" ]; then - echo ">>> [x] [ ] Testing Launcher and Benchmarker, MANUAL mode for distributed backends." - test_name=launch_benchmark_frommpi_manual_${MODE}_${BACKEND} - test_log=${TEST_OUT_DIR}/launch_benchmark_manual_${MODE}_${BACKEND}.log - bash -c "${MANUALRUN} ${TEST_BIN_DIR}/${test_name} localhost 77770 4 0 &> ${test_log}.0 & \ - ${MANUALRUN} ${TEST_BIN_DIR}/${test_name} localhost 77770 4 3 &> ${test_log}.3 & \ - ${MANUALRUN} ${TEST_BIN_DIR}/${test_name} localhost 77770 4 1 &> ${test_log}.1 & \ - ${MANUALRUN} ${TEST_BIN_DIR}/${test_name} localhost 77770 4 2 &> ${test_log}.2 & \ - wait" - head -1 ${test_log}.0 - (grep -q 'Test OK' ${test_log}.1 && grep -q 'Test OK' ${test_log}.2 && grep -q 'Test OK' ${test_log}.3 \ - && grep -q 'Test OK' ${test_log}.0 && printf "Test OK.\n\n") || (printf "Test FAILED.\n\n") - fi - - if [ "$BACKEND" = "bsp1d" ]; then - echo "Additional unit tests for the BSP1D backend:" - echo " " - echo ">>> [x] [ ] Testing BSP1D distribution for a vector of size 100 000" - echo " " - ${TEST_BIN_DIR}/distribution_bsp1d_${MODE} - - echo ">>> [x] [ ] Testing BSP1D distribution for an identity matrix of size" - echo " 7777 x 7777. The test evaluates whether the internal data" - echo " structures match the BSP1D distribution" - echo " " - ${TEST_BIN_DIR}/distribution_matrix_bsp1d_${MODE} 7777 - - echo ">>> [x] [ ] Testing dense vector times matrix using the double (+,*)" - echo " semiring where matrix elements are doubles and vector" - echo " elements ints. The input matrix is taken from west0497." 
- if [ -f ${INPUT_DIR}/west0497.mtx ]; then - ${LPFRUN} -np 1 ${TEST_BIN_DIR}/vxm_${MODE}_bsp1d ${INPUT_DIR}/west0497.mtx &> ${TEST_OUT_DIR}/vxm_${MODE}_bsp1d.west0497.P1 - ${LPFRUN} -np 2 ${TEST_BIN_DIR}/vxm_${MODE}_bsp1d ${INPUT_DIR}/west0497.mtx &> ${TEST_OUT_DIR}/vxm_${MODE}_bsp1d.west0497.P2 - ${LPFRUN} -np 3 ${TEST_BIN_DIR}/vxm_${MODE}_bsp1d ${INPUT_DIR}/west0497.mtx &> ${TEST_OUT_DIR}/vxm_${MODE}_bsp1d.west0497.P3 - ${LPFRUN} -np 4 ${TEST_BIN_DIR}/vxm_${MODE}_bsp1d ${INPUT_DIR}/west0497.mtx &> ${TEST_OUT_DIR}/vxm_${MODE}_bsp1d.west0497.P4 - head -1 ${TEST_OUT_DIR}/vxm_${MODE}_bsp1d.west0497.P4 - if [[ ! -f ${TEST_OUT_DIR}/vxm_${MODE}_reference_1_1.west0497 ]]; then - # if golden output file not present, materialize it - ${TEST_BIN_DIR}/vxm_${MODE}_reference ${INPUT_DIR}/west0497.mtx &> ${TEST_OUT_DIR}/vxm_${MODE}_reference_1_1.west0497 - head -1 ${TEST_OUT_DIR}/vxm_${MODE}_reference_1_1.west0497 - grep 'Test OK' ${TEST_OUT_DIR}/vxm_${MODE}_reference_1_1.west0497 || echo "Test FAILED" - fi - (grep -q 'Test OK' ${TEST_OUT_DIR}/vxm_${MODE}_reference_1_1.west0497 && grep -q 'Test OK' ${TEST_OUT_DIR}/vxm_${MODE}_bsp1d.west0497.P1 && grep -q 'Test OK' ${TEST_OUT_DIR}/vxm_${MODE}_bsp1d.west0497.P2 && grep -q 'Test OK' ${TEST_OUT_DIR}/vxm_${MODE}_bsp1d.west0497.P3 && grep -q 'Test OK' ${TEST_OUT_DIR}/vxm_${MODE}_bsp1d.west0497.P4 && printf "Test OK.\n") || printf "Test FAILED.\n" - cat ${TEST_OUT_DIR}/vxm_${MODE}_reference_1_1.west0497 | grep '^[0-9][0-9]* [ ]*[-]*[0-9]' | sort -n > ${TEST_OUT_DIR}/vxm_${MODE}.west0497.chk - cat ${TEST_OUT_DIR}/vxm_${MODE}_bsp1d.west0497.P1 | grep '^[0-9][0-9]* [ ]*[-]*[0-9]' | sort -n > ${TEST_OUT_DIR}/vxm_${MODE}_bsp1d.west0497.P1.chk - cat ${TEST_OUT_DIR}/vxm_${MODE}_bsp1d.west0497.P2 | grep '^[0-9][0-9]* [ ]*[-]*[0-9]' | sort -n > ${TEST_OUT_DIR}/vxm_${MODE}_bsp1d.west0497.P2.chk - cat ${TEST_OUT_DIR}/vxm_${MODE}_bsp1d.west0497.P3 | grep '^[0-9][0-9]* [ ]*[-]*[0-9]' | sort -n > ${TEST_OUT_DIR}/vxm_${MODE}_bsp1d.west0497.P3.chk - 
cat ${TEST_OUT_DIR}/vxm_${MODE}_bsp1d.west0497.P4 | grep '^[0-9][0-9]* [ ]*[-]*[0-9]' | sort -n > ${TEST_OUT_DIR}/vxm_${MODE}_bsp1d.west0497.P4.chk - (diff -q ${TEST_OUT_DIR}/vxm_${MODE}_bsp1d.west0497.P1.chk ${TEST_OUT_DIR}/vxm_${MODE}.west0497.chk && printf "Verification (1 to serial) OK.\n") || printf "Verification (1 to serial) FAILED.\n" - (diff -q ${TEST_OUT_DIR}/vxm_${MODE}_bsp1d.west0497.P1.chk ${TEST_OUT_DIR}/vxm_${MODE}_bsp1d.west0497.P2.chk && printf "Verification (1 to 2) OK.\n") || printf "Verification (1 to 2) FAILED.\n" - (diff -q ${TEST_OUT_DIR}/vxm_${MODE}_bsp1d.west0497.P1.chk ${TEST_OUT_DIR}/vxm_${MODE}_bsp1d.west0497.P3.chk && printf "Verification (1 to 3) OK.\n") || printf "Verification (1 to 3) FAILED.\n" - (diff -q ${TEST_OUT_DIR}/vxm_${MODE}_bsp1d.west0497.P1.chk ${TEST_OUT_DIR}/vxm_${MODE}_bsp1d.west0497.P4.chk && printf "Verification (1 to 4) OK.\n\n") || printf "Verification (1 to 4) FAILED.\n\n" - else - echo "Test DISABLED: west0497.mtx was not found. To enable, please provide ${INPUT_DIR}/west0497.mtx" - fi - echo " " - - echo ">>> [x] [ ] Testing matrix times dense vector using the double (+,*)" - echo " semiring where matrix elements are doubles and vector" - echo " elements ints. The input matrix is taken from west0497." - echo " " - if [ -f ${INPUT_DIR}/west0497.mtx ]; then - ${LPFRUN} -np 1 ${TEST_BIN_DIR}/mxv_${MODE}_bsp1d ${INPUT_DIR}/west0497.mtx &> ${TEST_OUT_DIR}/mxv_${MODE}_bsp1d.west0497.P1 - ${LPFRUN} -np 2 ${TEST_BIN_DIR}/mxv_${MODE}_bsp1d ${INPUT_DIR}/west0497.mtx &> ${TEST_OUT_DIR}/mxv_${MODE}_bsp1d.west0497.P2 - ${LPFRUN} -np 3 ${TEST_BIN_DIR}/mxv_${MODE}_bsp1d ${INPUT_DIR}/west0497.mtx &> ${TEST_OUT_DIR}/mxv_${MODE}_bsp1d.west0497.P3 - ${LPFRUN} -np 4 ${TEST_BIN_DIR}/mxv_${MODE}_bsp1d ${INPUT_DIR}/west0497.mtx &> ${TEST_OUT_DIR}/mxv_${MODE}_bsp1d.west0497.P4 - head -1 ${TEST_OUT_DIR}/mxv_${MODE}_bsp1d.west0497.P4 - if [[ ! 
-f ${TEST_OUT_DIR}/mxv_${MODE}_reference_1_1.west0497 ]]; then - # if golden output file not present, materialize it - ${TEST_BIN_DIR}/mxv_${MODE}_reference ${INPUT_DIR}/west0497.mtx &> ${TEST_OUT_DIR}/mxv_${MODE}_reference_1_1.west0497 - head -1 ${TEST_OUT_DIR}/mxv_${MODE}_reference_1_1.west0497 - grep 'Test OK' ${TEST_OUT_DIR}/mxv_${MODE}_reference_1_1.west0497 || echo "Test FAILED" - fi - (grep -q 'Test OK' ${TEST_OUT_DIR}/mxv_${MODE}_reference_1_1.west0497 && grep -q 'Test OK' ${TEST_OUT_DIR}/mxv_${MODE}_bsp1d.west0497.P1 && grep -q 'Test OK' ${TEST_OUT_DIR}/mxv_${MODE}_bsp1d.west0497.P2 && grep -q 'Test OK' ${TEST_OUT_DIR}/mxv_${MODE}_bsp1d.west0497.P3 && grep -q 'Test OK' ${TEST_OUT_DIR}/mxv_${MODE}_bsp1d.west0497.P4 && printf "Test OK.\n") || printf "Test FAILED.\n" - cat ${TEST_OUT_DIR}/mxv_${MODE}_reference_1_1.west0497 | grep '^[0-9][0-9]* [ ]*[-]*[0-9]' | sort -n > ${TEST_OUT_DIR}/mxv_${MODE}.west0497.chk - cat ${TEST_OUT_DIR}/mxv_${MODE}_bsp1d.west0497.P1 | grep '^[0-9][0-9]* [ ]*[-]*[0-9]' | sort -n > ${TEST_OUT_DIR}/mxv_${MODE}_bsp1d.west0497.P1.chk - cat ${TEST_OUT_DIR}/mxv_${MODE}_bsp1d.west0497.P2 | grep '^[0-9][0-9]* [ ]*[-]*[0-9]' | sort -n > ${TEST_OUT_DIR}/mxv_${MODE}_bsp1d.west0497.P2.chk - cat ${TEST_OUT_DIR}/mxv_${MODE}_bsp1d.west0497.P3 | grep '^[0-9][0-9]* [ ]*[-]*[0-9]' | sort -n > ${TEST_OUT_DIR}/mxv_${MODE}_bsp1d.west0497.P3.chk - cat ${TEST_OUT_DIR}/mxv_${MODE}_bsp1d.west0497.P4 | grep '^[0-9][0-9]* [ ]*[-]*[0-9]' | sort -n > ${TEST_OUT_DIR}/mxv_${MODE}_bsp1d.west0497.P4.chk - (diff -q ${TEST_OUT_DIR}/mxv_${MODE}_bsp1d.west0497.P1.chk ${TEST_OUT_DIR}/mxv_${MODE}.west0497.chk && printf "Verification (1 to serial) OK.\n") || printf "Verification (1 to serial) FAILED.\n" - (diff -q ${TEST_OUT_DIR}/mxv_${MODE}_bsp1d.west0497.P1.chk ${TEST_OUT_DIR}/mxv_${MODE}_bsp1d.west0497.P2.chk && printf "Verification (1 to 2) OK.\n") || printf "Verification (1 to 2) FAILED.\n" - (diff -q ${TEST_OUT_DIR}/mxv_${MODE}_bsp1d.west0497.P1.chk 
${TEST_OUT_DIR}/mxv_${MODE}_bsp1d.west0497.P3.chk && printf "Verification (1 to 3) OK.\n") || printf "Verification (1 to 3) FAILED.\n" - (diff -q ${TEST_OUT_DIR}/mxv_${MODE}_bsp1d.west0497.P1.chk ${TEST_OUT_DIR}/mxv_${MODE}_bsp1d.west0497.P4.chk && printf "Verification (1 to 4) OK.\n\n") || printf "Verification (1 to 4) FAILED.\n\n" - else - echo "Test DISABLED: west0497.mtx was not found. To enable, please provide ${INPUT_DIR}/west0497.mtx" - fi - echo " " - - echo ">>> [x] [ ] Testing BSP1D distribution." - echo " " - ${LPFRUN} -np 1 ${TEST_BIN_DIR}/distribution_${MODE} - fi - done - - echo ">>> [x] [ ] Testing threadlocal storage, parallel, double values," - echo " including checks for const-correctness." - echo " " - ${TEST_BIN_DIR}/thread_local_storage_${MODE} - -done - -echo -echo "*****************************************************************************************" -echo "All unit tests done." -echo " " - diff --git a/tests/unit/utilParserTest.cpp b/tests/unit/utilParserTest.cpp index 30d8d6624..78efae1cd 100644 --- a/tests/unit/utilParserTest.cpp +++ b/tests/unit/utilParserTest.cpp @@ -23,11 +23,9 @@ int main( int argc, char ** argv ) { std::cout << "Functional test executable: " << argv[ 0 ] << "\n"; - if( argc != 2 ) { - std::cerr << "please, give path to west0497.mtx" << std::endl; - std::cout << "Test FAILED" << std::endl; - return 255; + std::cerr << "path to input file west0497.mtx is needed" << std::endl; + std::exit( 1 ); } int ret = 0; diff --git a/tests/unit/vxm.cpp b/tests/unit/vxm.cpp index 81ccb2a36..0284ca3ac 100644 --- a/tests/unit/vxm.cpp +++ b/tests/unit/vxm.cpp @@ -20,16 +20,19 @@ #include #include +#include #include "graphblas/utils/parser.hpp" #include "graphblas.hpp" +#include using namespace grb; struct output { int exit_code; + std::unique_ptr< PinnedVector< double > > pinnedVector; }; struct input { @@ -78,35 +81,15 @@ void grbProgram( const struct input &in, struct output &out ) { return; } - const size_t P = 
grb::spmd<>::nprocs(); - const size_t s = grb::spmd<>::pid(); - if( s == 0 ) { - std::cout << "%%MatrixMarket vector coordinate double general\n"; - std::cout << "%Global index \tValue\n"; - std::cout << grb::size( y ) << "\n"; - } - for( size_t k = 0; k < P; ++k ) { - if( k == s ) { - for( const auto &pair : y ) { - const size_t index = pair.first; - std::cout << index << " " - << std::round( pair.second ) << "\n"; - } - } - grb::spmd<>::barrier(); - } - + out.pinnedVector = std::unique_ptr< PinnedVector< double > >( new PinnedVector< double >( y, SEQUENTIAL ) ); out.exit_code = 0; - - std::cout << std::flush; - std::cerr << std::flush; } int main( int argc, char ** argv ) { std::cout << "Functional test executable: " << argv[ 0 ] << "\n"; - if( argc != 2 ) { - std::cout << "Usage: " << argv[ 0 ] << " " << std::endl; + if( argc != 3 ) { + std::cout << "Usage: " << argv[ 0 ] << " " << std::endl; return EXIT_SUCCESS; } @@ -123,7 +106,16 @@ int main( int argc, char ** argv ) { return EXIT_FAILURE; } - if( out.exit_code != 0 ) { + int rc = 0; + if( not out.pinnedVector ) { + std::cout << "no pinned vector" << std::endl; + rc = 1; + } else { + const char * truth_filename = argv[ 2 ]; + rc = vector_verification( *out.pinnedVector, truth_filename, 1e-5, 1e-6 ); + } + + if( out.exit_code != 0 || rc != 0 ) { std::cerr << std::flush; std::cout << "Test FAILED (program returned non-zero exit code " << out.exit_code << "\n" << std::endl; diff --git a/tests/utils/output_verification.hpp b/tests/utils/output_verification.hpp index c9c3ae403..bbcdcc95d 100644 --- a/tests/utils/output_verification.hpp +++ b/tests/utils/output_verification.hpp @@ -25,7 +25,6 @@ #ifndef _H_GRB_UTILS_OUTPUT_VERIFICATION #define _H_GRB_UTILS_OUTPUT_VERIFICATION -#include #include #include @@ -34,55 +33,14 @@ #include #include #include +#include +#include +#include #include - -/** - * Attempts to read in a value from a given file into a given memory - * location. 
- * - * @tparam T The datatype of the value - * - * @param[in] in The input file - * @param[out] out Where to store the read value. - * - * @returns 0 on success and 1 on failure. - * - * If the function fails, \a out shall not be assigned. - * - * \internal This is the overload for reading T data. - */ -template< typename T > -int data_fscanf( std::ifstream &in, T * const out ) { - return !(in >> *out); -}; - -/** - * Attempts to read in a complex value from a given file into a given memory - * location. - * - * @tparam T The data type to be used in the complex value - * - * @param[in] in The input file - * @param[out] out Where to store the read value. - * - * @returns 0 on success and 1 on failure. - * - * If the function fails, \a out shall not be assigned. - * - * \internal This is the overload for reading complex data. - */ -template< typename T > -int data_fscanf( std::ifstream &in, std::complex< T > * const out ) { - T x, y; - if( in >> x >> y ) { - *out = std::complex< T >( x, y ); - return 0; - } else { - return 1; - } -}; +#include +#include "read_dense_vector.hpp" /** * Verifies a dense vector against a ground-truth output vector. @@ -142,38 +100,11 @@ int vector_verification( assert( c2 > 0 ); assert( c2 < 1 ); const constexpr T one = static_cast< T >( 1 ); - // open verification file - std::ifstream in; - in.open( truth_filename); - - if( !in.is_open() ) { - std::stringstream error; - error << "Could not open the file \"" << truth_filename << "\"." 
- << std::endl; - throw std::runtime_error(error.str()); - } - // read the truth output vector from the input verification file const size_t n = output_vector.size(); - T * const truth = new T[ n ]; - if( truth == nullptr ) { - std::cerr << "Could not allocate necessary buffer" << std::endl; - throw std::bad_alloc(); - } + std::unique_ptr< T[] > truth( new T[ n ] ); - for( size_t i = 0; i < n; i++ ) { - const int rc = data_fscanf( in, truth + i ); - if( rc != 0 ) { - std::stringstream error; - error << "The verification file looks incomplete. " << "Line i = " << i - << ", data = " << truth[ i ] << ", rc = " << rc << std::endl; - delete [] truth; - throw std::runtime_error(error.str()); - } - } - - // close verification file - in.close(); + read_dense_vector_to_array( truth_filename, truth.get(), n ); // compute magnitudes double magnitude2 = 0; @@ -189,20 +120,13 @@ int vector_verification( magnitude2 = sqrt( magnitude2 ); // convert the Pinned Vector into raw data - T * const raw_output_vector = new T[ n ]; - bool * const written_to = new bool[ n ]; - if( raw_output_vector == nullptr || written_to == nullptr ) { - std::cerr << "Could not allocate necessary buffers" << std::endl; - delete [] truth; - throw std::bad_alloc(); - } - for( size_t i = 0; i < n; i++ ) { - written_to[ i ] = false; - } + std::unique_ptr< T[] > raw_output_vector( new T[ n ] ); + + std::vector< bool > written_to( n, false ); for( size_t k = 0; k < output_vector.nonzeroes(); k++ ) { - const T &value = output_vector.getNonzeroValue( k, one ); const size_t index = output_vector.getNonzeroIndex( k ); + const T &value = output_vector.getNonzeroValue( k, one ); assert( index < n ); assert( !written_to[ index ] ); raw_output_vector[ index ] = value; @@ -233,7 +157,7 @@ int vector_verification( // curInfNorm, but prevents a bunch of code duplication for checking the // output at i = 0. We prefer no code duplication. 
for( size_t i = 0; i < n; i++ ) { - const double curInfNorm = fabs( raw_output_vector[ i ] - truth[ i ] ); + const double curInfNorm = static_cast < double >( std::abs( raw_output_vector[ i ] - truth[ i ] ) ); // if any of the variables involved in the condition below is NaN or -NaN // the condition evaluated by the function isless will be false and then // the whole condition of the if-statement will be evaluated to true @@ -284,14 +208,6 @@ int vector_verification( ret += 8; } - // free local buffers - assert( truth != nullptr ); - assert( written_to != nullptr ); - assert( raw_output_vector != nullptr ); - delete [] truth; - delete [] written_to; - delete [] raw_output_vector; - // perform check and return if( !std::isless( norm2, c1 * magnitude2 + n * eps ) ) { std::cerr << "Output vector failed 2-norm verification:\n" diff --git a/tests/utils/read_dense_vector.hpp b/tests/utils/read_dense_vector.hpp new file mode 100644 index 000000000..a48284281 --- /dev/null +++ b/tests/utils/read_dense_vector.hpp @@ -0,0 +1,107 @@ + +#ifndef _H_GRB_UTILS_READ_DENSE_VECTOR +#define _H_GRB_UTILS_READ_DENSE_VECTOR + +#include +#include +#include +#include +#include + +/** + * Attempts to read in a value from a given file into a given memory + * location. + * + * @tparam T The datatype of the value + * + * @param[in] in The input file + * @param[out] out Where to store the read value. + * + * @returns 0 on success and 1 on failure. + * + * If the function fails, \a out shall not be assigned. + * + * \internal This is the overload for reading T data. + */ +template< typename T > +int data_fscanf( std::ifstream &in, T * const out ) { + return !(in >> *out); +} + +/** + * Attempts to read in a complex value from a given file into a given memory + * location. + * + * @tparam T The data type to be used in the complex value + * + * @param[in] in The input file + * @param[out] out Where to store the read value. + * + * @returns 0 on success and 1 on failure. 
+ * + * If the function fails, \a out shall not be assigned. + * + * \internal This is the overload for reading complex data. + */ +template< typename T > +int data_fscanf( std::ifstream &in, std::complex< T > * const out ) { + T x, y; + if( in >> x >> y ) { + *out = std::complex< T >( x, y ); + return 0; + } else { + return 1; + } +} + +/** + * Reads the values stored on different lines of text file \a filename into array \a dst; + * it reads exactly \a dst_size values, and throws if less values are available or more + * are present. + * + * This function assumes each line contains one and one value only. + * + * @tparam T type of values to read and store + * @param filename name of input file + * @param dst pointer to destination array + * @param dst_size number of values to read + */ +template< typename T > +void read_dense_vector_to_array( + const char * const filename, + T * dst, + size_t dst_size +) { + if( filename == nullptr ) { + throw std::runtime_error( "filename is nullptr" ); + } + if( dst == nullptr ) { + throw std::runtime_error( "destination vector is nullptr" ); + } + if( dst_size == 0UL ) { + throw std::runtime_error( "destination vector size is 0" ); + } + std::ifstream in( filename); + + if( not in.is_open() ) { + throw std::runtime_error( "Could not open the file \"" + std::string( filename ) + "\""); + } + + int rc = 0; + size_t i = 0; + for( ; i < dst_size && rc == 0; i++ ) { + rc = data_fscanf( in, dst + i ); + } + if( rc != 0 || i < dst_size ) { + throw std::runtime_error( "file \"" + std::string( filename ) + + "\" looks incomplete from line " + std::to_string( i - 1 ) ); + } + while( not in.eof() ) { + if( std::isalnum( in.get() ) ) { + throw std::runtime_error( "file \"" + std::string( filename ) + + "\" has more than " + std::to_string( dst_size ) + " lines" ); + } + } +} + +#endif // _H_GRB_UTILS_READ_DENSE_VECTOR diff --git a/tests/validate_run.sh b/tests/validate_run.sh new file mode 100755 index 000000000..a8d9baa34 --- /dev/null 
+++ b/tests/validate_run.sh @@ -0,0 +1,45 @@ +#!/bin/bash + +# +# Copyright 2023 Huawei Technologies Co., Ltd. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +### runner for validation: runs the command passed in input, typically with piping or other +# Bash operators (&&, ||, ...) + +# log of test +infile=$1 +# path to output file for the validation +outfile=$2 +shift 2 + +# validation command +validation_command=$@ + +echo ">>> running: ${validation_command}" +eval "${validation_command}" +retcode=$? + +if [[ "${retcode}" != "0" && -f "${infile}" ]]; then + cp "${infile}" "${outfile}" + echo "-- copying original input file \"${infile}\" into \"${outfile}\"" + # report attachment for Gitlab CI + if [[ ! -z "${CI_PROJECT_DIR}" ]]; then + rel_path=$(realpath --relative-to="${CI_PROJECT_DIR}" "${outfile}") + echo "[[ATTACHMENT|${rel_path}]]" + fi +fi + +exit ${retcode} diff --git a/tools/ctest-junit-parse.py b/tools/ctest-junit-parse.py new file mode 100644 index 000000000..3c8bbe476 --- /dev/null +++ b/tools/ctest-junit-parse.py @@ -0,0 +1,140 @@ +# +# Copyright 2023 Huawei Technologies Co., Ltd. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +### starting from the JUnit files produced from CTest, it summarizes test results +# per outcome, printing which tests failed; if requested, remove the logs of successful +# tests in order to keep the size of the artifacts uploaded from the CI minimal +# (by uploading only the logs of failed tests); it parsers the JUnit XML file +# using Python internal 'xml' package + +import sys +import os +import argparse +import xml.etree.ElementTree as ET + +ALL_CATEGORIES=[ "unit", "performance", "smoke" ] + +parser = argparse.ArgumentParser(description='ALP/GraphBLAS test result parser') +parser.add_argument('--categories', help='Test categories', required=True, nargs='+', choices=ALL_CATEGORIES ) +parser.add_argument('--xmls-dir', + help='directory with XML files (one per test category: .xml, .xml)', + required=True +) +parser.add_argument('--remove-successful-logs-from', + help='Remove logs of successful tests stored in this directory, under //output' +) +args = parser.parse_args() + +# manipulate an XML nodes to find test names matching a status string +def get_names_list( tags ): + return [ at.attrib['name'] for at in tags ] + +def get_tags_with_status( root, status_str ): + return root.findall('./testcase[@status="' + status_str + '"]') + +def filter_test_names( root, status_str ): + return [ at.attrib['name'] for at in get_tags_with_status( root, status_str ) ] + +# list all tests under the given caption +def list_tests( test_names, caption ): + if len(test_names) > 0: + print( caption ) + for tn in test_names: + print("-", tn) + print() + +# analyze 
tests for the given category, returning the number of failed ones +# and the list of successful ones +def analyze_tests( xmls_dir, category ): + xml_path = os.path.join( os.path.abspath( xmls_dir), category + '.xml' ) + tree = ET.parse( xml_path ) + root = tree.getroot() + + # extract counts from root attributes + at = root.attrib + num_tests = int( at['tests'] ) + num_failures = int( at['failures'] ) + num_disabled = int( at['disabled'] ) + num_skipped = int( at['skipped'] ) + num_passed = num_tests - num_failures - num_disabled - num_skipped + + # pretty print summary + lines = [ + ( "TESTS:", num_tests ), + ( "success:", num_passed ), + ( "disabled:", num_disabled ), + ( "skipped:", num_skipped ), + ( "FAILED:", num_failures ) + ] + for s, v in lines: + print( "{:<9} {:>4}".format( s, v ) ) + print() + + # filter test names by status and list them + if num_disabled > 0: + disabled_tests = filter_test_names(root, 'disabled') + list_tests(disabled_tests, "DISABLED TESTS:") + if num_skipped > 0: + skipped_tests = filter_test_names(root, 'notrun') + list_tests(skipped_tests, "SKIPPED TESTS:") + if num_failures > 0: + failed_tests = filter_test_names(root, 'fail') + list_tests(failed_tests, "FAILED TESTS:") + if num_passed > 0: + passed_tests = filter_test_names(root, 'run') + else: + passed_tests = [] + return num_failures, passed_tests + +total_num_failures = 0 +per_category_passed_tests = dict() +for category in args.categories: + print("CATEGORY:", category) + # for each category, accumulate number of failed tests and store passed tests into a dictionary + test_num_failures, passed_tests = analyze_tests( args.xmls_dir, category ) + total_num_failures += test_num_failures + per_category_passed_tests[category] = passed_tests + +if args.remove_successful_logs_from is None: + sys.exit( 0 if total_num_failures == 0 else 1 ) + +base_directory = os.path.abspath( args.remove_successful_logs_from ) +if not os.path.isdir(base_directory): + print(f"directory 
{base_directory} does not exist") + sys.exit(1) + +print("removing logs of successful tests") +for category in args.categories: + passed_tests_names = per_category_passed_tests[category] + # build path to test log: / / output / + indir = os.path.join( base_directory, category, "output" ) + for test_name in passed_tests_names: + # test log files are expecte to be named = -output.log + filename = test_name + "-output.log" + input_file = os.path.join( indir, filename ) + if not os.path.exists( input_file ): + # silently skip removing this log, the test may have produced none + continue + try: + os.remove(input_file) + except FileNotFoundError as e: + print(f"{input_file} is not found:", e) + except OSError as e: + print(f"{input_file} is a directory:", e) + except BaseException as e: + print(f"{input_file}: unknown error:", e) + +sys.exit( 0 if total_num_failures == 0 else 1 )