From 8b31c9a5a6534a8acf951d557092d7788793686c Mon Sep 17 00:00:00 2001 From: Louis Dionne Date: Fri, 3 Oct 2025 10:31:27 -0400 Subject: [PATCH] Run Tox tests in the Github Action CI This patch enables running the Tox tests in Github Actions CI. It also adds some dependencies required to run the tests locally with Tox. An important change is that we move from LLVM's FileCheck implementation to a near-equivalent Python version that can be installed with pip. This will make the test suite more independent and easier to run: requiring a full LLVM monorepo checkout and a configured LLVM build just to run the LNT unit tests seems like a very cumbersome requirement. Also, this updates the documentation to reflect my current understanding of how the test suite can be run. Note that the tests are not passing at the moment, we will need to update some dependencies and fix other issues. But this gets us closer to running them, and at least we can see the progress being made in the CI. --- .../workflows/{flake8_docs.yaml => tox.yaml} | 4 +- docs/developer_guide.rst | 24 ++++++------ tests/Formats/json.py | 4 +- tests/Formats/plist.py | 4 +- tests/lnttool/PostgresDB.shtest | 4 +- tests/lnttool/Profile.py | 8 ++-- tests/lnttool/UpdateDB.py | 4 +- tests/lnttool/admin.shtest | 20 +++++----- tests/lnttool/checkformat.shtest | 10 ++--- tests/lnttool/email_tools.py | 4 +- tests/lnttool/showtests.shtest | 2 +- tests/lnttool/submit.shtest | 22 +++++------ tests/lnttool/submit_badurl.shtest | 2 +- tests/runtest/exclude_stat.py | 4 +- tests/runtest/multisample.py | 4 +- tests/runtest/nt-darwin.py | 2 +- tests/runtest/nt.py | 38 +++++++++---------- tests/runtest/only_test.py | 4 +- tests/runtest/rerun.py | 8 ++-- .../test_suite-benchmarking-only.shtest | 2 +- tests/runtest/test_suite-c-compiler.shtest | 2 +- tests/runtest/test_suite-cache.shtest | 12 +++--- tests/runtest/test_suite-cc.shtest | 2 +- tests/runtest/test_suite-cflags0.shtest | 2 +- tests/runtest/test_suite-cflags1.shtest | 2 
+- tests/runtest/test_suite-cflags2.shtest | 2 +- tests/runtest/test_suite-cflags3.shtest | 2 +- tests/runtest/test_suite-cflags4.shtest | 2 +- tests/runtest/test_suite-compile-only.shtest | 2 +- tests/runtest/test_suite-cross.shtest | 2 +- tests/runtest/test_suite-fail-compile.shtest | 2 +- tests/runtest/test_suite-fail-exec.shtest | 2 +- tests/runtest/test_suite-machine-name.shtest | 2 +- tests/runtest/test_suite-metrics.shtest | 4 +- tests/runtest/test_suite-only-test.shtest | 2 +- tests/runtest/test_suite-perf-events.shtest | 2 +- tests/runtest/test_suite-pgo.shtest | 4 +- tests/runtest/test_suite-profile.shtest | 2 +- tests/runtest/test_suite-run-order.shtest | 2 +- tests/runtest/test_suite-run-under.shtest | 6 +-- tests/runtest/test_suite-use-perf.shtest | 2 +- tests/runtest/test_suite.shtest | 14 +++---- tests/runtest/test_suite_diagnose.shtest | 4 +- tests/server/db/ImportV4TestSuiteInstance.py | 10 ++--- tests/server/db/yamlschema.shtest | 12 +++--- tox.ini | 5 +++ 46 files changed, 143 insertions(+), 136 deletions(-) rename .github/workflows/{flake8_docs.yaml => tox.yaml} (89%) diff --git a/.github/workflows/flake8_docs.yaml b/.github/workflows/tox.yaml similarity index 89% rename from .github/workflows/flake8_docs.yaml rename to .github/workflows/tox.yaml index 1e8194fa..480fb812 100644 --- a/.github/workflows/flake8_docs.yaml +++ b/.github/workflows/tox.yaml @@ -1,4 +1,4 @@ -name: Python Flake8, Docs, mypy +name: Run Tox tests on: - push @@ -24,3 +24,5 @@ jobs: run: tox -e docs - name: Tox mypy run: tox -e mypy + - name: Tox py3 + run: tox -e py3 diff --git a/docs/developer_guide.rst b/docs/developer_guide.rst index 32d7c354..fe4352e9 100644 --- a/docs/developer_guide.rst +++ b/docs/developer_guide.rst @@ -24,20 +24,20 @@ all times, therefore you should run the regression tests as part of your development work-flow, just like you do when developing on other LLVM sub-projects. -The LNT regression tests make use of lit and other tools like FileCheck. 
At -the moment, probably the easiest way to get them installed is to compile LLVM -and use the binaries that are generated there. Assuming you've build LLVM -into $LLVMBUILD, and installed lnt in $LNTINSTALL you can run the regression -tests using the following command:: +The LNT regression tests make use of lit and other tools like `filecheck <https://github.com/AntonLydike/filecheck>`_. +To run the tests, we recommend using ``tox`` in a virtual environment:: - PATH=$LLVMBUILD/bin:$LNTINSTALL/bin:$PATH llvm-lit -sv ./tests + python3 -m venv .venv + source .venv/bin/activate + pip install tox + tox -If you don't like temporary files being created in your LNT source directory, -you can run the tests in a different directory too:: +You can also run individual unit tests with ``lit`` directly:: - mkdir ../run_lnt_tests - cd ../run_lnt_tests - PATH=$LLVMBUILD/bin:$LNTINSTALL/bin:$PATH llvm-lit -sv ../lnt/tests + pip install lit + lit -sv ./tests + +However, that requires manually setting up the testing environment (``filecheck``, etc). For simple changes, adding a regression test and making sure all regression tests pass, is often a good enough testing approach. For some changes, the
You can enable them by passing additional flags to lit: Example:: - PATH=$LLVMBUILD/bin:$LNTINSTALL/bin:$PATH llvm-lit -sv -Dpostgres=1 -Dmysql=1 -Dtidylib=1 ../lnt/tests + lit -sv -Dpostgres=1 -Dmysql=1 -Dtidylib=1 ./tests diff --git a/tests/Formats/json.py b/tests/Formats/json.py index 54c2a9c3..c54d9567 100644 --- a/tests/Formats/json.py +++ b/tests/Formats/json.py @@ -1,4 +1,4 @@ -# RUN: lnt convert --to=json %S/Inputs/test.json | FileCheck %s -# RUN: lnt convert --to=json < %S/Inputs/test.json | FileCheck %s +# RUN: lnt convert --to=json %S/Inputs/test.json | filecheck %s +# RUN: lnt convert --to=json < %S/Inputs/test.json | filecheck %s # CHECK: {"a": 1} diff --git a/tests/Formats/plist.py b/tests/Formats/plist.py index 1acb90a9..5aca8d37 100644 --- a/tests/Formats/plist.py +++ b/tests/Formats/plist.py @@ -1,4 +1,4 @@ -# RUN: lnt convert --to=json %S/Inputs/test.plist | FileCheck %s -# RUN: lnt convert --to=json < %S/Inputs/test.plist | FileCheck %s +# RUN: lnt convert --to=json %S/Inputs/test.plist | filecheck %s +# RUN: lnt convert --to=json < %S/Inputs/test.plist | filecheck %s # CHECK: {"a": 1} diff --git a/tests/lnttool/PostgresDB.shtest b/tests/lnttool/PostgresDB.shtest index c4816fb2..ef506c25 100644 --- a/tests/lnttool/PostgresDB.shtest +++ b/tests/lnttool/PostgresDB.shtest @@ -22,7 +22,7 @@ lnt import "${TESTDIR}/instance" "${SHARED_INPUTS}/sample-b-small.plist" --show- # lnt updatedb "${TESTDIR}/instance" --testsuite nts --delete-run 1 \ --show-sql >& "${TESTDIR}/runrm.out" -# RUN: FileCheck --check-prefix CHECK-RUNRM %s < "%t.install/runrm.out" +# RUN: filecheck --check-prefix CHECK-RUNRM %s < "%t.install/runrm.out" # CHECK-RUNRM: DELETE FROM "NT_Sample" WHERE "NT_Sample"."ID" = %(ID)s # CHECK-RUNRM-NEXT: ({'ID': 1}, {'ID': 2}) @@ -38,7 +38,7 @@ lnt import "${TESTDIR}/instance" "${SHARED_INPUTS}/sample-a-small.plist" --show- lnt updatedb "${TESTDIR}/instance" --testsuite nts \ --delete-machine "LNT SAMPLE MACHINE" \ --show-sql >& 
"${TESTDIR}/machinerm.out" -# RUN: FileCheck --check-prefix CHECK-MACHINERM %s < "%t.install/machinerm.out" +# RUN: filecheck --check-prefix CHECK-MACHINERM %s < "%t.install/machinerm.out" # CHECK-MACHINERM: DELETE FROM "NT_Sample" WHERE "NT_Sample"."ID" = %(ID)s # CHECK-MACHINERM-NEXT: ({'ID': 3}, {'ID': 4}, {'ID': 5}) diff --git a/tests/lnttool/Profile.py b/tests/lnttool/Profile.py index 807eb55e..38222e16 100644 --- a/tests/lnttool/Profile.py +++ b/tests/lnttool/Profile.py @@ -1,13 +1,13 @@ -# RUN: lnt profile getVersion %S/Inputs/test.lntprof | FileCheck --check-prefix=CHECK-GETVERSION %s +# RUN: lnt profile getVersion %S/Inputs/test.lntprof | filecheck --check-prefix=CHECK-GETVERSION %s # CHECK-GETVERSION: 1 -# RUN: lnt profile getTopLevelCounters %S/Inputs/test.lntprof | FileCheck --check-prefix=CHECK-GETTLC %s +# RUN: lnt profile getTopLevelCounters %S/Inputs/test.lntprof | filecheck --check-prefix=CHECK-GETTLC %s # CHECK-GETTLC: {"cycles": 12345.0, "branch-misses": 200.0} -# RUN: lnt profile getFunctions --sortkeys %S/Inputs/test.lntprof | FileCheck --check-prefix=CHECK-GETFUNCTIONS %s +# RUN: lnt profile getFunctions --sortkeys %S/Inputs/test.lntprof | filecheck --check-prefix=CHECK-GETFUNCTIONS %s # CHECK-GETFUNCTIONS: {"fn1": {"counters": {"branch-misses": 10.0, "cycles": 45.0}, "length": 2}} -# RUN: lnt profile getCodeForFunction %S/Inputs/test.lntprof fn1 | FileCheck --check-prefix=CHECK-GETFN1 %s +# RUN: lnt profile getCodeForFunction %S/Inputs/test.lntprof fn1 | filecheck --check-prefix=CHECK-GETFN1 %s # CHECK-GETFN1: [{}, 1048576, "add r0, r0, r0"], [{"cycles": 100.0}, 1048580, "sub r1, r0, r0"]] # RUN: mkdir -p %t diff --git a/tests/lnttool/UpdateDB.py b/tests/lnttool/UpdateDB.py index 9ead5ad2..f4557c9a 100644 --- a/tests/lnttool/UpdateDB.py +++ b/tests/lnttool/UpdateDB.py @@ -9,7 +9,7 @@ # # RUN: lnt updatedb %t.install --testsuite nts \ # RUN: --delete-run 1 --show-sql >& %t.out -# RUN: FileCheck --check-prefix CHECK-RUNRM %s < %t.out +# RUN: 
filecheck --check-prefix CHECK-RUNRM %s < %t.out # CHECK-RUNRM: DELETE FROM "NT_Sample" WHERE "NT_Sample"."ID" = ? # CHECK-RUNRM-NEXT: ((1,), (2,)) @@ -25,7 +25,7 @@ # RUN: --show-sample-count # RUN: lnt updatedb %t.install --testsuite nts \ # RUN: --delete-machine "LNT SAMPLE MACHINE" --show-sql >& %t.out -# RUN: FileCheck --check-prefix CHECK-MACHINERM %s < %t.out +# RUN: filecheck --check-prefix CHECK-MACHINERM %s < %t.out # CHECK-MACHINERM: DELETE FROM "NT_Sample" WHERE "NT_Sample"."ID" = ? # CHECK-MACHINERM-NEXT: ((1,), (2,)) diff --git a/tests/lnttool/admin.shtest b/tests/lnttool/admin.shtest index e6975ce9..27d1c8c3 100644 --- a/tests/lnttool/admin.shtest +++ b/tests/lnttool/admin.shtest @@ -12,7 +12,7 @@ cd "$DIR" rm -rf lntadmin.yaml lnt admin create-config > create_config.stdout mv lntadmin.yaml create_config.txt -# RUN: FileCheck %s --check-prefix=CREATE_CONFIG < %t.tmp/create_config.txt +# RUN: filecheck %s --check-prefix=CREATE_CONFIG < %t.tmp/create_config.txt # CREATE_CONFIG: lnt_url: "http://localhost:8000" # CREATE_CONFIG-NEXT: database: default # CREATE_CONFIG-NEXT: testsuite: nts @@ -28,11 +28,11 @@ auth_token: test_token __EOF__ lnt admin post-run "${SHARED_INPUTS}/sample-a-small.plist" > post_run.stdout -# RUN: FileCheck %s --check-prefix=POST_RN < %t.tmp/post_run.stdout +# RUN: filecheck %s --check-prefix=POST_RN < %t.tmp/post_run.stdout # POST_RN: http://localhost:9092/api/db_default/v4/nts/runs/3 lnt admin machine-info 1 > machine_info.stdout -# RUN: FileCheck %s --check-prefix=MACHINE_INFO < %t.tmp/machine_info.stdout +# RUN: filecheck %s --check-prefix=MACHINE_INFO < %t.tmp/machine_info.stdout # MACHINE_INFO: name: localhost__clang_DEV__x86_64 # MACHINE_INFO: id: 1 # MACHINE_INFO: hardware: x86_64 @@ -42,7 +42,7 @@ lnt admin machine-info 1 > machine_info.stdout rm -rf run_3.json lnt admin get-run 3 > get_run.stdout -# RUN: FileCheck %s --check-prefix=GET_RN < %t.tmp/run_3.json +# RUN: filecheck %s --check-prefix=GET_RN < %t.tmp/run_3.json 
# GET_RN: { # GET_RN: "generated_by": # GET_RN: "machine": { @@ -60,18 +60,18 @@ lnt admin get-run 3 > get_run.stdout # GET_RN: } lnt admin list-machines > list_machines.stdout -# RUN: FileCheck %s --check-prefix=LIST_MACHINES < %t.tmp/list_machines.stdout +# RUN: filecheck %s --check-prefix=LIST_MACHINES < %t.tmp/list_machines.stdout # LIST_MACHINES: localhost__clang_DEV__x86_64:1 # LIST_MACHINES-NEXT: LNT SAMPLE MACHINE:2 lnt admin list-runs 1 > list_runs.stdout -# RUN: FileCheck %s --check-prefix=LIST_RUNS < %t.tmp/list_runs.stdout +# RUN: filecheck %s --check-prefix=LIST_RUNS < %t.tmp/list_runs.stdout # LIST_RUNS: llvm_project_revision=154331 1 # LIST_RUNS: llvm_project_revision=152289 2 rm -rf machine_1.json lnt admin get-machine 1 > get_machine.stdout -# RUN: FileCheck %s --check-prefix=GET_MACHINE_JSON < %t.tmp/machine_1.json +# RUN: filecheck %s --check-prefix=GET_MACHINE_JSON < %t.tmp/machine_1.json # GET_MACHINE_JSON: { # GET_MACHINE_JSON: "generated_by": # GET_MACHINE_JSON: "machine": { @@ -88,12 +88,12 @@ lnt admin get-machine 1 > get_machine.stdout # GET_MACHINE_JSON: } lnt admin rm-machine 1 > rm_machine.stdout -# RUN: FileCheck %s --check-prefix=RM_MACHINE < %t.tmp/rm_machine.stdout +# RUN: filecheck %s --check-prefix=RM_MACHINE < %t.tmp/rm_machine.stdout # RM_MACHINE: Deleting runs 1 2 (2/2) # RM_MACHINE: Deleted machine localhost__clang_DEV__x86_64:1 lnt admin list-machines > list_machines2.stdout -# RUN: FileCheck %s --check-prefix=LIST_MACHINES2 < %t.tmp/list_machines2.stdout +# RUN: filecheck %s --check-prefix=LIST_MACHINES2 < %t.tmp/list_machines2.stdout # LIST_MACHINES2-NOT: localhost__clang_DEV__x86_64:1 # LIST_MACHINES2: LNT SAMPLE MACHINE:2 @@ -101,7 +101,7 @@ lnt admin rename-machine 2 hal9000 # No output lnt admin list-machines > list_machines3.stdout -# RUN: FileCheck %s --check-prefix=LIST_MACHINES3 < %t.tmp/list_machines3.stdout +# RUN: filecheck %s --check-prefix=LIST_MACHINES3 < %t.tmp/list_machines3.stdout # LIST_MACHINES3: 
hal9000:2 # Just post again so we have a machine to merge diff --git a/tests/lnttool/checkformat.shtest b/tests/lnttool/checkformat.shtest index 05676dab..31a7f02d 100644 --- a/tests/lnttool/checkformat.shtest +++ b/tests/lnttool/checkformat.shtest @@ -1,5 +1,5 @@ # Check an old plist format -# RUN: lnt checkformat %{shared_inputs}/sample-a-small.plist 2>&1 | FileCheck %s --check-prefix=CHECK0 +# RUN: lnt checkformat %{shared_inputs}/sample-a-small.plist 2>&1 | filecheck %s --check-prefix=CHECK0 # # CHECK0: Import succeeded. # CHECK0: Imported Data @@ -14,7 +14,7 @@ # # # Check an old json format -# RUN: lnt checkformat %{shared_inputs}/sample-report.json 2>&1 | FileCheck %s --check-prefix=CHECK1 +# RUN: lnt checkformat %{shared_inputs}/sample-report.json 2>&1 | filecheck %s --check-prefix=CHECK1 # # CHECK1: Import succeeded. # CHECK1: Imported Data @@ -29,7 +29,7 @@ # # # Check new-style json format -# RUN: lnt checkformat %{src_root}/docs/report-example.json 2>&1 | FileCheck %s --check-prefix=CHECK2 +# RUN: lnt checkformat %{src_root}/docs/report-example.json 2>&1 | filecheck %s --check-prefix=CHECK2 # # CHECK2: Import succeeded. # CHECK2: Imported Data @@ -43,14 +43,14 @@ # CHECK2: PASS : 10 # # -# RUN: lnt checkformat %S/Inputs/minimal.json 2>&1 | FileCheck %s --check-prefix=MINIMAL +# RUN: lnt checkformat %S/Inputs/minimal.json 2>&1 | filecheck %s --check-prefix=MINIMAL # MINIMAL: Import succeeded. 
# MINIMAL: Added Machines: 1 # MINIMAL: Added Runs : 1 # # # Check invalid format -# RUN: lnt checkformat %S/Inputs/invalid_submission0.json 2>&1 | FileCheck %s --check-prefix=CHECKFAIL0 +# RUN: lnt checkformat %S/Inputs/invalid_submission0.json 2>&1 | filecheck %s --check-prefix=CHECKFAIL0 # # CHECKFAIL0: Import Failed: # CHECKFAIL0: could not parse input format diff --git a/tests/lnttool/email_tools.py b/tests/lnttool/email_tools.py index 9f7ca152..cf9597d2 100644 --- a/tests/lnttool/email_tools.py +++ b/tests/lnttool/email_tools.py @@ -9,7 +9,7 @@ # RUN: lnt send-run-comparison --dry-run --to some@address.com \ # RUN: --from some.other@address.com \ -# RUN: --host localhost %t.instance 1 2 | FileCheck %s --check-prefix CHECK0 +# RUN: --host localhost %t.instance 1 2 | filecheck %s --check-prefix CHECK0 # # CHECK0: From: some.other@address.com # CHECK0: To: some@address.com @@ -37,7 +37,7 @@ # RUN: lnt send-daily-report --dry-run --from some.other@address.com \ # RUN: --host localhost --testsuite nts --filter-machine-regex=machine.? 
\ -# RUN: %t.instance some@address.com | FileCheck %s --check-prefix CHECK1 +# RUN: %t.instance some@address.com | filecheck %s --check-prefix CHECK1 # # CHECK1: From: some.other@address.com # CHECK1: To: some@address.com diff --git a/tests/lnttool/showtests.shtest b/tests/lnttool/showtests.shtest index 858c728b..8c2284dd 100644 --- a/tests/lnttool/showtests.shtest +++ b/tests/lnttool/showtests.shtest @@ -1,4 +1,4 @@ -# RUN: lnt showtests | FileCheck %s +# RUN: lnt showtests | filecheck %s # CHECK: Available tests: # CHECK-NEXT: compile - Single file compile-time performance testing # CHECK-NEXT: nt - LLVM test-suite compile and execution tests diff --git a/tests/lnttool/submit.shtest b/tests/lnttool/submit.shtest index 2c2fe0ee..03c771fa 100644 --- a/tests/lnttool/submit.shtest +++ b/tests/lnttool/submit.shtest @@ -14,7 +14,7 @@ SHARED_INPUTS="$3" SRC_ROOT="$4" lnt submit "http://localhost:9091/db_default/submitRun" "${SHARED_INPUTS}/sample-report.json" -v > "${OUTPUT_DIR}/submit_verbose.txt" -# RUN: FileCheck %s --check-prefix=CHECK-VERBOSE < %t.tmp/submit_verbose.txt +# RUN: filecheck %s --check-prefix=CHECK-VERBOSE < %t.tmp/submit_verbose.txt # # CHECK-VERBOSE: Import succeeded. # CHECK-VERBOSE: --- Tested: 10 tests -- @@ -33,21 +33,21 @@ lnt submit "http://localhost:9091/db_default/submitRun" "${SHARED_INPUTS}/sample # Make sure the old --commit=1 style argument is still accepted. 
lnt submit "http://localhost:9091/db_default/submitRun" --commit=1 "${SHARED_INPUTS}/sample-report1.json" > "${OUTPUT_DIR}/submit0.txt" -# RUN: FileCheck %s --check-prefix=CHECK-SUBMIT0 < %t.tmp/submit0.txt +# RUN: filecheck %s --check-prefix=CHECK-SUBMIT0 < %t.tmp/submit0.txt # CHECK-SUBMIT0: http://localhost:9091/db_default/v4/nts/4 # Submit sample-report1.json again and check it keeps the same URL lnt submit "http://localhost:9091/db_default/submitRun" --commit=1 "${SHARED_INPUTS}/sample-report1.json" > "${OUTPUT_DIR}/submit0-resubmit.txt" -# RUN: FileCheck %s --check-prefix=CHECK-RESUBMIT0 < %t.tmp/submit0-resubmit.txt +# RUN: filecheck %s --check-prefix=CHECK-RESUBMIT0 < %t.tmp/submit0-resubmit.txt # CHECK-RESUBMIT0: http://localhost:9091/db_default/v4/nts/4 lnt submit "http://localhost:9091/db_default/submitRun" --commit 1 "${SHARED_INPUTS}/sample-report2.json" > "${OUTPUT_DIR}/submit1.txt" -# RUN: FileCheck %s --check-prefix=CHECK-SUBMIT1 < %t.tmp/submit1.txt +# RUN: filecheck %s --check-prefix=CHECK-SUBMIT1 < %t.tmp/submit1.txt # CHECK-SUBMIT1: http://localhost:9091/db_default/v4/nts/5 lnt submit "http://localhost:9091/db_default/v4/compile/submitRun" "${INPUTS}/compile_submission.json" -v > "${OUTPUT_DIR}/submit_compile.txt" -# RUN: FileCheck %s --check-prefix=CHECK-COMPILE0 < %t.tmp/submit_compile.txt +# RUN: filecheck %s --check-prefix=CHECK-COMPILE0 < %t.tmp/submit_compile.txt # # CHECK-COMPILE0: --- Tested: 10 tests -- # @@ -63,7 +63,7 @@ lnt submit "http://localhost:9091/db_default/v4/compile/submitRun" "${INPUTS}/co # CHECK-COMPILE0: Results available at: http://localhost:9091/db_default/v4/compile/5 lnt submit "http://localhost:9091/db_default/submitRun" "${SRC_ROOT}/docs/report-example.json" -v > "${OUTPUT_DIR}/submit_newformat.txt" -# RUN: FileCheck %s --check-prefix=CHECK-NEWFORMAT < %t.tmp/submit_newformat.txt +# RUN: filecheck %s --check-prefix=CHECK-NEWFORMAT < %t.tmp/submit_newformat.txt # # CHECK-NEWFORMAT: Import succeeded. 
# CHECK-NEWFORMAT: --- Tested: 10 tests -- @@ -83,7 +83,7 @@ lnt submit "http://localhost:9091/db_default/submitRun" "${SRC_ROOT}/docs/report # test-suite based on the Info.Run.tag field instead of the URL. The result # should be the same as using the "correct" URL. lnt submit "http://localhost:9091/db_default/submitRun" "${INPUTS}/compile_submission1.json" -v > "${OUTPUT_DIR}/submit_compile1.txt" -# RUN: FileCheck %s --check-prefix=CHECK-COMPILE1 < %t.tmp/submit_compile1.txt +# RUN: filecheck %s --check-prefix=CHECK-COMPILE1 < %t.tmp/submit_compile1.txt # # CHECK-COMPILE1: Import succeeded. # @@ -93,7 +93,7 @@ lnt submit "http://localhost:9091/db_default/submitRun" "${INPUTS}/compile_submi # CHECK-COMPILE1: Results available at: http://localhost:9091/db_default/v4/compile/6 # Check some error handling/reporting -# RUN: FileCheck %s --check-prefix=CHECK-ERRORS < %t.tmp/submit_errors.txt +# RUN: filecheck %s --check-prefix=CHECK-ERRORS < %t.tmp/submit_errors.txt rm -f "${OUTPUT_DIR}/submit_errors.txt" echo "=== compile_submission.json badsuite" >> "${OUTPUT_DIR}/submit_errors.txt" @@ -132,7 +132,7 @@ not lnt submit "http://localhost:9091/db_default/v4/compile/submitRun" "${INPUTS # Adding extra fields to the machine in a submission is fine. 
lnt submit "http://localhost:9091/db_default/v4/compile/submitRun" "${INPUTS}/compile_submission_machine_diff_fine.json" -v > "${OUTPUT_DIR}/submit_compile_machine_diff_fine.txt" -# RUN: FileCheck %s --check-prefix=CHECK-MACHINEDIFF < %t.tmp/submit_compile_machine_diff_fine.txt +# RUN: filecheck %s --check-prefix=CHECK-MACHINEDIFF < %t.tmp/submit_compile_machine_diff_fine.txt # # CHECK-MACHINEDIFF: Imported Data # CHECK-MACHINEDIFF: ------------- @@ -147,7 +147,7 @@ lnt submit "http://localhost:9091/db_default/v4/compile/submitRun" "${INPUTS}/co # Test updating existing machine lnt submit "http://localhost:9091/db_default/v4/compile/submitRun" "${INPUTS}/compile_submission_machine_diff_reject.json" --select-machine=update -v > "${OUTPUT_DIR}/submit_compile_machine_update.txt" -# RUN: FileCheck %s --check-prefix=CHECK-UPDATEMACHINE < %t.tmp/submit_compile_machine_update.txt +# RUN: filecheck %s --check-prefix=CHECK-UPDATEMACHINE < %t.tmp/submit_compile_machine_update.txt # # CHECK-UPDATEMACHINE: Imported Data # CHECK-UPDATEMACHINE: ------------- @@ -162,7 +162,7 @@ lnt submit "http://localhost:9091/db_default/v4/compile/submitRun" "${INPUTS}/co # Test creation of new machines on information mismatch in split mode. 
lnt submit "http://localhost:9091/db_default/v4/compile/submitRun" "${INPUTS}/compile_submission_machine_diff_split.json" --select-machine=split -v > "${OUTPUT_DIR}/submit_compile_machine_split.txt" -# RUN: FileCheck %s --check-prefix=CHECK-SPLITMACHINE < %t.tmp/submit_compile_machine_split.txt +# RUN: filecheck %s --check-prefix=CHECK-SPLITMACHINE < %t.tmp/submit_compile_machine_split.txt # # We should have added a new machine: # CHECK-SPLITMACHINE: Imported Data diff --git a/tests/lnttool/submit_badurl.shtest b/tests/lnttool/submit_badurl.shtest index f7738fea..44176b2b 100644 --- a/tests/lnttool/submit_badurl.shtest +++ b/tests/lnttool/submit_badurl.shtest @@ -1,2 +1,2 @@ -# RUN: not lnt submit "http://doesnotexist.invalid/db_default/submitRun" %{shared_inputs}/sample-report.json 2>&1 | FileCheck %s +# RUN: not lnt submit "http://doesnotexist.invalid/db_default/submitRun" %{shared_inputs}/sample-report.json 2>&1 | filecheck %s # CHECK: error: could not resolve 'http://doesnotexist.invalid/db_default/submitRun': diff --git a/tests/runtest/exclude_stat.py b/tests/runtest/exclude_stat.py index 71dd04ac..9467466e 100644 --- a/tests/runtest/exclude_stat.py +++ b/tests/runtest/exclude_stat.py @@ -7,8 +7,8 @@ # RUN: --cc %{shared_inputs}/FakeCompilers/clang-r154331 \ # RUN: --exclude-stat-from-submission compile \ # RUN: --no-timestamp > %t.log 2> %t.err -# RUN: FileCheck --check-prefix CHECK-STDOUT < %t.log %s -# RUN: FileCheck --check-prefix CHECK-REPORT < %t.SANDBOX/build/report.json %s +# RUN: filecheck --check-prefix CHECK-STDOUT < %t.log %s +# RUN: filecheck --check-prefix CHECK-REPORT < %t.SANDBOX/build/report.json %s # CHECK-STDOUT: Import succeeded. 
# CHECK-REPORT: "Name": "nts.{{[^.]+}}.exec" # CHECK-REPORT-NOT: "Name": "nts.{{[^.]+}}.compile" diff --git a/tests/runtest/multisample.py b/tests/runtest/multisample.py index e5d10d92..8d34c61a 100644 --- a/tests/runtest/multisample.py +++ b/tests/runtest/multisample.py @@ -6,8 +6,8 @@ # RUN: --cc %{shared_inputs}/FakeCompilers/clang-r154331 \ # RUN: --no-timestamp --multisample 5 > %t.log 2> %t.err # -# RUN: FileCheck --check-prefix CHECK-STDOUT < %t.log %s -# RUN: FileCheck --check-prefix CHECK-STDERR < %t.err %s +# RUN: filecheck --check-prefix CHECK-STDOUT < %t.log %s +# RUN: filecheck --check-prefix CHECK-STDERR < %t.err %s # # CHECK-STDOUT: Import succeeded. # CHECK-STDOUT: Added Machines: 1 diff --git a/tests/runtest/nt-darwin.py b/tests/runtest/nt-darwin.py index 0c65eaaf..42a48710 100644 --- a/tests/runtest/nt-darwin.py +++ b/tests/runtest/nt-darwin.py @@ -6,7 +6,7 @@ # RUN: --test-suite %S/Inputs/test-suite \ # RUN: --cc %{shared_inputs}/FakeCompilers/clang-r154331 \ # RUN: --no-timestamp --use-isolation > %t.log 2> %t.err -# RUN: FileCheck --check-prefix CHECK-SANDBOX < %t.err %s +# RUN: filecheck --check-prefix CHECK-SANDBOX < %t.err %s # # CHECK-SANDBOX: creating sandbox profile diff --git a/tests/runtest/nt.py b/tests/runtest/nt.py index 1eb127bf..6162a7ff 100644 --- a/tests/runtest/nt.py +++ b/tests/runtest/nt.py @@ -6,9 +6,9 @@ # RUN: --test-suite %S/Inputs/test-suite \ # RUN: --cc %{shared_inputs}/FakeCompilers/clang-r154331 \ # RUN: --no-timestamp > %t.log 2> %t.err -# RUN: FileCheck --check-prefix CHECK-STDOUT < %t.log %s -# RUN: FileCheck --check-prefix CHECK-BASIC < %t.err %s -# RUN: FileCheck --check-prefix CHECK-REPORT < %t.SANDBOX/build/report.json %s +# RUN: filecheck --check-prefix CHECK-STDOUT < %t.log %s +# RUN: filecheck --check-prefix CHECK-BASIC < %t.err %s +# RUN: filecheck --check-prefix CHECK-REPORT < %t.SANDBOX/build/report.json %s # CHECK-REPORT: "run_order": "154331" # CHECK-REPORT: "Name": "nts.{{[^.]+}}.exec" # CHECK-REPORT: 
"Name": "nts.{{[^.]+}}.compile" @@ -36,7 +36,7 @@ # RUN: --test-suite %S/Inputs/test-suite \ # RUN: --cc %{shared_inputs}/FakeCompilers/clang-r154331 \ # RUN: --no-timestamp --no-configure > %t.log 2> %t.err -# RUN: FileCheck --check-prefix CHECK-NOCONF < %t.err %s +# RUN: filecheck --check-prefix CHECK-NOCONF < %t.err %s # CHECK-NOCONF-NOT: configuring # # Check a basic nt run on a test-suite without binary hash support. @@ -45,9 +45,9 @@ # RUN: --test-suite %S/Inputs/test-suite-nohash \ # RUN: --cc %{shared_inputs}/FakeCompilers/clang-r154331 \ # RUN: --no-timestamp > %t.log 2> %t.err -# RUN: FileCheck --check-prefix CHECK-STDOUT < %t.log %s -# RUN: FileCheck --check-prefix CHECK-BASIC < %t.err %s -# RUN: FileCheck --check-prefix CHECK-REPORT < %t.SANDBOX-NO-HASH/build/report.json %s +# RUN: filecheck --check-prefix CHECK-STDOUT < %t.log %s +# RUN: filecheck --check-prefix CHECK-BASIC < %t.err %s +# RUN: filecheck --check-prefix CHECK-REPORT < %t.SANDBOX-NO-HASH/build/report.json %s # # Manually set a run order. # RUN: lnt runtest nt \ @@ -55,7 +55,7 @@ # RUN: --test-suite %S/Inputs/test-suite \ # RUN: --cc %{shared_inputs}/FakeCompilers/clang-r154331 \ # RUN: --no-timestamp --run-order=123 > %t.log 2> %t.err -# RUN: FileCheck --check-prefix CHECK-RESULTS < %t.SANDBOX/build/report.json %s +# RUN: filecheck --check-prefix CHECK-RESULTS < %t.SANDBOX/build/report.json %s # CHECK-RESULTS: "run_order": "123" # # Change the machine name. Don't use LLVM. @@ -64,7 +64,7 @@ # RUN: --test-suite %S/Inputs/test-suite \ # RUN: --cc %{shared_inputs}/FakeCompilers/clang-r154331 \ # RUN: --no-auto-name foo > %t.log 2> %t.err -# RUN: FileCheck --check-prefix CHECK-AUTONAME < %t.err %s +# RUN: filecheck --check-prefix CHECK-AUTONAME < %t.err %s # CHECK-AUTONAME: using nickname: 'foo' # Run without LLVM. 
@@ -83,7 +83,7 @@ # RUN: --cc %{shared_inputs}/FakeCompilers/clang-r154331 \ # RUN: --cflag '-Wall' \ # RUN: --no-timestamp > %t.log 2> %t.err -# RUN: FileCheck --check-prefix CHECK-CFLAG1 < %t.err %s +# RUN: filecheck --check-prefix CHECK-CFLAG1 < %t.err %s # CHECK-CFLAG1: inferred C++ compiler under test # CHECK-CFLAG1: TARGET_FLAGS: -Wall @@ -96,7 +96,7 @@ # RUN: --cflag '-mfloat-abi=hard' \ # RUN: --cflag '-O3' \ # RUN: --no-timestamp > %t.log 2> %t.err -# RUN: FileCheck --check-prefix CHECK-CFLAG2 < %t.err %s +# RUN: filecheck --check-prefix CHECK-CFLAG2 < %t.err %s # CHECK-CFLAG2: inferred C++ compiler under test # CHECK-CFLAG2: TARGET_FLAGS: -Wall -mfloat-abi=hard -O3 @@ -107,7 +107,7 @@ # RUN: --cc %{shared_inputs}/FakeCompilers/clang-r154331 \ # RUN: --cflags '-Wall -mfloat-abi=hard -O3' \ # RUN: --no-timestamp > %t.log 2> %t.err -# RUN: FileCheck --check-prefix CHECK-CFLAG3 < %t.err %s +# RUN: filecheck --check-prefix CHECK-CFLAG3 < %t.err %s # CHECK-CFLAG3: inferred C++ compiler under test # CHECK-CFLAG3: TARGET_FLAGS: -Wall -mfloat-abi=hard -O3 @@ -118,7 +118,7 @@ # RUN: --cc %{shared_inputs}/FakeCompilers/clang-r154331 \ # RUN: --cflags "-Wall -test=escaped\ space -some-option='stay with me' -O3" \ # RUN: --no-timestamp > %t.log 2> %t.err -# RUN: FileCheck --check-prefix CHECK-CFLAG4 < %t.err %s +# RUN: filecheck --check-prefix CHECK-CFLAG4 < %t.err %s # CHECK-CFLAG4: inferred C++ compiler under test # CHECK-CFLAG4: TARGET_FLAGS: -Wall '-test=escaped space' '-some-option=stay with me' -O3 @@ -131,7 +131,7 @@ # RUN: --cflag '-Weverything' \ # RUN: --cflags '-Wall -test=escaped\ space -some-option="stay with me" -O3' \ # RUN: --no-timestamp > %t.log 2> %t.err -# RUN: FileCheck --check-prefix CHECK-CFLAG5 < %t.err %s +# RUN: filecheck --check-prefix CHECK-CFLAG5 < %t.err %s # CHECK-CFLAG5: inferred C++ compiler under test # CHECK-CFLAG5: TARGET_FLAGS: --target=armv7a-none-eabi -Weverything -Wall '-test=escaped space' # CHECK-CFLAG5: '-some-option=stay 
with me' -O3 @@ -146,7 +146,7 @@ # RUN: --qemu-flag '-net nic' \ # RUN: --qemu-flags '-device gus,irq=5 -test=escaped\ space -some-option="stay with me"' \ # RUN: --no-timestamp > %t.log 2> %t.err -# RUN: FileCheck --check-prefix CHECK-QEMU-FLAG1 < %t.err %s +# RUN: filecheck --check-prefix CHECK-QEMU-FLAG1 < %t.err %s # CHECK-QEMU-FLAG1: QEMU_USER_MODE_COMMAND: TEST -soundhw gus -net nic -device gus,irq=5 # CHECK-QEMU-FLAG1: '-test=escaped space' '-some-option=stay with me' @@ -162,8 +162,8 @@ # RUN: --test-suite %S/Inputs/rerun-test-suite1 \ # RUN: --cc %{shared_inputs}/FakeCompilers/clang-r154331 \ # RUN: --no-timestamp --rerun --run-order 1 > %t.log 2> %t.err -# RUN: FileCheck --check-prefix CHECK-SUBMIT-STDOUT < %t.log %s -# RUN: FileCheck --check-prefix CHECK-SUBMIT-STDERR < %t.err %s +# RUN: filecheck --check-prefix CHECK-SUBMIT-STDOUT < %t.log %s +# RUN: filecheck --check-prefix CHECK-SUBMIT-STDERR < %t.err %s # CHECK-SUBMIT-STDOUT: Import succeeded. # CHECK-SUBMIT-STDOUT: PASS : 345 @@ -192,5 +192,5 @@ # RUN: --test-suite %S/Inputs/rerun-test-suite1 \ # RUN: --cc %{shared_inputs}/FakeCompilers/clang-r154331 \ # RUN: --no-timestamp --rerun --run-order 2 > %t.log 2> %t.err -# RUN: FileCheck --check-prefix CHECK-SUBMIT-STDOUT < %t.log %s -# RUN: FileCheck --check-prefix CHECK-SUBMIT-STDERR < %t.err %s +# RUN: filecheck --check-prefix CHECK-SUBMIT-STDOUT < %t.log %s +# RUN: filecheck --check-prefix CHECK-SUBMIT-STDERR < %t.err %s diff --git a/tests/runtest/only_test.py b/tests/runtest/only_test.py index 03785b24..ac41b042 100644 --- a/tests/runtest/only_test.py +++ b/tests/runtest/only_test.py @@ -7,8 +7,8 @@ # RUN: --only-test subtest \ # RUN: --no-timestamp > %t.log 2> %t.err # -# RUN: FileCheck --check-prefix CHECK-STDOUT < %t.log %s -# RUN: FileCheck --check-prefix CHECK-STDERR < %t.err %s +# RUN: filecheck --check-prefix CHECK-STDOUT < %t.log %s +# RUN: filecheck --check-prefix CHECK-STDERR < %t.err %s # # CHECK-STDOUT: Import succeeded. 
# CHECK-STDOUT: Added Machines: 1 diff --git a/tests/runtest/rerun.py b/tests/runtest/rerun.py index 795e9e45..ebf1cd5c 100644 --- a/tests/runtest/rerun.py +++ b/tests/runtest/rerun.py @@ -14,8 +14,8 @@ # RUN: --test-suite %S/Inputs/rerun-test-suite1 \ # RUN: --cc %{shared_inputs}/FakeCompilers/clang-r154331 \ # RUN: --no-timestamp --rerun --run-order 1 > %t.log 2> %t.err -# RUN: FileCheck --check-prefix CHECK-STDOUT < %t.log %s -# RUN: FileCheck --check-prefix CHECK-STDERR < %t.err %s +# RUN: filecheck --check-prefix CHECK-STDOUT < %t.log %s +# RUN: filecheck --check-prefix CHECK-STDERR < %t.err %s # CHECK-STDOUT: Import succeeded. # CHECK-STDOUT: PASS : 345 @@ -42,8 +42,8 @@ # RUN: --no-timestamp --rerun --run-order 4 --verbose \ # RUN: > %t.2.log 2> %t.2.err || cat %t.2.err # RUN: echo "Run 2" -# RUN: FileCheck --check-prefix CHECK-STDOUT2 < %t.2.log %s -# RUN: FileCheck --check-prefix CHECK-STDERR2 < %t.2.err %s +# RUN: filecheck --check-prefix CHECK-STDOUT2 < %t.2.log %s +# RUN: filecheck --check-prefix CHECK-STDERR2 < %t.2.err %s # CHECK-STDOUT2: Import succeeded. 
# CHECK-STDOUT2: FAIL : 3 diff --git a/tests/runtest/test_suite-benchmarking-only.shtest b/tests/runtest/test_suite-benchmarking-only.shtest index c39ea3e4..9dbe3e5e 100644 --- a/tests/runtest/test_suite-benchmarking-only.shtest +++ b/tests/runtest/test_suite-benchmarking-only.shtest @@ -12,7 +12,7 @@ # RUN: --succinct-compile-output \ # RUN: --verbose \ # RUN: > %t.log 2> %t.err -# RUN: FileCheck --check-prefix CHECK-BENCHONLY < %t.err %s +# RUN: filecheck --check-prefix CHECK-BENCHONLY < %t.err %s # CHECK-BENCHONLY: Configuring with { # CHECK-BENCHONLY: TEST_SUITE_BENCHMARKING_ONLY: 'ON' # CHECK-BENCHONLY-NOT: VERBOSE=1 diff --git a/tests/runtest/test_suite-c-compiler.shtest b/tests/runtest/test_suite-c-compiler.shtest index 1cb79965..17e98496 100644 --- a/tests/runtest/test_suite-c-compiler.shtest +++ b/tests/runtest/test_suite-c-compiler.shtest @@ -11,5 +11,5 @@ # RUN: --use-make %S/Inputs/test-suite-cmake/fake-make \ # RUN: --use-lit %S/Inputs/test-suite-cmake/fake-lit \ # RUN: > %t.log 2> %t.err || true -# RUN: FileCheck --check-prefix CHECK-CC-CONFL-CMAKEDEFINE < %t.SANDBOX/build/report.json %s +# RUN: filecheck --check-prefix CHECK-CC-CONFL-CMAKEDEFINE < %t.SANDBOX/build/report.json %s # CHECK-CC-CONFL-CMAKEDEFINE: "run_order": "154332" diff --git a/tests/runtest/test_suite-cache.shtest b/tests/runtest/test_suite-cache.shtest index e0321b2d..1a5da4f9 100644 --- a/tests/runtest/test_suite-cache.shtest +++ b/tests/runtest/test_suite-cache.shtest @@ -20,16 +20,16 @@ # 1) there's 2 cmake compiler definition followed by the -C option # 2) each of the cmake compiler definition only accepts the other cmake # compiler definition between cmake and itself -# RUN: FileCheck --check-prefix CHECK-CACHE1 < %t.cmake-cache.log %s +# RUN: filecheck --check-prefix CHECK-CACHE1 < %t.cmake-cache.log %s # CHECK-CACHE1: Execute: {{.*}}cmake -DCMAKE_C{{(XX)?}}_COMPILER:FILEPATH={{.*}}/FakeCompilers/clang{{(\+\+)?}}-r154331 
-DCMAKE_C{{(XX)?}}_COMPILER:FILEPATH={{.*}}/FakeCompilers/clang{{(\+\+)?}}-r154331 -C {{.*}}/Release.cmake -# RUN: FileCheck --check-prefix CHECK-CACHE2 < %t.cmake-cache.log %s +# RUN: filecheck --check-prefix CHECK-CACHE2 < %t.cmake-cache.log %s # CHECK-CACHE2: Execute: {{.*}}cmake {{(-DCMAKE_CXX_COMPILER:FILEPATH=.*/FakeCompilers/clang\+\+-r154331 )?}}-DCMAKE_C_COMPILER:FILEPATH={{.*}}/FakeCompilers/clang-r154331 -# RUN: FileCheck --check-prefix CHECK-CACHE3 < %t.cmake-cache.log %s +# RUN: filecheck --check-prefix CHECK-CACHE3 < %t.cmake-cache.log %s # CHECK-CACHE3: Execute: {{.*}}cmake {{(-DCMAKE_C_COMPILER:FILEPATH=.*/FakeCompilers/clang-r154331 )?}}-DCMAKE_CXX_COMPILER:FILEPATH={{.*}}/FakeCompilers/clang++-r154331 -# RUN: FileCheck --check-prefix CHECK-CACHE4 < %t.cmake-cache.log %s +# RUN: filecheck --check-prefix CHECK-CACHE4 < %t.cmake-cache.log %s # CHECK-CACHE4: Execute: {{.*}}cmake {{(.+ )?}}-DFOO=BAR -# RUN: FileCheck --check-prefix CHECK-CACHE5 < %t.cmake-cache.log %s +# RUN: filecheck --check-prefix CHECK-CACHE5 < %t.cmake-cache.log %s # CHECK-CACHE5: Execute: {{.*}}cmake {{(.+ )?}}-DBAR=BAZ # RUN: rm -rf %t.SANDBOX @@ -44,5 +44,5 @@ # RUN: --use-lit %S/Inputs/test-suite-cmake/fake-lit \ # RUN: --cmake-cache Debug \ # RUN: &> %t.cmake-cache2.err || true -# RUN: FileCheck --check-prefix CHECK-CACHE6 < %t.cmake-cache2.err %s +# RUN: filecheck --check-prefix CHECK-CACHE6 < %t.cmake-cache2.err %s # CHECK-CACHE6: Could not find CMake cache file diff --git a/tests/runtest/test_suite-cc.shtest b/tests/runtest/test_suite-cc.shtest index a5cf3bd4..2df68eaa 100644 --- a/tests/runtest/test_suite-cc.shtest +++ b/tests/runtest/test_suite-cc.shtest @@ -8,5 +8,5 @@ # RUN: --use-make %S/Inputs/test-suite-cmake/fake-make \ # RUN: --use-lit %S/Inputs/test-suite-cmake/fake-lit \ # RUN: > %t.log 2> %t.err || true -# RUN: FileCheck --check-prefix CHECK-MISSING-CC < %t.err %s +# RUN: filecheck --check-prefix CHECK-MISSING-CC < %t.err %s # CHECK-MISSING-CC: error: Couldn't 
find C compiler (). Maybe you should specify --cc? diff --git a/tests/runtest/test_suite-cflags0.shtest b/tests/runtest/test_suite-cflags0.shtest index af133e99..bc7a18e9 100644 --- a/tests/runtest/test_suite-cflags0.shtest +++ b/tests/runtest/test_suite-cflags0.shtest @@ -10,7 +10,7 @@ # RUN: --use-lit %S/Inputs/test-suite-cmake/fake-lit \ # RUN: --cflag '-Wall' \ # RUN: > %t.log 2> %t.err -# RUN: FileCheck --check-prefix CHECK-CFLAG1 < %t.err %s +# RUN: filecheck --check-prefix CHECK-CFLAG1 < %t.err %s # CHECK-CFLAG1: Inferred C++ compiler under test # CHECK-CFLAG1: CMAKE_C_FLAGS: '-Wall # Ensure that default c flags for build configurations are made empty to avoid diff --git a/tests/runtest/test_suite-cflags1.shtest b/tests/runtest/test_suite-cflags1.shtest index 087e54b4..851ccf1e 100644 --- a/tests/runtest/test_suite-cflags1.shtest +++ b/tests/runtest/test_suite-cflags1.shtest @@ -12,6 +12,6 @@ # RUN: --cflag '-mfloat-abi=hard' \ # RUN: --cflag '-O3' \ # RUN: > %t.log 2> %t.err -# RUN: FileCheck --check-prefix CHECK-CFLAG2 < %t.err %s +# RUN: filecheck --check-prefix CHECK-CFLAG2 < %t.err %s # CHECK-CFLAG2: Inferred C++ compiler under test # CHECK-CFLAG2: CMAKE_C_FLAGS: '-Wall -mfloat-abi=hard -O3 diff --git a/tests/runtest/test_suite-cflags2.shtest b/tests/runtest/test_suite-cflags2.shtest index 10c4de5b..0140db11 100644 --- a/tests/runtest/test_suite-cflags2.shtest +++ b/tests/runtest/test_suite-cflags2.shtest @@ -10,6 +10,6 @@ # RUN: --use-lit %S/Inputs/test-suite-cmake/fake-lit \ # RUN: --cflags '-Wall -mfloat-abi=hard -O3' \ # RUN: > %t.log 2> %t.err -# RUN: FileCheck --check-prefix CHECK-CFLAG3 < %t.err %s +# RUN: filecheck --check-prefix CHECK-CFLAG3 < %t.err %s # CHECK-CFLAG3: Inferred C++ compiler under test # CHECK-CFLAG3: CMAKE_C_FLAGS: '-Wall -mfloat-abi=hard -O3 diff --git a/tests/runtest/test_suite-cflags3.shtest b/tests/runtest/test_suite-cflags3.shtest index afe7436e..b9b3ede0 100644 --- a/tests/runtest/test_suite-cflags3.shtest +++ 
b/tests/runtest/test_suite-cflags3.shtest @@ -10,6 +10,6 @@ # RUN: --use-lit %S/Inputs/test-suite-cmake/fake-lit \ # RUN: --cflags "-Wall -test=escaped\ space -some-option='stay with me' -O3" \ # RUN: > %t.log 2> %t.err -# RUN: FileCheck --check-prefix CHECK-CFLAG4 < %t.err %s +# RUN: filecheck --check-prefix CHECK-CFLAG4 < %t.err %s # CHECK-CFLAG4: Inferred C++ compiler under test # CHECK-CFLAG4: CMAKE_C_FLAGS: '-Wall '-test=escaped space' '-some-option=stay with me' -O3 diff --git a/tests/runtest/test_suite-cflags4.shtest b/tests/runtest/test_suite-cflags4.shtest index 35cee586..b7d26664 100644 --- a/tests/runtest/test_suite-cflags4.shtest +++ b/tests/runtest/test_suite-cflags4.shtest @@ -12,6 +12,6 @@ # RUN: --cflag '-Weverything' \ # RUN: --cflags '-Wall -test=escaped\ space -some-option="stay with me" -O3' \ # RUN: > %t.log 2> %t.err -# RUN: FileCheck --check-prefix CHECK-CFLAG5 < %t.err %s +# RUN: filecheck --check-prefix CHECK-CFLAG5 < %t.err %s # CHECK-CFLAG5: Inferred C++ compiler under test # CHECK-CFLAG5: CMAKE_C_FLAGS: '--target=armv7a-none-eabi -Weverything -Wall '-test=escaped space' '-some-option=stay with me' -O3 diff --git a/tests/runtest/test_suite-compile-only.shtest b/tests/runtest/test_suite-compile-only.shtest index 215bb571..6d5b48e8 100644 --- a/tests/runtest/test_suite-compile-only.shtest +++ b/tests/runtest/test_suite-compile-only.shtest @@ -11,5 +11,5 @@ # RUN: --verbose \ # RUN: --only-compile \ # RUN: > %t.pgo.log 2> %t.compile-only.err -# RUN: FileCheck --check-prefix CHECK-CO < %t.compile-only.err %s +# RUN: filecheck --check-prefix CHECK-CO < %t.compile-only.err %s # CHECK-CO: TEST_SUITE_RUN_BENCHMARKS: 'Off' diff --git a/tests/runtest/test_suite-cross.shtest b/tests/runtest/test_suite-cross.shtest index a1cf2b5e..c916a47e 100644 --- a/tests/runtest/test_suite-cross.shtest +++ b/tests/runtest/test_suite-cross.shtest @@ -11,5 +11,5 @@ # RUN: --use-make %S/Inputs/test-suite-cmake/fake-make \ # RUN: --use-lit 
%S/Inputs/test-suite-cmake/fake-lit \ # RUN: > %t.log 2> %t.err || true -# RUN: FileCheck --check-prefix CHECK-CROSS-TARGET < %t.SANDBOX/build/report.json %s +# RUN: filecheck --check-prefix CHECK-CROSS-TARGET < %t.SANDBOX/build/report.json %s # CHECK-CROSS-TARGET: "cc_target": "targetarch-linux-gnu" diff --git a/tests/runtest/test_suite-fail-compile.shtest b/tests/runtest/test_suite-fail-compile.shtest index 4217b70a..1998e8f2 100644 --- a/tests/runtest/test_suite-fail-compile.shtest +++ b/tests/runtest/test_suite-fail-compile.shtest @@ -9,7 +9,7 @@ # RUN: --use-make %S/Inputs/test-suite-cmake/fake-make \ # RUN: --use-lit %S/Inputs/test-suite-cmake/fake-lit-fails-compile \ # RUN: --run-order=123 > %t.log 2> %t.err -# RUN: FileCheck --check-prefix CHECK-RESULTS-FAIL-COMPILE < %t.SANDBOX/build/report.json %s +# RUN: filecheck --check-prefix CHECK-RESULTS-FAIL-COMPILE < %t.SANDBOX/build/report.json %s # CHECK-RESULTS-FAIL-COMPILE: "no_errors": "False" # CHECK-RESULTS-FAIL-COMPILE: "run_order": "123" # CHECK-RESULTS-FAIL-COMPILE: "Name": "nts.bar.compile.status" diff --git a/tests/runtest/test_suite-fail-exec.shtest b/tests/runtest/test_suite-fail-exec.shtest index d47c8d23..912161fe 100644 --- a/tests/runtest/test_suite-fail-exec.shtest +++ b/tests/runtest/test_suite-fail-exec.shtest @@ -9,7 +9,7 @@ # RUN: --use-make %S/Inputs/test-suite-cmake/fake-make \ # RUN: --use-lit %S/Inputs/test-suite-cmake/fake-lit-fails-exec \ # RUN: --run-order=123 > %t.log 2> %t.err -# RUN: FileCheck --check-prefix CHECK-RESULTS-FAIL-EXEC < %t.SANDBOX/build/report.json %s +# RUN: filecheck --check-prefix CHECK-RESULTS-FAIL-EXEC < %t.SANDBOX/build/report.json %s # CHECK-RESULTS-FAIL-EXEC: "no_errors": "False" # CHECK-RESULTS-FAIL-EXEC: "run_order": "123" # CHECK-RESULTS-FAIL-EXEC: "Name": "nts.baz.exec.status" diff --git a/tests/runtest/test_suite-machine-name.shtest b/tests/runtest/test_suite-machine-name.shtest index 39ff8f31..08cc9a6e 100644 --- 
a/tests/runtest/test_suite-machine-name.shtest +++ b/tests/runtest/test_suite-machine-name.shtest @@ -11,5 +11,5 @@ # RUN: --use-lit %S/Inputs/test-suite-cmake/fake-lit \ # RUN: --no-auto-name foo \ # RUN: > %t.log 2> %t.err -# RUN: FileCheck --check-prefix CHECK-AUTONAME < %t.err %s +# RUN: filecheck --check-prefix CHECK-AUTONAME < %t.err %s # CHECK-AUTONAME: Using nickname: 'foo' diff --git a/tests/runtest/test_suite-metrics.shtest b/tests/runtest/test_suite-metrics.shtest index 83cab9e7..aee72268 100644 --- a/tests/runtest/test_suite-metrics.shtest +++ b/tests/runtest/test_suite-metrics.shtest @@ -11,8 +11,8 @@ # RUN: --use-lit %S/Inputs/test-suite-cmake/fake-lit \ # RUN: --verbose \ # RUN: > %t.log 2> %t.err -# RUN: FileCheck --check-prefix CHECK-METRICS < %t.SANDBOX/build/report.json %s -# RUN: FileCheck --check-prefix CHECK-METRICS2 < %t.SANDBOX/build/report.json %s +# RUN: filecheck --check-prefix CHECK-METRICS < %t.SANDBOX/build/report.json %s +# RUN: filecheck --check-prefix CHECK-METRICS2 < %t.SANDBOX/build/report.json %s # CHECK-METRICS-DAG: foo.exec # CHECK-METRICS-DAG: foo.compile # CHECK-METRICS-DAG: foo.score diff --git a/tests/runtest/test_suite-only-test.shtest b/tests/runtest/test_suite-only-test.shtest index b16b5073..8583163b 100644 --- a/tests/runtest/test_suite-only-test.shtest +++ b/tests/runtest/test_suite-only-test.shtest @@ -13,7 +13,7 @@ # RUN: --cmake-define three=four \ # RUN: --verbose \ # RUN: > %t.log 2> %t.err -# RUN: FileCheck --check-prefix CHECK-ONLYTEST < %t.err %s +# RUN: filecheck --check-prefix CHECK-ONLYTEST < %t.err %s # CHECK-ONLYTEST: Configuring with { # CHECK-ONLYTEST: one: 'two' # CHECK-ONLYTEST: three: 'four' diff --git a/tests/runtest/test_suite-perf-events.shtest b/tests/runtest/test_suite-perf-events.shtest index 57ad11bc..07ccaa32 100644 --- a/tests/runtest/test_suite-perf-events.shtest +++ b/tests/runtest/test_suite-perf-events.shtest @@ -12,5 +12,5 @@ # RUN: --perf-events=cycles,cache_misses \ # RUN: --verbose \ 
# RUN: > %t.log 2> %t.err -# RUN: FileCheck --check-prefix CHECK-USE-PERF-EVENT < %t.err %s +# RUN: filecheck --check-prefix CHECK-USE-PERF-EVENT < %t.err %s # CHECK-USE-PERF-EVENT: fake-lit-profile -v -j 1 {{.*--param profile=perf --param perf_profile_events=cycles,cache_misses}} diff --git a/tests/runtest/test_suite-pgo.shtest b/tests/runtest/test_suite-pgo.shtest index 5519bae6..611293d3 100644 --- a/tests/runtest/test_suite-pgo.shtest +++ b/tests/runtest/test_suite-pgo.shtest @@ -11,7 +11,7 @@ # RUN: --verbose \ # RUN: --pgo \ # RUN: > %t.pgo.log 2> %t.pgo.err -# RUN: FileCheck --check-prefix CHECK-PGO < %t.pgo.err %s +# RUN: filecheck --check-prefix CHECK-PGO < %t.pgo.err %s # CHECK-PGO: TEST_SUITE_PROFILE_GENERATE: 'On' # CHECK-PGO: TEST_SUITE_PROFILE_GENERATE: 'Off' # CHECK-PGO: TEST_SUITE_PROFILE_USE: 'On' @@ -30,7 +30,7 @@ # RUN: --pgo \ # RUN: --exec-multisample 2 \ # RUN: > %t.pgo_multi.log 2> %t.pgo_multi.err -# RUN: FileCheck --check-prefix CHECK-PGO-MULTI < %t.pgo_multi.err %s +# RUN: filecheck --check-prefix CHECK-PGO-MULTI < %t.pgo_multi.err %s # CHECK-PGO-MULTI: TEST_SUITE_PROFILE_GENERATE: 'On' # CHECK-PGO-MULTI: Execute: {{.*}}/Inputs/test-suite-cmake/fake-cmake --build {{.*}} -t all # CHECK-PGO-MULTI: fake-lit-profile diff --git a/tests/runtest/test_suite-profile.shtest b/tests/runtest/test_suite-profile.shtest index 300a4904..24d3f6e8 100644 --- a/tests/runtest/test_suite-profile.shtest +++ b/tests/runtest/test_suite-profile.shtest @@ -13,7 +13,7 @@ # RUN: --exec-multisample=2 \ # RUN: --verbose \ # RUN: > %t.log 2> %t.err -# RUN: FileCheck --check-prefix CHECK-USE-PERF-ALL < %t.err %s +# RUN: filecheck --check-prefix CHECK-USE-PERF-ALL < %t.err %s # CHECK-USE-PERF-ALL: Configuring with { # CHECK-USE-PERF-ALL: TEST_SUITE_USE_PERF: 'ON' # Verify that tests get run sequentially when perf profile gathering is enabled: diff --git a/tests/runtest/test_suite-run-order.shtest b/tests/runtest/test_suite-run-order.shtest index 7712328e..eec90d12 100644 
--- a/tests/runtest/test_suite-run-order.shtest +++ b/tests/runtest/test_suite-run-order.shtest @@ -10,5 +10,5 @@ # RUN: --use-make %S/Inputs/test-suite-cmake/fake-make \ # RUN: --use-lit %S/Inputs/test-suite-cmake/fake-lit \ # RUN: --run-order=123 > %t.log 2> %t.err -# RUN: FileCheck --check-prefix CHECK-RESULTS < %t.SANDBOX/build/report.json %s +# RUN: filecheck --check-prefix CHECK-RESULTS < %t.SANDBOX/build/report.json %s # CHECK-RESULTS: "run_order": "123" diff --git a/tests/runtest/test_suite-run-under.shtest b/tests/runtest/test_suite-run-under.shtest index 8b886745..5b9a628a 100644 --- a/tests/runtest/test_suite-run-under.shtest +++ b/tests/runtest/test_suite-run-under.shtest @@ -11,7 +11,7 @@ # RUN: --use-lit %S/Inputs/test-suite-cmake/fake-lit \ # RUN: --run-under i_do_not_exist \ # RUN: > %t.log 2> %t.err -# RUN: FileCheck --check-prefix CHECK-RUNUNDER1 < %t.err %s +# RUN: filecheck --check-prefix CHECK-RUNUNDER1 < %t.err %s # CHECK-RUNUNDER1: Run under wrapper not found (looked for i_do_not_exist) # Use a run-under command @@ -26,7 +26,7 @@ # RUN: --use-lit %S/Inputs/test-suite-cmake/fake-lit \ # RUN: --run-under %S/Inputs/test-suite-cmake/fake-make \ # RUN: > %t.log 2> %t.err -# RUN: FileCheck --check-prefix CHECK-RUNUNDER2 < %t.err %s +# RUN: filecheck --check-prefix CHECK-RUNUNDER2 < %t.err %s # CHECK-RUNUNDER2: TEST_SUITE_RUN_UNDER: '{{.*}}/fake-make' # Use a run-under command with an argument @@ -41,5 +41,5 @@ # RUN: --use-lit %S/Inputs/test-suite-cmake/fake-lit \ # RUN: --run-under '%S/Inputs/test-suite-cmake/fake-make wibble' \ # RUN: > %t.log 2> %t.err -# RUN: FileCheck --check-prefix CHECK-RUNUNDER3 < %t.err %s +# RUN: filecheck --check-prefix CHECK-RUNUNDER3 < %t.err %s # CHECK-RUNUNDER3: TEST_SUITE_RUN_UNDER: '{{.*}}/fake-make wibble' diff --git a/tests/runtest/test_suite-use-perf.shtest b/tests/runtest/test_suite-use-perf.shtest index 4bc05859..bda9c528 100644 --- a/tests/runtest/test_suite-use-perf.shtest +++ 
b/tests/runtest/test_suite-use-perf.shtest @@ -11,6 +11,6 @@ # RUN: --use-perf=time \ # RUN: --verbose \ # RUN: > %t.log 2> %t.err -# RUN: FileCheck --check-prefix CHECK-USE-PERF < %t.err %s +# RUN: filecheck --check-prefix CHECK-USE-PERF < %t.err %s # CHECK-USE-PERF: Configuring with { # CHECK-USE-PERF: TEST_SUITE_USE_PERF: 'ON' diff --git a/tests/runtest/test_suite.shtest b/tests/runtest/test_suite.shtest index 251ef0f6..5226b87d 100644 --- a/tests/runtest/test_suite.shtest +++ b/tests/runtest/test_suite.shtest @@ -11,11 +11,11 @@ # RUN: --output %t.report \ # RUN: > %t.out 2> %t.err # RUN: lnt checkformat %t.report > %t.checkformat -# RUN: FileCheck --check-prefix CHECK-BASIC < %t.err %s -# RUN: FileCheck --check-prefix CHECK-REPORT < %t.SANDBOX/build/report.json %s -# RUN: FileCheck --check-prefix CHECK-XML < %t.SANDBOX/build/test-results.xunit.xml %s -# RUN: FileCheck --check-prefix CHECK-CSV < %t.SANDBOX/build/test-results.csv %s -# RUN: FileCheck --check-prefix CHECK-CHECKFORMAT < %t.checkformat %s +# RUN: filecheck --check-prefix CHECK-BASIC < %t.err %s +# RUN: filecheck --check-prefix CHECK-REPORT < %t.SANDBOX/build/report.json %s +# RUN: filecheck --check-prefix CHECK-XML < %t.SANDBOX/build/test-results.xunit.xml %s +# RUN: filecheck --check-prefix CHECK-CSV < %t.SANDBOX/build/test-results.csv %s +# RUN: filecheck --check-prefix CHECK-CHECKFORMAT < %t.checkformat %s # CHECK-REPORT: "no_errors": "True", # CHECK-REPORT: "run_order": "154331" @@ -59,7 +59,7 @@ # RUN: --use-make %S/Inputs/test-suite-cmake/fake-make \ # RUN: --use-lit %S/Inputs/test-suite-cmake/fake-lit \ # RUN: > %t.log 2> %t.err -# RUN: FileCheck --check-prefix CHECK-NOCONF < %t.err %s +# RUN: filecheck --check-prefix CHECK-NOCONF < %t.err %s # CHECK-NOCONF-NOT: Configuring # Use a different sandbox with --no-configure @@ -74,5 +74,5 @@ # RUN: --use-make %S/Inputs/test-suite-cmake/fake-make \ # RUN: --use-lit %S/Inputs/test-suite-cmake/fake-lit \ # RUN: > %t.log 2> %t.err -# RUN: FileCheck 
--check-prefix CHECK-NOCONF2 < %t.err %s +# RUN: filecheck --check-prefix CHECK-NOCONF2 < %t.err %s # CHECK-NOCONF2: Configuring diff --git a/tests/runtest/test_suite_diagnose.shtest b/tests/runtest/test_suite_diagnose.shtest index 78481914..cbc50123 100644 --- a/tests/runtest/test_suite_diagnose.shtest +++ b/tests/runtest/test_suite_diagnose.shtest @@ -15,7 +15,7 @@ # RUN: --use-lit %S/Inputs/test-suite-cmake/fake-lit \ # RUN: --diagnose \ # RUN: 2>&1 | tee %t.err || echo "expected to fail" -# RUN: FileCheck --check-prefix CHECK-ARGS < %t.err %s +# RUN: filecheck --check-prefix CHECK-ARGS < %t.err %s # CHECK-ARGS: --diagnose requires --only-test # Check a basic nt run. @@ -28,7 +28,7 @@ # RUN: --use-lit %S/Inputs/test-suite-cmake/fake-lit \ # RUN: --diagnose --only-test SingleSource/Benchmarks/Stanford/Bubblesort \ # RUN: 2>&1 | tee %t.diagnose.log -# RUN: FileCheck --check-prefix CHECK-DIAGNOSE < %t.diagnose.log %s +# RUN: filecheck --check-prefix CHECK-DIAGNOSE < %t.diagnose.log %s # CHECK-DIAGNOSE: Report produced in: # CHECK-DIAGNOSE: Bubblesort.report diff --git a/tests/server/db/ImportV4TestSuiteInstance.py b/tests/server/db/ImportV4TestSuiteInstance.py index f3bb01e2..9338509d 100644 --- a/tests/server/db/ImportV4TestSuiteInstance.py +++ b/tests/server/db/ImportV4TestSuiteInstance.py @@ -7,7 +7,7 @@ # Import the first test set. # RUN: lnt import %t.install %{shared_inputs}/sample-a-small.plist \ # RUN: --show-sample-count > %t1.log -# RUN: FileCheck -check-prefix=IMPORT-A-1 %s < %t1.log +# RUN: filecheck -check-prefix=IMPORT-A-1 %s < %t1.log # # IMPORT-A-1: Added Machines: 1 # IMPORT-A-1: Added Runs : 1 @@ -19,7 +19,7 @@ # Import the second test set. 
# RUN: lnt import %t.install %{shared_inputs}/sample-b-small.plist \ # RUN: --show-sample-count --show-sql > %t2.log -# RUN: FileCheck -check-prefix=IMPORT-B %s < %t2.log +# RUN: filecheck -check-prefix=IMPORT-B %s < %t2.log # # IMPORT-B: Added Runs : 1 # IMPORT-B: Added Samples : 1 @@ -27,7 +27,7 @@ # Check appending to an existing order # RUN: lnt import %t.install %{shared_inputs}/sample-a-small.plist \ # RUN: --show-sample-count --merge=append >& %t_append.log -# RUN: FileCheck -check-prefix=IMPORT-A-APPEND %s < %t_append.log +# RUN: filecheck -check-prefix=IMPORT-A-APPEND %s < %t_append.log # # IMPORT-A-APPEND-NOT: Added Machines # IMPORT-A-APPEND: Added Runs : 1 @@ -39,7 +39,7 @@ # Check that reimporting replaces the existing run. # RUN: lnt import %t.install %{shared_inputs}/sample-a-small.plist \ # RUN: --show-sample-count --merge=replace >& %t_replace.log -# RUN: FileCheck -check-prefix=IMPORT-A-REPLACE %s < %t_replace.log +# RUN: filecheck -check-prefix=IMPORT-A-REPLACE %s < %t_replace.log # # IMPORT-A-REPLACE-NOT: Added Machines # IMPORT-A-REPLACE: Added Runs : -1 @@ -51,7 +51,7 @@ # Check that reimporting the first test set properly reports as a duplicate. 
# RUN: not lnt import %t.install %{shared_inputs}/sample-a-small.plist \ # RUN: --show-sample-count --merge=reject >& %t_reject.log -# RUN: FileCheck -check-prefix=IMPORT-A-REJECT %s < %t_reject.log +# RUN: filecheck -check-prefix=IMPORT-A-REJECT %s < %t_reject.log # # IMPORT-A-REJECT: Duplicate submission for '1' diff --git a/tests/server/db/yamlschema.shtest b/tests/server/db/yamlschema.shtest index 422d4629..1ef47a71 100644 --- a/tests/server/db/yamlschema.shtest +++ b/tests/server/db/yamlschema.shtest @@ -1,7 +1,7 @@ # RUN: rm -rf "%t.install" # RUN: lnt create "%t.install" # RUN: ln -sf %{src_root}/docs/my_suite.yaml "%t.install/schemas/" -# RUN: lnt import "%t.install" -s my_suite %S/Inputs/customschema-report.json | FileCheck %s +# RUN: lnt import "%t.install" -s my_suite %S/Inputs/customschema-report.json | filecheck %s # CHECK: Import succeeded. # CHECK: Imported Data @@ -19,30 +19,30 @@ # =============== # # Inserting with an extra field shouldn't work just yet -# RUN: not lnt import "%t.install" -s my_suite "%S/Inputs/customschema-report2.json" 2>&1 | FileCheck %s --check-prefix=NOTUPGRADED +# RUN: not lnt import "%t.install" -s my_suite "%S/Inputs/customschema-report2.json" 2>&1 | filecheck %s --check-prefix=NOTUPGRADED # NOTUPGRADED: Metric 'newfield' unknown in suite # Upgrading to a schema with metrics/fields removed should fail # RUN: rm -f "%t.install/schemas/my_suite.yaml" # RUN: ln -sf "%S/Inputs/schema-example-nomigration0.yaml" "%t.install/schemas/my_suite.yaml" -# RUN: not lnt import "%t.install" -s my_suite "%S/Inputs/customschema-report.json" 2>&1 | FileCheck %s --check-prefix=NOMIGRATION0 +# RUN: not lnt import "%t.install" -s my_suite "%S/Inputs/customschema-report.json" 2>&1 | filecheck %s --check-prefix=NOMIGRATION0 # NOMIGRATION0: Cannot automatically migrate database: Metrics removed: data_size # # RUN: rm -f "%t.install/schemas/my_suite.yaml" # RUN: ln -sf "%S/Inputs/schema-example-nomigration1.yaml" "%t.install/schemas/my_suite.yaml" 
-# RUN: not lnt import "%t.install" -s my_suite "%S/Inputs/customschema-report.json" 2>&1 | FileCheck %s --check-prefix=NOMIGRATION1 +# RUN: not lnt import "%t.install" -s my_suite "%S/Inputs/customschema-report.json" 2>&1 | filecheck %s --check-prefix=NOMIGRATION1 # NOMIGRATION1: Cannot automatically migrate database: Machine fields removed: os # # RUN: rm -f "%t.install/schemas/my_suite.yaml" # RUN: ln -sf "%S/Inputs/schema-example-nomigration2.yaml" "%t.install/schemas/my_suite.yaml" -# RUN: not lnt import "%t.install" -s my_suite "%S/Inputs/customschema-report.json" 2>&1 | FileCheck %s --check-prefix=NOMIGRATION2 +# RUN: not lnt import "%t.install" -s my_suite "%S/Inputs/customschema-report.json" 2>&1 | filecheck %s --check-prefix=NOMIGRATION2 # NOMIGRATION2: Cannot automatically migrate database: Type mismatch in metric 'data_size' # This upgrade should finally work # RUN: rm -f "%t.install/schemas/my_suite.yaml" # RUN: ln -sf "%S/Inputs/schema-example-migratable.yaml" "%t.install/schemas/my_suite.yaml" -# RUN: lnt import "%t.install" "%S/Inputs/customschema-report2.json" -s my_suite --show-sql 2>&1 | FileCheck %s --check-prefix=MIGRATION +# RUN: lnt import "%t.install" "%S/Inputs/customschema-report2.json" -s my_suite --show-sql 2>&1 | filecheck %s --check-prefix=MIGRATION # # MIGRATION: ALTER TABLE "my_suite_Sample" ADD COLUMN newfield FLOAT # MIGRATION: ALTER TABLE "my_suite_Run" ADD COLUMN new_run_field VARCHAR(256) diff --git a/tox.ini b/tox.ini index 13923482..605bce7d 100644 --- a/tox.ini +++ b/tox.ini @@ -49,6 +49,11 @@ deps = commands = make -C {toxinidir}/docs/ html +[testenv:py3] +deps = + filecheck + lit + [testenv:runserver] # Don't use this for production. Just a handy target # for local testing.