3 files changed: 2 additions, 31 deletions

@@ -31,7 +31,7 @@ test_path=$1
 printf "source path: $test_path\n"

 # collect all tests with parametrization based filtering with PL_RUN_STANDALONE_TESTS
-standalone_tests=$(python -m pytest $test_path -q --collect-only --pythonwarnings ignore)
+standalone_tests=$(python3 -m pytest $test_path -q --collect-only --pythonwarnings ignore)
 printf "Collected tests:\n $standalone_tests"
 # match only lines with tests
 parametrizations=$(grep -oP '\S+::test_\S+' <<< "$standalone_tests")
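For context on the hunk above (only the interpreter changes, the collection logic does not): a minimal sketch of how the collect-and-filter step behaves. The node ids below are made up, and GNU grep with -P support is assumed.

# Hedged sketch, not part of the diff: made-up `pytest -q --collect-only` output, one node id
# per line; the summary line contains no "::test_" and is dropped by the same grep used above.
standalone_tests='tests_pytorch/strategies/test_ddp.py::test_multi_gpu_model[ddp]
tests_pytorch/strategies/test_ddp.py::test_multi_gpu_model[ddp_spawn]
2 tests collected in 0.12s'
parametrizations=$(grep -oP '\S+::test_\S+' <<< "$standalone_tests")
parametrizations_arr=($parametrizations)   # whitespace-split into one array entry per test id
echo "will run ${#parametrizations_arr[@]} standalone tests"   # prints: will run 2 standalone tests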
@@ -69,7 +69,7 @@ for i in "${!parametrizations_arr[@]}"; do
   # execute the test in the background
   # redirect to a log file that buffers test output. since the tests will run in the background, we cannot let them
   # output to std{out,err} because the outputs would be garbled together
-  python ${defaults} "$parametrization" &>> standalone_test_output.txt &
+  python3 ${defaults} "$parametrization" &>> standalone_test_output.txt &
   # save the PID in an array
   pids[${i}]=$!
   # add row to the final report
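As a hedged illustration of the background-and-wait pattern this hunk sits in: `${defaults}` in the real script holds the full coverage/pytest invocation, so a plain pytest call stands in for it here, and the node ids are again made up.

# Hedged sketch, not part of the diff: launch each test in the background, collect PIDs, then wait.
parametrizations_arr=("tests_pytorch/a_test.py::test_one" "tests_pytorch/a_test.py::test_two")
for i in "${!parametrizations_arr[@]}"; do
  parametrization=${parametrizations_arr[$i]}
  # &>> appends both stdout and stderr of the detached run to one shared log file
  python3 -m pytest -v "$parametrization" &>> standalone_test_output.txt &
  pids[${i}]=$!   # $! is the PID of the job just sent to the background
done
# block until every job has exited, remembering the first non-zero status
status=0
for pid in "${pids[@]}"; do
  wait "$pid" || status=$?
done
exit $status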
@@ -471,24 +471,6 @@ def look_for_trace(trace_dir):
     assert look_for_trace(tmpdir / "lightning_logs" / "version_0")


-# Todo: this test has not been running as all our CI GPU runners have higher capacity
-# @RunIf(min_cuda_gpus=1, standalone=True)
-# @pytest.mark.skipif(torch.cuda.get_device_capability()[0] >= 8)
-# def test_pytorch_profiler_nested_emit_nvtx():
-#     """This test check emit_nvtx is correctly supported."""
-#     profiler = PyTorchProfiler(use_cuda=True, emit_nvtx=True)
-#     model = BoringModel()
-#     trainer = Trainer(
-#         fast_dev_run=True,
-#         profiler=profiler,
-#         accelerator="gpu",
-#         devices=1,
-#         enable_progress_bar=False,
-#         enable_model_summary=False,
-#     )
-#     trainer.fit(model)
-
-
 def test_register_record_function(tmpdir):
     use_cuda = torch.cuda.is_available()
     pytorch_profiler = PyTorchProfiler(
@@ -18,17 +18,6 @@ set -e
 # this environment variable allows special tests to run
 export PL_RUN_STANDALONE_TESTS=1

-# can_run_nvprof=$(python -c "import torch; print(torch.cuda.is_available() and torch.cuda.get_device_capability()[0] < 8)")
-# if [[ $can_run_nvprof == "True" ]]; then
-#   echo "Running profilers/test_profiler.py::test_pytorch_profiler_nested_emit_nvtx"
-#   nvprof --profile-from-start off \
-#     -o trace_name.prof \
-#     -- python -m coverage run \
-#     --source lightning.pytorch \
-#     --append -m pytest \
-#     --no-header profilers/test_profiler.py::test_pytorch_profiler_nested_emit_nvtx
-# fi
-
 # test that a user can manually launch individual processes
 echo "Running manual ddp launch test"
 export PYTHONPATH="${PYTHONPATH}:$(pwd)"
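The "manual ddp launch test" referenced here starts each rank by hand rather than through a launcher. A hedged sketch of that pattern follows; the script name is hypothetical, and the environment variables are the standard torch.distributed rendezvous settings.

# Hedged sketch, not part of the diff: start two ranks manually for a 2-process DDP run.
export MASTER_ADDR="localhost"
export MASTER_PORT=29500
# rank 1 runs detached; rank 0 stays in the foreground so the test blocks until training finishes
LOCAL_RANK=1 RANK=1 WORLD_SIZE=2 python3 some_trainer_script.py &
rank1_pid=$!
LOCAL_RANK=0 RANK=0 WORLD_SIZE=2 python3 some_trainer_script.py
wait "$rank1_pid"   # propagate rank 1's exit code as well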