diff --git a/tests/e2e/vLLM/test_vllm.py b/tests/e2e/vLLM/test_vllm.py
index 2d5ae66d5d..8b0e9dde3f 100644
--- a/tests/e2e/vLLM/test_vllm.py
+++ b/tests/e2e/vLLM/test_vllm.py
@@ -170,7 +170,7 @@ def test_vllm(self, test_data_file: str):
         self.tear_down()
 
     def tear_down(self):
-        if self.save_dir is not None:
+        if self.save_dir is not None and os.path.isdir(self.save_dir):
             shutil.rmtree(self.save_dir)
 
 timer = get_singleton_manager()
diff --git a/tests/llmcompressor/transformers/compression/test_decompress.py b/tests/llmcompressor/transformers/compression/test_decompress.py
index b18cba80e5..c40786388c 100644
--- a/tests/llmcompressor/transformers/compression/test_decompress.py
+++ b/tests/llmcompressor/transformers/compression/test_decompress.py
@@ -1,4 +1,5 @@
 import copy
+import os
 import shutil
 import tempfile
 import unittest
@@ -128,7 +129,8 @@ def test_hf_quantizer_decompress_match_manual_decompress(self):
 
     @classmethod
     def tearDownClass(self):
-        shutil.rmtree(self.test_dir)
+        if os.path.isdir(self.test_dir):
+            shutil.rmtree(self.test_dir)
         del self.dense_model
         del self.decompressed_model_hf_quantizer
         del self.decompressed_model_manual
diff --git a/tests/llmcompressor/transformers/compression/test_quantization.py b/tests/llmcompressor/transformers/compression/test_quantization.py
index 06bf90fade..6d8a2aac8f 100644
--- a/tests/llmcompressor/transformers/compression/test_quantization.py
+++ b/tests/llmcompressor/transformers/compression/test_quantization.py
@@ -49,7 +49,8 @@ def setUpClass(cls):
 
     @classmethod
     def tearDownClass(cls):
-        shutil.rmtree(cls.test_dir)
+        if os.path.isdir(cls.test_dir):
+            shutil.rmtree(cls.test_dir)
         del cls.model
         torch.cuda.empty_cache()
 
diff --git a/tests/llmcompressor/transformers/compression/test_run_compressed.py b/tests/llmcompressor/transformers/compression/test_run_compressed.py
index fa54271940..369828a8ed 100644
--- a/tests/llmcompressor/transformers/compression/test_run_compressed.py
+++ b/tests/llmcompressor/transformers/compression/test_run_compressed.py
@@ -1,3 +1,4 @@
+import os
 import shutil
 import tempfile
 import unittest
@@ -82,7 +83,8 @@ def test_compressed_matches_decompressed(self):
 
     @classmethod
     def tearDownClass(cls):
-        shutil.rmtree(cls.test_dir)
+        if os.path.isdir(cls.test_dir):
+            shutil.rmtree(cls.test_dir)
         del cls.decompressed_model
         del cls.uncompressed_model
         torch.cuda.empty_cache()
@@ -167,7 +169,8 @@ def test_compressed_matches_decompressed__hf_quantizer(self):
 
     @classmethod
     def tearDownClass(cls):
-        shutil.rmtree(cls.test_dir)
+        if os.path.isdir(cls.test_dir):
+            shutil.rmtree(cls.test_dir)
         del cls.decompressed_model
         del cls.compressed_model
         torch.cuda.empty_cache()
diff --git a/tests/llmcompressor/transformers/finetune/test_finetune_no_recipe_custom_dataset.py b/tests/llmcompressor/transformers/finetune/test_finetune_no_recipe_custom_dataset.py
index 0f3e9d54f7..3ccc31e0d8 100644
--- a/tests/llmcompressor/transformers/finetune/test_finetune_no_recipe_custom_dataset.py
+++ b/tests/llmcompressor/transformers/finetune/test_finetune_no_recipe_custom_dataset.py
@@ -107,7 +107,8 @@ def create_mock_file(self, extension, content, path, filename):
         return mock_filepath  # Return the file path
 
     def tearDown(self):
-        shutil.rmtree(self.output)
+        if os.path.isdir(self.output):
+            shutil.rmtree(self.output)
 
 
 @pytest.mark.integration
diff --git a/tests/llmcompressor/transformers/finetune/test_finetune_oneshot_with_modifier.py b/tests/llmcompressor/transformers/finetune/test_finetune_oneshot_with_modifier.py
index 91d1e85876..08ec0792f9 100644
--- a/tests/llmcompressor/transformers/finetune/test_finetune_oneshot_with_modifier.py
+++ b/tests/llmcompressor/transformers/finetune/test_finetune_oneshot_with_modifier.py
@@ -1,3 +1,4 @@
+import os
 import shutil
 import unittest
 from pathlib import Path
@@ -46,4 +47,5 @@ def test_oneshot_with_modifier_object(self):
         )
 
     def tearDown(self):
-        shutil.rmtree(self.output)
+        if os.path.isdir(self.output):
+            shutil.rmtree(self.output)
diff --git a/tests/llmcompressor/transformers/finetune/test_finetune_without_recipe.py b/tests/llmcompressor/transformers/finetune/test_finetune_without_recipe.py
index 42eb495d87..66f71f8b3b 100644
--- a/tests/llmcompressor/transformers/finetune/test_finetune_without_recipe.py
+++ b/tests/llmcompressor/transformers/finetune/test_finetune_without_recipe.py
@@ -1,3 +1,4 @@
+import os
 import shutil
 import unittest
 
@@ -41,4 +42,5 @@ def test_finetune_without_recipe(self):
         )
 
     def tearDown(self):
-        shutil.rmtree(self.output)
+        if os.path.isdir(self.output):
+            shutil.rmtree(self.output)
diff --git a/tests/llmcompressor/transformers/finetune/test_oneshot_and_finetune.py b/tests/llmcompressor/transformers/finetune/test_oneshot_and_finetune.py
index cfce31db64..3cdd4921f9 100644
--- a/tests/llmcompressor/transformers/finetune/test_oneshot_and_finetune.py
+++ b/tests/llmcompressor/transformers/finetune/test_oneshot_and_finetune.py
@@ -75,7 +75,8 @@ def _test_oneshot_and_finetune(self):
     def tearDown(self):
         # TODO: we get really nice stats from finetune that we should log
         # stored in results.json
-        shutil.rmtree(self.output)
+        if os.path.isdir(self.output):
+            shutil.rmtree(self.output)
 
 
 @pytest.mark.integration
diff --git a/tests/llmcompressor/transformers/finetune/test_oneshot_and_finetune_with_tokenizer.py b/tests/llmcompressor/transformers/finetune/test_oneshot_and_finetune_with_tokenizer.py
index a09f62af65..4d5fdc9879 100644
--- a/tests/llmcompressor/transformers/finetune/test_oneshot_and_finetune_with_tokenizer.py
+++ b/tests/llmcompressor/transformers/finetune/test_oneshot_and_finetune_with_tokenizer.py
@@ -1,3 +1,4 @@
+import os
 import shutil
 import unittest
 
@@ -78,4 +79,5 @@ def test_oneshot_and_finetune_with_tokenizer(self):
         print(tokenizer.decode(output[0]))
 
     def tearDown(self):
-        shutil.rmtree(self.output)
+        if os.path.isdir(self.output):
+            shutil.rmtree(self.output)
diff --git a/tests/llmcompressor/transformers/finetune/test_oneshot_then_finetune.py b/tests/llmcompressor/transformers/finetune/test_oneshot_then_finetune.py
index 046816e86b..0a2241b186 100644
--- a/tests/llmcompressor/transformers/finetune/test_oneshot_then_finetune.py
+++ b/tests/llmcompressor/transformers/finetune/test_oneshot_then_finetune.py
@@ -1,3 +1,4 @@
+import os
 import shutil
 import unittest
 from pathlib import Path
@@ -175,4 +176,5 @@ def test_oneshot_quantization_then_finetune(self):
         )
 
     def tearDown(self):
-        shutil.rmtree(self.output)
+        if os.path.isdir(self.output):
+            shutil.rmtree(self.output)
diff --git a/tests/llmcompressor/transformers/finetune/test_safetensors.py b/tests/llmcompressor/transformers/finetune/test_safetensors.py
index 462c529e6b..3015710f8a 100644
--- a/tests/llmcompressor/transformers/finetune/test_safetensors.py
+++ b/tests/llmcompressor/transformers/finetune/test_safetensors.py
@@ -53,4 +53,5 @@ def test_safetensors(self):
         )
 
     def tearDown(self):
-        shutil.rmtree(self.output)
+        if os.path.isdir(self.output):
+            shutil.rmtree(self.output)
diff --git a/tests/llmcompressor/transformers/gptq/test_oneshot.py b/tests/llmcompressor/transformers/gptq/test_oneshot.py
index 6b16222609..3ee8e0580f 100644
--- a/tests/llmcompressor/transformers/gptq/test_oneshot.py
+++ b/tests/llmcompressor/transformers/gptq/test_oneshot.py
@@ -1,3 +1,4 @@
+import os
 import shutil
 import unittest
 
@@ -113,4 +114,5 @@ def test_oneshot_application(self):
         assert not hasattr(not_targetted, "quantization_scheme")
 
     def tearDown(self):
-        shutil.rmtree(self.output)
+        if os.path.isdir(self.output):
+            shutil.rmtree(self.output)
diff --git a/tests/llmcompressor/transformers/obcq/test_consecutive_runs.py b/tests/llmcompressor/transformers/obcq/test_consecutive_runs.py
index 045c89a885..38b1127d5f 100644
--- a/tests/llmcompressor/transformers/obcq/test_consecutive_runs.py
+++ b/tests/llmcompressor/transformers/obcq/test_consecutive_runs.py
@@ -1,3 +1,4 @@
+import os
 import shutil
 import unittest
 from pathlib import Path
@@ -109,7 +110,8 @@ def _test_consecutive_runs(
         self.assertEqual(stage1_modifier_names, exp_stage1_modifier_names)
 
     def tearDown(self):
-        shutil.rmtree(self.output)
+        if os.path.isdir(self.output):
+            shutil.rmtree(self.output)
 
 
 @pytest.mark.integration
diff --git a/tests/llmcompressor/transformers/obcq/test_obcq_completion.py b/tests/llmcompressor/transformers/obcq/test_obcq_completion.py
index 01770c580c..95615360ae 100644
--- a/tests/llmcompressor/transformers/obcq/test_obcq_completion.py
+++ b/tests/llmcompressor/transformers/obcq/test_obcq_completion.py
@@ -1,3 +1,4 @@
+import os
 import shutil
 import unittest
 
@@ -90,7 +91,8 @@ def _test_oneshot_completion(self, model_name: str = None):
         self.assertLess(avg_new_ppl, self.perplexity)
 
     def tearDown(self):
-        shutil.rmtree(self.output)
+        if os.path.isdir(self.output):
+            shutil.rmtree(self.output)
 
 
 @requires_gpu
diff --git a/tests/llmcompressor/transformers/obcq/test_obcq_sparsity.py b/tests/llmcompressor/transformers/obcq/test_obcq_sparsity.py
index e4647ac916..347eb5dc07 100644
--- a/tests/llmcompressor/transformers/obcq/test_obcq_sparsity.py
+++ b/tests/llmcompressor/transformers/obcq/test_obcq_sparsity.py
@@ -1,4 +1,5 @@
 import math
+import os
 import shutil
 import unittest
 
@@ -48,7 +49,8 @@ def test_sparsities(self):
     def tearDown(self):
         import torch
 
-        shutil.rmtree(self.output)
+        if os.path.isdir(self.output):
+            shutil.rmtree(self.output)
         torch.cuda.empty_cache()
 
@@ -97,5 +99,6 @@ def test_sparsities_gpu(self):
     def tearDown(self):
         import torch
 
-        shutil.rmtree(self.output)
+        if os.path.isdir(self.output):
+            shutil.rmtree(self.output)
         torch.cuda.empty_cache()
diff --git a/tests/llmcompressor/transformers/oneshot/test_api_inputs.py b/tests/llmcompressor/transformers/oneshot/test_api_inputs.py
index 1520516087..1041c1410e 100644
--- a/tests/llmcompressor/transformers/oneshot/test_api_inputs.py
+++ b/tests/llmcompressor/transformers/oneshot/test_api_inputs.py
@@ -1,3 +1,4 @@
+import os
 import shutil
 import unittest
 
@@ -62,4 +63,5 @@ def test_one_shot_inputs(self):
         )
 
     def tearDown(self):
-        shutil.rmtree(self.output)
+        if os.path.isdir(self.output):
+            shutil.rmtree(self.output)
diff --git a/tests/llmcompressor/transformers/sparsification/test_compress_tensor_utils.py b/tests/llmcompressor/transformers/sparsification/test_compress_tensor_utils.py
index 749a119082..d03ac3cd87 100644
--- a/tests/llmcompressor/transformers/sparsification/test_compress_tensor_utils.py
+++ b/tests/llmcompressor/transformers/sparsification/test_compress_tensor_utils.py
@@ -1,4 +1,5 @@
 import math
+import os
 import shutil
 
 import pytest
@@ -115,7 +116,8 @@ def test_sparse_model_reload(compressed, config, dtype, tmp_path):
         assert dense_tensor.dtype == reconstructed_tensor.dtype == dtype
         assert torch.equal(dense_tensor, reconstructed_tensor)
 
-    shutil.rmtree(tmp_path)
+    if os.path.isdir(tmp_path):
+        shutil.rmtree(tmp_path)
 
 
 @pytest.mark.parametrize(
@@ -145,7 +147,8 @@ def test_dense_model_save(tmp_path, skip_compression_stats, save_compressed):
     sparsity_config = ModelCompressor.parse_sparsity_config(compression_config)
     assert sparsity_config is None
 
-    shutil.rmtree(tmp_path)
+    if os.path.isdir(tmp_path):
+        shutil.rmtree(tmp_path)
 
 
 @pytest.mark.parametrize(
@@ -223,7 +226,8 @@ def test_quant_model_reload(format, dtype, tmp_path):
             assert not torch.any(diff > 0.01).item()
         else:
             assert torch.equal(dense_tensor, reconstructed_tensor)
-    shutil.rmtree(tmp_path)
+    if os.path.isdir(tmp_path):
+        shutil.rmtree(tmp_path)
 
 
 # technically only tie_word_embeddings=False is supported right now
@@ -435,7 +439,8 @@ def test_compressor_stacking(model_stub, recipe, sparse_format, quant_format, tm
             assert not torch.any(diff > 0.025), f"Max diff: {torch.max(diff)}"
         else:
             assert torch.equal(dense_tensor, reconstructed_tensor)
-    shutil.rmtree(tmp_path)
+    if os.path.isdir(tmp_path):
+        shutil.rmtree(tmp_path)
 
 
 @pytest.mark.parametrize(
@@ -503,7 +508,8 @@ def test_sparse_24_compressor_is_lossless(model_stub, recipe, sparse_format, tmp
         assert dense_tensor.dtype == reconstructed_tensor.dtype
         if key.endswith("weight"):
             assert torch.equal(dense_tensor, reconstructed_tensor)
-    shutil.rmtree(tmp_path)
+    if os.path.isdir(tmp_path):
+        shutil.rmtree(tmp_path)
 
 
 def test_disable_sparse_compression_flag(tmp_path):
@@ -530,7 +536,8 @@ def test_disable_sparse_compression_flag(tmp_path):
     assert sparsity_config
     assert sparsity_config["format"] == "dense"
 
-    shutil.rmtree(tmp_path)
+    if os.path.isdir(tmp_path):
+        shutil.rmtree(tmp_path)
 
 
 class DummyLinearModel(nn.Module):
diff --git a/tests/lmeval/test_lmeval.py b/tests/lmeval/test_lmeval.py
index f8b5bb2cbb..9c2065df38 100644
--- a/tests/lmeval/test_lmeval.py
+++ b/tests/lmeval/test_lmeval.py
@@ -175,5 +175,5 @@ def tear_down(self):
         df = pd.DataFrame(measurements)
         df.to_csv(p / f"{self.save_dir}.csv", index=False)
 
-        if self.save_dir is not None:
+        if self.save_dir is not None and os.path.isdir(self.save_dir):
             shutil.rmtree(self.save_dir)