Skip to content

Commit 05e8677

Browse files
sayakpaul and a-r-r-o-w authored
[tests] device_map tests for all models. (huggingface#11708)
* device_map tests for all models.
* updates
* Update tests/models/test_modeling_common.py

Co-authored-by: Aryan <[email protected]>

* fix device_map in test

---------

Co-authored-by: Aryan <[email protected]>
1 parent d72184e commit 05e8677

File tree

2 files changed

+39
-37
lines changed

2 files changed

+39
-37
lines changed

tests/models/test_modeling_common.py

Lines changed: 39 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -1736,6 +1736,45 @@ def test_auto_model(self, expected_max_diff=5e-5):
17361736
f"AutoModel forward pass diff: {max_diff} exceeds threshold {expected_max_diff}",
17371737
)
17381738

1739+
@parameterized.expand(
    [
        (-1, "You can't pass device_map as a negative int"),
        ("foo", "When passing device_map as a string, the value needs to be a device name"),
    ]
)
def test_wrong_device_map_raises_error(self, device_map, msg_substring):
    """Loading a saved checkpoint with an invalid ``device_map`` must raise ``ValueError``.

    Parameterized over a negative int and an unrecognized device string; the raised
    message must contain the expected user-facing guidance (``msg_substring``).
    """
    init_dict, _ = self.prepare_init_args_and_inputs_for_common()
    test_model = self.model_class(**init_dict)
    with tempfile.TemporaryDirectory() as checkpoint_dir:
        # Round-trip through a local checkpoint so the failure comes from
        # `from_pretrained`'s device_map validation, not from hub access.
        test_model.save_pretrained(checkpoint_dir)
        with self.assertRaises(ValueError) as error_context:
            _ = self.model_class.from_pretrained(checkpoint_dir, device_map=device_map)

    assert msg_substring in str(error_context.exception)
1754+
1755+
@parameterized.expand([0, "cuda", torch.device("cuda")])
@require_torch_gpu
def test_passing_non_dict_device_map_works(self, device_map):
    """A scalar ``device_map`` (int index, device string, or ``torch.device``) must load and run.

    Only exercises that the reloaded model executes a forward pass; output values
    are not compared here.
    """
    init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common()
    reference_model = self.model_class(**init_dict).eval()
    with tempfile.TemporaryDirectory() as checkpoint_dir:
        reference_model.save_pretrained(checkpoint_dir)
        restored_model = self.model_class.from_pretrained(checkpoint_dir, device_map=device_map)
        # A single forward pass is enough to confirm the weights were dispatched usably.
        _ = restored_model(**inputs_dict)
1764+
1765+
@parameterized.expand([("", "cuda"), ("", torch.device("cuda"))])
@require_torch_gpu
def test_passing_dict_device_map_works(self, name, device):
    """A dict-form ``device_map`` (``{module_name: device}``) must load and run the model."""
    # There are other valid dict-based `device_map` values too. It's best to refer to
    # the docs for those: https://huggingface.co/docs/accelerate/en/concept_guides/big_model_inference#the-devicemap.
    init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common()
    reference_model = self.model_class(**init_dict).eval()
    # "" maps the whole model (the root module) to the given device.
    placement = {name: device}
    with tempfile.TemporaryDirectory() as checkpoint_dir:
        reference_model.save_pretrained(checkpoint_dir)
        restored_model = self.model_class.from_pretrained(checkpoint_dir, device_map=placement)
        _ = restored_model(**inputs_dict)
1777+
17391778

17401779
@is_staging_test
17411780
class ModelPushToHubTester(unittest.TestCase):

tests/models/unets/test_models_unet_2d_condition.py

Lines changed: 0 additions & 37 deletions
Original file line numberDiff line numberDiff line change
@@ -46,7 +46,6 @@
4646
require_peft_backend,
4747
require_torch_accelerator,
4848
require_torch_accelerator_with_fp16,
49-
require_torch_gpu,
5049
skip_mps,
5150
slow,
5251
torch_all_close,
@@ -1084,42 +1083,6 @@ def test_load_sharded_checkpoint_device_map_from_hub_local_subfolder(self):
10841083
assert loaded_model
10851084
assert new_output.sample.shape == (4, 4, 16, 16)
10861085

1087-
@parameterized.expand(
    [
        (-1, "You can't pass device_map as a negative int"),
        ("foo", "When passing device_map as a string, the value needs to be a device name"),
    ]
)
def test_wrong_device_map_raises_error(self, device_map, msg_substring):
    # Invalid device_map values (negative int, unknown device string) must be
    # rejected by `from_pretrained` with a ValueError whose message contains
    # the expected user-facing guidance.
    with self.assertRaises(ValueError) as err_ctx:
        _ = self.model_class.from_pretrained(
            "hf-internal-testing/unet2d-sharded-dummy-subfolder", subfolder="unet", device_map=device_map
        )

    assert msg_substring in str(err_ctx.exception)
1100-
1101-
@parameterized.expand([0, "cuda", torch.device("cuda"), torch.device("cuda:0")])
@require_torch_gpu
def test_passing_non_dict_device_map_works(self, device_map):
    # A scalar device_map (int index, device string, or torch.device) should
    # load the sharded hub checkpoint and produce an output of the known shape.
    _, inputs_dict = self.prepare_init_args_and_inputs_for_common()
    loaded_model = self.model_class.from_pretrained(
        "hf-internal-testing/unet2d-sharded-dummy-subfolder", subfolder="unet", device_map=device_map
    )
    output = loaded_model(**inputs_dict)
    assert output.sample.shape == (4, 4, 16, 16)
1110-
1111-
@parameterized.expand([("", "cuda"), ("", torch.device("cuda"))])
@require_torch_gpu
def test_passing_dict_device_map_works(self, name, device_map):
    # A dict-form device_map ({module_name: device}, with "" meaning the root
    # module) should load the sharded hub checkpoint and run a forward pass.
    # There are other valid dict-based `device_map` values too. It's best to refer to
    # the docs for those: https://huggingface.co/docs/accelerate/en/concept_guides/big_model_inference#the-devicemap.
    _, inputs_dict = self.prepare_init_args_and_inputs_for_common()
    loaded_model = self.model_class.from_pretrained(
        "hf-internal-testing/unet2d-sharded-dummy-subfolder", subfolder="unet", device_map={name: device_map}
    )
    output = loaded_model(**inputs_dict)
    assert output.sample.shape == (4, 4, 16, 16)
1122-
11231086
@require_peft_backend
11241087
def test_load_attn_procs_raise_warning(self):
11251088
init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common()

0 commit comments

Comments
 (0)