Commit e5c43b8

DN6 and sayakpaul authored
[CI] Fix Fast GPU tests on PR (huggingface#10912)
* update
* update
* update
* update
* update

---------

Co-authored-by: Sayak Paul <[email protected]>
1 parent 9a8e8db commit e5c43b8

4 files changed: +13 -9

.github/workflows/pr_tests_gpu.yml

Lines changed: 2 additions & 0 deletions
@@ -11,6 +11,8 @@ on:
       - "src/diffusers/loaders/lora_base.py"
       - "src/diffusers/loaders/lora_pipeline.py"
       - "src/diffusers/loaders/peft.py"
+      - "tests/pipelines/test_pipelines_common.py"
+      - "tests/models/test_modeling_common.py"
   workflow_dispatch:
 
 concurrency:

tests/models/test_modeling_common.py

Lines changed: 4 additions & 5 deletions
@@ -1169,17 +1169,16 @@ def test_disk_offload_without_safetensors(self):
         base_output = model(**inputs_dict)
 
         model_size = compute_module_sizes(model)[""]
+        max_size = int(self.model_split_percents[0] * model_size)
+        # Force disk offload by setting very small CPU memory
+        max_memory = {0: max_size, "cpu": int(0.1 * max_size)}
+
         with tempfile.TemporaryDirectory() as tmp_dir:
             model.cpu().save_pretrained(tmp_dir, safe_serialization=False)
-
             with self.assertRaises(ValueError):
-                max_size = int(self.model_split_percents[0] * model_size)
-                max_memory = {0: max_size, "cpu": max_size}
                 # This errors out because it's missing an offload folder
                 new_model = self.model_class.from_pretrained(tmp_dir, device_map="auto", max_memory=max_memory)
 
-            max_size = int(self.model_split_percents[0] * model_size)
-            max_memory = {0: max_size, "cpu": max_size}
             new_model = self.model_class.from_pretrained(
                 tmp_dir, device_map="auto", max_memory=max_memory, offload_folder=tmp_dir
             )
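For context, a minimal standalone sketch of the offload pattern this test now exercises, assuming a tiny UNet2DModel as a stand-in for self.model_class (the model config and the 0.5 split fraction below are illustrative, not from this commit): compute the model size, cap both the GPU and CPU budgets so accelerate must spill weights to disk, and check that from_pretrained only succeeds once an offload_folder is supplied.

import tempfile

from accelerate.utils import compute_module_sizes
from diffusers import UNet2DModel

# Illustrative tiny model; the real test uses self.model_class from each model tester.
model = UNet2DModel(
    sample_size=32,
    in_channels=3,
    out_channels=3,
    layers_per_block=1,
    block_out_channels=(32, 64),
    down_block_types=("DownBlock2D", "AttnDownBlock2D"),
    up_block_types=("AttnUpBlock2D", "UpBlock2D"),
)

model_size = compute_module_sizes(model)[""]            # total size in bytes
max_size = int(0.5 * model_size)                        # stands in for model_split_percents[0]
# Tiny CPU budget forces accelerate to offload the remainder to disk (needs a CUDA device 0).
max_memory = {0: max_size, "cpu": int(0.1 * max_size)}

with tempfile.TemporaryDirectory() as tmp_dir:
    model.save_pretrained(tmp_dir, safe_serialization=False)

    try:
        # Fails: disk offload is required but no offload_folder was given.
        UNet2DModel.from_pretrained(tmp_dir, device_map="auto", max_memory=max_memory)
    except ValueError as err:
        print(f"expected failure: {err}")

    # Succeeds: offload_folder gives the disk-offloaded weights somewhere to live.
    offloaded = UNet2DModel.from_pretrained(
        tmp_dir, device_map="auto", max_memory=max_memory, offload_folder=tmp_dir
    )

The key change in the diff is that max_memory is now computed once, before saving and reloading, and the CPU budget is shrunk so the disk-offload path is genuinely exercised rather than everything fitting in CPU memory.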

tests/models/transformers/test_models_transformer_omnigen.py

Lines changed: 3 additions & 2 deletions
@@ -30,6 +30,7 @@ class OmniGenTransformerTests(ModelTesterMixin, unittest.TestCase):
     model_class = OmniGenTransformer2DModel
     main_input_name = "hidden_states"
     uses_custom_attn_processor = True
+    model_split_percents = [0.1, 0.1, 0.1]
 
     @property
     def dummy_input(self):

@@ -73,9 +74,9 @@ def prepare_init_args_and_inputs_for_common(self):
             "num_attention_heads": 4,
             "num_key_value_heads": 4,
             "intermediate_size": 32,
-            "num_layers": 1,
+            "num_layers": 20,
             "pad_token_id": 0,
-            "vocab_size": 100,
+            "vocab_size": 1000,
             "in_channels": 4,
             "time_step_dim": 4,
             "rope_scaling": {"long_factor": list(range(1, 3)), "short_factor": list(range(1, 3))},

tests/models/transformers/test_models_transformer_sd3.py

Lines changed: 4 additions & 2 deletions
@@ -33,6 +33,7 @@
 class SD3TransformerTests(ModelTesterMixin, unittest.TestCase):
     model_class = SD3Transformer2DModel
     main_input_name = "hidden_states"
+    model_split_percents = [0.8, 0.8, 0.9]
 
     @property
     def dummy_input(self):

@@ -67,7 +68,7 @@ def prepare_init_args_and_inputs_for_common(self):
             "sample_size": 32,
             "patch_size": 1,
             "in_channels": 4,
-            "num_layers": 1,
+            "num_layers": 4,
             "attention_head_dim": 8,
             "num_attention_heads": 4,
             "caption_projection_dim": 32,

@@ -107,6 +108,7 @@ def test_gradient_checkpointing_is_applied(self):
 class SD35TransformerTests(ModelTesterMixin, unittest.TestCase):
     model_class = SD3Transformer2DModel
     main_input_name = "hidden_states"
+    model_split_percents = [0.8, 0.8, 0.9]
 
     @property
     def dummy_input(self):

@@ -141,7 +143,7 @@ def prepare_init_args_and_inputs_for_common(self):
             "sample_size": 32,
             "patch_size": 1,
             "in_channels": 4,
-            "num_layers": 2,
+            "num_layers": 4,
             "attention_head_dim": 8,
             "num_attention_heads": 4,
             "caption_projection_dim": 32,

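The transformer test changes follow one theme: model_split_percents feeds the shared offload and model-parallelism tests, which budget per-device memory as a fraction of the total model size, so the dummy configs need enough layers (and, for OmniGen, a larger vocabulary) for those fractions to yield a usable split. Below is a rough sketch of that arithmetic using the updated SD3 dummy config; the extra keyword values (joint_attention_dim, pooled_projection_dim, out_channels) and the description of how the mixin uses the attribute are assumptions for illustration, not code from this commit.

from accelerate.utils import compute_module_sizes
from diffusers import SD3Transformer2DModel

# Dummy config roughly matching the updated SD3 test (num_layers bumped from 1 to 4).
model = SD3Transformer2DModel(
    sample_size=32,
    patch_size=1,
    in_channels=4,
    num_layers=4,
    attention_head_dim=8,
    num_attention_heads=4,
    caption_projection_dim=32,
    joint_attention_dim=32,    # assumed value, only needed to build the model
    pooled_projection_dim=64,  # assumed value
    out_channels=4,            # assumed value
)

model_size = compute_module_sizes(model)[""]  # total parameter size in bytes

# Roughly how the shared tests appear to use model_split_percents: each entry caps
# the memory allowed on the first device, so the remainder must land on another
# device, the CPU, or disk. With a one-layer model the resulting budgets leave
# nothing meaningful to split, which is why the dummy configs were enlarged and
# the percents tuned per model.
for percent in [0.8, 0.8, 0.9]:
    max_size = int(percent * model_size)
    max_memory = {0: max_size, "cpu": max_size}
    print(f"split {percent:.1f}: at most {max_size} of {model_size} bytes on device 0")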