Skip to content
Merged
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
52 changes: 52 additions & 0 deletions tests/models/glm_image/test_modeling_glm_image.py
Original file line number Diff line number Diff line change
Expand Up @@ -15,6 +15,7 @@

import unittest

import pytest
from parameterized import parameterized

from transformers import (
Expand Down Expand Up @@ -283,6 +284,30 @@ def test_sdpa_can_dispatch_on_flash(self):
def test_multi_gpu_data_parallel_forward(self):
pass

# Expected failure (not a skip): the VQ module reads `weight.data` directly in
# forward, which defeats hook-based disk offloading for that module.
@pytest.mark.xfail(
reason="GlmImage has a VQ module that uses `weight.data` directly in forward which prevent offloading on that module"
)
def test_disk_offload_safetensors(self):
# Body intentionally empty: xfail marks the inherited behavior as known-broken.
pass

# Expected failure: same `weight.data`-in-forward limitation as the
# safetensors offload test, but for the .bin checkpoint path.
@pytest.mark.xfail(
reason="GlmImage has a VQ module that uses `weight.data` directly in forward which prevent offloading on that module"
)
def test_disk_offload_bin(self):
# Body intentionally empty: xfail marks the inherited behavior as known-broken.
pass

# Expected failure: direct `weight.data` access in the VQ module's forward
# also breaks CPU offloading hooks, not just disk offloading.
@pytest.mark.xfail(
reason="GlmImage has a VQ module that uses `weight.data` directly in forward which prevent offloading on that module"
)
def test_cpu_offload(self):
# Body intentionally empty: xfail marks the inherited behavior as known-broken.
pass

# Expected failure: model-parallel device placement relies on the same
# offloading hooks that the VQ module's direct `weight.data` use bypasses.
@pytest.mark.xfail(
reason="GlmImage has a VQ module that uses `weight.data` directly in forward which prevent offloading on that module"
)
def test_model_parallelism(self):
# Body intentionally empty: xfail marks the inherited behavior as known-broken.
pass

# Hard skip (not xfail): compilation errors out for this model, so the
# inherited static-cache generation test cannot run at all.
# NOTE(review): "Error with compilation" is terse — a link to a tracking
# issue would help future maintainers know when this can be unskipped.
@unittest.skip("Error with compilation")
def test_generate_from_inputs_embeds_with_static_cache(self):
# Body intentionally empty: the skip decorator disables the inherited test.
pass
Expand Down Expand Up @@ -337,6 +362,12 @@ def test_training_gradient_checkpointing_use_reentrant(self):
def test_training_gradient_checkpointing_use_reentrant_false(self):
pass

# Skipped: gradient checkpointing does not compute gradients correctly for
# this architecture; the linked PR tracks the underlying issue.
@unittest.skip(
reason="This architecture seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124"
)
def test_training_gradient_checkpointing_use_reentrant_true(self):
# Body intentionally empty: the skip decorator disables the inherited test.
pass
Comment on lines +365 to +369
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

As far as I can see this is the only test that is currently blocking us on CI


# Skipped: the vision tower does not support training, so retained gradients
# on hidden states / attentions cannot be exercised.
@unittest.skip(reason="GlmImageVisionModel does not support training")
def test_retain_grad_hidden_states_attentions(self):
# Body intentionally empty: the skip decorator disables the inherited test.
pass
Expand All @@ -345,6 +376,27 @@ def test_retain_grad_hidden_states_attentions(self):
def test_generate_compile_model_forward_fullgraph(self):
pass

# Skipped: the generic FA2 test strips multimodal inputs (pixel_values,
# image_grid_thw) that this model requires, leading to a NoneType error.
@unittest.skip(
reason="GlmImage is a multimodal model that requires pixel_values and image_grid_thw. "
"This test drops all inputs except input_ids which causes NoneType iteration error."
)
def test_flash_attention_2_continue_generate_with_position_ids(self):
# Body intentionally empty: the skip decorator disables the inherited test.
pass

# Skipped for the same reason as the other FA2 tests: the generic test feeds
# only input_ids/attention_mask, but this model needs image inputs too.
@unittest.skip(
reason="GlmImage is a multimodal model that requires pixel_values and image_grid_thw. "
"This test only uses input_ids and attention_mask which causes NoneType iteration error."
)
def test_flash_attn_2_fp32_ln(self):
# Body intentionally empty: the skip decorator disables the inherited test.
pass

# Skipped for the same reason as the other FA2 tests: the generic test feeds
# only input_ids/attention_mask, but this model needs image inputs too.
@unittest.skip(
reason="GlmImage is a multimodal model that requires pixel_values and image_grid_thw. "
"This test only uses input_ids and attention_mask which causes NoneType iteration error."
)
def test_flash_attn_2_from_config(self):
# Body intentionally empty: the skip decorator disables the inherited test.
pass


@require_torch
@slow
Expand Down