from typing import Any, Callable, Dict, Optional, Tuple
import torch
from ..helpers.cache_helper import make_dynamic_cache
from ..helpers.config_helper import update_config, check_hasattr, _pick

__TASK__ = "MoE"


def reduce_model_config(config: Any) -> Dict[str, Any]:
    """Reduces a model size by capping layer counts; returns the applied changes."""
    kwargs: Dict[str, Any] = {}
    if hasattr(config, "num_hidden_layers"):
        kwargs["num_hidden_layers"] = min(config.num_hidden_layers, 2)
    if hasattr(config, "vision_config") and hasattr(config.vision_config, "num_hidden_layers"):
        kwargs["vision_config"] = dict(
            num_hidden_layers=min(config.vision_config.num_hidden_layers, 2)
        )
    if hasattr(config, "audio_processor"):
        audio_kwargs: Dict[str, Any] = {}
        if hasattr(config.audio_processor, "num_hidden_layers"):
            audio_kwargs["num_hidden_layers"] = min(
                config.audio_processor.num_hidden_layers, 2
            )
        if hasattr(config.audio_processor, "attention_dim"):
            audio_kwargs["attention_dim"] = min(config.audio_processor.attention_dim, 2)
        if audio_kwargs:
            kwargs["audio_processor"] = audio_kwargs
    # ``update_config`` is expected to apply nested dictionaries to the
    # matching sub-configurations; collecting the changes in ``kwargs``
    # (instead of mutating ``config`` directly) keeps the return value
    # consistent with what was actually modified.
    update_config(config, kwargs)
    return kwargs
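
# A minimal sketch of what ``reduce_model_config`` produces, assuming a
# configuration carrying nested vision and audio sub-configurations (the
# values below are illustrative, not taken from a real checkpoint):
#
#   kwargs = reduce_model_config(config)
#   # e.g. {"num_hidden_layers": 2,
#   #       "vision_config": {"num_hidden_layers": 2},
#   #       "audio_processor": {"num_hidden_layers": 2, "attention_dim": 2}}
#   # ``update_config`` has already written these values back into ``config``.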


def get_inputs(
    model: torch.nn.Module,
    config: Optional[Any],
    dummy_max_token_id: int,
    num_key_value_heads: int,
    num_hidden_layers: int,
    head_dim: int,
    width: int,
    height: int,
    num_channels: int,
    batch_size: int = 2,
    sequence_length: int = 30,
    sequence_length2: int = 3,
    n_images: int = 2,
    dynamic_rope: bool = False,
    **kwargs,  # unused
):
    """
    Generates inputs for task ``MoE``.

    :param model: model to get the missing information
    :param config: configuration used to generate the model
    :param dummy_max_token_id: dummy max token id
    :param num_key_value_heads: number of key/value heads in the cache
    :param num_hidden_layers: number of hidden layers (one cache entry per layer)
    :param head_dim: last dimension of the cache
    :param width: width of the image
    :param height: height of the image
    :param num_channels: number of channels
    :param batch_size: batch size
    :param sequence_length: sequence length already in the cache
    :param sequence_length2: new sequence length
    :param n_images: number of images
    :param dynamic_rope: use dynamic rope (see :class:`transformers.LlamaConfig`)
    :return: dictionary with keys ``inputs`` and ``dynamic_shapes``
    """
    batch = torch.export.Dim("batch", min=1, max=1024)
    seq_length = "seq_length"  # torch.export.Dim("seq_length", min=1, max=4096)
    cache_length = "cache_length"  # torch.export.Dim("cache_length", min=1, max=4096)
    images = "images"  # torch.export.Dim("images", min=1, max=4096)

    shapes = {
        "input_ids": {0: batch, 1: seq_length},
        "attention_mask": {
            0: batch,
            1: "cache+seq",  # cache_length + seq_length
        },
        "position_ids": {
            0: batch,
            # position_ids has the same length as input_ids below; reusing the
            # "cache+seq" label here would force two different sizes onto one
            # symbolic dimension and make the export fail.
            1: seq_length,
        },
        "past_key_values": [
            [{0: batch, 2: cache_length} for _ in range(num_hidden_layers)],
            [{0: batch, 2: cache_length} for _ in range(num_hidden_layers)],
        ],
        "pixel_values": {0: batch, 1: images},
        "image_attention_mask": {0: batch, 1: seq_length, 2: images},
    }
    inputs = dict(
        # token ids for the new tokens only
        input_ids=torch.randint(0, dummy_max_token_id, (batch_size, sequence_length2)).to(
            torch.int64
        ),
        # mask covering the cached tokens plus the new ones
        attention_mask=torch.ones((batch_size, sequence_length + sequence_length2)).to(
            torch.int64
        ),
        position_ids=torch.arange(sequence_length, sequence_length + sequence_length2)
        .to(torch.int64)
        .expand((batch_size, -1)),
        # one (key, value) pair per hidden layer
        past_key_values=make_dynamic_cache(
            [
                (
                    torch.randn(batch_size, num_key_value_heads, sequence_length, head_dim),
                    torch.randn(batch_size, num_key_value_heads, sequence_length, head_dim),
                )
                for _ in range(num_hidden_layers)
            ]
        ),
        image_attention_mask=torch.ones((batch_size, sequence_length2, n_images)).to(
            torch.int64
        ),
        # pixel values must be floating point, an integer dtype would break
        # the convolutions in most vision towers
        pixel_values=torch.ones((batch_size, n_images, num_channels, width, height)).to(
            torch.float32
        ),
    )
    return dict(inputs=inputs, dynamic_shapes=shapes)
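
# The returned dictionary is meant to feed :func:`torch.export.export`.
# A minimal sketch, assuming ``model`` accepts these inputs as keyword
# arguments (the call below is illustrative):
#
#   data = get_inputs(model, config, **kwargs)
#   ep = torch.export.export(
#       model,
#       (),
#       kwargs=data["inputs"],
#       dynamic_shapes=data["dynamic_shapes"],
#   )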


def random_input_kwargs(config: Any) -> Tuple[Dict[str, Any], Callable]:
    """
    Inputs kwargs.

    If the configuration is None, the function selects typical dimensions.
    """
    if config is not None:
        check_hasattr(
            config,
            "vocab_size",
            "hidden_size",
            "num_attention_heads",
            ("num_key_value_heads", "num_attention_heads"),
            "intermediate_size",
            "num_hidden_layers",
            "vision_config",
            "audio_processor",
        )
        check_hasattr(config.vision_config, "image_size", "num_channels")
    kwargs = dict(
        batch_size=2,
        sequence_length=30,
        sequence_length2=3,
        head_dim=(
            16
            if config is None
            else getattr(config, "head_dim", config.hidden_size // config.num_attention_heads)
        ),
        dummy_max_token_id=31999 if config is None else config.vocab_size - 1,
        num_hidden_layers=4 if config is None else config.num_hidden_layers,
        num_key_value_heads=(
            8
            if config is None
            else _pick(config, "num_key_value_heads", "num_attention_heads")
        ),
        intermediate_size=1024 if config is None else config.intermediate_size,
        hidden_size=512 if config is None else config.hidden_size,
        width=224 if config is None else config.vision_config.image_size,
        height=224 if config is None else config.vision_config.image_size,
        num_channels=3 if config is None else config.vision_config.num_channels,
    )
    return kwargs, get_inputs
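

# End-to-end illustration of the intended flow (the checkpoint name is
# hypothetical, shown only as an example):
#
#   from transformers import AutoConfig, AutoModelForCausalLM
#
#   config = AutoConfig.from_pretrained("some/moe-checkpoint")
#   reduce_model_config(config)              # shrink the model before loading
#   model = AutoModelForCausalLM.from_config(config)
#   kwargs, fn = random_input_kwargs(config)
#   data = fn(model, config, **kwargs)       # inputs + dynamic_shapes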