|
| 1 | +from typing import Any, Callable, Dict, Optional, Tuple |
| 2 | +import torch |
| 3 | +from ..helpers.config_helper import update_config, check_hasattr |
| 4 | + |
| 5 | +__TASK__ = "text-to-image" |
| 6 | + |
| 7 | + |
def reduce_model_config(config: Any) -> Dict[str, Any]:
    """
    Shrinks a model configuration so the resulting model is small
    enough for quick unit tests.

    :param config: model configuration (dict-like), must define
        ``sample_size`` and ``cross_attention_dim``
    :return: the overridden values that were written back into *config*
    """
    check_hasattr(config, "sample_size", "cross_attention_dim")
    reduced = {
        "sample_size": min(config["sample_size"], 32),
        "cross_attention_dim": min(config["cross_attention_dim"], 64),
    }
    update_config(config, reduced)
    return reduced
| 17 | + |
| 18 | + |
| 19 | +def get_inputs( |
| 20 | + model: torch.nn.Module, |
| 21 | + config: Optional[Any], |
| 22 | + batch_size: int, |
| 23 | + sequence_length: int, |
| 24 | + cache_length: int, |
| 25 | + in_channels: int, |
| 26 | + sample_size: int, |
| 27 | + cross_attention_dim: int, |
| 28 | + add_second_input: bool = False, |
| 29 | + **kwargs, # unused |
| 30 | +): |
| 31 | + """ |
| 32 | + Generates inputs for task ``text-to-image``. |
| 33 | + Example: |
| 34 | +
|
| 35 | + :: |
| 36 | +
|
| 37 | + sample:T10s2x4x96x96[-3.7734375,4.359375:A-0.043463995395642184] |
| 38 | + timestep:T7s=101 |
| 39 | + encoder_hidden_states:T10s2x77x1024[-6.58203125,13.0234375:A-0.16780663634440257] |
| 40 | + """ |
| 41 | + assert ( |
| 42 | + "cls_cache" not in kwargs |
| 43 | + ), f"Not yet implemented for cls_cache={kwargs['cls_cache']!r}." |
| 44 | + batch = torch.export.Dim("batch", min=1, max=1024) |
| 45 | + shapes = { |
| 46 | + "sample": {0: batch}, |
| 47 | + "timestep": {}, |
| 48 | + "encoder_hidden_states": {0: batch, 1: "encoder_length"}, |
| 49 | + } |
| 50 | + inputs = dict( |
| 51 | + sample=torch.randn((batch_size, sequence_length, sample_size, sample_size)).to( |
| 52 | + torch.float32 |
| 53 | + ), |
| 54 | + timestep=torch.tensor([101], dtype=torch.int64), |
| 55 | + encoder_hidden_states=torch.randn( |
| 56 | + (batch_size, sequence_length, cross_attention_dim) |
| 57 | + ).to(torch.float32), |
| 58 | + ) |
| 59 | + res = dict(inputs=inputs, dynamic_shapes=shapes) |
| 60 | + if add_second_input: |
| 61 | + res["inputs2"] = get_inputs( |
| 62 | + model=model, |
| 63 | + config=config, |
| 64 | + batch_size=batch_size + 1, |
| 65 | + sequence_length=sequence_length, |
| 66 | + cache_length=cache_length + 1, |
| 67 | + in_channels=in_channels, |
| 68 | + sample_size=sample_size, |
| 69 | + cross_attention_dim=cross_attention_dim, |
| 70 | + **kwargs, |
| 71 | + )["inputs"] |
| 72 | + return res |
| 73 | + |
| 74 | + |
def random_input_kwargs(config: Any) -> Tuple[Dict[str, Any], Callable]:
    """
    Inputs kwargs.

    If the configuration is None, the function selects typical dimensions.

    :param config: model configuration or None
    :return: a tuple ``(kwargs, get_inputs)`` where *kwargs* feeds the
        returned input-generation callable
    """
    if config is not None:
        check_hasattr(config, "sample_size", "cross_attention_dim", "in_channels")
        kwargs = dict(
            batch_size=2,
            sequence_length=config["in_channels"],
            cache_length=77,
            in_channels=config["in_channels"],
            sample_size=config["sample_size"],
            cross_attention_dim=config["cross_attention_dim"],
        )
    else:
        # No configuration: fall back to the typical dimensions shown in
        # the get_inputs example (sample 2x4x96x96, encoder 2x77x1024).
        # Fixes a NameError: kwargs was unbound on this path.
        kwargs = dict(
            batch_size=2,
            sequence_length=4,
            cache_length=77,
            in_channels=4,
            sample_size=96,
            cross_attention_dim=1024,
        )
    return kwargs, get_inputs
0 commit comments