diff --git a/dfm/src/common/utils/save_video.py b/dfm/src/common/utils/save_video.py index 901efa57..dd7d66a0 100644 --- a/dfm/src/common/utils/save_video.py +++ b/dfm/src/common/utils/save_video.py @@ -44,5 +44,4 @@ def save_video( "output_params": ["-f", "mp4"], } - print("video_save_path", video_save_path) imageio.mimsave(video_save_path, grid, "mp4", **kwargs) diff --git a/dfm/src/megatron/data/common/diffusion_energon_datamodule.py b/dfm/src/megatron/data/common/diffusion_energon_datamodule.py index 59a97b73..f59a55fc 100644 --- a/dfm/src/megatron/data/common/diffusion_energon_datamodule.py +++ b/dfm/src/megatron/data/common/diffusion_energon_datamodule.py @@ -55,7 +55,11 @@ def __post_init__(self): self.sequence_length = self.dataset.seq_length def build_datasets(self, context: DatasetBuildContext): - return self.dataset.train_dataloader(), self.dataset.val_dataloader(), self.dataset.test_dataloader() + return ( + iter(self.dataset.train_dataloader()), + iter(self.dataset.val_dataloader()), + iter(self.dataset.val_dataloader()), + ) class DiffusionDataModule(EnergonMultiModalDataModule): diff --git a/dfm/src/megatron/data/common/diffusion_sample.py b/dfm/src/megatron/data/common/diffusion_sample.py index 8efcc79c..702a392c 100644 --- a/dfm/src/megatron/data/common/diffusion_sample.py +++ b/dfm/src/megatron/data/common/diffusion_sample.py @@ -80,11 +80,14 @@ def to_dict(self) -> dict: def __add__(self, other: Any) -> int: """Adds the sequence length of this sample with another sample or integer.""" if isinstance(other, DiffusionSample): - # Combine the values of the two instances - return self.seq_len_q.item() + other.seq_len_q.item() + # Use padded length if available (for CP), otherwise use unpadded + self_len = self.seq_len_q_padded.item() if self.seq_len_q_padded is not None else self.seq_len_q.item() + other_len = other.seq_len_q_padded.item() if other.seq_len_q_padded is not None else other.seq_len_q.item() + return self_len + other_len elif isinstance(other, int): - # Add an integer to the value - return self.seq_len_q.item() + other + # Use padded length if available (for CP), otherwise use unpadded + self_len = self.seq_len_q_padded.item() if self.seq_len_q_padded is not None else self.seq_len_q.item() + return self_len + other raise NotImplementedError def __radd__(self, other: Any) -> int: @@ -92,13 +95,20 @@ def __radd__(self, other: Any) -> int: # This is called if sum or other operations start with a non-DiffusionSample object. # e.g., sum([DiffusionSample(1), DiffusionSample(2)]) -> the 0 + DiffusionSample(1) calls __radd__. 
if isinstance(other, int): - return self.seq_len_q.item() + other + # Use padded length if available (for CP), otherwise use unpadded + self_len = self.seq_len_q_padded.item() if self.seq_len_q_padded is not None else self.seq_len_q.item() + return self_len + other raise NotImplementedError def __lt__(self, other: Any) -> bool: """Compares this sample's sequence length with another sample or integer.""" if isinstance(other, DiffusionSample): - return self.seq_len_q.item() < other.seq_len_q.item() + # Use padded length if available (for CP), otherwise use unpadded + self_len = self.seq_len_q_padded.item() if self.seq_len_q_padded is not None else self.seq_len_q.item() + other_len = other.seq_len_q_padded.item() if other.seq_len_q_padded is not None else other.seq_len_q.item() + return self_len < other_len elif isinstance(other, int): - return self.seq_len_q.item() < other + # Use padded length if available (for CP), otherwise use unpadded + self_len = self.seq_len_q_padded.item() if self.seq_len_q_padded is not None else self.seq_len_q.item() + return self_len < other raise NotImplementedError diff --git a/dfm/src/megatron/data/common/diffusion_task_encoder_with_sp.py b/dfm/src/megatron/data/common/diffusion_task_encoder_with_sp.py index 369d5e26..8fb45c3a 100644 --- a/dfm/src/megatron/data/common/diffusion_task_encoder_with_sp.py +++ b/dfm/src/megatron/data/common/diffusion_task_encoder_with_sp.py @@ -56,7 +56,7 @@ def __init__( self, *args, max_frames: int = None, - text_embedding_padding_size: int = 512, + text_embedding_max_length: int = 512, seq_length: int = None, patch_spatial: int = 2, patch_temporal: int = 1, @@ -65,7 +65,7 @@ def __init__( ): super().__init__(*args, **kwargs) self.max_frames = max_frames - self.text_embedding_padding_size = text_embedding_padding_size + self.text_embedding_max_length = text_embedding_max_length self.seq_length = seq_length self.patch_spatial = patch_spatial self.patch_temporal = patch_temporal diff --git a/dfm/src/megatron/data/common/sequence_packing_utils.py b/dfm/src/megatron/data/common/sequence_packing_utils.py index f551a292..612d083c 100644 --- a/dfm/src/megatron/data/common/sequence_packing_utils.py +++ b/dfm/src/megatron/data/common/sequence_packing_utils.py @@ -71,35 +71,3 @@ def first_fit_decreasing(seqlens: List[int], pack_size: int) -> List[List[int]]: """ sorted_seqlens = sorted(seqlens, reverse=True) return first_fit(sorted_seqlens, pack_size) - - -def concat_pad(tensor_list, max_seq_length): - """ - Efficiently concatenates a list of tensors along the first dimension and pads with zeros - to reach max_seq_length. - - Args: - tensor_list (list of torch.Tensor): List of tensors to concatenate and pad. - max_seq_length (int): The desired size of the first dimension of the output tensor. - - Returns: - torch.Tensor: A tensor of shape [max_seq_length, ...], where ... represents the remaining dimensions. 
- """ - import torch - - # Get common properties from the first tensor - other_shape = tensor_list[0].shape[1:] - dtype = tensor_list[0].dtype - device = tensor_list[0].device - - # Initialize the result tensor with zeros - result = torch.zeros((max_seq_length, *other_shape), dtype=dtype, device=device) - - current_index = 0 - for tensor in tensor_list: - length = tensor.shape[0] - # Directly assign the tensor to the result tensor without checks - result[current_index : current_index + length] = tensor - current_index += length - - return result diff --git a/dfm/src/megatron/data/dit/dit_mock_datamodule.py b/dfm/src/megatron/data/dit/dit_mock_datamodule.py index 3d5e9cf3..fea6f4cf 100644 --- a/dfm/src/megatron/data/dit/dit_mock_datamodule.py +++ b/dfm/src/megatron/data/dit/dit_mock_datamodule.py @@ -113,7 +113,7 @@ def mock_batch( seq_len_kv=seq_len_kv_packed, seq_len_kv_padded=seq_len_kv_padded_packed, latent_shape=torch.tensor([[C, T, H, W] for _ in range(number_packed_samples)], dtype=torch.int32), - pos_ids=pos_ids_packed, + pos_ids=pos_ids_packed.unsqueeze(0), video_metadata=[{"caption": f"Mock video sample {i}"} for i in range(number_packed_samples)], ) @@ -131,16 +131,19 @@ class DiTMockDataModuleConfig(DatasetProvider): dataloader_type: str = "external" task_encoder_seq_length: int = None F_latents: int = 1 - H_latents: int = 64 - W_latents: int = 96 + H_latents: int = 256 + W_latents: int = 512 patch_spatial: int = 2 patch_temporal: int = 1 - number_packed_samples: int = 3 + number_packed_samples: int = 1 context_seq_len: int = 512 context_embeddings_dim: int = 1024 def __post_init__(self): mock_ds = _MockDataset(length=1024) + kwargs = {} + if self.num_workers > 0: + kwargs["prefetch_factor"] = 8 self._train_dl = DataLoader( mock_ds, batch_size=self.micro_batch_size, @@ -157,6 +160,8 @@ def __post_init__(self): ), shuffle=False, drop_last=False, + pin_memory=True, + **kwargs, ) self._train_dl = iter(self._train_dl) self.sequence_length = self.seq_length diff --git a/dfm/src/megatron/data/dit/dit_taskencoder.py b/dfm/src/megatron/data/dit/dit_taskencoder.py index 87e9fbcf..bcf79cd0 100644 --- a/dfm/src/megatron/data/dit/dit_taskencoder.py +++ b/dfm/src/megatron/data/dit/dit_taskencoder.py @@ -31,9 +31,9 @@ class DiTTaskEncoder(DiffusionTaskEncoderWithSequencePacking): Attributes: cookers (list): A list of Cooker objects used for processing. max_frames (int, optional): The maximum number of frames to consider from the video. Defaults to None. - text_embedding_padding_size (int): The padding size for text embeddings. Defaults to 512. + text_embedding_max_length (int): The maximum length for text embeddings. Defaults to 512. Methods: - __init__(*args, max_frames=None, text_embedding_padding_size=512, **kwargs): + __init__(*args, max_frames=None, text_embedding_max_size=512, **kwargs): Initializes the BasicDiffusionTaskEncoder with optional maximum frames and text embedding padding size. encode_sample(sample: dict) -> dict: Encodes a given sample dictionary containing video and text data. 
@@ -71,7 +71,6 @@ def encode_sample(self, sample: dict) -> DiffusionSample: // self.patch_spatial**2 // self.patch_temporal ) - is_image = T == 1 if seq_len > self.seq_length: print(f"Skipping sample {sample['__key__']} because seq_len {seq_len} > self.seq_length {self.seq_length}") @@ -100,8 +99,8 @@ def encode_sample(self, sample: dict) -> DiffusionSample: t5_text_embeddings = torch.from_numpy(sample["pickle"]).to(torch.bfloat16) t5_text_embeddings_seq_length = t5_text_embeddings.shape[0] - if t5_text_embeddings_seq_length > self.text_embedding_padding_size: - t5_text_embeddings = t5_text_embeddings[: self.text_embedding_padding_size] + if t5_text_embeddings_seq_length > self.text_embedding_max_length: + t5_text_embeddings = t5_text_embeddings[: self.text_embedding_max_length] t5_text_mask = torch.ones(t5_text_embeddings_seq_length, dtype=torch.bfloat16) pos_ids = rearrange( diff --git a/dfm/src/megatron/model/dit/dit_layer_spec.py b/dfm/src/megatron/model/dit/dit_layer_spec.py index 97afaf1d..8fe8433c 100644 --- a/dfm/src/megatron/model/dit/dit_layer_spec.py +++ b/dfm/src/megatron/model/dit/dit_layer_spec.py @@ -20,8 +20,8 @@ import torch import torch.nn as nn +from megatron.core.jit import jit_fuser from megatron.core.transformer.attention import ( - SelfAttention, SelfAttentionSubmodules, ) from megatron.core.transformer.custom_layers.transformer_engine import ( @@ -41,7 +41,11 @@ from megatron.core.utils import make_viewless_tensor # to be imported from common -from dfm.src.megatron.model.common.dit_attention import DiTCrossAttention, DiTCrossAttentionSubmodules +from dfm.src.megatron.model.common.dit_attention import ( + DiTCrossAttention, + DiTCrossAttentionSubmodules, + DiTSelfAttention, +) @dataclass @@ -91,19 +95,24 @@ def __init__( setattr(self.adaLN_modulation[-1].weight, "sequence_parallel", config.sequence_parallel) + @jit_fuser def forward(self, timestep_emb): return self.adaLN_modulation(timestep_emb).chunk(self.n_adaln_chunks, dim=-1) + @jit_fuser def modulate(self, x, shift, scale): return x * (1 + scale) + shift + @jit_fuser def scale_add(self, residual, x, gate): return residual + gate * x + @jit_fuser def modulated_layernorm(self, x, shift, scale): input_layernorm_output = self.ln(x).type_as(x) return self.modulate(input_layernorm_output, shift, scale) + @jit_fuser def scaled_modulated_layernorm(self, residual, x, gate, shift, scale): hidden_states = self.scale_add(residual, x, gate) shifted_pre_mlp_layernorm_output = self.modulated_layernorm(hidden_states, shift, scale) @@ -156,7 +165,9 @@ def _replace_no_cp_submodules(submodules): layer_number=layer_number, ) - self.adaLN = AdaLN(config=self.config, n_adaln_chunks=9 if self.cross_attention else 6) + self.adaLN = AdaLN( + config=self.config, n_adaln_chunks=9 if not isinstance(self.cross_attention, IdentityOp) else 6 + ) def forward( self, @@ -176,7 +187,7 @@ def forward( ): timestep_emb = attention_mask - if self.cross_attention: + if not isinstance(self.cross_attention, IdentityOp): shift_full, scale_full, gate_full, shift_ca, scale_ca, gate_ca, shift_mlp, scale_mlp, gate_mlp = ( self.adaLN(timestep_emb) ) @@ -192,7 +203,7 @@ def forward( packed_seq_params=None if packed_seq_params is None else packed_seq_params["self_attention"], ) - if self.cross_attention: + if not isinstance(self.cross_attention, IdentityOp): hidden_states, pre_cross_attn_layernorm_output_ada = self.adaLN.scaled_modulated_layernorm( residual=hidden_states, x=attention_output, @@ -210,7 +221,7 @@ def forward( hidden_states, 
pre_mlp_layernorm_output_ada = self.adaLN.scaled_modulated_layernorm( residual=hidden_states, x=attention_output, - gate=gate_ca if self.cross_attention else gate_full, + gate=gate_ca if not isinstance(self.cross_attention, IdentityOp) else gate_full, shift=shift_mlp, scale=scale_mlp, ) @@ -234,7 +245,7 @@ def get_dit_adaln_block_with_transformer_engine_spec() -> ModuleSpec: module=DiTLayerWithAdaLN, submodules=DiTWithAdaLNSubmodules( full_self_attention=ModuleSpec( - module=SelfAttention, + module=DiTSelfAttention, params=params, submodules=SelfAttentionSubmodules( linear_qkv=TEColumnParallelLinear, diff --git a/dfm/src/megatron/model/dit/dit_model_provider.py b/dfm/src/megatron/model/dit/dit_model_provider.py index c9e80f16..4b9a6aa8 100644 --- a/dfm/src/megatron/model/dit/dit_model_provider.py +++ b/dfm/src/megatron/model/dit/dit_model_provider.py @@ -14,7 +14,6 @@ import logging from dataclasses import dataclass -from typing import Callable import torch from megatron.bridge.models.model_provider import ModelProviderMixin @@ -39,14 +38,14 @@ class DiTModelProvider(TransformerConfig, ModelProviderMixin[VisionModule]): add_bias_linear: bool = False gated_linear_unit: bool = False - num_layers: int = 28 - hidden_size: int = 1152 + num_layers: int = 12 + hidden_size: int = 384 max_img_h: int = 80 max_img_w: int = 80 max_frames: int = 34 patch_spatial: int = 2 patch_temporal: int = 1 - num_attention_heads: int = 16 + num_attention_heads: int = 6 layernorm_epsilon = 1e-6 normalization = "RMSNorm" add_bias_linear: bool = False @@ -110,52 +109,27 @@ def configure_vae(self): @dataclass -class DiT7BModelProvider(DiTModelProvider): - hidden_size: int = 4096 - max_img_h: int = 240 - max_img_w: int = 240 - max_frames: int = 128 - num_attention_heads: int = 32 +class DiTBModelProvider(DiTModelProvider): + """DiT-B""" - apply_rope_fusion: bool = True # TODO: do we support this? - additional_timestamp_channels = None # TODO: do we support this? 
- vae_module: str = None - vae_path: str = None + num_layers: int = 12 + hidden_size: int = 768 + num_attention_heads: int = 12 @dataclass -class DiT14BModelProvider(DiTModelProvider): - num_layers: int = 36 - hidden_size: int = 5120 - max_img_h: int = 240 - max_img_w: int = 240 - max_frames: int = 128 - num_attention_heads: int = 40 - apply_rope_fusion: bool = True - layernorm_zero_centered_gamma: bool = False - additional_timestamp_channels = None - vae_module: str = None - vae_path: str = None - loss_add_logvar: bool = True +class DiTLModelProvider(DiTModelProvider): + """DiT-L""" + + num_layers: int = 24 + hidden_size: int = 1024 + num_attention_heads: int = 16 @dataclass -class DiTLlama30BConfig(DiTModelProvider): - num_layers: int = 48 - hidden_size: int = 6144 - ffn_hidden_size: int = 16384 - num_attention_heads: int = 48 - num_query_groups: int = 8 - gated_linear_unit: int = True - bias_activation_fusion: int = True - activation_func: Callable = torch.nn.functional.silu - layernorm_epsilon: float = 1e-5 - max_frames: int = 128 - max_img_h: int = 240 - max_img_w: int = 240 - init_method_std: float = 0.01 - add_bias_linear: bool = False - seq_length: int = 256 - masked_softmax_fusion: bool = True - persist_layer_norm: bool = True - bias_dropout_fusion: bool = True +class DiTXLModelProvider(DiTModelProvider): + """DiT-XL""" + + num_layers: int = 28 + hidden_size: int = 1152 + num_attention_heads: int = 16 diff --git a/dfm/src/megatron/model/dit/dit_step.py b/dfm/src/megatron/model/dit/dit_step.py index aa829cf4..aaf3f9d0 100644 --- a/dfm/src/megatron/model/dit/dit_step.py +++ b/dfm/src/megatron/model/dit/dit_step.py @@ -50,7 +50,7 @@ def on_validation_start(self, state, batch, model): num_steps=model.config.val_generation_num_steps, is_negative_prompt=True if "neg_context_embeddings" in batch else False, ) - caption = batch["video_metadata"][0]["caption"] + caption = batch["video_metadata"][0]["caption"] if "caption" in batch["video_metadata"][0] else "no caption" latent = latent[0, None, : batch["seq_len_q"][0]] latent = rearrange( latent, @@ -157,7 +157,6 @@ def forward_step(self, state, batch, model, return_schedule_plan: bool = False): check_for_nan_in_loss = state.cfg.rerun_state_machine.check_for_nan_in_loss check_for_spiky_loss = state.cfg.rerun_state_machine.check_for_spiky_loss - # import pdb;pdb.set_trace() straggler_timer = state.straggler_timer with straggler_timer: if parallel_state.is_pipeline_last_stage(): diff --git a/dfm/src/megatron/model/dit/edm/edm_pipeline.py b/dfm/src/megatron/model/dit/edm/edm_pipeline.py index dc4a6aba..459522b3 100644 --- a/dfm/src/megatron/model/dit/edm/edm_pipeline.py +++ b/dfm/src/megatron/model/dit/edm/edm_pipeline.py @@ -179,21 +179,17 @@ def training_step( # import pdb; pdb.set_trace() # Get the input data to noise and denoise~(image, video) and the corresponding conditioner. 
self.net = model - x0_from_data_batch, x0, condition = self.get_data_and_condition(data_batch) + x0, condition = self.get_data_and_condition(data_batch) # Sample pertubation noise levels and N(0, 1) noises sigma, epsilon = self.draw_training_sigma_and_epsilon(x0.size(), condition) if parallel_state.is_pipeline_last_stage(): - output_batch, pred_mse, edm_loss = self.compute_loss_with_epsilon_and_sigma( - data_batch, x0_from_data_batch, x0, condition, epsilon, sigma - ) + output_batch, pred_mse, edm_loss = self.compute_loss_with_epsilon_and_sigma(x0, condition, epsilon, sigma) return output_batch, edm_loss else: - net_output = self.compute_loss_with_epsilon_and_sigma( - data_batch, x0_from_data_batch, x0, condition, epsilon, sigma - ) + net_output = self.compute_loss_with_epsilon_and_sigma(x0, condition, epsilon, sigma) return net_output def denoise(self, xt: torch.Tensor, sigma: torch.Tensor, condition: dict[str, torch.Tensor]): @@ -232,8 +228,6 @@ def denoise(self, xt: torch.Tensor, sigma: torch.Tensor, condition: dict[str, to def compute_loss_with_epsilon_and_sigma( self, - data_batch: dict[str, torch.Tensor], - x0_from_data_batch: torch.Tensor, x0: torch.Tensor, condition: dict[str, torch.Tensor], epsilon: torch.Tensor, @@ -294,14 +288,14 @@ def get_per_sigma_loss_weights(self, sigma: torch.Tensor): def get_condition_uncondition(self, data_batch: Dict): """Returns conditioning and unconditioning for classifier-free guidance.""" - _, _, condition = self.get_data_and_condition(data_batch, dropout_rate=0.0) + _, condition = self.get_data_and_condition(data_batch, dropout_rate=0.0) if "neg_context_embeddings" in data_batch: data_batch["context_embeddings"] = data_batch["neg_context_embeddings"] data_batch["context_mask"] = data_batch["context_mask"] - _, _, uncondition = self.get_data_and_condition(data_batch, dropout_rate=1.0) + _, uncondition = self.get_data_and_condition(data_batch, dropout_rate=1.0) else: - _, _, uncondition = self.get_data_and_condition(data_batch, dropout_rate=1.0) + _, uncondition = self.get_data_and_condition(data_batch, dropout_rate=1.0) return condition, uncondition @@ -419,13 +413,14 @@ def get_data_and_condition(self, data_batch: dict[str, Tensor], dropout_rate=0.2 Raw data, latent data, and conditioning information. 
""" # Latent state - raw_state = data_batch["video"] * self.sigma_data - # assume data is already encoded - latent_state = raw_state - - # Condition - data_batch["crossattn_emb"] = self.random_dropout_input( + latent_state = data_batch["video"] * self.sigma_data + condition = {} # Create a new dictionary for condition + # Copy all keys from data_batch except 'video' + for key, value in data_batch.items(): + if key not in ["video", "context_embeddings"]: + condition[key] = value + + condition["crossattn_emb"] = self.random_dropout_input( data_batch["context_embeddings"], dropout_rate=dropout_rate ) - - return raw_state, latent_state, data_batch + return latent_state, condition diff --git a/dfm/src/megatron/recipes/dit/dit.py b/dfm/src/megatron/recipes/dit/dit.py index 74c762cd..921b17dc 100644 --- a/dfm/src/megatron/recipes/dit/dit.py +++ b/dfm/src/megatron/recipes/dit/dit.py @@ -32,7 +32,7 @@ from dfm.src.megatron.data.common.diffusion_energon_datamodule import DiffusionDataModuleConfig from dfm.src.megatron.data.dit.dit_mock_datamodule import DiTMockDataModuleConfig -from dfm.src.megatron.model.dit.dit_model_provider import DiTModelProvider +from dfm.src.megatron.model.dit.dit_model_provider import DiTModelProvider, DiTXLModelProvider def model_config( @@ -57,7 +57,7 @@ def model_config( Returns: DiTModelProvider: Configuration for the DiT-S model. """ - return DiTModelProvider( + return DiTXLModelProvider( tensor_model_parallel_size=tensor_parallelism, pipeline_model_parallel_size=pipeline_parallelism, pipeline_dtype=pipeline_parallelism_dtype, @@ -168,10 +168,10 @@ def pretrain_config( task_encoder_seq_length=8000, packing_buffer_size=40, num_workers=10, - # mock arguments + number_packed_samples=1, F_latents=1, - H_latents=96, - W_latents=64, + H_latents=256, + W_latents=512, context_seq_len=512, context_embeddings_dim=1024, ) diff --git a/examples/megatron/recipes/dit/inference_dit_model.py b/examples/megatron/recipes/dit/inference_dit_model.py index 440d3240..a75dff42 100644 --- a/examples/megatron/recipes/dit/inference_dit_model.py +++ b/examples/megatron/recipes/dit/inference_dit_model.py @@ -19,6 +19,7 @@ import numpy as np import torch +import wandb from einops import rearrange from megatron.core import parallel_state as ps from megatron.core.tensor_parallel.random import model_parallel_cuda_manual_seed @@ -143,12 +144,7 @@ def get_pos_id_3d(self, *, t, h, w): return self.grid[:t, :h, :w] -def prepare_data_batch(args, t5_embeding_max_length=512): - tokenizer = T5TokenizerFast.from_pretrained("google-t5/t5-11b", cache_dir=args.t5_cache_dir) - text_encoder = T5EncoderModel.from_pretrained("google-t5/t5-11b", cache_dir=args.t5_cache_dir) - text_encoder.to("cuda") - text_encoder.eval() - +def prepare_data_batch(args, tokenizer, text_encoder, t5_embeding_max_length=512): print("[args.prompt]: ", args.prompt) # Encode text to T5 embedding out = encode_for_batch(tokenizer, text_encoder, [args.prompt]) @@ -254,8 +250,7 @@ def load_model_from_checkpoint(args): if isinstance(model, list): model = model[0] - model = model.cuda().to(torch.bfloat16) - model.eval() + model = model.cuda().to(torch.bfloat16).eval() print_rank_0(f"✅ Model loaded successfully from {checkpoint_path}") @@ -311,12 +306,25 @@ def set_seed(seed): set_seed(42) + rank = torch.distributed.get_rank() + if rank == 0: + gather_list = [None for _ in range(ps.get_data_parallel_world_size())] + wandb.init(project="dit-inference-video", name="inference_generation") + else: + gather_list = None + # Load model from checkpoint or 
initialize from scratch print_rank_0("Loading model from checkpoint...") model, diffusion_pipeline, model_config = load_model_from_checkpoint(args) + tokenizer = T5TokenizerFast.from_pretrained("google-t5/t5-11b", cache_dir=args.t5_cache_dir, dtype=torch.bfloat16) + text_encoder = T5EncoderModel.from_pretrained( + "google-t5/t5-11b", cache_dir=args.t5_cache_dir, dtype=torch.bfloat16 + ) + text_encoder.to("cuda").eval() + print_rank_0("preparing data batch...") - data_batch, state_shape = prepare_data_batch(args) + data_batch, state_shape = prepare_data_batch(args, tokenizer, text_encoder) vae = CausalVideoTokenizer.from_pretrained(args.tokenizer_model, cache_dir=args.tokenizer_cache_dir) vae.to("cuda").eval() @@ -356,6 +364,22 @@ def set_seed(seed): ) print_rank_0(f"saved video to rank={rank}_{args.video_save_path}") + torch.distributed.gather_object( + obj=(decoded_video[0], args.prompt), + object_gather_list=gather_list, + dst=0, + group=ps.get_data_parallel_group(), + ) + + if rank == 0 and wandb.run is not None: + videos = [] + for video, caption in gather_list: + video_data_transposed = video.transpose(0, 3, 1, 2) + videos.append(wandb.Video(video_data_transposed, fps=args.fps, format="mp4", caption=caption)) + wandb.log({"generated_videos": videos}) + wandb.finish() + print_rank_0("✅ All videos gathered and logged to wandb with captions") + if __name__ == "__main__": args = parse_args() diff --git a/pyproject.toml b/pyproject.toml index e2d9938d..e508957d 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -95,7 +95,11 @@ build = ["setuptools", "wheel", "torch", "pybind11", "Cython>=3.0.0", "numpy<2.0 automodel = [ "nemo-automodel", ] -megatron-bridge = ["megatron-bridge"] +megatron-bridge = [ + "mediapy>=1.2.4", + "megatron-bridge", + "wandb[media]>=0.23.0", +] torch-cu124 = [ "torch", "torchvision", diff --git a/tests/unit_tests/megatron/data/common/test_diffusion_data_module.py b/tests/unit_tests/megatron/data/common/test_diffusion_data_module.py new file mode 100644 index 00000000..3e65d790 --- /dev/null +++ b/tests/unit_tests/megatron/data/common/test_diffusion_data_module.py @@ -0,0 +1,73 @@ +# Copyright (c) 2024, NVIDIA CORPORATION. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
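+# Verifies DiffusionDataModuleConfig: default field values, and that __post_init__ builds a
+# DiffusionDataModule (mocked here) and copies its seq_length into sequence_length.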
+ +from unittest.mock import Mock, patch + +from dfm.src.megatron.data.common.diffusion_energon_datamodule import ( + DiffusionDataModuleConfig, +) + + +def test_diffusion_data_module_config_initialization(): + """Test DiffusionDataModuleConfig initialization and default values.""" + + # Mock the DiffusionDataModule to avoid actual dataset loading + with patch("dfm.src.megatron.data.common.diffusion_energon_datamodule.DiffusionDataModule") as mock_data_module: + # Setup the mock to return a mock dataset with seq_length attribute + mock_dataset_instance = Mock() + mock_dataset_instance.seq_length = 2048 + mock_data_module.return_value = mock_dataset_instance + + # Create a DiffusionDataModuleConfig with required parameters + config = DiffusionDataModuleConfig( + path="/path/to/dataset", + seq_length=2048, + micro_batch_size=4, + task_encoder_seq_length=512, + packing_buffer_size=100, + global_batch_size=32, + num_workers=8, + ) + + # Verify default values + assert config.dataloader_type == "external", "Expected default dataloader_type to be 'external'" + assert config.use_train_split_for_val is False, "Expected default use_train_split_for_val to be False" + + # Verify required parameters are set correctly + assert config.path == "/path/to/dataset" + assert config.seq_length == 2048 + assert config.micro_batch_size == 4 + assert config.task_encoder_seq_length == 512 + assert config.packing_buffer_size == 100 + assert config.global_batch_size == 32 + assert config.num_workers == 8 + + # Verify that DiffusionDataModule was created in __post_init__ + assert mock_data_module.called, "DiffusionDataModule should be instantiated in __post_init__" + + # Verify the dataset attribute was set + assert config.dataset == mock_dataset_instance + + # Verify sequence_length was set from the dataset + assert config.sequence_length == 2048, "Expected sequence_length to be set from dataset.seq_length" + + # Verify the DiffusionDataModule was created with correct parameters + call_kwargs = mock_data_module.call_args.kwargs + assert call_kwargs["path"] == "/path/to/dataset" + assert call_kwargs["seq_length"] == 2048 + assert call_kwargs["micro_batch_size"] == 4 + assert call_kwargs["packing_buffer_size"] == 100 + assert call_kwargs["global_batch_size"] == 32 + assert call_kwargs["num_workers"] == 8 + assert call_kwargs["use_train_split_for_val"] is False diff --git a/tests/unit_tests/megatron/data/common/test_diffusion_sample.py b/tests/unit_tests/megatron/data/common/test_diffusion_sample.py new file mode 100644 index 00000000..feb76ecc --- /dev/null +++ b/tests/unit_tests/megatron/data/common/test_diffusion_sample.py @@ -0,0 +1,136 @@ +# Copyright (c) 2024, NVIDIA CORPORATION. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
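+# Exercises DiffusionSample's arithmetic hooks (__add__, __radd__, __lt__), which the
+# sequence-packing code relies on when summing and sorting samples by sequence length.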
+ +import torch + +from dfm.src.megatron.data.common.diffusion_sample import DiffusionSample + + +def test_add(): + """Test __add__ method for DiffusionSample.""" + # Create two DiffusionSample instances with different seq_len_q + sample1 = DiffusionSample( + __key__="sample1", + __restore_key__=(), + __subflavor__=None, + __subflavors__=["default"], + video=torch.randn(3, 8, 16, 16), + context_embeddings=torch.randn(10, 512), + seq_len_q=torch.tensor(100), + ) + sample2 = DiffusionSample( + __key__="sample2", + __restore_key__=(), + __subflavor__=None, + __subflavors__=["default"], + video=torch.randn(3, 8, 16, 16), + context_embeddings=torch.randn(10, 512), + seq_len_q=torch.tensor(200), + ) + + # Test adding two DiffusionSample instances + result = sample1 + sample2 + assert result == 300, f"Expected 300, got {result}" + + # Test adding DiffusionSample with an integer + result = sample1 + 50 + assert result == 150, f"Expected 150, got {result}" + + +def test_radd(): + """Test __radd__ method for DiffusionSample.""" + # Create a DiffusionSample instance + sample = DiffusionSample( + __key__="sample", + __restore_key__=(), + __subflavor__=None, + __subflavors__=["default"], + video=torch.randn(3, 8, 16, 16), + context_embeddings=torch.randn(10, 512), + seq_len_q=torch.tensor(100), + ) + + # Test reverse addition with an integer + result = 50 + sample + assert result == 150, f"Expected 150, got {result}" + + # Test sum() function which uses __radd__ (starting with 0) + samples = [ + DiffusionSample( + __key__="sample1", + __restore_key__=(), + __subflavor__=None, + __subflavors__=["default"], + video=torch.randn(3, 8, 16, 16), + context_embeddings=torch.randn(10, 512), + seq_len_q=torch.tensor(10), + ), + DiffusionSample( + __key__="sample2", + __restore_key__=(), + __subflavor__=None, + __subflavors__=["default"], + video=torch.randn(3, 8, 16, 16), + context_embeddings=torch.randn(10, 512), + seq_len_q=torch.tensor(20), + ), + DiffusionSample( + __key__="sample3", + __restore_key__=(), + __subflavor__=None, + __subflavors__=["default"], + video=torch.randn(3, 8, 16, 16), + context_embeddings=torch.randn(10, 512), + seq_len_q=torch.tensor(30), + ), + ] + result = sum(samples) + assert result == 60, f"Expected 60, got {result}" + + +def test_lt(): + """Test __lt__ method for DiffusionSample.""" + # Create two DiffusionSample instances with different seq_len_q + sample1 = DiffusionSample( + __key__="sample1", + __restore_key__=(), + __subflavor__=None, + __subflavors__=["default"], + video=torch.randn(3, 8, 16, 16), + context_embeddings=torch.randn(10, 512), + seq_len_q=torch.tensor(100), + ) + sample2 = DiffusionSample( + __key__="sample2", + __restore_key__=(), + __subflavor__=None, + __subflavors__=["default"], + video=torch.randn(3, 8, 16, 16), + context_embeddings=torch.randn(10, 512), + seq_len_q=torch.tensor(200), + ) + + # Test comparing two DiffusionSample instances + assert sample1 < sample2, "Expected sample1 < sample2" + assert not (sample2 < sample1), "Expected not (sample2 < sample1)" + + # Test comparing DiffusionSample with an integer + assert sample1 < 150, "Expected sample1 < 150" + assert not (sample1 < 50), "Expected not (sample1 < 50)" + + # Test sorting a list of DiffusionSample instances + samples = [sample2, sample1] + sorted_samples = sorted(samples) + assert sorted_samples[0].seq_len_q.item() == 100, "Expected first element to have seq_len_q=100" + assert sorted_samples[1].seq_len_q.item() == 200, "Expected second element to have seq_len_q=200" diff --git 
a/tests/unit_tests/megatron/data/common/test_diffusion_task_encoder.py b/tests/unit_tests/megatron/data/common/test_diffusion_task_encoder.py new file mode 100644 index 00000000..026e1874 --- /dev/null +++ b/tests/unit_tests/megatron/data/common/test_diffusion_task_encoder.py @@ -0,0 +1,182 @@ +# Copyright (c) 2025, NVIDIA CORPORATION. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from typing import List + +import torch + +from dfm.src.megatron.data.common.diffusion_sample import DiffusionSample +from dfm.src.megatron.data.common.diffusion_task_encoder_with_sp import DiffusionTaskEncoderWithSequencePacking + + +class ConcreteDiffusionTaskEncoder(DiffusionTaskEncoderWithSequencePacking): + """Concrete implementation for testing.""" + + def encode_sample(self, sample: dict) -> dict: + """Simple implementation for testing purposes.""" + return sample + + def batch(self, samples: List[DiffusionSample]) -> dict: + """Simple batch implementation that returns first sample as dict.""" + if len(samples) == 1: + sample = samples[0] + return dict( + video=sample.video.unsqueeze(0), + context_embeddings=sample.context_embeddings.unsqueeze(0), + context_mask=sample.context_mask.unsqueeze(0) if sample.context_mask is not None else None, + loss_mask=sample.loss_mask.unsqueeze(0) if sample.loss_mask is not None else None, + seq_len_q=sample.seq_len_q, + seq_len_q_padded=sample.seq_len_q_padded, + seq_len_kv=sample.seq_len_kv, + seq_len_kv_padded=sample.seq_len_kv_padded, + pos_ids=sample.pos_ids.unsqueeze(0) if sample.pos_ids is not None else None, + latent_shape=sample.latent_shape, + video_metadata=sample.video_metadata, + ) + else: + # For multiple samples, just return a simple dict + return {"samples": samples} + + +def create_diffusion_sample(key: str, seq_len: int, video_shape=(16, 8), embedding_dim=128) -> DiffusionSample: + """Helper function to create a DiffusionSample for testing.""" + return DiffusionSample( + __key__=key, + __restore_key__=(), + __subflavor__=None, + __subflavors__=["default"], + video=torch.randn(seq_len, video_shape[0]), + context_embeddings=torch.randn(10, embedding_dim), + context_mask=torch.ones(10), + loss_mask=torch.ones(seq_len), + seq_len_q=torch.tensor([seq_len], dtype=torch.int32), + seq_len_q_padded=torch.tensor([seq_len], dtype=torch.int32), + seq_len_kv=torch.tensor([10], dtype=torch.int32), + seq_len_kv_padded=torch.tensor([10], dtype=torch.int32), + pos_ids=torch.arange(seq_len).unsqueeze(1), + latent_shape=torch.tensor([4, 2, 4, 4], dtype=torch.int32), + video_metadata={"fps": 30, "resolution": "512x512"}, + ) + + +def test_select_samples_to_pack(): + """Test select_samples_to_pack method.""" + # Create encoder with seq_length=20 + encoder = ConcreteDiffusionTaskEncoder(seq_length=20) + + # Create samples with different sequence lengths + samples = [ + create_diffusion_sample("sample_1", seq_len=8), + create_diffusion_sample("sample_2", seq_len=12), + create_diffusion_sample("sample_3", seq_len=5), + create_diffusion_sample("sample_4", seq_len=7), + 
create_diffusion_sample("sample_5", seq_len=3), + ] + + # Call select_samples_to_pack + result = encoder.select_samples_to_pack(samples) + + # Verify result is a list of lists + assert isinstance(result, list), "Result should be a list" + assert all(isinstance(group, list) for group in result), "All elements should be lists" + + # Verify all samples are included + all_samples = [sample for group in result for sample in group] + assert len(all_samples) == len(samples), "All samples should be included" + + # Verify no bin exceeds seq_length + for group in result: + total_seq_len = sum(sample.seq_len_q.item() for sample in group) + assert total_seq_len <= encoder.seq_length, ( + f"Bin with total {total_seq_len} exceeds seq_length {encoder.seq_length}" + ) + + # Verify that bins are non-empty + assert all(len(group) > 0 for group in result), "No bin should be empty" + + print(f"✓ Successfully packed {len(samples)} samples into {len(result)} bins") + print(f" Bin sizes: {[sum(s.seq_len_q.item() for s in group) for group in result]}") + + +def test_pack_selected_samples(): + """Test pack_selected_samples method.""" + encoder = ConcreteDiffusionTaskEncoder(seq_length=100) + + # Create multiple samples to pack + sample_1_length = 10 + sample_2_length = 15 + sample_3_length = 8 + sample_1 = create_diffusion_sample("sample_1", seq_len=sample_1_length) + sample_2 = create_diffusion_sample("sample_2", seq_len=sample_2_length) + sample_3 = create_diffusion_sample("sample_3", seq_len=sample_3_length) + + samples_to_pack = [sample_1, sample_2, sample_3] + + # Pack the samples + packed_sample = encoder.pack_selected_samples(samples_to_pack) + + # Verify the packed sample is a DiffusionSample + assert isinstance(packed_sample, DiffusionSample), "Result should be a DiffusionSample" + + # Verify __key__ is concatenated + expected_key = "sample_1,sample_2,sample_3" + assert packed_sample.__key__ == expected_key, f"Key should be '{expected_key}'" + + # Verify video is concatenated along dim 0 + expected_video_len = 10 + 15 + 8 + assert packed_sample.video.shape[0] == expected_video_len, f"Video should have length {expected_video_len}" + + # Verify context_embeddings is concatenated + expected_context_len = 10 * 3 # 3 samples with 10 embeddings each + assert packed_sample.context_embeddings.shape[0] == expected_context_len, ( + f"Context embeddings should have length {expected_context_len}" + ) + + # Verify context_mask is concatenated + assert packed_sample.context_mask.shape[0] == expected_context_len, ( + f"Context mask should have length {expected_context_len}" + ) + + # Verify loss_mask is concatenated + assert packed_sample.loss_mask.shape[0] == expected_video_len, f"Loss mask should have length {expected_video_len}" + + # Verify seq_len_q is concatenated + assert packed_sample.seq_len_q.shape[0] == 3, "seq_len_q should have 3 elements" + assert torch.equal( + packed_sample.seq_len_q, torch.tensor([sample_1_length, sample_2_length, sample_3_length], dtype=torch.int32) + ), "seq_len_q values incorrect" + + assert packed_sample.seq_len_q_padded.shape[0] == 3, "seq_len_q_padded should have 3 elements" + assert torch.equal( + packed_sample.seq_len_q_padded, + torch.tensor([sample_1_length, sample_2_length, sample_3_length], dtype=torch.int32), + ), "seq_len_q_padded values incorrect" + + assert packed_sample.seq_len_kv.shape[0] == 3, "seq_len_kv should have 3 elements" + assert torch.equal(packed_sample.seq_len_kv, torch.tensor([10, 10, 10], dtype=torch.int32)), ( + "seq_len_kv values incorrect" + ) + + 
assert packed_sample.seq_len_kv_padded.shape[0] == 3, "seq_len_kv_padded should have 3 elements" + assert torch.equal(packed_sample.seq_len_kv_padded, torch.tensor([10, 10, 10], dtype=torch.int32)), ( + "seq_len_kv_padded values incorrect" + ) + + assert packed_sample.latent_shape.shape[0] == 3, "latent_shape should have 3 rows" + assert isinstance(packed_sample.video_metadata, list), "video_metadata should be a list" + assert len(packed_sample.video_metadata) == 3, "video_metadata should have 3 elements" + + print(f"✓ Successfully packed {len(samples_to_pack)} samples") + print(f" Packed video shape: {packed_sample.video.shape}") + print(f" Packed context embeddings shape: {packed_sample.context_embeddings.shape}") diff --git a/tests/unit_tests/megatron/data/common/test_sequence_packing_utils.py b/tests/unit_tests/megatron/data/common/test_sequence_packing_utils.py new file mode 100644 index 00000000..6370469d --- /dev/null +++ b/tests/unit_tests/megatron/data/common/test_sequence_packing_utils.py @@ -0,0 +1,95 @@ +# Copyright (c) 2024, NVIDIA CORPORATION. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from dfm.src.megatron.data.common.sequence_packing_utils import ( + find_first_bin_that_fits, + first_fit, + first_fit_decreasing, +) + + +def test_find_first_bin_that_fits(): + """Test find_first_bin_that_fits function.""" + # Test case: Find a bin that fits + bins = [[5, 3], [10], [2, 2, 2]] + s = 2 + bin_size = 10 + result = find_first_bin_that_fits(bins, s, bin_size) + assert result == 0, "Should return index 0 as first bin (5+3+2=10) fits" + + # Test case: No bin fits + bins = [[8, 2], [9, 1], [10]] + s = 5 + bin_size = 10 + result = find_first_bin_that_fits(bins, s, bin_size) + assert result == -1, "Should return -1 as no bin can accommodate size 5" + + # Test case: Empty bins list + bins = [] + s = 5 + bin_size = 10 + result = find_first_bin_that_fits(bins, s, bin_size) + assert result == -1, "Should return -1 for empty bins list" + + # Test case: First bin doesn't fit, but second does + bins = [[9], [5], [3]] + s = 4 + bin_size = 10 + result = find_first_bin_that_fits(bins, s, bin_size) + assert result == 1, "Should return index 1 as second bin (5+4=9) fits" + + +def test_first_fit(): + """Test first_fit bin packing algorithm.""" + # Test case: Simple packing scenario + seqlens = [5, 3, 2, 7, 4] + pack_size = 10 + result = first_fit(seqlens, pack_size) + + # Verify all sequences are packed + all_items = [item for bin in result for item in bin] + assert sum(all_items) == sum(seqlens), "Sum of all packed items should equal sum of input" + + # Verify no bin exceeds pack_size + for bin in result: + assert sum(bin) <= pack_size, f"Bin {bin} exceeds pack_size {pack_size}" + + # Verify expected packing: [5, 3, 2], [7], [4] (first-fit order) + assert len(result) == 3, "Should create 3 bins" + assert result[0] == [5, 3, 2], "First bin should contain [5, 3, 2]" + assert result[1] == [7], "Second bin should contain [7]" + assert result[2] == [4], "Third bin should contain 
[4]" + + +def test_first_fit_decreasing(): + """Test first_fit_decreasing bin packing algorithm.""" + # Test case: Same sequences as first_fit but sorted in decreasing order + seqlens = [5, 3, 2, 7, 4] + pack_size = 10 + result = first_fit_decreasing(seqlens, pack_size) + + # Verify all sequences are packed + all_items = [item for bin in result for item in bin] + assert sum(all_items) == sum(seqlens), "Sum of all packed items should equal sum of input" + + # Verify no bin exceeds pack_size + for bin in result: + assert sum(bin) <= pack_size, f"Bin {bin} exceeds pack_size {pack_size}" + + # Verify expected packing: sorted [7, 5, 4, 3, 2] -> [7, 3], [5, 4, 2] (more efficient) + assert len(result) <= 3, "Should create at most 3 bins" + # First-fit-decreasing should pack: [7, 3], [5, 4], [2] + assert result[0] == [7, 3], "First bin should contain [7, 3]" + assert result[1] == [5, 4], "Second bin should contain [5, 4]" + assert result[2] == [2], "Third bin should contain [2]" diff --git a/tests/unit_tests/megatron/data/dit/test_dit_mock_data.py b/tests/unit_tests/megatron/data/dit/test_dit_mock_data.py new file mode 100644 index 00000000..cb6eea2f --- /dev/null +++ b/tests/unit_tests/megatron/data/dit/test_dit_mock_data.py @@ -0,0 +1,110 @@ +# Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import torch + +from dfm.src.megatron.data.dit.dit_mock_datamodule import mock_batch + + +def test_mock_batch(): + """Unit test for mock_batch function.""" + # Test parameters + F_latents = 2 + H_latents = 32 + W_latents = 48 + patch_temporal = 1 + patch_spatial = 2 + number_packed_samples = 3 + context_seq_len = 77 + context_embeddings_dim = 768 + + # Generate mock batch + batch = mock_batch( + F_latents=F_latents, + H_latents=H_latents, + W_latents=W_latents, + patch_temporal=patch_temporal, + patch_spatial=patch_spatial, + number_packed_samples=number_packed_samples, + context_seq_len=context_seq_len, + context_embeddings_dim=context_embeddings_dim, + ) + + # Calculate expected dimensions + C = 16 # channels (hardcoded in mock_batch) + T = F_latents + H = H_latents + W = W_latents + seq_len_q = (T // patch_temporal) * (H // patch_spatial) * (W // patch_spatial) + total_seq_len_q = seq_len_q * number_packed_samples + total_seq_len_kv = context_seq_len * number_packed_samples + + # Verify batch structure and shapes + assert "video" in batch + assert "context_embeddings" in batch + assert "context_mask" in batch + assert "loss_mask" in batch + assert "seq_len_q" in batch + assert "seq_len_q_padded" in batch + assert "seq_len_kv" in batch + assert "seq_len_kv_padded" in batch + assert "latent_shape" in batch + assert "pos_ids" in batch + assert "video_metadata" in batch + + # Check video shape: [1, total_seq_len_q, patch_features] + patch_features = patch_spatial * patch_spatial * patch_temporal * C + assert batch["video"].shape == (1, total_seq_len_q, patch_features) + assert batch["video"].dtype == torch.bfloat16 + + # Check context embeddings shape: [1, total_seq_len_kv, context_embeddings_dim] + assert batch["context_embeddings"].shape == (1, total_seq_len_kv, context_embeddings_dim) + assert batch["context_embeddings"].dtype == torch.bfloat16 + + # Check context mask shape: [1, total_seq_len_kv] + assert batch["context_mask"].shape == (1, total_seq_len_kv) + assert batch["context_mask"].dtype == torch.bfloat16 + + # Check loss mask shape: [1, total_seq_len_q] + assert batch["loss_mask"].shape == (1, total_seq_len_q) + assert batch["loss_mask"].dtype == torch.bfloat16 + + # Check sequence length tensors + assert batch["seq_len_q"].shape == (number_packed_samples,) + assert batch["seq_len_q_padded"].shape == (number_packed_samples,) + assert batch["seq_len_kv"].shape == (number_packed_samples,) + assert batch["seq_len_kv_padded"].shape == (number_packed_samples,) + + # Check all seq_len_q values are correct + assert torch.all(batch["seq_len_q"] == seq_len_q) + assert torch.all(batch["seq_len_q_padded"] == seq_len_q) + assert torch.all(batch["seq_len_kv"] == context_seq_len) + assert torch.all(batch["seq_len_kv_padded"] == context_seq_len) + + # Check latent shape tensor + assert batch["latent_shape"].shape == (number_packed_samples, 4) + expected_latent_shape = torch.tensor([C, T, H, W], dtype=torch.int32) + for i in range(number_packed_samples): + assert torch.all(batch["latent_shape"][i] == expected_latent_shape) + + # Check pos_ids shape + assert batch["pos_ids"].shape == (1, total_seq_len_q, 3) # 3D position encoding + + # Check video metadata + assert len(batch["video_metadata"]) == number_packed_samples + for i, metadata in enumerate(batch["video_metadata"]): + assert "caption" in metadata + assert metadata["caption"] == f"Mock video sample {i}" + + print("All tests passed!") diff --git a/tests/unit_tests/megatron/data/dit/test_dit_task_encoder.py 
b/tests/unit_tests/megatron/data/dit/test_dit_task_encoder.py new file mode 100644 index 00000000..d2833433 --- /dev/null +++ b/tests/unit_tests/megatron/data/dit/test_dit_task_encoder.py @@ -0,0 +1,197 @@ +# Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import torch + +from dfm.src.megatron.data.dit.dit_taskencoder import DiTTaskEncoder, PosID3D + + +class TestDiTTaskEncoder: + """Test class for DiTTaskEncoder.""" + + def test_encode_sample(self, monkeypatch): + """Test the encode_sample method with valid input.""" + # Mock parallel_state functions + from unittest.mock import MagicMock + + from megatron.core import parallel_state + from megatron.energon import WorkerConfig + + monkeypatch.setattr(parallel_state, "get_tensor_model_parallel_world_size", lambda: 1) + monkeypatch.setattr(parallel_state, "get_context_parallel_world_size", lambda: 1) + + # Mock WorkerConfig for @stateless decorator + mock_worker_config = MagicMock() + mock_worker_config.worker_seed.return_value = 42 + monkeypatch.setattr(WorkerConfig, "active_worker_config", mock_worker_config) + + patch_spatial = 2 + patch_temporal = 2 + task_encoder = DiTTaskEncoder( + seq_length=256, + patch_spatial=patch_spatial, + patch_temporal=patch_temporal, + text_embedding_max_length=384, + ) + + C, T, H, W = 16, 8, 8, 10 + video_latent = torch.randn(1, C, T, H, W, dtype=torch.bfloat16) + text_embedding_seq_len = 178 + text_hidden_dim = 512 + text_embeddings = torch.randn(text_embedding_seq_len, text_hidden_dim, dtype=torch.bfloat16) + + sample = { + "pth": video_latent, + "json": {"resolution": [H, W], "fps": 24, "duration": 1.0}, + "pickle": text_embeddings, + "__key__": "test_sample_001", + "__restore_key__": ("test_sample_001",), + "__subflavors__": ["video"], + } + + # Call encode_sample + result = task_encoder.encode_sample(sample) + + # Verify the output structure + assert hasattr(result, "video"), "Result should have 'video' attribute" + assert hasattr(result, "context_embeddings"), "Result should have 'context_embeddings' attribute" + assert hasattr(result, "context_mask"), "Result should have 'context_mask' attribute" + assert hasattr(result, "loss_mask"), "Result should have 'loss_mask' attribute" + assert hasattr(result, "seq_len_q"), "Result should have 'seq_len_q' attribute" + assert hasattr(result, "seq_len_q_padded"), "Result should have 'seq_len_q_padded' attribute" + assert hasattr(result, "seq_len_kv"), "Result should have 'seq_len_kv' attribute" + assert hasattr(result, "seq_len_kv_padded"), "Result should have 'seq_len_kv_padded' attribute" + assert hasattr(result, "pos_ids"), "Result should have 'pos_ids' attribute" + assert hasattr(result, "latent_shape"), "Result should have 'latent_shape' attribute" + + expected_seq_len = (T // patch_temporal) * (H // patch_spatial) * (W // patch_spatial) + expected_seq_len_q_padded = 64 * 2 + expected_video_features = patch_spatial * patch_spatial * patch_temporal * C + expected_seq_len_kv_padded = 64 * 3 + + 
assert result.video.shape[0] == expected_seq_len_q_padded, ( + f"Expected video seq_len {expected_seq_len_q_padded}, got {result.video.shape[0]}" + ) + assert result.video.shape[1] == expected_video_features, ( + f"Expected video feature dim {expected_video_features}, got {result.video.shape[1]}" + ) + + assert result.context_embeddings.shape[0] == expected_seq_len_kv_padded, ( + f"Expected context_embeddings seq_len {expected_seq_len_kv_padded}, got {result.context_embeddings.shape[0]}" + ) + + # Verify dtypes + assert result.video.dtype == torch.bfloat16 + assert result.context_embeddings.dtype == torch.bfloat16 + assert result.context_mask.dtype == torch.bfloat16 + assert result.loss_mask.dtype == torch.bfloat16 + assert result.seq_len_q.dtype == torch.int32 + assert result.seq_len_q_padded.dtype == torch.int32 + assert result.seq_len_kv.dtype == torch.int32 + assert result.seq_len_kv_padded.dtype == torch.int32 + assert result.latent_shape.dtype == torch.int32 + assert result.pos_ids.dtype == torch.int64 # TODO: should it be changed to int32? + + # Verify no NaN or Inf values + assert not torch.isnan(result.video).any(), "Video output contains NaN values" + assert not torch.isinf(result.video).any(), "Video output contains Inf values" + + assert result.seq_len_q.item() == expected_seq_len, ( + f"Expected seq_len_q {expected_seq_len}, got {result.seq_len_q.item()}" + ) + print( + f"result.seq_len_q_padded.item() = {result.seq_len_q_padded.item()}, expected_seq_len_q_padded = {expected_seq_len_q_padded}" + ) + assert result.seq_len_q_padded.item() == expected_seq_len_q_padded, ( + f"Expected seq_len_q_padded {expected_seq_len_q_padded}, got {result.seq_len_q_padded.item()}" + ) + assert result.seq_len_kv.item() == text_embedding_seq_len, ( + f"Expected seq_len_kv {text_embedding_seq_len}, got {result.seq_len_kv.item()}" + ) + assert result.seq_len_kv_padded.item() == expected_seq_len_kv_padded, ( + f"Expected seq_len_kv_padded {expected_seq_len_kv_padded}, got {result.seq_len_kv_padded.item()}" + ) + + # # Verify latent_shape + assert torch.equal(result.latent_shape, torch.tensor([C, T, H, W], dtype=torch.int32)), ( + "latent_shape does not match original video shape" + ) + + # # Verify pos_ids shape + assert result.pos_ids.shape[0] == expected_seq_len_q_padded, ( + f"Expected pos_ids seq_len {expected_seq_len_q_padded}, got {result.pos_ids.shape[0]}" + ) + assert result.pos_ids.shape[1] == 3, ( + f"Expected pos_ids to have 3 dimensions (T, H, W), got {result.pos_ids.shape[1]}" + ) + + # # Verify metadata + assert result.__key__ == "test_sample_001", "Key mismatch" + assert result.video_metadata == sample["json"], "Metadata mismatch" + + print("encode_sample test passed successfully with output shapes:") + print(f" video: {result.video.shape}") + print(f" context_embeddings: {result.context_embeddings.shape}") + print(f" pos_ids: {result.pos_ids.shape}") + + +class TestPosID3D: + """Test class for PosID3D.""" + + def test_get_pos_id_3d_values(self): + """Test that get_pos_id_3d returns correct position values.""" + pos_id = PosID3D(max_t=8, max_h=16, max_w=16) + + t, h, w = 3, 4, 5 + result = pos_id.get_pos_id_3d(t=t, h=h, w=w) + + # Check shape + assert result.shape == (t, h, w, 3), f"Expected shape ({t}, {h}, {w}, 3), got {result.shape}" + + # Check dtype + assert result.dtype == torch.int64 or result.dtype == torch.long, ( + f"Expected dtype torch.int64 or torch.long, got {result.dtype}" + ) + + # Check that values are correct for specific positions + # First position (0, 0, 0) should 
be [0, 0, 0] + assert torch.equal(result[0, 0, 0], torch.tensor([0, 0, 0])), ( + f"Position [0, 0, 0] should be [0, 0, 0], got {result[0, 0, 0]}" + ) + + # Position (1, 2, 3) should be [1, 2, 3] + assert torch.equal(result[1, 2, 3], torch.tensor([1, 2, 3])), ( + f"Position [1, 2, 3] should be [1, 2, 3], got {result[1, 2, 3]}" + ) + + # Last position (t-1, h-1, w-1) should be [t-1, h-1, w-1] + assert torch.equal(result[t - 1, h - 1, w - 1], torch.tensor([t - 1, h - 1, w - 1])), ( + f"Position [{t - 1}, {h - 1}, {w - 1}] should be [{t - 1}, {h - 1}, {w - 1}], got {result[t - 1, h - 1, w - 1]}" + ) + + # Verify all temporal positions in first spatial location + for i in range(t): + assert result[i, 0, 0, 0] == i, ( + f"Temporal position at [{i}, 0, 0, 0] should be {i}, got {result[i, 0, 0, 0]}" + ) + + # Verify all height positions in first t and w location + for i in range(h): + assert result[0, i, 0, 1] == i, ( + f"Height position at [0, {i}, 0, 1] should be {i}, got {result[0, i, 0, 1]}" + ) + + # Verify all width positions in first t and h location + for i in range(w): + assert result[0, 0, i, 2] == i, f"Width position at [0, 0, {i}, 2] should be {i}, got {result[0, 0, i, 2]}" diff --git a/tests/unit_tests/megatron/model/dit/__init__.py b/tests/unit_tests/megatron/model/dit/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/tests/unit_tests/megatron/model/dit/edm/__init__.py b/tests/unit_tests/megatron/model/dit/edm/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/tests/unit_tests/megatron/model/dit/edm/test_edm_pipeline.py b/tests/unit_tests/megatron/model/dit/edm/test_edm_pipeline.py new file mode 100644 index 00000000..024cb1bc --- /dev/null +++ b/tests/unit_tests/megatron/model/dit/edm/test_edm_pipeline.py @@ -0,0 +1,400 @@ +# Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
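+# Runs EDMPipeline.denoise and compute_loss_with_epsilon_and_sigma against a dummy network that
+# returns zeros, so the expected outputs reduce to closed-form expressions of the EDM scaling factors.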
+ +import pytest +import torch + +from dfm.src.common.utils.batch_ops import batch_mul +from dfm.src.megatron.model.dit.edm.edm_pipeline import EDMPipeline + + +class _DummyModel: + """Dummy model for testing that mimics the DiT network interface.""" + + def __call__(self, x, timesteps, **condition): + # Return zeros matching input shape + return torch.zeros_like(x) + + +@pytest.mark.skipif(not torch.cuda.is_available(), reason="Requires CUDA") +class TestEDMPipeline: + """Test class for EDMPipeline with shared setup.""" + + def setup_method(self, method, monkeypatch=None): + """Set up test fixtures before each test method.""" + # Stub parallel_state functions to avoid requiring initialization + from megatron.core import parallel_state + + if monkeypatch: + monkeypatch.setattr( + parallel_state, "get_data_parallel_rank", lambda with_context_parallel=False: 0, raising=False + ) + monkeypatch.setattr(parallel_state, "get_context_parallel_world_size", lambda: 1, raising=False) + monkeypatch.setattr(parallel_state, "is_pipeline_last_stage", lambda: True, raising=False) + monkeypatch.setattr(parallel_state, "get_context_parallel_group", lambda: None, raising=False) + + # Create pipeline with common parameters + self.sigma_data = 0.5 + self.pipeline = EDMPipeline( + vae=None, + p_mean=0.0, + p_std=1.0, + sigma_max=80.0, + sigma_min=0.0002, + sigma_data=self.sigma_data, + seed=1234, + ) + + # Create and assign dummy model + self.model = _DummyModel() + self.pipeline.net = self.model + + # Create common test data shapes + self.batch_size = 2 + self.channels = 4 + self.height = self.width = 8 + + # Create common test tensors + self.x0 = torch.randn(self.batch_size, self.channels, self.height, self.width).to( + **self.pipeline.tensor_kwargs + ) + self.sigma = torch.ones(self.batch_size).to(**self.pipeline.tensor_kwargs) * 1.0 + self.condition = {"crossattn_emb": torch.randn(self.batch_size, 10, 512).to(**self.pipeline.tensor_kwargs)} + self.epsilon = torch.randn(self.batch_size, self.channels, self.height, self.width).to( + **self.pipeline.tensor_kwargs + ) + + def test_denoise(self, monkeypatch): + """Test the denoise method produces correct output shapes and values.""" + # Initialize with monkeypatch + self.setup_method(None, monkeypatch) + + # Create test inputs (xt on CPU for conversion test) + xt = torch.randn(self.batch_size, self.channels, self.height, self.width) + sigma = torch.ones(self.batch_size) * 1.0 + + # Test Case 1: is_pipeline_last_stage = True + # Call denoise + x0_pred, eps_pred = self.pipeline.denoise(xt, sigma, self.condition) + + # Verify outputs have correct shapes + assert x0_pred.shape == xt.shape, f"Expected x0_pred shape {xt.shape}, got {x0_pred.shape}" + assert eps_pred.shape == xt.shape, f"Expected eps_pred shape {xt.shape}, got {eps_pred.shape}" + + # Verify outputs are on CUDA with correct dtype + assert x0_pred.device.type == "cuda" + assert x0_pred.dtype == torch.bfloat16 + assert eps_pred.device.type == "cuda" + assert eps_pred.dtype == torch.bfloat16 + + # Verify the outputs follow the expected formulas + # Convert inputs to expected dtype/device for comparison + xt_converted = xt.to(**self.pipeline.tensor_kwargs) + sigma_converted = sigma.to(**self.pipeline.tensor_kwargs) + + # Get scaling factors + c_skip, c_out, c_in, c_noise = self.pipeline.scaling(sigma=sigma_converted) + + # Since model returns zeros, net_output = 0 + # Expected: x0_pred = c_skip * xt + c_out * 0 = c_skip * xt + expected_x0_pred = batch_mul(c_skip, xt_converted) + assert 
torch.allclose(x0_pred, expected_x0_pred, rtol=1e-3, atol=1e-5), "x0_pred doesn't match expected value" + + # Expected: eps_pred = (xt - x0_pred) / sigma + expected_eps_pred = batch_mul(xt_converted - x0_pred, 1.0 / sigma_converted) + assert torch.allclose(eps_pred, expected_eps_pred, rtol=1e-3, atol=1e-5), ( + "eps_pred doesn't match expected value" + ) + + # Test Case 2: is_pipeline_last_stage = False + # Mock is_pipeline_last_stage to return False + from megatron.core import parallel_state + + monkeypatch.setattr(parallel_state, "is_pipeline_last_stage", lambda: False) + + # Call denoise again + net_output = self.pipeline.denoise(xt, sigma, self.condition) + + # Verify output is a single tensor (not a tuple) + assert isinstance(net_output, torch.Tensor), "Expected net_output to be a single tensor when not last stage" + assert not isinstance(net_output, tuple), "Expected net_output to not be a tuple when not last stage" + + # Verify output has correct shape (same as model output) + assert net_output.shape == xt.shape, f"Expected net_output shape {xt.shape}, got {net_output.shape}" + + # Verify output is on CUDA with correct dtype + assert net_output.device.type == "cuda" + assert net_output.dtype == torch.bfloat16 + + # Since model returns zeros, net_output should be zeros + assert torch.allclose(net_output, torch.zeros_like(xt_converted), rtol=1e-3, atol=1e-5), ( + "net_output doesn't match expected value (zeros from dummy model)" + ) + + def test_compute_loss_with_epsilon_and_sigma(self, monkeypatch): + """Test the compute_loss_with_epsilon_and_sigma method produces correct output shapes and values.""" + # Initialize with monkeypatch + self.setup_method(None, monkeypatch) + + # Create test inputs + data_batch = {"video": self.x0} + x0_from_data_batch = self.x0 + + # Call compute_loss_with_epsilon_and_sigma + output_batch, pred_mse, edm_loss = self.pipeline.compute_loss_with_epsilon_and_sigma( + self.x0, self.condition, self.epsilon, self.sigma + ) + + # Verify output_batch contains expected keys + assert "x0" in output_batch + assert "xt" in output_batch + assert "sigma" in output_batch + assert "weights_per_sigma" in output_batch + assert "condition" in output_batch + assert "model_pred" in output_batch + assert "mse_loss" in output_batch + assert "edm_loss" in output_batch + + # Verify shapes + assert output_batch["x0"].shape == self.x0.shape + assert output_batch["xt"].shape == self.x0.shape + assert output_batch["sigma"].shape == self.sigma.shape + assert output_batch["weights_per_sigma"].shape == self.sigma.shape + assert pred_mse.shape == self.x0.shape + assert edm_loss.shape == self.x0.shape + + # Verify the loss computation follows expected formulas + # 1. Compute expected xt from marginal probability + mean, std = self.pipeline.sde.marginal_prob(self.x0, self.sigma) + expected_xt = mean + batch_mul(std, self.epsilon) + assert torch.allclose(output_batch["xt"], expected_xt, rtol=1e-3, atol=1e-5), "xt doesn't match expected value" + + # 2. Verify loss weights + expected_weights = (self.sigma**2 + self.sigma_data**2) / (self.sigma * self.sigma_data) ** 2 + assert torch.allclose(output_batch["weights_per_sigma"], expected_weights, rtol=1e-3, atol=1e-5), ( + "weights_per_sigma doesn't match expected value" + ) + + # 3. 
Verify edm_loss = weights * (x0 - x0_pred)^2 + x0_pred = output_batch["model_pred"]["x0_pred"] + expected_pred_mse = (self.x0 - x0_pred) ** 2 + assert torch.allclose(pred_mse, expected_pred_mse, rtol=1e-3, atol=1e-5), ( + "pred_mse doesn't match expected value" + ) + + expected_edm_loss = batch_mul(expected_pred_mse, expected_weights) + assert torch.allclose(edm_loss, expected_edm_loss, rtol=1e-3, atol=1e-5), ( + "edm_loss doesn't match expected value" + ) + + # 4. Verify scalar losses are proper means + assert torch.isclose(output_batch["mse_loss"], pred_mse.mean(), rtol=1e-3, atol=1e-5) + assert torch.isclose(output_batch["edm_loss"], edm_loss.mean(), rtol=1e-3, atol=1e-5) + + def test_training_step(self, monkeypatch): + """Test the training_step method with mocked compute_loss_with_epsilon_and_sigma.""" + from unittest.mock import patch + + # Initialize with monkeypatch + self.setup_method(None, monkeypatch) + + # Create test data batch + data_batch = { + "video": self.x0, + "context_embeddings": torch.randn(self.batch_size, 10, 512).to(**self.pipeline.tensor_kwargs), + } + iteration = 0 + + # Test Case 1: is_pipeline_last_stage = True + # Mock compute_loss_with_epsilon_and_sigma to return expected values + mock_output_batch = { + "x0": self.x0, + "xt": torch.randn_like(self.x0), + "sigma": self.sigma, + "weights_per_sigma": torch.ones_like(self.sigma), + "condition": self.condition, + "model_pred": {"x0_pred": torch.randn_like(self.x0), "eps_pred": torch.randn_like(self.x0)}, + "mse_loss": torch.tensor(0.5, **self.pipeline.tensor_kwargs), + "edm_loss": torch.tensor(0.3, **self.pipeline.tensor_kwargs), + } + mock_pred_mse = torch.randn_like(self.x0) + mock_edm_loss = torch.randn_like(self.x0) + + with patch.object( + self.pipeline, + "compute_loss_with_epsilon_and_sigma", + return_value=(mock_output_batch, mock_pred_mse, mock_edm_loss), + ) as mock_compute_loss: + # Call training_step + result = self.pipeline.training_step(self.model, data_batch, iteration) + + # Verify compute_loss_with_epsilon_and_sigma was called once + assert mock_compute_loss.call_count == 1 + + # Verify return values are correct (output_batch, edm_loss) + assert len(result) == 2 + output_batch, edm_loss = result + assert output_batch == mock_output_batch + assert torch.equal(edm_loss, mock_edm_loss) + + # Test Case 2: is_pipeline_last_stage = False + # Mock is_pipeline_last_stage to return False + from megatron.core import parallel_state + + monkeypatch.setattr(parallel_state, "is_pipeline_last_stage", lambda: False) + + # Mock compute_loss_with_epsilon_and_sigma to return net_output only + mock_net_output = torch.randn_like(self.x0) + + with patch.object( + self.pipeline, "compute_loss_with_epsilon_and_sigma", return_value=mock_net_output + ) as mock_compute_loss: + # Call training_step + result = self.pipeline.training_step(self.model, data_batch, iteration) + + # Verify compute_loss_with_epsilon_and_sigma was called once + assert mock_compute_loss.call_count == 1 + + # Verify return value is just net_output (not a tuple) + assert torch.equal(result, mock_net_output) + + def test_get_data_and_condition(self, monkeypatch): + """Test the get_data_and_condition method with different dropout rates.""" + # Initialize with monkeypatch + self.setup_method(None, monkeypatch) + + # Create test data batch + video_data = torch.randn(self.batch_size, self.channels, self.height, self.width).to( + **self.pipeline.tensor_kwargs + ) + context_embeddings = torch.randn(self.batch_size, 10, 512).to(**self.pipeline.tensor_kwargs) + 
+ data_batch = {"video": video_data.clone(), "context_embeddings": context_embeddings.clone()} + + # Test Case 1: With default dropout_rate (0.2) + latent_state, condition = self.pipeline.get_data_and_condition(data_batch.copy(), dropout_rate=0.2) + + # Verify raw_state is video * sigma_data + expected_raw_state = video_data * self.sigma_data + assert torch.allclose(latent_state, expected_raw_state, rtol=1e-3, atol=1e-5), ( + "raw_state doesn't match expected value (video * sigma_data)" + ) + + # Verify condition contains crossattn_emb + assert "crossattn_emb" in condition, "condition should contain 'crossattn_emb' key" + assert condition["crossattn_emb"].shape == context_embeddings.shape, ( + f"Expected crossattn_emb shape {context_embeddings.shape}, got {condition['crossattn_emb'].shape}" + ) + + # Verify crossattn_emb is on CUDA with correct dtype + assert condition["crossattn_emb"].device.type == "cuda" + assert condition["crossattn_emb"].dtype == torch.bfloat16 + + # Test Case 2: With dropout_rate=0.0 (no dropout, should keep all values) + data_batch_no_dropout = {"video": video_data.clone(), "context_embeddings": context_embeddings.clone()} + latent_state_no_dropout, condition_no_dropout = self.pipeline.get_data_and_condition( + data_batch_no_dropout, dropout_rate=0.0 + ) + + # With dropout_rate=0.0, crossattn_emb should equal context_embeddings + assert torch.allclose(condition_no_dropout["crossattn_emb"], context_embeddings, rtol=1e-3, atol=1e-5), ( + "With dropout_rate=0.0, crossattn_emb should equal original context_embeddings" + ) + + # Test Case 3: With dropout_rate=1.0 (complete dropout, should zero out all values) + data_batch_full_dropout = {"video": video_data.clone(), "context_embeddings": context_embeddings.clone()} + latent_state_full_dropout, condition_full_dropout = self.pipeline.get_data_and_condition( + data_batch_full_dropout, dropout_rate=1.0 + ) + + # With dropout_rate=1.0, crossattn_emb should be all zeros + expected_zeros = torch.zeros_like(context_embeddings) + assert torch.allclose(condition_full_dropout["crossattn_emb"], expected_zeros, rtol=1e-3, atol=1e-5), ( + "With dropout_rate=1.0, crossattn_emb should be all zeros" + ) + + # test latent_state_full_dropout and latent_state_no_dropout are equal to each other + assert torch.allclose(latent_state_full_dropout, latent_state_no_dropout, rtol=1e-3, atol=1e-5), ( + "latent_state_full_dropout and latent_state_no_dropout should be equal to each other" + ) + assert torch.allclose(latent_state_no_dropout, video_data * self.sigma_data, rtol=1e-3, atol=1e-5), ( + "latent_state with dropout=0 should equal video data * sigma_data" + ) + + def test_get_x0_fn_from_batch(self, monkeypatch): + """Test the get_x0_fn_from_batch method returns a callable with correct guidance behavior.""" + from unittest.mock import patch + + # Initialize with monkeypatch + self.setup_method(None, monkeypatch) + + # Create test data batch + video_data = torch.randn(self.batch_size, self.channels, self.height, self.width).to( + **self.pipeline.tensor_kwargs + ) + context_embeddings = torch.randn(self.batch_size, 10, 512).to(**self.pipeline.tensor_kwargs) + + data_batch = {"video": video_data, "context_embeddings": context_embeddings} + + # Create mock condition and uncondition + mock_condition = {"crossattn_emb": torch.randn(self.batch_size, 10, 512).to(**self.pipeline.tensor_kwargs)} + mock_uncondition = {"crossattn_emb": torch.randn(self.batch_size, 10, 512).to(**self.pipeline.tensor_kwargs)} + + # Mock get_condition_uncondition to return 
our mock conditions + with patch.object(self.pipeline, "get_condition_uncondition", return_value=(mock_condition, mock_uncondition)): + # Test Case 1: Default guidance (1.5) + guidance = 1.5 + x0_fn = self.pipeline.get_x0_fn_from_batch(data_batch, guidance=guidance) + + # Verify x0_fn is callable + assert callable(x0_fn), "get_x0_fn_from_batch should return a callable" + + # Create test inputs for the returned function + noise_x = torch.randn(self.batch_size, self.channels, self.height, self.width).to( + **self.pipeline.tensor_kwargs + ) + sigma = torch.ones(self.batch_size).to(**self.pipeline.tensor_kwargs) * 1.0 + + # Create mock outputs for denoise calls + mock_cond_x0 = torch.randn_like(noise_x) + mock_uncond_x0 = torch.randn_like(noise_x) + mock_eps = torch.randn_like(noise_x) # dummy eps_pred (not used in x0_fn) + + # Mock denoise to return different values for condition vs uncondition + call_count = [0] + + def mock_denoise(xt, sig, cond): + call_count[0] += 1 + if call_count[0] == 1: # First call (with condition) + return mock_cond_x0, mock_eps + else: # Second call (with uncondition) + return mock_uncond_x0, mock_eps + + with patch.object(self.pipeline, "denoise", side_effect=mock_denoise): + # Call the returned x0_fn + result = x0_fn(noise_x, sigma) + + # Verify denoise was called twice + assert call_count[0] == 2, "mock_denoise should be called twice (condition and uncondition)" + + # Verify the result follows the guidance formula: cond_x0 + guidance * (cond_x0 - uncond_x0) + expected_result = mock_cond_x0 + guidance * (mock_cond_x0 - mock_uncond_x0) + assert torch.allclose(result, expected_result, rtol=1e-3, atol=1e-5), ( + "x0_fn output doesn't match expected guidance formula" + ) + + # Verify output shape and dtype + assert result.shape == noise_x.shape, f"Expected result shape {noise_x.shape}, got {result.shape}" + assert result.device.type == "cuda" + assert result.dtype == torch.bfloat16 diff --git a/tests/unit_tests/megatron/model/dit/test_data_preprocess.py b/tests/unit_tests/megatron/model/dit/test_data_preprocess.py new file mode 100644 index 00000000..526a3584 --- /dev/null +++ b/tests/unit_tests/megatron/model/dit/test_data_preprocess.py @@ -0,0 +1,154 @@ +# Copyright (c) 2024, NVIDIA CORPORATION. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ + +import pytest +import torch +from megatron.core.packed_seq_params import PackedSeqParams + +from dfm.src.megatron.model.dit.dit_data_process import ( + encode_seq_length, + get_batch_on_this_cp_rank, +) + + +@pytest.mark.skipif(not torch.cuda.is_available(), reason="Requires CUDA") +class TestEncodeSeqLength: + """Test suite for encode_seq_length and get_batch_on_this_cp_rank functions.""" + + def setup_method(self, method): + """Set up test fixtures before each test method.""" + # Create basic batch for encode_seq_length tests + self.basic_batch = { + "seq_len_q": torch.tensor([10, 20, 15], dtype=torch.int32, device="cuda"), + "seq_len_kv": torch.tensor([5, 10, 8], dtype=torch.int32, device="cuda"), + "seq_len_q_padded": torch.tensor([12, 24, 16], dtype=torch.int32, device="cuda"), + "seq_len_kv_padded": torch.tensor([8, 12, 10], dtype=torch.int32, device="cuda"), + "video": torch.randn(3, 100, 512, device="cuda"), + } + + # Create packed sequence parameters for get_batch_on_this_cp_rank tests + # Note: For cp_size=2, total_tokens must be divisible by (world_size * 2) = 4 + cu_seqlens_q = torch.tensor([0, 10, 30, 45], dtype=torch.int32, device="cuda") + cu_seqlens_kv = torch.tensor([0, 6, 16, 24], dtype=torch.int32, device="cuda") + cu_seqlens_q_padded = torch.tensor([0, 12, 36, 52], dtype=torch.int32, device="cuda") + cu_seqlens_kv_padded = torch.tensor([0, 8, 20, 32], dtype=torch.int32, device="cuda") + + # Create batch with packed parameters for context parallelism tests + self.batch_with_cp = { + "video": torch.randn(3, 52, 512, device="cuda"), + "loss_mask": torch.ones(3, 52, device="cuda"), + "pos_ids": torch.arange(52, device="cuda").unsqueeze(0).expand(3, -1), + "context_embeddings": torch.randn(3, 32, 512, device="cuda"), + "context_mask": torch.ones(3, 32, device="cuda"), + "packed_seq_params": { + "self_attention": PackedSeqParams( + cu_seqlens_q=cu_seqlens_q, + cu_seqlens_kv=cu_seqlens_q, + cu_seqlens_q_padded=cu_seqlens_q_padded, + cu_seqlens_kv_padded=cu_seqlens_q_padded, + qkv_format="thd", + ), + "cross_attention": PackedSeqParams( + cu_seqlens_q=cu_seqlens_q, + cu_seqlens_kv=cu_seqlens_kv, + cu_seqlens_q_padded=cu_seqlens_q_padded, + cu_seqlens_kv_padded=cu_seqlens_kv_padded, + qkv_format="thd", + ), + }, + } + + def test_encode_seq_length_with_seq_lens(self): + """Test encode_seq_length creates packed_seq_params when seq_len_q and seq_len_kv are present.""" + qkv_format = "thd" + result = encode_seq_length(self.basic_batch, format=qkv_format) + + # Check that packed_seq_params is created + assert "packed_seq_params" in result + assert "self_attention" in result["packed_seq_params"] + assert "cross_attention" in result["packed_seq_params"] + + # Check self_attention params + self_attn = result["packed_seq_params"]["self_attention"] + assert isinstance(self_attn, PackedSeqParams) + assert self_attn.qkv_format == qkv_format + + # Verify cumulative sum for q (self_attention uses cu_seqlens_q for both q and kv) + expected_cu_seqlens_q = torch.tensor([0, 10, 30, 45], dtype=torch.int32, device="cuda") + assert torch.equal(self_attn.cu_seqlens_q, expected_cu_seqlens_q) + assert torch.equal(self_attn.cu_seqlens_kv, expected_cu_seqlens_q) + + # Verify cumulative sum for q_padded + expected_cu_seqlens_q_padded = torch.tensor([0, 12, 36, 52], dtype=torch.int32, device="cuda") + assert torch.equal(self_attn.cu_seqlens_q_padded, expected_cu_seqlens_q_padded) + assert torch.equal(self_attn.cu_seqlens_kv_padded, expected_cu_seqlens_q_padded) + + # Check cross_attention params + 
cross_attn = result["packed_seq_params"]["cross_attention"] + assert isinstance(cross_attn, PackedSeqParams) + assert cross_attn.qkv_format == qkv_format + + # Verify cumulative sum for kv (cross_attention uses different kv lengths) + expected_cu_seqlens_kv = torch.tensor([0, 5, 15, 23], dtype=torch.int32, device="cuda") + assert torch.equal(cross_attn.cu_seqlens_q, expected_cu_seqlens_q) + assert torch.equal(cross_attn.cu_seqlens_kv, expected_cu_seqlens_kv) + + # Verify cumulative sum for kv_padded + expected_cu_seqlens_kv_padded = torch.tensor([0, 8, 20, 30], dtype=torch.int32, device="cuda") + assert torch.equal(cross_attn.cu_seqlens_q_padded, expected_cu_seqlens_q_padded) + assert torch.equal(cross_attn.cu_seqlens_kv_padded, expected_cu_seqlens_kv_padded) + + def test_get_batch_on_this_cp_rank(self, monkeypatch): + """Test that get_batch_on_this_cp_rank returns data unchanged when cp_size=1.""" + # Stub parallel_state functions to avoid requiring initialization + from megatron.core import parallel_state + + monkeypatch.setattr(parallel_state, "get_context_parallel_world_size", lambda: 1, raising=False) + + # Store original shapes + original_video_shape = self.batch_with_cp["video"].shape + original_context_shape = self.batch_with_cp["context_embeddings"].shape + + # Clone the batch to ensure we can compare with original + original_video = self.batch_with_cp["video"].clone() + original_context = self.batch_with_cp["context_embeddings"].clone() + + result = get_batch_on_this_cp_rank(self.batch_with_cp) + + # Data should remain unchanged when cp_size=1 + assert result["video"].shape == original_video_shape + assert result["context_embeddings"].shape == original_context_shape + assert torch.equal(result["video"], original_video) + assert torch.equal(result["context_embeddings"], original_context) + + def test_get_batch_on_this_cp_rank_with_context_parallelism(self, monkeypatch): + """Test that get_batch_on_this_cp_rank partitions data when cp_size>1.""" + # Stub parallel_state functions with cp_size=2, cp_rank=0 + from megatron.core import parallel_state + + monkeypatch.setattr(parallel_state, "get_context_parallel_world_size", lambda: 2, raising=False) + monkeypatch.setattr(parallel_state, "get_context_parallel_rank", lambda: 0, raising=False) + + result = get_batch_on_this_cp_rank(self.batch_with_cp) + + # Verify that data tensors were modified (should be partitioned) + # For self-attention keys (video, loss_mask, pos_ids) - 52 tokens / 2 ranks = 26 tokens + assert result["video"].shape[1] == 26 # Partitioned to half + assert result["loss_mask"].shape[1] == 26 + assert result["pos_ids"].shape[1] == 26 + + # For cross-attention keys (context_embeddings, context_mask) - 32 tokens / 2 ranks = 16 tokens + assert result["context_embeddings"].shape[1] == 16 # Partitioned to half + assert result["context_mask"].shape[1] == 16 diff --git a/tests/unit_tests/megatron/model/dit/test_dit_layer_spec.py b/tests/unit_tests/megatron/model/dit/test_dit_layer_spec.py new file mode 100644 index 00000000..e936a231 --- /dev/null +++ b/tests/unit_tests/megatron/model/dit/test_dit_layer_spec.py @@ -0,0 +1,344 @@ +# Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import pytest +import torch +from megatron.core.packed_seq_params import PackedSeqParams +from megatron.core.transformer.identity_op import IdentityOp +from megatron.core.transformer.transformer_config import TransformerConfig + +from dfm.src.megatron.model.dit.dit_layer_spec import ( + AdaLN, + DiTLayerWithAdaLN, + RMSNorm, + get_dit_adaln_block_with_transformer_engine_spec, +) + + +def test_rmsnorm_basic(): + """Test RMSNorm forward pass with basic properties.""" + hidden_size = 768 + batch_size = 2 + seq_len = 16 + + # Create RMSNorm instance + rms_norm = RMSNorm(hidden_size=hidden_size, eps=1e-6) + + # Create random input tensor + x = torch.randn(seq_len, batch_size, hidden_size) + + # Forward pass + output = rms_norm(x) + + # Check output shape matches input shape + assert output.shape == x.shape, f"Expected shape {x.shape}, got {output.shape}" + + # Check that weight parameter exists and has correct shape + assert rms_norm.weight.shape == (hidden_size,), ( + f"Expected weight shape ({hidden_size},), got {rms_norm.weight.shape}" + ) + + # Check that output is not NaN or Inf + assert not torch.isnan(output).any(), "Output contains NaN values" + assert not torch.isinf(output).any(), "Output contains Inf values" + + # Check that RMS normalization approximately normalizes the variance along last dim + # After RMS norm (before scaling by weight), variance should be close to 1 + normalized = rms_norm._norm(x.float()).type_as(x) + rms_variance = normalized.pow(2).mean(-1) + assert torch.allclose(rms_variance, torch.ones_like(rms_variance), atol=1e-5), ( + "RMS normalization should result in variance close to 1" + ) + + +def test_adaln_forward_chunking(): + """Test AdaLN forward pass returns correct number of chunks.""" + hidden_size = 768 + batch_size = 2 + n_adaln_chunks = 4 + + # Create TransformerConfig object + config = TransformerConfig( + num_layers=1, + hidden_size=hidden_size, + num_attention_heads=8, + layernorm_epsilon=1e-6, + sequence_parallel=False, + ) + + # Create AdaLN instance with 4 chunks and AdaLN-LoRA enabled + adaln = AdaLN(config=config, n_adaln_chunks=n_adaln_chunks, use_adaln_lora=True, adaln_lora_dim=256) + + # Create timestep embedding input + timestep_emb = torch.randn(batch_size, hidden_size) + + # Forward pass should return n_adaln_chunks tensors + chunks = adaln(timestep_emb) + + # Check that we get the correct number of chunks + assert len(chunks) == n_adaln_chunks, f"Expected {n_adaln_chunks} chunks, got {len(chunks)}" + + # Check that each chunk has the correct shape + for i, chunk in enumerate(chunks): + assert chunk.shape == (batch_size, hidden_size), ( + f"Chunk {i} has shape {chunk.shape}, expected ({batch_size}, {hidden_size})" + ) + + +def test_adaln_modulation_methods(): + """Test AdaLN modulation and scaling methods.""" + hidden_size = 512 + seq_len = 8 + batch_size = 2 + + # Create TransformerConfig object + config = TransformerConfig( + num_layers=1, + hidden_size=hidden_size, + num_attention_heads=8, + layernorm_epsilon=1e-5, + sequence_parallel=False, + ) + + # Create AdaLN instance + adaln = AdaLN(config=config,
n_adaln_chunks=6) + + # Create test tensors + x = torch.randn(seq_len, batch_size, hidden_size) + shift = torch.randn(batch_size, hidden_size) + scale = torch.randn(batch_size, hidden_size) + gate = torch.randn(batch_size, hidden_size) + residual = torch.randn(seq_len, batch_size, hidden_size) + + # Test modulate method + modulated = adaln.modulate(x, shift, scale) + assert modulated.shape == x.shape, f"Modulated output shape {modulated.shape} != input shape {x.shape}" + # Verify the modulation formula: x * (1 + scale) + shift + expected_modulated = x * (1 + scale) + shift + assert torch.allclose(modulated, expected_modulated, atol=1e-6), "Modulate formula incorrect" + + # Test scale_add method + scaled_added = adaln.scale_add(residual, x, gate) + assert scaled_added.shape == residual.shape, ( + f"scale_add output shape {scaled_added.shape} != residual shape {residual.shape}" + ) + # Verify the formula: residual + gate * x + expected_scaled_added = residual + gate * x + assert torch.allclose(scaled_added, expected_scaled_added, atol=1e-6), "scale_add formula incorrect" + + # Test modulated_layernorm method + modulated_ln = adaln.modulated_layernorm(x, shift, scale) + assert modulated_ln.shape == x.shape, ( + f"modulated_layernorm output shape {modulated_ln.shape} != input shape {x.shape}" + ) + assert not torch.isnan(modulated_ln).any(), "modulated_layernorm output contains NaN" + + # Test scaled_modulated_layernorm method + hidden_states, shifted_output = adaln.scaled_modulated_layernorm(residual, x, gate, shift, scale) + assert hidden_states.shape == residual.shape, ( + f"hidden_states shape {hidden_states.shape} != residual shape {residual.shape}" + ) + assert shifted_output.shape == x.shape, f"shifted_output shape {shifted_output.shape} != x shape {x.shape}" + assert not torch.isnan(hidden_states).any(), "hidden_states contains NaN" + assert not torch.isnan(shifted_output).any(), "shifted_output contains NaN" + + +@pytest.mark.skipif(not torch.cuda.is_available(), reason="Requires CUDA") +class TestDiTLayerWithAdaLN: + """Test class for DiTLayerWithAdaLN with shared setup.""" + + def setup_method(self, method=None, monkeypatch=None): + """Set up test fixtures before each test method.""" + # Stub parallel_state functions to avoid requiring initialization + from megatron.core import parallel_state + + if monkeypatch: + monkeypatch.setattr( + parallel_state, + "get_data_parallel_rank", + lambda with_context_parallel=False, **kwargs: 0, + raising=False, + ) + monkeypatch.setattr(parallel_state, "get_context_parallel_world_size", lambda **kwargs: 1, raising=False) + monkeypatch.setattr(parallel_state, "is_pipeline_last_stage", lambda **kwargs: True, raising=False) + monkeypatch.setattr(parallel_state, "get_context_parallel_group", lambda **kwargs: None, raising=False) + monkeypatch.setattr( + parallel_state, + "get_data_parallel_group", + lambda with_context_parallel=False, **kwargs: None, + raising=False, + ) + monkeypatch.setattr( + parallel_state, "get_tensor_model_parallel_group", lambda **kwargs: None, raising=False + ) + + # Common dimensions + self.hidden_size = 512 + self.seq_len = 16 + self.batch_size = 2 + self.context_len = 32 + + # Create TransformerConfig object + self.config = TransformerConfig( + num_layers=1, + hidden_size=self.hidden_size, + num_attention_heads=8, + ffn_hidden_size=self.hidden_size * 4, + layernorm_epsilon=1e-6, + sequence_parallel=False, + bias_activation_fusion=False, + bias_dropout_fusion=False, + bf16=False, + fp16=False, + params_dtype=torch.float32, + 
apply_residual_connection_post_layernorm=False, + add_bias_linear=False, + gated_linear_unit=False, + activation_func=torch.nn.functional.gelu, + num_query_groups=None, + attention_dropout=0.0, + hidden_dropout=0.0, + ) + + def test_dit_layer_without_cross_attention(self, monkeypatch): + """Test DiTLayerWithAdaLN forward pass without cross attention.""" + # Initialize with monkeypatch + self.setup_method(None, monkeypatch) + + # Create submodules without cross attention using real attention modules + submodules = get_dit_adaln_block_with_transformer_engine_spec().submodules + submodules.cross_attention = IdentityOp + dit_layer = ( + DiTLayerWithAdaLN( + config=self.config, + submodules=submodules, + layer_number=1, + ) + .to("cuda") + .to(torch.bfloat16) + ) + + # Create input tensors + hidden_states = torch.randn(self.seq_len, self.batch_size, self.hidden_size) + hidden_states = hidden_states.reshape(1, -1, self.hidden_size).to("cuda") + hidden_states = hidden_states.transpose(0, 1).to(torch.bfloat16) + timestep_emb = torch.randn(1, self.hidden_size).to("cuda").to(torch.bfloat16) # This acts as attention_mask + + cu_seqlens_q = torch.arange( + 0, (self.batch_size + 1) * self.seq_len, self.seq_len, dtype=torch.int32, device="cuda" + ) + + packed_seq_params = { + "self_attention": PackedSeqParams( + cu_seqlens_q=cu_seqlens_q, + cu_seqlens_q_padded=cu_seqlens_q, + cu_seqlens_kv=cu_seqlens_q, + cu_seqlens_kv_padded=cu_seqlens_q, + qkv_format="thd", + ), + } + + # Forward pass + output, _ = dit_layer( + hidden_states=hidden_states, + attention_mask=timestep_emb, + context=None, + context_mask=None, + packed_seq_params=packed_seq_params, + ) + + # Check output shape + assert output.shape == hidden_states.shape, f"Output shape {output.shape} != input shape {hidden_states.shape}" + + # Check that output is valid (no NaN or Inf) + assert not torch.isnan(output).any(), "Output contains NaN values" + assert not torch.isinf(output).any(), "Output contains Inf values" + + # Check that adaLN has 6 chunks (for layer without cross attention) + assert dit_layer.adaLN.n_adaln_chunks == 6, ( + f"Expected 6 adaLN chunks without cross attention, got {dit_layer.adaLN.n_adaln_chunks}" + ) + + def test_dit_layer_with_cross_attention(self, monkeypatch): + """Test DiTLayerWithAdaLN forward pass with cross attention.""" + # Initialize with monkeypatch + self.setup_method(None, monkeypatch) + + # Create submodules with cross attention using real attention modules + submodules = get_dit_adaln_block_with_transformer_engine_spec().submodules + + dit_layer = ( + DiTLayerWithAdaLN( + config=self.config, + submodules=submodules, + layer_number=1, + ) + .to("cuda") + .to(torch.bfloat16) + ) + + # Create input tensors + hidden_states = torch.randn(1, self.seq_len * self.batch_size, self.hidden_size) + hidden_states = hidden_states.reshape(1, -1, self.hidden_size).to("cuda") + hidden_states = hidden_states.transpose(0, 1).to(torch.bfloat16) + timestep_emb = torch.randn(1, self.hidden_size).to("cuda").to(torch.bfloat16) # This acts as attention_mask + context = torch.randn(1, self.context_len * self.batch_size, self.hidden_size) + context = context.reshape(1, -1, self.hidden_size).to("cuda") + context = context.transpose(0, 1).to(torch.bfloat16) + context_mask = None + + cu_seqlens_q = torch.arange( + 0, (self.batch_size + 1) * self.seq_len, self.seq_len, dtype=torch.int32, device="cuda" + ) + cu_seqlens_kv = torch.arange( + 0, (self.batch_size + 1) * self.context_len, self.context_len, dtype=torch.int32, device="cuda" + ) + 
+ packed_seq_params = { + "self_attention": PackedSeqParams( + cu_seqlens_q=cu_seqlens_q, + cu_seqlens_q_padded=cu_seqlens_q, + cu_seqlens_kv=cu_seqlens_q, + cu_seqlens_kv_padded=cu_seqlens_q, + qkv_format="thd", + ), + "cross_attention": PackedSeqParams( + cu_seqlens_q=cu_seqlens_q, + cu_seqlens_q_padded=cu_seqlens_q, + cu_seqlens_kv=cu_seqlens_kv, + cu_seqlens_kv_padded=cu_seqlens_kv, + qkv_format="thd", + ), + } + + # Forward pass + output, _ = dit_layer( + hidden_states=hidden_states, + attention_mask=timestep_emb, + context=context, + context_mask=context_mask, + packed_seq_params=packed_seq_params, + ) + + # Check output shape + assert output.shape == hidden_states.shape, f"Output shape {output.shape} != input shape {hidden_states.shape}" + + # Check that output is valid (no NaN or Inf) + assert not torch.isnan(output).any(), "Output contains NaN values" + assert not torch.isinf(output).any(), "Output contains Inf values" + + # Check that adaLN has 9 chunks (for layer with cross attention) + assert dit_layer.adaLN.n_adaln_chunks == 9, ( + f"Expected 9 adaLN chunks with cross attention, got {dit_layer.adaLN.n_adaln_chunks}" + ) diff --git a/tests/unit_tests/megatron/model/dit/test_dit_model.py b/tests/unit_tests/megatron/model/dit/test_dit_model.py new file mode 100644 index 00000000..ad111285 --- /dev/null +++ b/tests/unit_tests/megatron/model/dit/test_dit_model.py @@ -0,0 +1,185 @@ +# Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import math + +import pytest +import torch +from einops import rearrange +from megatron.core.packed_seq_params import PackedSeqParams +from megatron.core.transformer.transformer_config import TransformerConfig + +from dfm.src.megatron.data.dit.dit_taskencoder import pos_id_3d +from dfm.src.megatron.model.dit.dit_model import DiTCrossAttentionModel + + +@pytest.mark.skipif(not torch.cuda.is_available(), reason="Requires CUDA") +class TestDiTCrossAttentionModel: + """Test class for DiTCrossAttentionModel.""" + + def setup_method(self, method): + """Set up test fixtures before each test method.""" + self.batch_size = 2 + self.hidden_size = 512 + self.num_layers = 1 + self.num_attention_heads = 4 + # Dimensions chosen so that seq_len = (max_frames/patch_temporal) * (max_img_h/patch_spatial) * (max_img_w/patch_spatial) + # seq_len = (16/2) * (8/2) * (8/2) = 8 * 4 * 4 = 128 + self.max_img_h = 8 + self.max_img_w = 8 + self.max_frames = 16 + self.patch_spatial = 2 + self.patch_temporal = 2 + self.seq_len = ( + (self.max_frames // self.patch_temporal) + * (self.max_img_h // self.patch_spatial) + * (self.max_img_w // self.patch_spatial) + ) + self.in_channels = 16 + self.out_channels = 16 + self.crossattn_seq_len = 256 + + def test_forward_full_pipeline(self, monkeypatch): + """Test the forward method with full pre/post processing.""" + # Mock parallel_state functions + from megatron.core import parallel_state + + monkeypatch.setattr(parallel_state, "is_pipeline_first_stage", lambda: True) + monkeypatch.setattr(parallel_state, "is_pipeline_last_stage", lambda: True) + monkeypatch.setattr(parallel_state, "get_tensor_model_parallel_world_size", lambda: 1) + monkeypatch.setattr(parallel_state, "get_pipeline_model_parallel_world_size", lambda: 1) + monkeypatch.setattr(parallel_state, "get_data_parallel_world_size", lambda with_context_parallel=False: 1) + monkeypatch.setattr(parallel_state, "get_context_parallel_world_size", lambda: 1) + monkeypatch.setattr(parallel_state, "get_tensor_model_parallel_group", lambda **kwargs: None) + monkeypatch.setattr(parallel_state, "get_data_parallel_group", lambda **kwargs: None) + monkeypatch.setattr(parallel_state, "get_context_parallel_group", lambda **kwargs: None) + monkeypatch.setattr(parallel_state, "get_tensor_and_data_parallel_group", lambda **kwargs: None) + + # Create config + config = TransformerConfig( + num_layers=self.num_layers, + hidden_size=self.hidden_size, + num_attention_heads=self.num_attention_heads, + use_cpu_initialization=True, + perform_initialization=True, + tensor_model_parallel_size=1, + pipeline_model_parallel_size=1, + sequence_parallel=False, + add_bias_linear=False, + ) + + # Create model + model = ( + DiTCrossAttentionModel( + config=config, + pre_process=True, + post_process=True, + max_img_h=self.max_img_h, + max_img_w=self.max_img_w, + max_frames=self.max_frames, + patch_spatial=self.patch_spatial, + patch_temporal=self.patch_temporal, + in_channels=self.in_channels, + out_channels=self.out_channels, + ) + .cuda() + .to(torch.bfloat16) + ) + + # Create input tensors on CUDA + # x should be [B, S, C] where C = in_channels * patch_spatial^2 + x = torch.randn( + self.batch_size, + self.seq_len, + self.in_channels * self.patch_spatial**2, + dtype=torch.bfloat16, + device="cuda", + ) + x = x.reshape(1, -1, self.in_channels * self.patch_spatial**2) + + sigma_min = 0.0002 + sigma_max = 80.0 + c_noise_min = 0.25 * math.log(sigma_min) + c_noise_max = 0.25 * math.log(sigma_max) + timesteps = (torch.rand(1, device="cuda") * (c_noise_max -
c_noise_min) + c_noise_min).to(torch.bfloat16) + + # crossattn_emb should be [B, S_cross, D] + crossattn_emb = torch.randn( + self.batch_size, self.crossattn_seq_len, self.hidden_size, dtype=torch.bfloat16, device="cuda" + ) + crossattn_emb = crossattn_emb.reshape(1, -1, self.hidden_size) + pos_ids = rearrange( + pos_id_3d.get_pos_id_3d( + t=self.max_frames // self.patch_temporal, + h=self.max_img_h // self.patch_spatial, + w=self.max_img_w // self.patch_spatial, + ), + "T H W d -> (T H W) d", + ) + pos_ids = pos_ids.unsqueeze(0).expand(self.batch_size, -1, -1) + pos_ids = pos_ids.reshape(1, -1, 3).cuda() + + cu_seqlens_q = torch.arange( + 0, (self.batch_size + 1) * self.seq_len, self.seq_len, dtype=torch.int32, device="cuda" + ) + cu_seqlens_kv_cross = torch.arange( + 0, (self.batch_size + 1) * self.crossattn_seq_len, self.crossattn_seq_len, dtype=torch.int32, device="cuda" + ) + + packed_seq_params = { + "self_attention": PackedSeqParams( + cu_seqlens_q=cu_seqlens_q, + cu_seqlens_kv=cu_seqlens_q, + cu_seqlens_q_padded=cu_seqlens_q, + cu_seqlens_kv_padded=cu_seqlens_q, + qkv_format="thd", + ), + "cross_attention": PackedSeqParams( + cu_seqlens_q=cu_seqlens_q, + cu_seqlens_kv=cu_seqlens_kv_cross, + cu_seqlens_q_padded=cu_seqlens_q, + cu_seqlens_kv_padded=cu_seqlens_kv_cross, + qkv_format="thd", + ), + } + + # Run forward pass with original crossattn_emb + with torch.no_grad(): + output_original = model( + x=x, + timesteps=timesteps, + crossattn_emb=crossattn_emb, + pos_ids=pos_ids, + packed_seq_params=packed_seq_params, + ) + + # Verify output shape + # Expected output: [B, S, patch_spatial^2 * patch_temporal * out_channels] + expected_out_channels = self.patch_spatial**2 * self.patch_temporal * self.out_channels + expected_shape = (1, self.batch_size * self.seq_len, expected_out_channels) + + assert output_original.shape == expected_shape, ( + f"Expected output shape {expected_shape}, got {output_original.shape}" + ) + + # Verify output is not NaN or Inf + assert not torch.isnan(output_original).any(), "Output contains NaN values" + assert not torch.isinf(output_original).any(), "Output contains Inf values" + + # Verify output dtype + assert output_original.dtype == torch.bfloat16, ( + f"Expected output dtype torch.bfloat16, got {output_original.dtype}" + ) + + print(f"Forward pass successful with output shape: {output_original.shape}.") diff --git a/tests/unit_tests/megatron/model/dit/test_dit_model_provider.py b/tests/unit_tests/megatron/model/dit/test_dit_model_provider.py new file mode 100644 index 00000000..23171f7c --- /dev/null +++ b/tests/unit_tests/megatron/model/dit/test_dit_model_provider.py @@ -0,0 +1,109 @@ +# Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +from unittest.mock import MagicMock, patch + +import pytest +import torch + +from dfm.src.megatron.model.dit.dit_model import DiTCrossAttentionModel +from dfm.src.megatron.model.dit.dit_model_provider import DiTModelProvider + + +@pytest.mark.skipif(not torch.cuda.is_available(), reason="Requires CUDA") +class TestDiTModelProvider: + """Test class for DiTModelProvider.""" + + def setup_method(self, method): + """Set up test fixtures before each test method.""" + self.hidden_size = 1152 + self.num_layers = 28 + self.num_attention_heads = 16 + + def test_provide(self, monkeypatch): + """Test that provide() returns a DiTCrossAttentionModel instance.""" + # Mock parallel_state functions + from megatron.core import parallel_state + + monkeypatch.setattr(parallel_state, "is_pipeline_first_stage", lambda: True) + monkeypatch.setattr(parallel_state, "is_pipeline_last_stage", lambda: True) + monkeypatch.setattr(parallel_state, "get_tensor_model_parallel_world_size", lambda: 1) + monkeypatch.setattr(parallel_state, "get_pipeline_model_parallel_world_size", lambda: 1) + monkeypatch.setattr(parallel_state, "get_data_parallel_world_size", lambda with_context_parallel=False: 1) + monkeypatch.setattr(parallel_state, "get_context_parallel_world_size", lambda: 1) + monkeypatch.setattr(parallel_state, "get_tensor_model_parallel_group", lambda **kwargs: None) + monkeypatch.setattr(parallel_state, "get_data_parallel_group", lambda **kwargs: None) + monkeypatch.setattr(parallel_state, "get_context_parallel_group", lambda **kwargs: None) + monkeypatch.setattr(parallel_state, "get_tensor_and_data_parallel_group", lambda **kwargs: None) + + # Create provider instance + provider = DiTModelProvider( + hidden_size=self.hidden_size, + num_layers=self.num_layers, + num_attention_heads=self.num_attention_heads, + ) + + # Call provide method + model = provider.provide() + + # Check that the model is an instance of DiTCrossAttentionModel + assert isinstance(model, DiTCrossAttentionModel), f"Expected DiTCrossAttentionModel, got {type(model)}" + + # Check that the model config matches provider config + assert model.config.hidden_size == self.hidden_size, ( + f"Expected hidden_size {self.hidden_size}, got {model.config.hidden_size}" + ) + assert model.config.num_layers == self.num_layers, ( + f"Expected num_layers {self.num_layers}, got {model.config.num_layers}" + ) + assert model.config.num_attention_heads == self.num_attention_heads, ( + f"Expected num_attention_heads {self.num_attention_heads}, got {model.config.num_attention_heads}" + ) + + def test_configure_vae(self): + """Test that configure_vae() dynamically imports the VAE module.""" + # Create a mock VAE class + mock_vae_instance = MagicMock() + mock_vae_class = MagicMock() + mock_vae_class.from_pretrained.return_value = mock_vae_instance + + # Mock the dynamic_import function + with patch("dfm.src.megatron.model.dit.dit_model_provider.dynamic_import") as mock_dynamic_import: + mock_dynamic_import.return_value = mock_vae_class + + # Create provider instance + provider = DiTModelProvider( + hidden_size=self.hidden_size, + num_layers=self.num_layers, + num_attention_heads=self.num_attention_heads, + vae_module="dfm.src.common.tokenizers.cosmos.cosmos1.causal_video_tokenizer.CausalVideoTokenizer", + vae_name="Cosmos-0.1-Tokenizer-CV4x8x8", + vae_cache_folder="/path/to/cache", + ) + + # Call configure_vae + vae_result = provider.configure_vae() + + # Verify that dynamic_import was called with correct module path + mock_dynamic_import.assert_called_once_with( + 
"dfm.src.common.tokenizers.cosmos.cosmos1.causal_video_tokenizer.CausalVideoTokenizer" + ) + + # Verify that from_pretrained was called with correct parameters + mock_vae_class.from_pretrained.assert_called_once_with( + "Cosmos-0.1-Tokenizer-CV4x8x8", cache_dir="/path/to/cache" + ) + + # Verify that the returned value is the mock VAE instance + assert vae_result is mock_vae_instance, "Expected the VAE instance to be returned" diff --git a/tests/unit_tests/megatron/model/dit/test_dit_step.py b/tests/unit_tests/megatron/model/dit/test_dit_step.py new file mode 100644 index 00000000..fd3eb5a0 --- /dev/null +++ b/tests/unit_tests/megatron/model/dit/test_dit_step.py @@ -0,0 +1,121 @@ +# Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import unittest +from functools import partial +from unittest.mock import MagicMock, patch + +import torch + +from dfm.src.megatron.model.dit.dit_step import DITForwardStep + + +class TestDITForwardStep(unittest.TestCase): + """Unit tests for DITForwardStep class.""" + + def setUp(self): + """Set up test fixtures.""" + self.dit_forward_step = DITForwardStep() + + @patch("dfm.src.megatron.model.dit.dit_step.parallel_state.is_pipeline_last_stage") + def test_forward_step_with_pipeline_stages(self, mock_is_last_stage): + """Test forward_step method for both last and non-last pipeline stages.""" + batch_size = 4 + + # Test 1: Pipeline last stage with loss_mask + mock_is_last_stage.return_value = True + + # Create mock state + mock_state = MagicMock() + mock_timers = MagicMock() + mock_state.timers = lambda x: mock_timers + mock_state.cfg.rerun_state_machine.check_for_nan_in_loss = True + mock_state.cfg.rerun_state_machine.check_for_spiky_loss = False + mock_straggler_timer = MagicMock() + mock_state.straggler_timer = mock_straggler_timer + + # Create mock model + mock_model = MagicMock() + + # Create batch with loss_mask + loss_mask = torch.ones(batch_size) + batch = {"loss_mask": loss_mask} + + # Mock diffusion pipeline output + output_batch = MagicMock() + mock_loss = torch.randn(batch_size, 10) # Loss with additional dimension + + self.dit_forward_step.diffusion_pipeline.training_step = MagicMock(return_value=(output_batch, mock_loss)) + + # Act + output_tensor, loss_function = self.dit_forward_step.forward_step( + mock_state, batch, mock_model, return_schedule_plan=False + ) + + # Assert for last stage + mock_timers.stop.assert_called_once() + self.dit_forward_step.diffusion_pipeline.training_step.assert_called_once_with(mock_model, batch, 0) + + # Verify output tensor has the correct shape (mean over last dimension) + self.assertEqual(output_tensor.shape, (batch_size,)) + torch.testing.assert_close(output_tensor, torch.mean(mock_loss, dim=-1)) + + # Verify loss function is a partial function + self.assertIsInstance(loss_function, partial) + + # Verify straggler timer was used as context manager + mock_straggler_timer.__enter__.assert_called_once() + mock_straggler_timer.__exit__.assert_called_once() + + # 
Test 2: NOT pipeline last stage + mock_is_last_stage.return_value = False + + # Reset mocks for second test + mock_timers.reset_mock() + mock_straggler_timer.reset_mock() + + # Create new mock state + mock_state = MagicMock() + mock_state.timers = lambda x: mock_timers + mock_state.cfg.rerun_state_machine.check_for_nan_in_loss = False + mock_state.cfg.rerun_state_machine.check_for_spiky_loss = True + mock_state.straggler_timer = mock_straggler_timer + + # Create batch without loss_mask + batch = {} + + self.dit_forward_step.diffusion_pipeline.training_step = MagicMock(return_value=mock_loss) + + # Act + output_tensor, loss_function = self.dit_forward_step.forward_step( + mock_state, batch, mock_model, return_schedule_plan=False + ) + + # Assert for not last stage + mock_timers.stop.assert_called_once() + self.dit_forward_step.diffusion_pipeline.training_step.assert_called_once_with(mock_model, batch, 0) + + # Verify output tensor is directly returned (no mean operation) + torch.testing.assert_close(output_tensor, mock_loss) + + # Verify loss function is a partial function + self.assertIsInstance(loss_function, partial) + + # Verify straggler timer was used as context manager + mock_straggler_timer.__enter__.assert_called_once() + mock_straggler_timer.__exit__.assert_called_once() + + +if __name__ == "__main__": + unittest.main() diff --git a/uv.lock b/uv.lock index a78bc26c..84987ecb 100644 --- a/uv.lock +++ b/uv.lock @@ -359,6 +359,15 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/af/0f/3b8fdc946b4d9cc8cc1e8af42c4e409468c84441b933d037e101b3d72d86/astroid-3.3.11-py3-none-any.whl", hash = "sha256:54c760ae8322ece1abd213057c4b5bba7c49818853fc901ef09719a60dbf9dec", size = 275612, upload-time = "2025-07-13T18:04:21.07Z" }, ] +[[package]] +name = "asttokens" +version = "3.0.1" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/be/a5/8e3f9b6771b0b408517c82d97aed8f2036509bc247d46114925e32fe33f0/asttokens-3.0.1.tar.gz", hash = "sha256:71a4ee5de0bde6a31d64f6b13f2293ac190344478f081c3d1bccfcf5eacb0cb7", size = 62308, upload-time = "2025-11-15T16:43:48.578Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/d2/39/e7eaf1799466a4aef85b6a4fe7bd175ad2b1c6345066aa33f1f58d4b18d0/asttokens-3.0.1-py3-none-any.whl", hash = "sha256:15a3ebc0f43c2d0a50eeafea25e19046c68398e487b9f1f5b517f7c0f40f976a", size = 27047, upload-time = "2025-11-15T16:43:16.109Z" }, +] + [[package]] name = "async-timeout" version = "5.0.1" @@ -569,6 +578,27 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/10/cb/f2ad4230dc2eb1a74edf38f1a38b9b52277f75bef262d8908e60d957e13c/blinker-1.9.0-py3-none-any.whl", hash = "sha256:ba0efaa9080b619ff2f3459d1d500c57bddea4a6b424b60a91141db6fd2f08bc", size = 8458, upload-time = "2024-11-08T17:25:46.184Z" }, ] +[[package]] +name = "bokeh" +version = "3.9.0.dev4" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "contourpy", version = "1.3.2", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version < '3.11'" }, + { name = "contourpy", version = "1.3.3", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version >= '3.11'" }, + { name = "jinja2" }, + { name = "narwhals" }, + { name = "numpy" }, + { name = "packaging" }, + { name = "pillow" }, + { name = "pyyaml" }, + { name = "tornado", marker = "sys_platform != 'emscripten'" }, + { name = "xyzservices" }, +] +sdist = { url = 
"https://files.pythonhosted.org/packages/db/5d/c63242f0c0568d5abf03542a793744645af8a97eaceedc728ef523c1ea81/bokeh-3.9.0.dev4.tar.gz", hash = "sha256:2548bb85da0c8d3500e0c3047c5e80b93e162b1eaf98cefec94e55bb24f5e154", size = 6532652, upload-time = "2025-11-14T12:08:56.451Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/5b/da/e13e773372b59ff36d35d5e6dd32be4dd90a8709fb390846d373fe1eb274/bokeh-3.9.0.dev4-py3-none-any.whl", hash = "sha256:f6604f55e5049b73d1c4711b8becff59b0fc7dcb9d8ff0c386553f5caf306eb3", size = 7211655, upload-time = "2025-11-14T12:08:54.038Z" }, +] + [[package]] name = "botocore" version = "1.40.70" @@ -1310,6 +1340,15 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/3b/5e/6f8d874366788ad5d549e9ba258037d974dda6e004843be1bda794571701/datasets-4.4.1-py3-none-any.whl", hash = "sha256:c1163de5211e42546079ab355cc0250c7e6db16eb209ac5ac6252f801f596c44", size = 511591, upload-time = "2025-11-05T16:00:36.365Z" }, ] +[[package]] +name = "decorator" +version = "5.2.1" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/43/fa/6d96a0978d19e17b68d634497769987b16c8f4cd0a7a05048bec693caa6b/decorator-5.2.1.tar.gz", hash = "sha256:65f266143752f734b0a7cc83c46f4618af75b8c5911b00ccb61d0ac9b6da0360", size = 56711, upload-time = "2025-02-24T04:41:34.073Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/4e/8c/f3147f5c4b73e7550fe5f9352eaa956ae838d5c51eb58e7a25b9f3e2643b/decorator-5.2.1-py3-none-any.whl", hash = "sha256:d316bb415a2d9e2d2b3abcc4084c6502fc09240e292cd76a76afc106a1c8e04a", size = 9190, upload-time = "2025-02-24T04:41:32.565Z" }, +] + [[package]] name = "defusedxml" version = "0.8.0rc2" @@ -1440,6 +1479,15 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/36/f4/c6e662dade71f56cd2f3735141b265c3c79293c109549c1e6933b0651ffc/exceptiongroup-1.3.0-py3-none-any.whl", hash = "sha256:4d111e6e0c13d0644cad6ddaa7ed0261a0b36971f6d23e7ec9b4b9097da78a10", size = 16674, upload-time = "2025-05-10T17:42:49.33Z" }, ] +[[package]] +name = "executing" +version = "2.2.1" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/cc/28/c14e053b6762b1044f34a13aab6859bbf40456d37d23aa286ac24cfd9a5d/executing-2.2.1.tar.gz", hash = "sha256:3632cc370565f6648cc328b32435bd120a1e4ebb20c77e3fdde9a13cd1e533c4", size = 1129488, upload-time = "2025-09-01T09:48:10.866Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/c1/ea/53f2148663b321f21b5a606bd5f191517cf40b7072c0497d3c92c4a13b1e/executing-2.2.1-py2.py3-none-any.whl", hash = "sha256:760643d3452b4d777d295bb167ccc74c64a81df23fb5e08eff250c425a4b2017", size = 28317, upload-time = "2025-09-01T09:48:08.5Z" }, +] + [[package]] name = "fastapi" version = "0.1.17" @@ -2212,6 +2260,82 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/cb/b1/3846dd7f199d53cb17f49cba7e651e9ce294d8497c8c150530ed11865bb8/iniconfig-2.3.0-py3-none-any.whl", hash = "sha256:f631c04d2c48c52b84d0d0549c99ff3859c98df65b3101406327ecc7d53fbf12", size = 7484, upload-time = "2025-10-18T21:55:41.639Z" }, ] +[[package]] +name = "ipython" +version = "8.37.0" +source = { registry = "https://pypi.org/simple" } +resolution-markers = [ + "python_full_version < '3.11' and sys_platform == 'darwin'", + "python_full_version < '3.11' and platform_machine == 'aarch64' and sys_platform == 'linux'", + "python_full_version < '3.11' and platform_machine != 'aarch64' and sys_platform == 'linux'", + "python_full_version < '3.11' and 
sys_platform != 'darwin' and sys_platform != 'linux'", +] +dependencies = [ + { name = "colorama", marker = "python_full_version < '3.11' and sys_platform == 'win32'" }, + { name = "decorator", marker = "python_full_version < '3.11'" }, + { name = "exceptiongroup", marker = "python_full_version < '3.11'" }, + { name = "jedi", marker = "python_full_version < '3.11'" }, + { name = "matplotlib-inline", marker = "python_full_version < '3.11'" }, + { name = "pexpect", marker = "python_full_version < '3.11' and sys_platform != 'emscripten' and sys_platform != 'win32'" }, + { name = "prompt-toolkit", marker = "python_full_version < '3.11'" }, + { name = "pygments", marker = "python_full_version < '3.11'" }, + { name = "stack-data", marker = "python_full_version < '3.11'" }, + { name = "traitlets", marker = "python_full_version < '3.11'" }, + { name = "typing-extensions", marker = "python_full_version < '3.11'" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/85/31/10ac88f3357fc276dc8a64e8880c82e80e7459326ae1d0a211b40abf6665/ipython-8.37.0.tar.gz", hash = "sha256:ca815841e1a41a1e6b73a0b08f3038af9b2252564d01fc405356d34033012216", size = 5606088, upload-time = "2025-05-31T16:39:09.613Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/91/d0/274fbf7b0b12643cbbc001ce13e6a5b1607ac4929d1b11c72460152c9fc3/ipython-8.37.0-py3-none-any.whl", hash = "sha256:ed87326596b878932dbcb171e3e698845434d8c61b8d8cd474bf663041a9dcf2", size = 831864, upload-time = "2025-05-31T16:39:06.38Z" }, +] + +[[package]] +name = "ipython" +version = "9.7.0" +source = { registry = "https://pypi.org/simple" } +resolution-markers = [ + "python_full_version >= '3.13' and sys_platform == 'darwin'", + "python_full_version == '3.12.*' and sys_platform == 'darwin'", + "python_full_version >= '3.13' and platform_machine == 'aarch64' and sys_platform == 'linux'", + "python_full_version == '3.12.*' and platform_machine == 'aarch64' and sys_platform == 'linux'", + "python_full_version >= '3.13' and platform_machine != 'aarch64' and sys_platform == 'linux'", + "python_full_version == '3.12.*' and platform_machine != 'aarch64' and sys_platform == 'linux'", + "python_full_version >= '3.13' and sys_platform != 'darwin' and sys_platform != 'linux'", + "python_full_version == '3.12.*' and sys_platform != 'darwin' and sys_platform != 'linux'", + "python_full_version == '3.11.*' and sys_platform == 'darwin'", + "python_full_version == '3.11.*' and platform_machine == 'aarch64' and sys_platform == 'linux'", + "python_full_version == '3.11.*' and platform_machine != 'aarch64' and sys_platform == 'linux'", + "python_full_version == '3.11.*' and sys_platform != 'darwin' and sys_platform != 'linux'", +] +dependencies = [ + { name = "colorama", marker = "python_full_version >= '3.11' and sys_platform == 'win32'" }, + { name = "decorator", marker = "python_full_version >= '3.11'" }, + { name = "ipython-pygments-lexers", marker = "python_full_version >= '3.11'" }, + { name = "jedi", marker = "python_full_version >= '3.11'" }, + { name = "matplotlib-inline", marker = "python_full_version >= '3.11'" }, + { name = "pexpect", marker = "python_full_version >= '3.11' and sys_platform != 'emscripten' and sys_platform != 'win32'" }, + { name = "prompt-toolkit", marker = "python_full_version >= '3.11'" }, + { name = "pygments", marker = "python_full_version >= '3.11'" }, + { name = "stack-data", marker = "python_full_version >= '3.11'" }, + { name = "traitlets", marker = "python_full_version >= '3.11'" }, + { name = "typing-extensions", 
marker = "python_full_version == '3.11.*'" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/29/e6/48c74d54039241a456add616464ea28c6ebf782e4110d419411b83dae06f/ipython-9.7.0.tar.gz", hash = "sha256:5f6de88c905a566c6a9d6c400a8fed54a638e1f7543d17aae2551133216b1e4e", size = 4422115, upload-time = "2025-11-05T12:18:54.646Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/05/aa/62893d6a591d337aa59dcc4c6f6c842f1fe20cd72c8c5c1f980255243252/ipython-9.7.0-py3-none-any.whl", hash = "sha256:bce8ac85eb9521adc94e1845b4c03d88365fd6ac2f4908ec4ed1eb1b0a065f9f", size = 618911, upload-time = "2025-11-05T12:18:52.484Z" }, +] + +[[package]] +name = "ipython-pygments-lexers" +version = "1.1.1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "pygments", marker = "python_full_version >= '3.11'" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/ef/4c/5dd1d8af08107f88c7f741ead7a40854b8ac24ddf9ae850afbcf698aa552/ipython_pygments_lexers-1.1.1.tar.gz", hash = "sha256:09c0138009e56b6854f9535736f4171d855c8c08a563a0dcd8022f78355c7e81", size = 8393, upload-time = "2025-01-17T11:24:34.505Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/d9/33/1f075bf72b0b747cb3288d011319aaf64083cf2efef8354174e3ed4540e2/ipython_pygments_lexers-1.1.1-py3-none-any.whl", hash = "sha256:a9462224a505ade19a605f71f8fa63c2048833ce50abc86768a0d81d876dc81c", size = 8074, upload-time = "2025-01-17T11:24:33.271Z" }, +] + [[package]] name = "isort" version = "6.1.0" @@ -2230,6 +2354,18 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/04/96/92447566d16df59b2a776c0fb82dbc4d9e07cd95062562af01e408583fc4/itsdangerous-2.2.0-py3-none-any.whl", hash = "sha256:c6242fc49e35958c8b15141343aa660db5fc54d4f13a1db01a3f5891b98700ef", size = 16234, upload-time = "2024-04-16T21:28:14.499Z" }, ] +[[package]] +name = "jedi" +version = "0.19.2" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "parso" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/72/3a/79a912fbd4d8dd6fbb02bf69afd3bb72cf0c729bb3063c6f4498603db17a/jedi-0.19.2.tar.gz", hash = "sha256:4770dc3de41bde3966b02eb84fbcf557fb33cce26ad23da12c742fb50ecb11f0", size = 1231287, upload-time = "2024-11-11T01:41:42.873Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/c0/5a/9cac0c82afec3d09ccd97c8b6502d48f165f9124db81b4bcb90b4af974ee/jedi-0.19.2-py2.py3-none-any.whl", hash = "sha256:a8ef22bde8490f57fe5c7681a3c83cb58874daf72b4784de3cce5b6ef6edb5b9", size = 1572278, upload-time = "2024-11-11T01:41:40.175Z" }, +] + [[package]] name = "jinja2" version = "3.1.6" @@ -2624,6 +2760,18 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/9a/cc/3fe688ff1355010937713164caacf9ed443675ac48a997bab6ed23b3f7c0/matplotlib-3.10.7-pp311-pypy311_pp73-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:3886e47f64611046bc1db523a09dd0a0a6bed6081e6f90e13806dd1d1d1b5e91", size = 8693919, upload-time = "2025-10-09T00:27:58.41Z" }, ] +[[package]] +name = "matplotlib-inline" +version = "0.2.1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "traitlets" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/c7/74/97e72a36efd4ae2bccb3463284300f8953f199b5ffbc04cbbb0ec78f74b1/matplotlib_inline-0.2.1.tar.gz", hash = "sha256:e1ee949c340d771fc39e241ea75683deb94762c8fa5f2927ec57c83c4dffa9fe", size = 8110, upload-time = "2025-10-23T09:00:22.126Z" } +wheels = [ + { url = 
"https://files.pythonhosted.org/packages/af/33/ee4519fa02ed11a94aef9559552f3b17bb863f2ecfe1a35dc7f548cde231/matplotlib_inline-0.2.1-py3-none-any.whl", hash = "sha256:d56ce5156ba6085e00a9d54fead6ed29a9c47e215cd1bba2e976ef39f5710a76", size = 9516, upload-time = "2025-10-23T09:00:20.675Z" }, +] + [[package]] name = "mccabe" version = "0.7.0" @@ -2654,6 +2802,22 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/b3/38/89ba8ad64ae25be8de66a6d463314cf1eb366222074cfda9ee839c56a4b4/mdurl-0.1.2-py3-none-any.whl", hash = "sha256:84008a41e51615a49fc9966191ff91509e3c40b939176e643fd50a5c2196b8f8", size = 9979, upload-time = "2022-08-14T12:40:09.779Z" }, ] +[[package]] +name = "mediapy" +version = "1.2.4" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "ipython", version = "8.37.0", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version < '3.11'" }, + { name = "ipython", version = "9.7.0", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version >= '3.11'" }, + { name = "matplotlib" }, + { name = "numpy" }, + { name = "pillow" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/04/a3/e92406a5552130085bc741861de2d02e1cc16f0615373e0a4cd0016de16d/mediapy-1.2.4.tar.gz", hash = "sha256:052947a676ebd3491359a6943b144559fdc414f741b03a05c78fa7b6291e6b12", size = 26659, upload-time = "2025-05-01T10:18:27.828Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/07/cb/9419149225b44d2cec12208430044c7310dd778d5348000ebb274bc3e2ce/mediapy-1.2.4-py3-none-any.whl", hash = "sha256:dd2b2364543077a1d292cab9eca5d08c8550521523aa1cbb721fd7617ccde872", size = 26118, upload-time = "2025-05-01T10:18:26.445Z" }, +] + [[package]] name = "megatron-bridge" source = { directory = "3rdparty/Megatron-Bridge" } @@ -3086,6 +3250,24 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/5e/ec/ba3f513152cf5404e36263604d484728d47e61678c39228c36eb769199af/mlflow_tracing-3.6.0-py3-none-any.whl", hash = "sha256:a68ff03ba5129c67dc98e6871e0d5ef512dd3ee66d01e1c1a0c946c08a6d4755", size = 1281617, upload-time = "2025-11-07T18:36:23.299Z" }, ] +[[package]] +name = "moviepy" +version = "2.2.1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "decorator" }, + { name = "imageio" }, + { name = "imageio-ffmpeg" }, + { name = "numpy" }, + { name = "pillow" }, + { name = "proglog" }, + { name = "python-dotenv" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/de/61/15f9476e270f64c78a834e7459ca045d669f869cec24eed26807b8cd479d/moviepy-2.2.1.tar.gz", hash = "sha256:c80cb56815ece94e5e3e2d361aa40070eeb30a09d23a24c4e684d03e16deacb1", size = 58431438, upload-time = "2025-05-21T19:31:52.601Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/9a/73/7d3b2010baa0b5eb1e4dfa9e4385e89b6716be76f2fa21a6c0fe34b68e5a/moviepy-2.2.1-py3-none-any.whl", hash = "sha256:6b56803fec2ac54b557404126ac1160e65448e03798fa282bd23e8fab3795060", size = 129871, upload-time = "2025-05-21T19:31:50.11Z" }, +] + [[package]] name = "mpmath" version = "1.3.0" @@ -3366,6 +3548,15 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/5f/df/76d0321c3797b54b60fef9ec3bd6f4cfd124b9e422182156a1dd418722cf/myst_parser-4.0.1-py3-none-any.whl", hash = "sha256:9134e88959ec3b5780aedf8a99680ea242869d012e8821db3126d427edc9c95d", size = 84579, upload-time = "2025-02-12T10:53:02.078Z" }, ] +[[package]] +name = "narwhals" +version = "2.12.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = 
"https://files.pythonhosted.org/packages/93/f8/e1c28f24b641871c14ccae7ba6381f3c7827789a06e947ce975ae8a9075a/narwhals-2.12.0.tar.gz", hash = "sha256:075b6d56f3a222613793e025744b129439ecdff9292ea6615dd983af7ba6ea44", size = 590404, upload-time = "2025-11-17T10:53:28.381Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/0b/9a/c6f79de7ba3a0a8473129936b7b90aa461d3d46fec6f1627672b1dccf4e9/narwhals-2.12.0-py3-none-any.whl", hash = "sha256:baeba5d448a30b04c299a696bd9ee5ff73e4742143e06c49ca316b46539a7cbb", size = 425014, upload-time = "2025-11-17T10:53:26.65Z" }, +] + [[package]] name = "nemo-automodel" source = { directory = "3rdparty/Automodel" } @@ -3492,7 +3683,9 @@ docs = [ { name = "sphinx-copybutton" }, ] megatron-bridge = [ + { name = "mediapy" }, { name = "megatron-bridge" }, + { name = "wandb", extra = ["media"] }, ] test = [ { name = "coverage" }, @@ -3546,7 +3739,11 @@ docs = [ { name = "sphinx-autodoc2", specifier = ">=0.5.0" }, { name = "sphinx-copybutton", specifier = ">=0.5.2" }, ] -megatron-bridge = [{ name = "megatron-bridge", directory = "3rdparty/Megatron-Bridge" }] +megatron-bridge = [ + { name = "mediapy", specifier = ">=1.2.4" }, + { name = "megatron-bridge", directory = "3rdparty/Megatron-Bridge" }, + { name = "wandb", extras = ["media"], specifier = ">=0.23.0" }, +] test = [ { name = "coverage", specifier = ">=7.8.1" }, { name = "flake8", specifier = ">=7.2.0" }, @@ -4165,6 +4362,15 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/70/44/5191d2e4026f86a2a109053e194d3ba7a31a2d10a9c2348368c63ed4e85a/pandas-2.3.3-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:3869faf4bd07b3b66a9f462417d0ca3a9df29a9f6abd5d0d0dbab15dac7abe87", size = 13202175, upload-time = "2025-09-29T23:31:59.173Z" }, ] +[[package]] +name = "parso" +version = "0.8.5" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/d4/de/53e0bcf53d13e005bd8c92e7855142494f41171b34c2536b86187474184d/parso-0.8.5.tar.gz", hash = "sha256:034d7354a9a018bdce352f48b2a8a450f05e9d6ee85db84764e9b6bd96dafe5a", size = 401205, upload-time = "2025-08-23T15:15:28.028Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/16/32/f8e3c85d1d5250232a5d3477a2a28cc291968ff175caeadaf3cc19ce0e4a/parso-0.8.5-py2.py3-none-any.whl", hash = "sha256:646204b5ee239c396d040b90f9e272e9a8017c630092bf59980beb62fd033887", size = 106668, upload-time = "2025-08-23T15:15:25.663Z" }, +] + [[package]] name = "pathspec" version = "0.12.1" @@ -4174,102 +4380,118 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/cc/20/ff623b09d963f88bfde16306a54e12ee5ea43e9b597108672ff3a408aad6/pathspec-0.12.1-py3-none-any.whl", hash = "sha256:a0d503e138a4c123b27490a4f7beda6a01c6f288df0e4a8b79c7eb0dc7b4cc08", size = 31191, upload-time = "2023-12-10T22:30:43.14Z" }, ] +[[package]] +name = "pexpect" +version = "4.9.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "ptyprocess" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/42/92/cc564bf6381ff43ce1f4d06852fc19a2f11d180f23dc32d9588bee2f149d/pexpect-4.9.0.tar.gz", hash = "sha256:ee7d41123f3c9911050ea2c2dac107568dc43b2d3b0c7557a33212c398ead30f", size = 166450, upload-time = "2023-11-25T09:07:26.339Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/9e/c3/059298687310d527a58bb01f3b1965787ee3b40dce76752eda8b44e9a2c5/pexpect-4.9.0-py2.py3-none-any.whl", hash = "sha256:7236d1e080e4936be2dc3e326cec0af72acf9212a7e1d060210e70a47e253523", size = 63772, upload-time = 
"2023-11-25T06:56:14.81Z" }, +] + [[package]] name = "pillow" -version = "12.0.0" -source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/5a/b0/cace85a1b0c9775a9f8f5d5423c8261c858760e2466c79b2dd184638b056/pillow-12.0.0.tar.gz", hash = "sha256:87d4f8125c9988bfbed67af47dd7a953e2fc7b0cc1e7800ec6d2080d490bb353", size = 47008828, upload-time = "2025-10-15T18:24:14.008Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/5d/08/26e68b6b5da219c2a2cb7b563af008b53bb8e6b6fcb3fa40715fcdb2523a/pillow-12.0.0-cp310-cp310-macosx_10_10_x86_64.whl", hash = "sha256:3adfb466bbc544b926d50fe8f4a4e6abd8c6bffd28a26177594e6e9b2b76572b", size = 5289809, upload-time = "2025-10-15T18:21:27.791Z" }, - { url = "https://files.pythonhosted.org/packages/cb/e9/4e58fb097fb74c7b4758a680aacd558810a417d1edaa7000142976ef9d2f/pillow-12.0.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:1ac11e8ea4f611c3c0147424eae514028b5e9077dd99ab91e1bd7bc33ff145e1", size = 4650606, upload-time = "2025-10-15T18:21:29.823Z" }, - { url = "https://files.pythonhosted.org/packages/4b/e0/1fa492aa9f77b3bc6d471c468e62bfea1823056bf7e5e4f1914d7ab2565e/pillow-12.0.0-cp310-cp310-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:d49e2314c373f4c2b39446fb1a45ed333c850e09d0c59ac79b72eb3b95397363", size = 6221023, upload-time = "2025-10-15T18:21:31.415Z" }, - { url = "https://files.pythonhosted.org/packages/c1/09/4de7cd03e33734ccd0c876f0251401f1314e819cbfd89a0fcb6e77927cc6/pillow-12.0.0-cp310-cp310-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:c7b2a63fd6d5246349f3d3f37b14430d73ee7e8173154461785e43036ffa96ca", size = 8024937, upload-time = "2025-10-15T18:21:33.453Z" }, - { url = "https://files.pythonhosted.org/packages/2e/69/0688e7c1390666592876d9d474f5e135abb4acb39dcb583c4dc5490f1aff/pillow-12.0.0-cp310-cp310-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:d64317d2587c70324b79861babb9c09f71fbb780bad212018874b2c013d8600e", size = 6334139, upload-time = "2025-10-15T18:21:35.395Z" }, - { url = "https://files.pythonhosted.org/packages/ed/1c/880921e98f525b9b44ce747ad1ea8f73fd7e992bafe3ca5e5644bf433dea/pillow-12.0.0-cp310-cp310-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:d77153e14b709fd8b8af6f66a3afbb9ed6e9fc5ccf0b6b7e1ced7b036a228782", size = 7026074, upload-time = "2025-10-15T18:21:37.219Z" }, - { url = "https://files.pythonhosted.org/packages/28/03/96f718331b19b355610ef4ebdbbde3557c726513030665071fd025745671/pillow-12.0.0-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:32ed80ea8a90ee3e6fa08c21e2e091bba6eda8eccc83dbc34c95169507a91f10", size = 6448852, upload-time = "2025-10-15T18:21:39.168Z" }, - { url = "https://files.pythonhosted.org/packages/3a/a0/6a193b3f0cc9437b122978d2c5cbce59510ccf9a5b48825096ed7472da2f/pillow-12.0.0-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:c828a1ae702fc712978bda0320ba1b9893d99be0badf2647f693cc01cf0f04fa", size = 7117058, upload-time = "2025-10-15T18:21:40.997Z" }, - { url = "https://files.pythonhosted.org/packages/a7/c4/043192375eaa4463254e8e61f0e2ec9a846b983929a8d0a7122e0a6d6fff/pillow-12.0.0-cp310-cp310-win32.whl", hash = "sha256:bd87e140e45399c818fac4247880b9ce719e4783d767e030a883a970be632275", size = 6295431, upload-time = "2025-10-15T18:21:42.518Z" }, - { url = "https://files.pythonhosted.org/packages/92/c6/c2f2fc7e56301c21827e689bb8b0b465f1b52878b57471a070678c0c33cd/pillow-12.0.0-cp310-cp310-win_amd64.whl", hash = 
"sha256:455247ac8a4cfb7b9bc45b7e432d10421aea9fc2e74d285ba4072688a74c2e9d", size = 7000412, upload-time = "2025-10-15T18:21:44.404Z" }, - { url = "https://files.pythonhosted.org/packages/b2/d2/5f675067ba82da7a1c238a73b32e3fd78d67f9d9f80fbadd33a40b9c0481/pillow-12.0.0-cp310-cp310-win_arm64.whl", hash = "sha256:6ace95230bfb7cd79ef66caa064bbe2f2a1e63d93471c3a2e1f1348d9f22d6b7", size = 2435903, upload-time = "2025-10-15T18:21:46.29Z" }, - { url = "https://files.pythonhosted.org/packages/0e/5a/a2f6773b64edb921a756eb0729068acad9fc5208a53f4a349396e9436721/pillow-12.0.0-cp311-cp311-macosx_10_10_x86_64.whl", hash = "sha256:0fd00cac9c03256c8b2ff58f162ebcd2587ad3e1f2e397eab718c47e24d231cc", size = 5289798, upload-time = "2025-10-15T18:21:47.763Z" }, - { url = "https://files.pythonhosted.org/packages/2e/05/069b1f8a2e4b5a37493da6c5868531c3f77b85e716ad7a590ef87d58730d/pillow-12.0.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:a3475b96f5908b3b16c47533daaa87380c491357d197564e0ba34ae75c0f3257", size = 4650589, upload-time = "2025-10-15T18:21:49.515Z" }, - { url = "https://files.pythonhosted.org/packages/61/e3/2c820d6e9a36432503ead175ae294f96861b07600a7156154a086ba7111a/pillow-12.0.0-cp311-cp311-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:110486b79f2d112cf6add83b28b627e369219388f64ef2f960fef9ebaf54c642", size = 6230472, upload-time = "2025-10-15T18:21:51.052Z" }, - { url = "https://files.pythonhosted.org/packages/4f/89/63427f51c64209c5e23d4d52071c8d0f21024d3a8a487737caaf614a5795/pillow-12.0.0-cp311-cp311-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:5269cc1caeedb67e6f7269a42014f381f45e2e7cd42d834ede3c703a1d915fe3", size = 8033887, upload-time = "2025-10-15T18:21:52.604Z" }, - { url = "https://files.pythonhosted.org/packages/f6/1b/c9711318d4901093c15840f268ad649459cd81984c9ec9887756cca049a5/pillow-12.0.0-cp311-cp311-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:aa5129de4e174daccbc59d0a3b6d20eaf24417d59851c07ebb37aeb02947987c", size = 6343964, upload-time = "2025-10-15T18:21:54.619Z" }, - { url = "https://files.pythonhosted.org/packages/41/1e/db9470f2d030b4995083044cd8738cdd1bf773106819f6d8ba12597d5352/pillow-12.0.0-cp311-cp311-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:bee2a6db3a7242ea309aa7ee8e2780726fed67ff4e5b40169f2c940e7eb09227", size = 7034756, upload-time = "2025-10-15T18:21:56.151Z" }, - { url = "https://files.pythonhosted.org/packages/cc/b0/6177a8bdd5ee4ed87cba2de5a3cc1db55ffbbec6176784ce5bb75aa96798/pillow-12.0.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:90387104ee8400a7b4598253b4c406f8958f59fcf983a6cea2b50d59f7d63d0b", size = 6458075, upload-time = "2025-10-15T18:21:57.759Z" }, - { url = "https://files.pythonhosted.org/packages/bc/5e/61537aa6fa977922c6a03253a0e727e6e4a72381a80d63ad8eec350684f2/pillow-12.0.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:bc91a56697869546d1b8f0a3ff35224557ae7f881050e99f615e0119bf934b4e", size = 7125955, upload-time = "2025-10-15T18:21:59.372Z" }, - { url = "https://files.pythonhosted.org/packages/1f/3d/d5033539344ee3cbd9a4d69e12e63ca3a44a739eb2d4c8da350a3d38edd7/pillow-12.0.0-cp311-cp311-win32.whl", hash = "sha256:27f95b12453d165099c84f8a8bfdfd46b9e4bda9e0e4b65f0635430027f55739", size = 6298440, upload-time = "2025-10-15T18:22:00.982Z" }, - { url = "https://files.pythonhosted.org/packages/4d/42/aaca386de5cc8bd8a0254516957c1f265e3521c91515b16e286c662854c4/pillow-12.0.0-cp311-cp311-win_amd64.whl", hash = 
"sha256:b583dc9070312190192631373c6c8ed277254aa6e6084b74bdd0a6d3b221608e", size = 6999256, upload-time = "2025-10-15T18:22:02.617Z" }, - { url = "https://files.pythonhosted.org/packages/ba/f1/9197c9c2d5708b785f631a6dfbfa8eb3fb9672837cb92ae9af812c13b4ed/pillow-12.0.0-cp311-cp311-win_arm64.whl", hash = "sha256:759de84a33be3b178a64c8ba28ad5c135900359e85fb662bc6e403ad4407791d", size = 2436025, upload-time = "2025-10-15T18:22:04.598Z" }, - { url = "https://files.pythonhosted.org/packages/2c/90/4fcce2c22caf044e660a198d740e7fbc14395619e3cb1abad12192c0826c/pillow-12.0.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:53561a4ddc36facb432fae7a9d8afbfaf94795414f5cdc5fc52f28c1dca90371", size = 5249377, upload-time = "2025-10-15T18:22:05.993Z" }, - { url = "https://files.pythonhosted.org/packages/fd/e0/ed960067543d080691d47d6938ebccbf3976a931c9567ab2fbfab983a5dd/pillow-12.0.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:71db6b4c1653045dacc1585c1b0d184004f0d7e694c7b34ac165ca70c0838082", size = 4650343, upload-time = "2025-10-15T18:22:07.718Z" }, - { url = "https://files.pythonhosted.org/packages/e7/a1/f81fdeddcb99c044bf7d6faa47e12850f13cee0849537a7d27eeab5534d4/pillow-12.0.0-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:2fa5f0b6716fc88f11380b88b31fe591a06c6315e955c096c35715788b339e3f", size = 6232981, upload-time = "2025-10-15T18:22:09.287Z" }, - { url = "https://files.pythonhosted.org/packages/88/e1/9098d3ce341a8750b55b0e00c03f1630d6178f38ac191c81c97a3b047b44/pillow-12.0.0-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:82240051c6ca513c616f7f9da06e871f61bfd7805f566275841af15015b8f98d", size = 8041399, upload-time = "2025-10-15T18:22:10.872Z" }, - { url = "https://files.pythonhosted.org/packages/a7/62/a22e8d3b602ae8cc01446d0c57a54e982737f44b6f2e1e019a925143771d/pillow-12.0.0-cp312-cp312-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:55f818bd74fe2f11d4d7cbc65880a843c4075e0ac7226bc1a23261dbea531953", size = 6347740, upload-time = "2025-10-15T18:22:12.769Z" }, - { url = "https://files.pythonhosted.org/packages/4f/87/424511bdcd02c8d7acf9f65caa09f291a519b16bd83c3fb3374b3d4ae951/pillow-12.0.0-cp312-cp312-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:b87843e225e74576437fd5b6a4c2205d422754f84a06942cfaf1dc32243e45a8", size = 7040201, upload-time = "2025-10-15T18:22:14.813Z" }, - { url = "https://files.pythonhosted.org/packages/dc/4d/435c8ac688c54d11755aedfdd9f29c9eeddf68d150fe42d1d3dbd2365149/pillow-12.0.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:c607c90ba67533e1b2355b821fef6764d1dd2cbe26b8c1005ae84f7aea25ff79", size = 6462334, upload-time = "2025-10-15T18:22:16.375Z" }, - { url = "https://files.pythonhosted.org/packages/2b/f2/ad34167a8059a59b8ad10bc5c72d4d9b35acc6b7c0877af8ac885b5f2044/pillow-12.0.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:21f241bdd5080a15bc86d3466a9f6074a9c2c2b314100dd896ac81ee6db2f1ba", size = 7134162, upload-time = "2025-10-15T18:22:17.996Z" }, - { url = "https://files.pythonhosted.org/packages/0c/b1/a7391df6adacf0a5c2cf6ac1cf1fcc1369e7d439d28f637a847f8803beb3/pillow-12.0.0-cp312-cp312-win32.whl", hash = "sha256:dd333073e0cacdc3089525c7df7d39b211bcdf31fc2824e49d01c6b6187b07d0", size = 6298769, upload-time = "2025-10-15T18:22:19.923Z" }, - { url = "https://files.pythonhosted.org/packages/a2/0b/d87733741526541c909bbf159e338dcace4f982daac6e5a8d6be225ca32d/pillow-12.0.0-cp312-cp312-win_amd64.whl", hash = 
"sha256:9fe611163f6303d1619bbcb653540a4d60f9e55e622d60a3108be0d5b441017a", size = 7001107, upload-time = "2025-10-15T18:22:21.644Z" }, - { url = "https://files.pythonhosted.org/packages/bc/96/aaa61ce33cc98421fb6088af2a03be4157b1e7e0e87087c888e2370a7f45/pillow-12.0.0-cp312-cp312-win_arm64.whl", hash = "sha256:7dfb439562f234f7d57b1ac6bc8fe7f838a4bd49c79230e0f6a1da93e82f1fad", size = 2436012, upload-time = "2025-10-15T18:22:23.621Z" }, - { url = "https://files.pythonhosted.org/packages/62/f2/de993bb2d21b33a98d031ecf6a978e4b61da207bef02f7b43093774c480d/pillow-12.0.0-cp313-cp313-ios_13_0_arm64_iphoneos.whl", hash = "sha256:0869154a2d0546545cde61d1789a6524319fc1897d9ee31218eae7a60ccc5643", size = 4045493, upload-time = "2025-10-15T18:22:25.758Z" }, - { url = "https://files.pythonhosted.org/packages/0e/b6/bc8d0c4c9f6f111a783d045310945deb769b806d7574764234ffd50bc5ea/pillow-12.0.0-cp313-cp313-ios_13_0_arm64_iphonesimulator.whl", hash = "sha256:a7921c5a6d31b3d756ec980f2f47c0cfdbce0fc48c22a39347a895f41f4a6ea4", size = 4120461, upload-time = "2025-10-15T18:22:27.286Z" }, - { url = "https://files.pythonhosted.org/packages/5d/57/d60d343709366a353dc56adb4ee1e7d8a2cc34e3fbc22905f4167cfec119/pillow-12.0.0-cp313-cp313-ios_13_0_x86_64_iphonesimulator.whl", hash = "sha256:1ee80a59f6ce048ae13cda1abf7fbd2a34ab9ee7d401c46be3ca685d1999a399", size = 3576912, upload-time = "2025-10-15T18:22:28.751Z" }, - { url = "https://files.pythonhosted.org/packages/a4/a4/a0a31467e3f83b94d37568294b01d22b43ae3c5d85f2811769b9c66389dd/pillow-12.0.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:c50f36a62a22d350c96e49ad02d0da41dbd17ddc2e29750dbdba4323f85eb4a5", size = 5249132, upload-time = "2025-10-15T18:22:30.641Z" }, - { url = "https://files.pythonhosted.org/packages/83/06/48eab21dd561de2914242711434c0c0eb992ed08ff3f6107a5f44527f5e9/pillow-12.0.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:5193fde9a5f23c331ea26d0cf171fbf67e3f247585f50c08b3e205c7aeb4589b", size = 4650099, upload-time = "2025-10-15T18:22:32.73Z" }, - { url = "https://files.pythonhosted.org/packages/fc/bd/69ed99fd46a8dba7c1887156d3572fe4484e3f031405fcc5a92e31c04035/pillow-12.0.0-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:bde737cff1a975b70652b62d626f7785e0480918dece11e8fef3c0cf057351c3", size = 6230808, upload-time = "2025-10-15T18:22:34.337Z" }, - { url = "https://files.pythonhosted.org/packages/ea/94/8fad659bcdbf86ed70099cb60ae40be6acca434bbc8c4c0d4ef356d7e0de/pillow-12.0.0-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:a6597ff2b61d121172f5844b53f21467f7082f5fb385a9a29c01414463f93b07", size = 8037804, upload-time = "2025-10-15T18:22:36.402Z" }, - { url = "https://files.pythonhosted.org/packages/20/39/c685d05c06deecfd4e2d1950e9a908aa2ca8bc4e6c3b12d93b9cafbd7837/pillow-12.0.0-cp313-cp313-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:0b817e7035ea7f6b942c13aa03bb554fc44fea70838ea21f8eb31c638326584e", size = 6345553, upload-time = "2025-10-15T18:22:38.066Z" }, - { url = "https://files.pythonhosted.org/packages/38/57/755dbd06530a27a5ed74f8cb0a7a44a21722ebf318edbe67ddbd7fb28f88/pillow-12.0.0-cp313-cp313-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:f4f1231b7dec408e8670264ce63e9c71409d9583dd21d32c163e25213ee2a344", size = 7037729, upload-time = "2025-10-15T18:22:39.769Z" }, - { url = "https://files.pythonhosted.org/packages/ca/b6/7e94f4c41d238615674d06ed677c14883103dce1c52e4af16f000338cfd7/pillow-12.0.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = 
"sha256:6e51b71417049ad6ab14c49608b4a24d8fb3fe605e5dfabfe523b58064dc3d27", size = 6459789, upload-time = "2025-10-15T18:22:41.437Z" }, - { url = "https://files.pythonhosted.org/packages/9c/14/4448bb0b5e0f22dd865290536d20ec8a23b64e2d04280b89139f09a36bb6/pillow-12.0.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:d120c38a42c234dc9a8c5de7ceaaf899cf33561956acb4941653f8bdc657aa79", size = 7130917, upload-time = "2025-10-15T18:22:43.152Z" }, - { url = "https://files.pythonhosted.org/packages/dd/ca/16c6926cc1c015845745d5c16c9358e24282f1e588237a4c36d2b30f182f/pillow-12.0.0-cp313-cp313-win32.whl", hash = "sha256:4cc6b3b2efff105c6a1656cfe59da4fdde2cda9af1c5e0b58529b24525d0a098", size = 6302391, upload-time = "2025-10-15T18:22:44.753Z" }, - { url = "https://files.pythonhosted.org/packages/6d/2a/dd43dcfd6dae9b6a49ee28a8eedb98c7d5ff2de94a5d834565164667b97b/pillow-12.0.0-cp313-cp313-win_amd64.whl", hash = "sha256:4cf7fed4b4580601c4345ceb5d4cbf5a980d030fd5ad07c4d2ec589f95f09905", size = 7007477, upload-time = "2025-10-15T18:22:46.838Z" }, - { url = "https://files.pythonhosted.org/packages/77/f0/72ea067f4b5ae5ead653053212af05ce3705807906ba3f3e8f58ddf617e6/pillow-12.0.0-cp313-cp313-win_arm64.whl", hash = "sha256:9f0b04c6b8584c2c193babcccc908b38ed29524b29dd464bc8801bf10d746a3a", size = 2435918, upload-time = "2025-10-15T18:22:48.399Z" }, - { url = "https://files.pythonhosted.org/packages/f5/5e/9046b423735c21f0487ea6cb5b10f89ea8f8dfbe32576fe052b5ba9d4e5b/pillow-12.0.0-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:7fa22993bac7b77b78cae22bad1e2a987ddf0d9015c63358032f84a53f23cdc3", size = 5251406, upload-time = "2025-10-15T18:22:49.905Z" }, - { url = "https://files.pythonhosted.org/packages/12/66/982ceebcdb13c97270ef7a56c3969635b4ee7cd45227fa707c94719229c5/pillow-12.0.0-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:f135c702ac42262573fe9714dfe99c944b4ba307af5eb507abef1667e2cbbced", size = 4653218, upload-time = "2025-10-15T18:22:51.587Z" }, - { url = "https://files.pythonhosted.org/packages/16/b3/81e625524688c31859450119bf12674619429cab3119eec0e30a7a1029cb/pillow-12.0.0-cp313-cp313t-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:c85de1136429c524e55cfa4e033b4a7940ac5c8ee4d9401cc2d1bf48154bbc7b", size = 6266564, upload-time = "2025-10-15T18:22:53.215Z" }, - { url = "https://files.pythonhosted.org/packages/98/59/dfb38f2a41240d2408096e1a76c671d0a105a4a8471b1871c6902719450c/pillow-12.0.0-cp313-cp313t-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:38df9b4bfd3db902c9c2bd369bcacaf9d935b2fff73709429d95cc41554f7b3d", size = 8069260, upload-time = "2025-10-15T18:22:54.933Z" }, - { url = "https://files.pythonhosted.org/packages/dc/3d/378dbea5cd1874b94c312425ca77b0f47776c78e0df2df751b820c8c1d6c/pillow-12.0.0-cp313-cp313t-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:7d87ef5795da03d742bf49439f9ca4d027cde49c82c5371ba52464aee266699a", size = 6379248, upload-time = "2025-10-15T18:22:56.605Z" }, - { url = "https://files.pythonhosted.org/packages/84/b0/d525ef47d71590f1621510327acec75ae58c721dc071b17d8d652ca494d8/pillow-12.0.0-cp313-cp313t-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:aff9e4d82d082ff9513bdd6acd4f5bd359f5b2c870907d2b0a9c5e10d40c88fe", size = 7066043, upload-time = "2025-10-15T18:22:58.53Z" }, - { url = "https://files.pythonhosted.org/packages/61/2c/aced60e9cf9d0cde341d54bf7932c9ffc33ddb4a1595798b3a5150c7ec4e/pillow-12.0.0-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = 
"sha256:8d8ca2b210ada074d57fcee40c30446c9562e542fc46aedc19baf758a93532ee", size = 6490915, upload-time = "2025-10-15T18:23:00.582Z" }, - { url = "https://files.pythonhosted.org/packages/ef/26/69dcb9b91f4e59f8f34b2332a4a0a951b44f547c4ed39d3e4dcfcff48f89/pillow-12.0.0-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:99a7f72fb6249302aa62245680754862a44179b545ded638cf1fef59befb57ef", size = 7157998, upload-time = "2025-10-15T18:23:02.627Z" }, - { url = "https://files.pythonhosted.org/packages/61/2b/726235842220ca95fa441ddf55dd2382b52ab5b8d9c0596fe6b3f23dafe8/pillow-12.0.0-cp313-cp313t-win32.whl", hash = "sha256:4078242472387600b2ce8d93ade8899c12bf33fa89e55ec89fe126e9d6d5d9e9", size = 6306201, upload-time = "2025-10-15T18:23:04.709Z" }, - { url = "https://files.pythonhosted.org/packages/c0/3d/2afaf4e840b2df71344ababf2f8edd75a705ce500e5dc1e7227808312ae1/pillow-12.0.0-cp313-cp313t-win_amd64.whl", hash = "sha256:2c54c1a783d6d60595d3514f0efe9b37c8808746a66920315bfd34a938d7994b", size = 7013165, upload-time = "2025-10-15T18:23:06.46Z" }, - { url = "https://files.pythonhosted.org/packages/6f/75/3fa09aa5cf6ed04bee3fa575798ddf1ce0bace8edb47249c798077a81f7f/pillow-12.0.0-cp313-cp313t-win_arm64.whl", hash = "sha256:26d9f7d2b604cd23aba3e9faf795787456ac25634d82cd060556998e39c6fa47", size = 2437834, upload-time = "2025-10-15T18:23:08.194Z" }, - { url = "https://files.pythonhosted.org/packages/54/2a/9a8c6ba2c2c07b71bec92cf63e03370ca5e5f5c5b119b742bcc0cde3f9c5/pillow-12.0.0-cp314-cp314-ios_13_0_arm64_iphoneos.whl", hash = "sha256:beeae3f27f62308f1ddbcfb0690bf44b10732f2ef43758f169d5e9303165d3f9", size = 4045531, upload-time = "2025-10-15T18:23:10.121Z" }, - { url = "https://files.pythonhosted.org/packages/84/54/836fdbf1bfb3d66a59f0189ff0b9f5f666cee09c6188309300df04ad71fa/pillow-12.0.0-cp314-cp314-ios_13_0_arm64_iphonesimulator.whl", hash = "sha256:d4827615da15cd59784ce39d3388275ec093ae3ee8d7f0c089b76fa87af756c2", size = 4120554, upload-time = "2025-10-15T18:23:12.14Z" }, - { url = "https://files.pythonhosted.org/packages/0d/cd/16aec9f0da4793e98e6b54778a5fbce4f375c6646fe662e80600b8797379/pillow-12.0.0-cp314-cp314-ios_13_0_x86_64_iphonesimulator.whl", hash = "sha256:3e42edad50b6909089750e65c91aa09aaf1e0a71310d383f11321b27c224ed8a", size = 3576812, upload-time = "2025-10-15T18:23:13.962Z" }, - { url = "https://files.pythonhosted.org/packages/f6/b7/13957fda356dc46339298b351cae0d327704986337c3c69bb54628c88155/pillow-12.0.0-cp314-cp314-macosx_10_15_x86_64.whl", hash = "sha256:e5d8efac84c9afcb40914ab49ba063d94f5dbdf5066db4482c66a992f47a3a3b", size = 5252689, upload-time = "2025-10-15T18:23:15.562Z" }, - { url = "https://files.pythonhosted.org/packages/fc/f5/eae31a306341d8f331f43edb2e9122c7661b975433de5e447939ae61c5da/pillow-12.0.0-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:266cd5f2b63ff316d5a1bba46268e603c9caf5606d44f38c2873c380950576ad", size = 4650186, upload-time = "2025-10-15T18:23:17.379Z" }, - { url = "https://files.pythonhosted.org/packages/86/62/2a88339aa40c4c77e79108facbd307d6091e2c0eb5b8d3cf4977cfca2fe6/pillow-12.0.0-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:58eea5ebe51504057dd95c5b77d21700b77615ab0243d8152793dc00eb4faf01", size = 6230308, upload-time = "2025-10-15T18:23:18.971Z" }, - { url = "https://files.pythonhosted.org/packages/c7/33/5425a8992bcb32d1cb9fa3dd39a89e613d09a22f2c8083b7bf43c455f760/pillow-12.0.0-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:f13711b1a5ba512d647a0e4ba79280d3a9a045aaf7e0cc6fbe96b91d4cdf6b0c", size 
= 8039222, upload-time = "2025-10-15T18:23:20.909Z" }, - { url = "https://files.pythonhosted.org/packages/d8/61/3f5d3b35c5728f37953d3eec5b5f3e77111949523bd2dd7f31a851e50690/pillow-12.0.0-cp314-cp314-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:6846bd2d116ff42cba6b646edf5bf61d37e5cbd256425fa089fee4ff5c07a99e", size = 6346657, upload-time = "2025-10-15T18:23:23.077Z" }, - { url = "https://files.pythonhosted.org/packages/3a/be/ee90a3d79271227e0f0a33c453531efd6ed14b2e708596ba5dd9be948da3/pillow-12.0.0-cp314-cp314-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:c98fa880d695de164b4135a52fd2e9cd7b7c90a9d8ac5e9e443a24a95ef9248e", size = 7038482, upload-time = "2025-10-15T18:23:25.005Z" }, - { url = "https://files.pythonhosted.org/packages/44/34/a16b6a4d1ad727de390e9bd9f19f5f669e079e5826ec0f329010ddea492f/pillow-12.0.0-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:fa3ed2a29a9e9d2d488b4da81dcb54720ac3104a20bf0bd273f1e4648aff5af9", size = 6461416, upload-time = "2025-10-15T18:23:27.009Z" }, - { url = "https://files.pythonhosted.org/packages/b6/39/1aa5850d2ade7d7ba9f54e4e4c17077244ff7a2d9e25998c38a29749eb3f/pillow-12.0.0-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:d034140032870024e6b9892c692fe2968493790dd57208b2c37e3fb35f6df3ab", size = 7131584, upload-time = "2025-10-15T18:23:29.752Z" }, - { url = "https://files.pythonhosted.org/packages/bf/db/4fae862f8fad0167073a7733973bfa955f47e2cac3dc3e3e6257d10fab4a/pillow-12.0.0-cp314-cp314-win32.whl", hash = "sha256:1b1b133e6e16105f524a8dec491e0586d072948ce15c9b914e41cdadd209052b", size = 6400621, upload-time = "2025-10-15T18:23:32.06Z" }, - { url = "https://files.pythonhosted.org/packages/2b/24/b350c31543fb0107ab2599464d7e28e6f856027aadda995022e695313d94/pillow-12.0.0-cp314-cp314-win_amd64.whl", hash = "sha256:8dc232e39d409036af549c86f24aed8273a40ffa459981146829a324e0848b4b", size = 7142916, upload-time = "2025-10-15T18:23:34.71Z" }, - { url = "https://files.pythonhosted.org/packages/0f/9b/0ba5a6fd9351793996ef7487c4fdbde8d3f5f75dbedc093bb598648fddf0/pillow-12.0.0-cp314-cp314-win_arm64.whl", hash = "sha256:d52610d51e265a51518692045e372a4c363056130d922a7351429ac9f27e70b0", size = 2523836, upload-time = "2025-10-15T18:23:36.967Z" }, - { url = "https://files.pythonhosted.org/packages/f5/7a/ceee0840aebc579af529b523d530840338ecf63992395842e54edc805987/pillow-12.0.0-cp314-cp314t-macosx_10_15_x86_64.whl", hash = "sha256:1979f4566bb96c1e50a62d9831e2ea2d1211761e5662afc545fa766f996632f6", size = 5255092, upload-time = "2025-10-15T18:23:38.573Z" }, - { url = "https://files.pythonhosted.org/packages/44/76/20776057b4bfd1aef4eeca992ebde0f53a4dce874f3ae693d0ec90a4f79b/pillow-12.0.0-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:b2e4b27a6e15b04832fe9bf292b94b5ca156016bbc1ea9c2c20098a0320d6cf6", size = 4653158, upload-time = "2025-10-15T18:23:40.238Z" }, - { url = "https://files.pythonhosted.org/packages/82/3f/d9ff92ace07be8836b4e7e87e6a4c7a8318d47c2f1463ffcf121fc57d9cb/pillow-12.0.0-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:fb3096c30df99fd01c7bf8e544f392103d0795b9f98ba71a8054bcbf56b255f1", size = 6267882, upload-time = "2025-10-15T18:23:42.434Z" }, - { url = "https://files.pythonhosted.org/packages/9f/7a/4f7ff87f00d3ad33ba21af78bfcd2f032107710baf8280e3722ceec28cda/pillow-12.0.0-cp314-cp314t-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:7438839e9e053ef79f7112c881cef684013855016f928b168b81ed5835f3e75e", size = 8071001, upload-time = "2025-10-15T18:23:44.29Z" }, - 
{ url = "https://files.pythonhosted.org/packages/75/87/fcea108944a52dad8cca0715ae6247e271eb80459364a98518f1e4f480c1/pillow-12.0.0-cp314-cp314t-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:5d5c411a8eaa2299322b647cd932586b1427367fd3184ffbb8f7a219ea2041ca", size = 6380146, upload-time = "2025-10-15T18:23:46.065Z" }, - { url = "https://files.pythonhosted.org/packages/91/52/0d31b5e571ef5fd111d2978b84603fce26aba1b6092f28e941cb46570745/pillow-12.0.0-cp314-cp314t-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:d7e091d464ac59d2c7ad8e7e08105eaf9dafbc3883fd7265ffccc2baad6ac925", size = 7067344, upload-time = "2025-10-15T18:23:47.898Z" }, - { url = "https://files.pythonhosted.org/packages/7b/f4/2dd3d721f875f928d48e83bb30a434dee75a2531bca839bb996bb0aa5a91/pillow-12.0.0-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:792a2c0be4dcc18af9d4a2dfd8a11a17d5e25274a1062b0ec1c2d79c76f3e7f8", size = 6491864, upload-time = "2025-10-15T18:23:49.607Z" }, - { url = "https://files.pythonhosted.org/packages/30/4b/667dfcf3d61fc309ba5a15b141845cece5915e39b99c1ceab0f34bf1d124/pillow-12.0.0-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:afbefa430092f71a9593a99ab6a4e7538bc9eabbf7bf94f91510d3503943edc4", size = 7158911, upload-time = "2025-10-15T18:23:51.351Z" }, - { url = "https://files.pythonhosted.org/packages/a2/2f/16cabcc6426c32218ace36bf0d55955e813f2958afddbf1d391849fee9d1/pillow-12.0.0-cp314-cp314t-win32.whl", hash = "sha256:3830c769decf88f1289680a59d4f4c46c72573446352e2befec9a8512104fa52", size = 6408045, upload-time = "2025-10-15T18:23:53.177Z" }, - { url = "https://files.pythonhosted.org/packages/35/73/e29aa0c9c666cf787628d3f0dcf379f4791fba79f4936d02f8b37165bdf8/pillow-12.0.0-cp314-cp314t-win_amd64.whl", hash = "sha256:905b0365b210c73afb0ebe9101a32572152dfd1c144c7e28968a331b9217b94a", size = 7148282, upload-time = "2025-10-15T18:23:55.316Z" }, - { url = "https://files.pythonhosted.org/packages/c1/70/6b41bdcddf541b437bbb9f47f94d2db5d9ddef6c37ccab8c9107743748a4/pillow-12.0.0-cp314-cp314t-win_arm64.whl", hash = "sha256:99353a06902c2e43b43e8ff74ee65a7d90307d82370604746738a1e0661ccca7", size = 2525630, upload-time = "2025-10-15T18:23:57.149Z" }, - { url = "https://files.pythonhosted.org/packages/1d/b3/582327e6c9f86d037b63beebe981425d6811104cb443e8193824ef1a2f27/pillow-12.0.0-pp311-pypy311_pp73-macosx_10_15_x86_64.whl", hash = "sha256:b22bd8c974942477156be55a768f7aa37c46904c175be4e158b6a86e3a6b7ca8", size = 5215068, upload-time = "2025-10-15T18:23:59.594Z" }, - { url = "https://files.pythonhosted.org/packages/fd/d6/67748211d119f3b6540baf90f92fae73ae51d5217b171b0e8b5f7e5d558f/pillow-12.0.0-pp311-pypy311_pp73-macosx_11_0_arm64.whl", hash = "sha256:805ebf596939e48dbb2e4922a1d3852cfc25c38160751ce02da93058b48d252a", size = 4614994, upload-time = "2025-10-15T18:24:01.669Z" }, - { url = "https://files.pythonhosted.org/packages/2d/e1/f8281e5d844c41872b273b9f2c34a4bf64ca08905668c8ae730eedc7c9fa/pillow-12.0.0-pp311-pypy311_pp73-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:cae81479f77420d217def5f54b5b9d279804d17e982e0f2fa19b1d1e14ab5197", size = 5246639, upload-time = "2025-10-15T18:24:03.403Z" }, - { url = "https://files.pythonhosted.org/packages/94/5a/0d8ab8ffe8a102ff5df60d0de5af309015163bf710c7bb3e8311dd3b3ad0/pillow-12.0.0-pp311-pypy311_pp73-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:aeaefa96c768fc66818730b952a862235d68825c178f1b3ffd4efd7ad2edcb7c", size = 6986839, upload-time = "2025-10-15T18:24:05.344Z" }, - { url = 
"https://files.pythonhosted.org/packages/20/2e/3434380e8110b76cd9eb00a363c484b050f949b4bbe84ba770bb8508a02c/pillow-12.0.0-pp311-pypy311_pp73-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:09f2d0abef9e4e2f349305a4f8cc784a8a6c2f58a8c4892eea13b10a943bd26e", size = 5313505, upload-time = "2025-10-15T18:24:07.137Z" }, - { url = "https://files.pythonhosted.org/packages/57/ca/5a9d38900d9d74785141d6580950fe705de68af735ff6e727cb911b64740/pillow-12.0.0-pp311-pypy311_pp73-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:bdee52571a343d721fb2eb3b090a82d959ff37fc631e3f70422e0c2e029f3e76", size = 5963654, upload-time = "2025-10-15T18:24:09.579Z" }, - { url = "https://files.pythonhosted.org/packages/95/7e/f896623c3c635a90537ac093c6a618ebe1a90d87206e42309cb5d98a1b9e/pillow-12.0.0-pp311-pypy311_pp73-win_amd64.whl", hash = "sha256:b290fd8aa38422444d4b50d579de197557f182ef1068b75f5aa8558638b8d0a5", size = 6997850, upload-time = "2025-10-15T18:24:11.495Z" }, +version = "11.3.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/f3/0d/d0d6dea55cd152ce3d6767bb38a8fc10e33796ba4ba210cbab9354b6d238/pillow-11.3.0.tar.gz", hash = "sha256:3828ee7586cd0b2091b6209e5ad53e20d0649bbe87164a459d0676e035e8f523", size = 47113069, upload-time = "2025-07-01T09:16:30.666Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/4c/5d/45a3553a253ac8763f3561371432a90bdbe6000fbdcf1397ffe502aa206c/pillow-11.3.0-cp310-cp310-macosx_10_10_x86_64.whl", hash = "sha256:1b9c17fd4ace828b3003dfd1e30bff24863e0eb59b535e8f80194d9cc7ecf860", size = 5316554, upload-time = "2025-07-01T09:13:39.342Z" }, + { url = "https://files.pythonhosted.org/packages/7c/c8/67c12ab069ef586a25a4a79ced553586748fad100c77c0ce59bb4983ac98/pillow-11.3.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:65dc69160114cdd0ca0f35cb434633c75e8e7fad4cf855177a05bf38678f73ad", size = 4686548, upload-time = "2025-07-01T09:13:41.835Z" }, + { url = "https://files.pythonhosted.org/packages/2f/bd/6741ebd56263390b382ae4c5de02979af7f8bd9807346d068700dd6d5cf9/pillow-11.3.0-cp310-cp310-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:7107195ddc914f656c7fc8e4a5e1c25f32e9236ea3ea860f257b0436011fddd0", size = 5859742, upload-time = "2025-07-03T13:09:47.439Z" }, + { url = "https://files.pythonhosted.org/packages/ca/0b/c412a9e27e1e6a829e6ab6c2dca52dd563efbedf4c9c6aa453d9a9b77359/pillow-11.3.0-cp310-cp310-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:cc3e831b563b3114baac7ec2ee86819eb03caa1a2cef0b481a5675b59c4fe23b", size = 7633087, upload-time = "2025-07-03T13:09:51.796Z" }, + { url = "https://files.pythonhosted.org/packages/59/9d/9b7076aaf30f5dd17e5e5589b2d2f5a5d7e30ff67a171eb686e4eecc2adf/pillow-11.3.0-cp310-cp310-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:f1f182ebd2303acf8c380a54f615ec883322593320a9b00438eb842c1f37ae50", size = 5963350, upload-time = "2025-07-01T09:13:43.865Z" }, + { url = "https://files.pythonhosted.org/packages/f0/16/1a6bf01fb622fb9cf5c91683823f073f053005c849b1f52ed613afcf8dae/pillow-11.3.0-cp310-cp310-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:4445fa62e15936a028672fd48c4c11a66d641d2c05726c7ec1f8ba6a572036ae", size = 6631840, upload-time = "2025-07-01T09:13:46.161Z" }, + { url = "https://files.pythonhosted.org/packages/7b/e6/6ff7077077eb47fde78739e7d570bdcd7c10495666b6afcd23ab56b19a43/pillow-11.3.0-cp310-cp310-musllinux_1_2_aarch64.whl", hash = 
"sha256:71f511f6b3b91dd543282477be45a033e4845a40278fa8dcdbfdb07109bf18f9", size = 6074005, upload-time = "2025-07-01T09:13:47.829Z" }, + { url = "https://files.pythonhosted.org/packages/c3/3a/b13f36832ea6d279a697231658199e0a03cd87ef12048016bdcc84131601/pillow-11.3.0-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:040a5b691b0713e1f6cbe222e0f4f74cd233421e105850ae3b3c0ceda520f42e", size = 6708372, upload-time = "2025-07-01T09:13:52.145Z" }, + { url = "https://files.pythonhosted.org/packages/6c/e4/61b2e1a7528740efbc70b3d581f33937e38e98ef3d50b05007267a55bcb2/pillow-11.3.0-cp310-cp310-win32.whl", hash = "sha256:89bd777bc6624fe4115e9fac3352c79ed60f3bb18651420635f26e643e3dd1f6", size = 6277090, upload-time = "2025-07-01T09:13:53.915Z" }, + { url = "https://files.pythonhosted.org/packages/a9/d3/60c781c83a785d6afbd6a326ed4d759d141de43aa7365725cbcd65ce5e54/pillow-11.3.0-cp310-cp310-win_amd64.whl", hash = "sha256:19d2ff547c75b8e3ff46f4d9ef969a06c30ab2d4263a9e287733aa8b2429ce8f", size = 6985988, upload-time = "2025-07-01T09:13:55.699Z" }, + { url = "https://files.pythonhosted.org/packages/9f/28/4f4a0203165eefb3763939c6789ba31013a2e90adffb456610f30f613850/pillow-11.3.0-cp310-cp310-win_arm64.whl", hash = "sha256:819931d25e57b513242859ce1876c58c59dc31587847bf74cfe06b2e0cb22d2f", size = 2422899, upload-time = "2025-07-01T09:13:57.497Z" }, + { url = "https://files.pythonhosted.org/packages/db/26/77f8ed17ca4ffd60e1dcd220a6ec6d71210ba398cfa33a13a1cd614c5613/pillow-11.3.0-cp311-cp311-macosx_10_10_x86_64.whl", hash = "sha256:1cd110edf822773368b396281a2293aeb91c90a2db00d78ea43e7e861631b722", size = 5316531, upload-time = "2025-07-01T09:13:59.203Z" }, + { url = "https://files.pythonhosted.org/packages/cb/39/ee475903197ce709322a17a866892efb560f57900d9af2e55f86db51b0a5/pillow-11.3.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:9c412fddd1b77a75aa904615ebaa6001f169b26fd467b4be93aded278266b288", size = 4686560, upload-time = "2025-07-01T09:14:01.101Z" }, + { url = "https://files.pythonhosted.org/packages/d5/90/442068a160fd179938ba55ec8c97050a612426fae5ec0a764e345839f76d/pillow-11.3.0-cp311-cp311-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:7d1aa4de119a0ecac0a34a9c8bde33f34022e2e8f99104e47a3ca392fd60e37d", size = 5870978, upload-time = "2025-07-03T13:09:55.638Z" }, + { url = "https://files.pythonhosted.org/packages/13/92/dcdd147ab02daf405387f0218dcf792dc6dd5b14d2573d40b4caeef01059/pillow-11.3.0-cp311-cp311-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:91da1d88226663594e3f6b4b8c3c8d85bd504117d043740a8e0ec449087cc494", size = 7641168, upload-time = "2025-07-03T13:10:00.37Z" }, + { url = "https://files.pythonhosted.org/packages/6e/db/839d6ba7fd38b51af641aa904e2960e7a5644d60ec754c046b7d2aee00e5/pillow-11.3.0-cp311-cp311-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:643f189248837533073c405ec2f0bb250ba54598cf80e8c1e043381a60632f58", size = 5973053, upload-time = "2025-07-01T09:14:04.491Z" }, + { url = "https://files.pythonhosted.org/packages/f2/2f/d7675ecae6c43e9f12aa8d58b6012683b20b6edfbdac7abcb4e6af7a3784/pillow-11.3.0-cp311-cp311-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:106064daa23a745510dabce1d84f29137a37224831d88eb4ce94bb187b1d7e5f", size = 6640273, upload-time = "2025-07-01T09:14:06.235Z" }, + { url = "https://files.pythonhosted.org/packages/45/ad/931694675ede172e15b2ff03c8144a0ddaea1d87adb72bb07655eaffb654/pillow-11.3.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = 
"sha256:cd8ff254faf15591e724dc7c4ddb6bf4793efcbe13802a4ae3e863cd300b493e", size = 6082043, upload-time = "2025-07-01T09:14:07.978Z" }, + { url = "https://files.pythonhosted.org/packages/3a/04/ba8f2b11fc80d2dd462d7abec16351b45ec99cbbaea4387648a44190351a/pillow-11.3.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:932c754c2d51ad2b2271fd01c3d121daaa35e27efae2a616f77bf164bc0b3e94", size = 6715516, upload-time = "2025-07-01T09:14:10.233Z" }, + { url = "https://files.pythonhosted.org/packages/48/59/8cd06d7f3944cc7d892e8533c56b0acb68399f640786313275faec1e3b6f/pillow-11.3.0-cp311-cp311-win32.whl", hash = "sha256:b4b8f3efc8d530a1544e5962bd6b403d5f7fe8b9e08227c6b255f98ad82b4ba0", size = 6274768, upload-time = "2025-07-01T09:14:11.921Z" }, + { url = "https://files.pythonhosted.org/packages/f1/cc/29c0f5d64ab8eae20f3232da8f8571660aa0ab4b8f1331da5c2f5f9a938e/pillow-11.3.0-cp311-cp311-win_amd64.whl", hash = "sha256:1a992e86b0dd7aeb1f053cd506508c0999d710a8f07b4c791c63843fc6a807ac", size = 6986055, upload-time = "2025-07-01T09:14:13.623Z" }, + { url = "https://files.pythonhosted.org/packages/c6/df/90bd886fabd544c25addd63e5ca6932c86f2b701d5da6c7839387a076b4a/pillow-11.3.0-cp311-cp311-win_arm64.whl", hash = "sha256:30807c931ff7c095620fe04448e2c2fc673fcbb1ffe2a7da3fb39613489b1ddd", size = 2423079, upload-time = "2025-07-01T09:14:15.268Z" }, + { url = "https://files.pythonhosted.org/packages/40/fe/1bc9b3ee13f68487a99ac9529968035cca2f0a51ec36892060edcc51d06a/pillow-11.3.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:fdae223722da47b024b867c1ea0be64e0df702c5e0a60e27daad39bf960dd1e4", size = 5278800, upload-time = "2025-07-01T09:14:17.648Z" }, + { url = "https://files.pythonhosted.org/packages/2c/32/7e2ac19b5713657384cec55f89065fb306b06af008cfd87e572035b27119/pillow-11.3.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:921bd305b10e82b4d1f5e802b6850677f965d8394203d182f078873851dada69", size = 4686296, upload-time = "2025-07-01T09:14:19.828Z" }, + { url = "https://files.pythonhosted.org/packages/8e/1e/b9e12bbe6e4c2220effebc09ea0923a07a6da1e1f1bfbc8d7d29a01ce32b/pillow-11.3.0-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:eb76541cba2f958032d79d143b98a3a6b3ea87f0959bbe256c0b5e416599fd5d", size = 5871726, upload-time = "2025-07-03T13:10:04.448Z" }, + { url = "https://files.pythonhosted.org/packages/8d/33/e9200d2bd7ba00dc3ddb78df1198a6e80d7669cce6c2bdbeb2530a74ec58/pillow-11.3.0-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:67172f2944ebba3d4a7b54f2e95c786a3a50c21b88456329314caaa28cda70f6", size = 7644652, upload-time = "2025-07-03T13:10:10.391Z" }, + { url = "https://files.pythonhosted.org/packages/41/f1/6f2427a26fc683e00d985bc391bdd76d8dd4e92fac33d841127eb8fb2313/pillow-11.3.0-cp312-cp312-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:97f07ed9f56a3b9b5f49d3661dc9607484e85c67e27f3e8be2c7d28ca032fec7", size = 5977787, upload-time = "2025-07-01T09:14:21.63Z" }, + { url = "https://files.pythonhosted.org/packages/e4/c9/06dd4a38974e24f932ff5f98ea3c546ce3f8c995d3f0985f8e5ba48bba19/pillow-11.3.0-cp312-cp312-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:676b2815362456b5b3216b4fd5bd89d362100dc6f4945154ff172e206a22c024", size = 6645236, upload-time = "2025-07-01T09:14:23.321Z" }, + { url = "https://files.pythonhosted.org/packages/40/e7/848f69fb79843b3d91241bad658e9c14f39a32f71a301bcd1d139416d1be/pillow-11.3.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = 
"sha256:3e184b2f26ff146363dd07bde8b711833d7b0202e27d13540bfe2e35a323a809", size = 6086950, upload-time = "2025-07-01T09:14:25.237Z" }, + { url = "https://files.pythonhosted.org/packages/0b/1a/7cff92e695a2a29ac1958c2a0fe4c0b2393b60aac13b04a4fe2735cad52d/pillow-11.3.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:6be31e3fc9a621e071bc17bb7de63b85cbe0bfae91bb0363c893cbe67247780d", size = 6723358, upload-time = "2025-07-01T09:14:27.053Z" }, + { url = "https://files.pythonhosted.org/packages/26/7d/73699ad77895f69edff76b0f332acc3d497f22f5d75e5360f78cbcaff248/pillow-11.3.0-cp312-cp312-win32.whl", hash = "sha256:7b161756381f0918e05e7cb8a371fff367e807770f8fe92ecb20d905d0e1c149", size = 6275079, upload-time = "2025-07-01T09:14:30.104Z" }, + { url = "https://files.pythonhosted.org/packages/8c/ce/e7dfc873bdd9828f3b6e5c2bbb74e47a98ec23cc5c74fc4e54462f0d9204/pillow-11.3.0-cp312-cp312-win_amd64.whl", hash = "sha256:a6444696fce635783440b7f7a9fc24b3ad10a9ea3f0ab66c5905be1c19ccf17d", size = 6986324, upload-time = "2025-07-01T09:14:31.899Z" }, + { url = "https://files.pythonhosted.org/packages/16/8f/b13447d1bf0b1f7467ce7d86f6e6edf66c0ad7cf44cf5c87a37f9bed9936/pillow-11.3.0-cp312-cp312-win_arm64.whl", hash = "sha256:2aceea54f957dd4448264f9bf40875da0415c83eb85f55069d89c0ed436e3542", size = 2423067, upload-time = "2025-07-01T09:14:33.709Z" }, + { url = "https://files.pythonhosted.org/packages/1e/93/0952f2ed8db3a5a4c7a11f91965d6184ebc8cd7cbb7941a260d5f018cd2d/pillow-11.3.0-cp313-cp313-ios_13_0_arm64_iphoneos.whl", hash = "sha256:1c627742b539bba4309df89171356fcb3cc5a9178355b2727d1b74a6cf155fbd", size = 2128328, upload-time = "2025-07-01T09:14:35.276Z" }, + { url = "https://files.pythonhosted.org/packages/4b/e8/100c3d114b1a0bf4042f27e0f87d2f25e857e838034e98ca98fe7b8c0a9c/pillow-11.3.0-cp313-cp313-ios_13_0_arm64_iphonesimulator.whl", hash = "sha256:30b7c02f3899d10f13d7a48163c8969e4e653f8b43416d23d13d1bbfdc93b9f8", size = 2170652, upload-time = "2025-07-01T09:14:37.203Z" }, + { url = "https://files.pythonhosted.org/packages/aa/86/3f758a28a6e381758545f7cdb4942e1cb79abd271bea932998fc0db93cb6/pillow-11.3.0-cp313-cp313-ios_13_0_x86_64_iphonesimulator.whl", hash = "sha256:7859a4cc7c9295f5838015d8cc0a9c215b77e43d07a25e460f35cf516df8626f", size = 2227443, upload-time = "2025-07-01T09:14:39.344Z" }, + { url = "https://files.pythonhosted.org/packages/01/f4/91d5b3ffa718df2f53b0dc109877993e511f4fd055d7e9508682e8aba092/pillow-11.3.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:ec1ee50470b0d050984394423d96325b744d55c701a439d2bd66089bff963d3c", size = 5278474, upload-time = "2025-07-01T09:14:41.843Z" }, + { url = "https://files.pythonhosted.org/packages/f9/0e/37d7d3eca6c879fbd9dba21268427dffda1ab00d4eb05b32923d4fbe3b12/pillow-11.3.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:7db51d222548ccfd274e4572fdbf3e810a5e66b00608862f947b163e613b67dd", size = 4686038, upload-time = "2025-07-01T09:14:44.008Z" }, + { url = "https://files.pythonhosted.org/packages/ff/b0/3426e5c7f6565e752d81221af9d3676fdbb4f352317ceafd42899aaf5d8a/pillow-11.3.0-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:2d6fcc902a24ac74495df63faad1884282239265c6839a0a6416d33faedfae7e", size = 5864407, upload-time = "2025-07-03T13:10:15.628Z" }, + { url = "https://files.pythonhosted.org/packages/fc/c1/c6c423134229f2a221ee53f838d4be9d82bab86f7e2f8e75e47b6bf6cd77/pillow-11.3.0-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:f0f5d8f4a08090c6d6d578351a2b91acf519a54986c055af27e7a93feae6d3f1", size = 
7639094, upload-time = "2025-07-03T13:10:21.857Z" }, + { url = "https://files.pythonhosted.org/packages/ba/c9/09e6746630fe6372c67c648ff9deae52a2bc20897d51fa293571977ceb5d/pillow-11.3.0-cp313-cp313-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:c37d8ba9411d6003bba9e518db0db0c58a680ab9fe5179f040b0463644bc9805", size = 5973503, upload-time = "2025-07-01T09:14:45.698Z" }, + { url = "https://files.pythonhosted.org/packages/d5/1c/a2a29649c0b1983d3ef57ee87a66487fdeb45132df66ab30dd37f7dbe162/pillow-11.3.0-cp313-cp313-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:13f87d581e71d9189ab21fe0efb5a23e9f28552d5be6979e84001d3b8505abe8", size = 6642574, upload-time = "2025-07-01T09:14:47.415Z" }, + { url = "https://files.pythonhosted.org/packages/36/de/d5cc31cc4b055b6c6fd990e3e7f0f8aaf36229a2698501bcb0cdf67c7146/pillow-11.3.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:023f6d2d11784a465f09fd09a34b150ea4672e85fb3d05931d89f373ab14abb2", size = 6084060, upload-time = "2025-07-01T09:14:49.636Z" }, + { url = "https://files.pythonhosted.org/packages/d5/ea/502d938cbaeec836ac28a9b730193716f0114c41325db428e6b280513f09/pillow-11.3.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:45dfc51ac5975b938e9809451c51734124e73b04d0f0ac621649821a63852e7b", size = 6721407, upload-time = "2025-07-01T09:14:51.962Z" }, + { url = "https://files.pythonhosted.org/packages/45/9c/9c5e2a73f125f6cbc59cc7087c8f2d649a7ae453f83bd0362ff7c9e2aee2/pillow-11.3.0-cp313-cp313-win32.whl", hash = "sha256:a4d336baed65d50d37b88ca5b60c0fa9d81e3a87d4a7930d3880d1624d5b31f3", size = 6273841, upload-time = "2025-07-01T09:14:54.142Z" }, + { url = "https://files.pythonhosted.org/packages/23/85/397c73524e0cd212067e0c969aa245b01d50183439550d24d9f55781b776/pillow-11.3.0-cp313-cp313-win_amd64.whl", hash = "sha256:0bce5c4fd0921f99d2e858dc4d4d64193407e1b99478bc5cacecba2311abde51", size = 6978450, upload-time = "2025-07-01T09:14:56.436Z" }, + { url = "https://files.pythonhosted.org/packages/17/d2/622f4547f69cd173955194b78e4d19ca4935a1b0f03a302d655c9f6aae65/pillow-11.3.0-cp313-cp313-win_arm64.whl", hash = "sha256:1904e1264881f682f02b7f8167935cce37bc97db457f8e7849dc3a6a52b99580", size = 2423055, upload-time = "2025-07-01T09:14:58.072Z" }, + { url = "https://files.pythonhosted.org/packages/dd/80/a8a2ac21dda2e82480852978416cfacd439a4b490a501a288ecf4fe2532d/pillow-11.3.0-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:4c834a3921375c48ee6b9624061076bc0a32a60b5532b322cc0ea64e639dd50e", size = 5281110, upload-time = "2025-07-01T09:14:59.79Z" }, + { url = "https://files.pythonhosted.org/packages/44/d6/b79754ca790f315918732e18f82a8146d33bcd7f4494380457ea89eb883d/pillow-11.3.0-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:5e05688ccef30ea69b9317a9ead994b93975104a677a36a8ed8106be9260aa6d", size = 4689547, upload-time = "2025-07-01T09:15:01.648Z" }, + { url = "https://files.pythonhosted.org/packages/49/20/716b8717d331150cb00f7fdd78169c01e8e0c219732a78b0e59b6bdb2fd6/pillow-11.3.0-cp313-cp313t-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:1019b04af07fc0163e2810167918cb5add8d74674b6267616021ab558dc98ced", size = 5901554, upload-time = "2025-07-03T13:10:27.018Z" }, + { url = "https://files.pythonhosted.org/packages/74/cf/a9f3a2514a65bb071075063a96f0a5cf949c2f2fce683c15ccc83b1c1cab/pillow-11.3.0-cp313-cp313t-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:f944255db153ebb2b19c51fe85dd99ef0ce494123f21b9db4877ffdfc5590c7c", size = 7669132, upload-time = "2025-07-03T13:10:33.01Z" }, + 
{ url = "https://files.pythonhosted.org/packages/98/3c/da78805cbdbee9cb43efe8261dd7cc0b4b93f2ac79b676c03159e9db2187/pillow-11.3.0-cp313-cp313t-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:1f85acb69adf2aaee8b7da124efebbdb959a104db34d3a2cb0f3793dbae422a8", size = 6005001, upload-time = "2025-07-01T09:15:03.365Z" }, + { url = "https://files.pythonhosted.org/packages/6c/fa/ce044b91faecf30e635321351bba32bab5a7e034c60187fe9698191aef4f/pillow-11.3.0-cp313-cp313t-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:05f6ecbeff5005399bb48d198f098a9b4b6bdf27b8487c7f38ca16eeb070cd59", size = 6668814, upload-time = "2025-07-01T09:15:05.655Z" }, + { url = "https://files.pythonhosted.org/packages/7b/51/90f9291406d09bf93686434f9183aba27b831c10c87746ff49f127ee80cb/pillow-11.3.0-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:a7bc6e6fd0395bc052f16b1a8670859964dbd7003bd0af2ff08342eb6e442cfe", size = 6113124, upload-time = "2025-07-01T09:15:07.358Z" }, + { url = "https://files.pythonhosted.org/packages/cd/5a/6fec59b1dfb619234f7636d4157d11fb4e196caeee220232a8d2ec48488d/pillow-11.3.0-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:83e1b0161c9d148125083a35c1c5a89db5b7054834fd4387499e06552035236c", size = 6747186, upload-time = "2025-07-01T09:15:09.317Z" }, + { url = "https://files.pythonhosted.org/packages/49/6b/00187a044f98255225f172de653941e61da37104a9ea60e4f6887717e2b5/pillow-11.3.0-cp313-cp313t-win32.whl", hash = "sha256:2a3117c06b8fb646639dce83694f2f9eac405472713fcb1ae887469c0d4f6788", size = 6277546, upload-time = "2025-07-01T09:15:11.311Z" }, + { url = "https://files.pythonhosted.org/packages/e8/5c/6caaba7e261c0d75bab23be79f1d06b5ad2a2ae49f028ccec801b0e853d6/pillow-11.3.0-cp313-cp313t-win_amd64.whl", hash = "sha256:857844335c95bea93fb39e0fa2726b4d9d758850b34075a7e3ff4f4fa3aa3b31", size = 6985102, upload-time = "2025-07-01T09:15:13.164Z" }, + { url = "https://files.pythonhosted.org/packages/f3/7e/b623008460c09a0cb38263c93b828c666493caee2eb34ff67f778b87e58c/pillow-11.3.0-cp313-cp313t-win_arm64.whl", hash = "sha256:8797edc41f3e8536ae4b10897ee2f637235c94f27404cac7297f7b607dd0716e", size = 2424803, upload-time = "2025-07-01T09:15:15.695Z" }, + { url = "https://files.pythonhosted.org/packages/73/f4/04905af42837292ed86cb1b1dabe03dce1edc008ef14c473c5c7e1443c5d/pillow-11.3.0-cp314-cp314-macosx_10_13_x86_64.whl", hash = "sha256:d9da3df5f9ea2a89b81bb6087177fb1f4d1c7146d583a3fe5c672c0d94e55e12", size = 5278520, upload-time = "2025-07-01T09:15:17.429Z" }, + { url = "https://files.pythonhosted.org/packages/41/b0/33d79e377a336247df6348a54e6d2a2b85d644ca202555e3faa0cf811ecc/pillow-11.3.0-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:0b275ff9b04df7b640c59ec5a3cb113eefd3795a8df80bac69646ef699c6981a", size = 4686116, upload-time = "2025-07-01T09:15:19.423Z" }, + { url = "https://files.pythonhosted.org/packages/49/2d/ed8bc0ab219ae8768f529597d9509d184fe8a6c4741a6864fea334d25f3f/pillow-11.3.0-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:0743841cabd3dba6a83f38a92672cccbd69af56e3e91777b0ee7f4dba4385632", size = 5864597, upload-time = "2025-07-03T13:10:38.404Z" }, + { url = "https://files.pythonhosted.org/packages/b5/3d/b932bb4225c80b58dfadaca9d42d08d0b7064d2d1791b6a237f87f661834/pillow-11.3.0-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:2465a69cf967b8b49ee1b96d76718cd98c4e925414ead59fdf75cf0fd07df673", size = 7638246, upload-time = "2025-07-03T13:10:44.987Z" }, + { url = 
"https://files.pythonhosted.org/packages/09/b5/0487044b7c096f1b48f0d7ad416472c02e0e4bf6919541b111efd3cae690/pillow-11.3.0-cp314-cp314-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:41742638139424703b4d01665b807c6468e23e699e8e90cffefe291c5832b027", size = 5973336, upload-time = "2025-07-01T09:15:21.237Z" }, + { url = "https://files.pythonhosted.org/packages/a8/2d/524f9318f6cbfcc79fbc004801ea6b607ec3f843977652fdee4857a7568b/pillow-11.3.0-cp314-cp314-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:93efb0b4de7e340d99057415c749175e24c8864302369e05914682ba642e5d77", size = 6642699, upload-time = "2025-07-01T09:15:23.186Z" }, + { url = "https://files.pythonhosted.org/packages/6f/d2/a9a4f280c6aefedce1e8f615baaa5474e0701d86dd6f1dede66726462bbd/pillow-11.3.0-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:7966e38dcd0fa11ca390aed7c6f20454443581d758242023cf36fcb319b1a874", size = 6083789, upload-time = "2025-07-01T09:15:25.1Z" }, + { url = "https://files.pythonhosted.org/packages/fe/54/86b0cd9dbb683a9d5e960b66c7379e821a19be4ac5810e2e5a715c09a0c0/pillow-11.3.0-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:98a9afa7b9007c67ed84c57c9e0ad86a6000da96eaa638e4f8abe5b65ff83f0a", size = 6720386, upload-time = "2025-07-01T09:15:27.378Z" }, + { url = "https://files.pythonhosted.org/packages/e7/95/88efcaf384c3588e24259c4203b909cbe3e3c2d887af9e938c2022c9dd48/pillow-11.3.0-cp314-cp314-win32.whl", hash = "sha256:02a723e6bf909e7cea0dac1b0e0310be9d7650cd66222a5f1c571455c0a45214", size = 6370911, upload-time = "2025-07-01T09:15:29.294Z" }, + { url = "https://files.pythonhosted.org/packages/2e/cc/934e5820850ec5eb107e7b1a72dd278140731c669f396110ebc326f2a503/pillow-11.3.0-cp314-cp314-win_amd64.whl", hash = "sha256:a418486160228f64dd9e9efcd132679b7a02a5f22c982c78b6fc7dab3fefb635", size = 7117383, upload-time = "2025-07-01T09:15:31.128Z" }, + { url = "https://files.pythonhosted.org/packages/d6/e9/9c0a616a71da2a5d163aa37405e8aced9a906d574b4a214bede134e731bc/pillow-11.3.0-cp314-cp314-win_arm64.whl", hash = "sha256:155658efb5e044669c08896c0c44231c5e9abcaadbc5cd3648df2f7c0b96b9a6", size = 2511385, upload-time = "2025-07-01T09:15:33.328Z" }, + { url = "https://files.pythonhosted.org/packages/1a/33/c88376898aff369658b225262cd4f2659b13e8178e7534df9e6e1fa289f6/pillow-11.3.0-cp314-cp314t-macosx_10_13_x86_64.whl", hash = "sha256:59a03cdf019efbfeeed910bf79c7c93255c3d54bc45898ac2a4140071b02b4ae", size = 5281129, upload-time = "2025-07-01T09:15:35.194Z" }, + { url = "https://files.pythonhosted.org/packages/1f/70/d376247fb36f1844b42910911c83a02d5544ebd2a8bad9efcc0f707ea774/pillow-11.3.0-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:f8a5827f84d973d8636e9dc5764af4f0cf2318d26744b3d902931701b0d46653", size = 4689580, upload-time = "2025-07-01T09:15:37.114Z" }, + { url = "https://files.pythonhosted.org/packages/eb/1c/537e930496149fbac69efd2fc4329035bbe2e5475b4165439e3be9cb183b/pillow-11.3.0-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:ee92f2fd10f4adc4b43d07ec5e779932b4eb3dbfbc34790ada5a6669bc095aa6", size = 5902860, upload-time = "2025-07-03T13:10:50.248Z" }, + { url = "https://files.pythonhosted.org/packages/bd/57/80f53264954dcefeebcf9dae6e3eb1daea1b488f0be8b8fef12f79a3eb10/pillow-11.3.0-cp314-cp314t-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:c96d333dcf42d01f47b37e0979b6bd73ec91eae18614864622d9b87bbd5bbf36", size = 7670694, upload-time = "2025-07-03T13:10:56.432Z" }, + { url = 
"https://files.pythonhosted.org/packages/70/ff/4727d3b71a8578b4587d9c276e90efad2d6fe0335fd76742a6da08132e8c/pillow-11.3.0-cp314-cp314t-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:4c96f993ab8c98460cd0c001447bff6194403e8b1d7e149ade5f00594918128b", size = 6005888, upload-time = "2025-07-01T09:15:39.436Z" }, + { url = "https://files.pythonhosted.org/packages/05/ae/716592277934f85d3be51d7256f3636672d7b1abfafdc42cf3f8cbd4b4c8/pillow-11.3.0-cp314-cp314t-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:41342b64afeba938edb034d122b2dda5db2139b9a4af999729ba8818e0056477", size = 6670330, upload-time = "2025-07-01T09:15:41.269Z" }, + { url = "https://files.pythonhosted.org/packages/e7/bb/7fe6cddcc8827b01b1a9766f5fdeb7418680744f9082035bdbabecf1d57f/pillow-11.3.0-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:068d9c39a2d1b358eb9f245ce7ab1b5c3246c7c8c7d9ba58cfa5b43146c06e50", size = 6114089, upload-time = "2025-07-01T09:15:43.13Z" }, + { url = "https://files.pythonhosted.org/packages/8b/f5/06bfaa444c8e80f1a8e4bff98da9c83b37b5be3b1deaa43d27a0db37ef84/pillow-11.3.0-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:a1bc6ba083b145187f648b667e05a2534ecc4b9f2784c2cbe3089e44868f2b9b", size = 6748206, upload-time = "2025-07-01T09:15:44.937Z" }, + { url = "https://files.pythonhosted.org/packages/f0/77/bc6f92a3e8e6e46c0ca78abfffec0037845800ea38c73483760362804c41/pillow-11.3.0-cp314-cp314t-win32.whl", hash = "sha256:118ca10c0d60b06d006be10a501fd6bbdfef559251ed31b794668ed569c87e12", size = 6377370, upload-time = "2025-07-01T09:15:46.673Z" }, + { url = "https://files.pythonhosted.org/packages/4a/82/3a721f7d69dca802befb8af08b7c79ebcab461007ce1c18bd91a5d5896f9/pillow-11.3.0-cp314-cp314t-win_amd64.whl", hash = "sha256:8924748b688aa210d79883357d102cd64690e56b923a186f35a82cbc10f997db", size = 7121500, upload-time = "2025-07-01T09:15:48.512Z" }, + { url = "https://files.pythonhosted.org/packages/89/c7/5572fa4a3f45740eaab6ae86fcdf7195b55beac1371ac8c619d880cfe948/pillow-11.3.0-cp314-cp314t-win_arm64.whl", hash = "sha256:79ea0d14d3ebad43ec77ad5272e6ff9bba5b679ef73375ea760261207fa8e0aa", size = 2512835, upload-time = "2025-07-01T09:15:50.399Z" }, + { url = "https://files.pythonhosted.org/packages/6f/8b/209bd6b62ce8367f47e68a218bffac88888fdf2c9fcf1ecadc6c3ec1ebc7/pillow-11.3.0-pp310-pypy310_pp73-macosx_10_15_x86_64.whl", hash = "sha256:3cee80663f29e3843b68199b9d6f4f54bd1d4a6b59bdd91bceefc51238bcb967", size = 5270556, upload-time = "2025-07-01T09:16:09.961Z" }, + { url = "https://files.pythonhosted.org/packages/2e/e6/231a0b76070c2cfd9e260a7a5b504fb72da0a95279410fa7afd99d9751d6/pillow-11.3.0-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:b5f56c3f344f2ccaf0dd875d3e180f631dc60a51b314295a3e681fe8cf851fbe", size = 4654625, upload-time = "2025-07-01T09:16:11.913Z" }, + { url = "https://files.pythonhosted.org/packages/13/f4/10cf94fda33cb12765f2397fc285fa6d8eb9c29de7f3185165b702fc7386/pillow-11.3.0-pp310-pypy310_pp73-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:e67d793d180c9df62f1f40aee3accca4829d3794c95098887edc18af4b8b780c", size = 4874207, upload-time = "2025-07-03T13:11:10.201Z" }, + { url = "https://files.pythonhosted.org/packages/72/c9/583821097dc691880c92892e8e2d41fe0a5a3d6021f4963371d2f6d57250/pillow-11.3.0-pp310-pypy310_pp73-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:d000f46e2917c705e9fb93a3606ee4a819d1e3aa7a9b442f6444f07e77cf5e25", size = 6583939, upload-time = "2025-07-03T13:11:15.68Z" }, + { url = 
"https://files.pythonhosted.org/packages/3b/8e/5c9d410f9217b12320efc7c413e72693f48468979a013ad17fd690397b9a/pillow-11.3.0-pp310-pypy310_pp73-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:527b37216b6ac3a12d7838dc3bd75208ec57c1c6d11ef01902266a5a0c14fc27", size = 4957166, upload-time = "2025-07-01T09:16:13.74Z" }, + { url = "https://files.pythonhosted.org/packages/62/bb/78347dbe13219991877ffb3a91bf09da8317fbfcd4b5f9140aeae020ad71/pillow-11.3.0-pp310-pypy310_pp73-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:be5463ac478b623b9dd3937afd7fb7ab3d79dd290a28e2b6df292dc75063eb8a", size = 5581482, upload-time = "2025-07-01T09:16:16.107Z" }, + { url = "https://files.pythonhosted.org/packages/d9/28/1000353d5e61498aaeaaf7f1e4b49ddb05f2c6575f9d4f9f914a3538b6e1/pillow-11.3.0-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:8dc70ca24c110503e16918a658b869019126ecfe03109b754c402daff12b3d9f", size = 6984596, upload-time = "2025-07-01T09:16:18.07Z" }, + { url = "https://files.pythonhosted.org/packages/9e/e3/6fa84033758276fb31da12e5fb66ad747ae83b93c67af17f8c6ff4cc8f34/pillow-11.3.0-pp311-pypy311_pp73-macosx_10_15_x86_64.whl", hash = "sha256:7c8ec7a017ad1bd562f93dbd8505763e688d388cde6e4a010ae1486916e713e6", size = 5270566, upload-time = "2025-07-01T09:16:19.801Z" }, + { url = "https://files.pythonhosted.org/packages/5b/ee/e8d2e1ab4892970b561e1ba96cbd59c0d28cf66737fc44abb2aec3795a4e/pillow-11.3.0-pp311-pypy311_pp73-macosx_11_0_arm64.whl", hash = "sha256:9ab6ae226de48019caa8074894544af5b53a117ccb9d3b3dcb2871464c829438", size = 4654618, upload-time = "2025-07-01T09:16:21.818Z" }, + { url = "https://files.pythonhosted.org/packages/f2/6d/17f80f4e1f0761f02160fc433abd4109fa1548dcfdca46cfdadaf9efa565/pillow-11.3.0-pp311-pypy311_pp73-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:fe27fb049cdcca11f11a7bfda64043c37b30e6b91f10cb5bab275806c32f6ab3", size = 4874248, upload-time = "2025-07-03T13:11:20.738Z" }, + { url = "https://files.pythonhosted.org/packages/de/5f/c22340acd61cef960130585bbe2120e2fd8434c214802f07e8c03596b17e/pillow-11.3.0-pp311-pypy311_pp73-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:465b9e8844e3c3519a983d58b80be3f668e2a7a5db97f2784e7079fbc9f9822c", size = 6583963, upload-time = "2025-07-03T13:11:26.283Z" }, + { url = "https://files.pythonhosted.org/packages/31/5e/03966aedfbfcbb4d5f8aa042452d3361f325b963ebbadddac05b122e47dd/pillow-11.3.0-pp311-pypy311_pp73-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:5418b53c0d59b3824d05e029669efa023bbef0f3e92e75ec8428f3799487f361", size = 4957170, upload-time = "2025-07-01T09:16:23.762Z" }, + { url = "https://files.pythonhosted.org/packages/cc/2d/e082982aacc927fc2cab48e1e731bdb1643a1406acace8bed0900a61464e/pillow-11.3.0-pp311-pypy311_pp73-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:504b6f59505f08ae014f724b6207ff6222662aab5cc9542577fb084ed0676ac7", size = 5581505, upload-time = "2025-07-01T09:16:25.593Z" }, + { url = "https://files.pythonhosted.org/packages/34/e7/ae39f538fd6844e982063c3a5e4598b8ced43b9633baa3a85ef33af8c05c/pillow-11.3.0-pp311-pypy311_pp73-win_amd64.whl", hash = "sha256:c84d689db21a1c397d001aa08241044aa2069e7587b398c8cc63020390b1c1b8", size = 6984598, upload-time = "2025-07-01T09:16:27.732Z" }, ] [[package]] @@ -4281,6 +4503,19 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/73/cb/ac7874b3e5d58441674fb70742e6c374b28b0c7cb988d37d991cde47166c/platformdirs-4.5.0-py3-none-any.whl", hash = 
"sha256:e578a81bb873cbb89a41fcc904c7ef523cc18284b7e3b3ccf06aca1403b7ebd3", size = 18651, upload-time = "2025-10-08T17:44:47.223Z" }, ] +[[package]] +name = "plotly" +version = "6.5.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "narwhals" }, + { name = "packaging" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/94/05/1199e2a03ce6637960bc1e951ca0f928209a48cfceb57355806a88f214cf/plotly-6.5.0.tar.gz", hash = "sha256:d5d38224883fd38c1409bef7d6a8dc32b74348d39313f3c52ca998b8e447f5c8", size = 7013624, upload-time = "2025-11-17T18:39:24.523Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/e7/c3/3031c931098de393393e1f93a38dc9ed6805d86bb801acc3cf2d5bd1e6b7/plotly-6.5.0-py3-none-any.whl", hash = "sha256:5ac851e100367735250206788a2b1325412aa4a4917a4fe3e6f0bc5aa6f3d90a", size = 9893174, upload-time = "2025-11-17T18:39:20.351Z" }, +] + [[package]] name = "pluggy" version = "1.6.0" @@ -4318,6 +4553,30 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/02/c7/5613524e606ea1688b3bdbf48aa64bafb6d0a4ac3750274c43b6158a390f/prettytable-3.16.0-py3-none-any.whl", hash = "sha256:b5eccfabb82222f5aa46b798ff02a8452cf530a352c31bddfa29be41242863aa", size = 33863, upload-time = "2025-03-24T19:39:02.359Z" }, ] +[[package]] +name = "proglog" +version = "0.1.12" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "tqdm" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/c2/af/c108866c452eda1132f3d6b3cb6be2ae8430c97e9309f38ca9dbd430af37/proglog-0.1.12.tar.gz", hash = "sha256:361ee074721c277b89b75c061336cb8c5f287c92b043efa562ccf7866cda931c", size = 8794, upload-time = "2025-05-09T14:36:18.316Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/c1/1b/f7ea6cde25621cd9236541c66ff018f4268012a534ec31032bcb187dc5e7/proglog-0.1.12-py3-none-any.whl", hash = "sha256:ccaafce51e80a81c65dc907a460c07ccb8ec1f78dc660cfd8f9ec3a22f01b84c", size = 6337, upload-time = "2025-05-09T14:36:16.798Z" }, +] + +[[package]] +name = "prompt-toolkit" +version = "3.0.52" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "wcwidth" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/a1/96/06e01a7b38dce6fe1db213e061a4602dd6032a8a97ef6c1a862537732421/prompt_toolkit-3.0.52.tar.gz", hash = "sha256:28cde192929c8e7321de85de1ddbe736f1375148b02f2e17edd840042b1be855", size = 434198, upload-time = "2025-08-27T15:24:02.057Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/84/03/0d3ce49e2505ae70cf43bc5bb3033955d2fc9f932163e84dc0779cc47f48/prompt_toolkit-3.0.52-py3-none-any.whl", hash = "sha256:9aac639a3bbd33284347de5ad8d68ecc044b91a762dc39b7c21095fcd6a19955", size = 391431, upload-time = "2025-08-27T15:23:59.498Z" }, +] + [[package]] name = "propcache" version = "0.4.1" @@ -4473,6 +4732,15 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/c9/ad/33b2ccec09bf96c2b2ef3f9a6f66baac8253d7565d8839e024a6b905d45d/psutil-7.1.3-cp37-abi3-win_arm64.whl", hash = "sha256:bd0d69cee829226a761e92f28140bec9a5ee9d5b4fb4b0cc589068dbfff559b1", size = 244608, upload-time = "2025-11-02T12:26:36.136Z" }, ] +[[package]] +name = "ptyprocess" +version = "0.7.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/20/e5/16ff212c1e452235a90aeb09066144d0c5a6a8c0834397e03f5224495c4e/ptyprocess-0.7.0.tar.gz", hash = "sha256:5c5d0a3b48ceee0b48485e0c26037c0acd7d29765ca3fbb5cb3831d347423220", size = 70762, upload-time = 
"2020-12-28T15:15:30.155Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/22/a6/858897256d0deac81a172289110f31629fc4cee19b6f01283303e18c8db3/ptyprocess-0.7.0-py2.py3-none-any.whl", hash = "sha256:4b41f3967fce3af57cc7e94b888626c18bf37a083e3651ca8feeb66d492fef35", size = 13993, upload-time = "2020-12-28T15:15:28.35Z" }, +] + [[package]] name = "pulp" version = "3.3.0" @@ -4482,6 +4750,15 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/99/6c/64cafaceea3f99927e84b38a362ec6a8f24f33061c90bda77dfe1cd4c3c6/pulp-3.3.0-py3-none-any.whl", hash = "sha256:dd6ad2d63f196d1254eddf9dcff5cd224912c1f046120cb7c143c5b0eda63fae", size = 16387700, upload-time = "2025-09-18T08:14:53.368Z" }, ] +[[package]] +name = "pure-eval" +version = "0.2.3" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/cd/05/0a34433a064256a578f1783a10da6df098ceaa4a57bbeaa96a6c0352786b/pure_eval-0.2.3.tar.gz", hash = "sha256:5f4e983f40564c576c7c8635ae88db5956bb2229d7e9237d03b3c0b0190eaf42", size = 19752, upload-time = "2024-07-21T12:58:21.801Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/8e/37/efad0257dc6e593a18957422533ff0f87ede7c9c6ea010a2177d738fb82f/pure_eval-0.2.3-py3-none-any.whl", hash = "sha256:1db8e35b67b3d218d818ae653e27f06c3aa420901fa7b081ca98cbedc874e0d0", size = 11842, upload-time = "2024-07-21T12:58:20.04Z" }, +] + [[package]] name = "pyarrow" version = "22.0.0" @@ -4967,6 +5244,42 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/c4/43/80f67e0336cb2fc725f8e06f7fe35c1d0fe946f4d2b8b2175e797e07349e/qwen_vl_utils-0.0.14-py3-none-any.whl", hash = "sha256:5e28657bfd031e56bd447c5901b58ddfc3835285ed100f4c56580e0ade054e96", size = 8120, upload-time = "2025-09-23T09:38:56.297Z" }, ] +[[package]] +name = "rdkit" +version = "2025.9.1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "numpy" }, + { name = "pillow" }, +] +wheels = [ + { url = "https://files.pythonhosted.org/packages/12/86/ffa67bd5a49f5b9e809785b8628f1a53065c7c38f70fad8638a78c459cc9/rdkit-2025.9.1-cp310-cp310-macosx_10_15_x86_64.whl", hash = "sha256:46ea08f6ba7ed57dba015a5e4dc42776dc0359121959a70a5487ca81611ccace", size = 31690565, upload-time = "2025-10-06T09:04:02.002Z" }, + { url = "https://files.pythonhosted.org/packages/dc/8f/cd7fa64e38c94e174cc5dfce939d9aa1a93833252671ff07511591be0686/rdkit-2025.9.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:e50a89b2755e25d7d4a69e7125daa9d96242449e2796c7eefb58e468de7933ae", size = 29138210, upload-time = "2025-10-06T09:04:06.436Z" }, + { url = "https://files.pythonhosted.org/packages/26/d1/12b91963ebc0097985bb158097c8fb27a8c89c1fa7ab6b69a504accd0254/rdkit-2025.9.1-cp310-cp310-manylinux_2_28_aarch64.whl", hash = "sha256:ed5030c377da16c5888f742cf81e223db29921cf03b62d31209e2aa3c4bc297b", size = 34764104, upload-time = "2025-10-06T09:04:10.347Z" }, + { url = "https://files.pythonhosted.org/packages/1f/14/3faee55479e8a101e646e2e78359a49138fa42bbde328033b235005ef95c/rdkit-2025.9.1-cp310-cp310-manylinux_2_28_x86_64.whl", hash = "sha256:e5d6cb67637890bdebe5cb43c2882bd39c7ba1a6c22e0c7609b98a91895a361a", size = 36227927, upload-time = "2025-10-06T09:04:17.736Z" }, + { url = "https://files.pythonhosted.org/packages/37/04/4e0f4ae1e2ea49f8dc828d62882380621b2a06bcba9704bc15549d246843/rdkit-2025.9.1-cp310-cp310-win_amd64.whl", hash = "sha256:707bf5e965b736f3a45f14de29ae569197b5391b7c39c1c53bc6f17bbb36d622", size = 23562396, upload-time = "2025-10-06T09:04:21.661Z" }, + { url = 
"https://files.pythonhosted.org/packages/36/bf/cd7258a752b21b4c1051b7c0acd6fb2eb7787ebcaa35598843edd6dc9ade/rdkit-2025.9.1-cp311-cp311-macosx_10_15_x86_64.whl", hash = "sha256:00f7c5b3b095843f0f09df61c4e56c7f1468d4943919fa13d4b6155b790e6880", size = 31691070, upload-time = "2025-10-06T09:04:25.405Z" }, + { url = "https://files.pythonhosted.org/packages/99/57/97a248b83abf6f0d8ff659987d1209e84b18927f9c5c8692c21ed00c3ba1/rdkit-2025.9.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:41246621a072c107855a09cb7bb25bf4480a86223ab7f9121f2a08636ddd8c7a", size = 29138690, upload-time = "2025-10-06T09:04:29.072Z" }, + { url = "https://files.pythonhosted.org/packages/12/d0/ddd5f09917cfbcdfdb87fd40870d38b93ef8ef159489c88da2bcaec36a8d/rdkit-2025.9.1-cp311-cp311-manylinux_2_28_aarch64.whl", hash = "sha256:9fa4b8c77c5a3107484130c089a3626eb61d078d23e5ebca87a7a84e238f4063", size = 34760030, upload-time = "2025-10-06T09:04:32.914Z" }, + { url = "https://files.pythonhosted.org/packages/ba/7f/7dc9cd00a6820c82c7c06328c8d73fd131a73ed12a499064db7a0a8cbff3/rdkit-2025.9.1-cp311-cp311-manylinux_2_28_x86_64.whl", hash = "sha256:fefdb04534f4875aacf6a9a54a8f77ba482c55289f9de5fb8b8f66dccfdc2b05", size = 36226948, upload-time = "2025-10-06T09:04:37.093Z" }, + { url = "https://files.pythonhosted.org/packages/8b/b5/122cf75a4ab4abe430db157b72b42e483d815ee2c7b214ecfa0de8680156/rdkit-2025.9.1-cp311-cp311-win_amd64.whl", hash = "sha256:22dbecbeca83eee100417b3289dceb23974598f01a047284c196f863ee2b9d75", size = 23563087, upload-time = "2025-10-06T09:04:40.412Z" }, + { url = "https://files.pythonhosted.org/packages/d8/37/0875c5af275031b57e3da0e103a85ff2d3fdbacfde5ab459f9bfed4d774e/rdkit-2025.9.1-cp312-cp312-macosx_10_15_x86_64.whl", hash = "sha256:4b068562a0fc9a17db77e76b77e10957c56fa69f0a0b3a25bd6e35e924237e59", size = 31766217, upload-time = "2025-10-06T09:04:43.675Z" }, + { url = "https://files.pythonhosted.org/packages/0e/1b/ceebffc8d123594992f90f453dc666c9730a74841692c862996d335c25b0/rdkit-2025.9.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:58218214f799100f6cb3b12e7bf3abdf3e17c1b5959d42a6c797b0f98f501bab", size = 29182056, upload-time = "2025-10-06T09:04:47.686Z" }, + { url = "https://files.pythonhosted.org/packages/23/8e/f5ffc57520212bcd0937f175e550faf9c1bdab9e6c3b86401e734c01cc06/rdkit-2025.9.1-cp312-cp312-manylinux_2_28_aarch64.whl", hash = "sha256:b25c37c452ec41a5b5a2fce9aef9f637b569cfea464c3cfa95e81477123cd08f", size = 34646881, upload-time = "2025-10-06T09:04:52.322Z" }, + { url = "https://files.pythonhosted.org/packages/57/00/83ad99664bb500ee4864964c2b4099bdfb621b58fbc9ae265766e224fbd6/rdkit-2025.9.1-cp312-cp312-manylinux_2_28_x86_64.whl", hash = "sha256:cf7f05cea1e1c11662490e9bd5f95d44591dddc4c8f6a792d0eca6394edc606b", size = 36167555, upload-time = "2025-10-06T09:04:56.036Z" }, + { url = "https://files.pythonhosted.org/packages/dd/79/ed349426406a832fe99eb1c206e629e2194a1024cc22a5e8e7db8207b7d0/rdkit-2025.9.1-cp312-cp312-win_amd64.whl", hash = "sha256:a22d77f9b6b3c00d25001d323fb3febde7e96534f6e7cee8b84967220e729eea", size = 23578781, upload-time = "2025-10-06T09:05:00.134Z" }, + { url = "https://files.pythonhosted.org/packages/95/b7/b04ca14b404852700f273767070f5cd5b7500d9ca2257e41ed68de6c3955/rdkit-2025.9.1-cp313-cp313-macosx_10_15_x86_64.whl", hash = "sha256:42f3c4225aa17a53681e38fcbb53e44fa02a7937abe58fcb7605368f396008f3", size = 31765114, upload-time = "2025-10-06T09:05:04.204Z" }, + { url = 
"https://files.pythonhosted.org/packages/3a/2e/220f55ba68b2745c9f15fe3db94a1013ebd41e087534757a9b1dbaff393a/rdkit-2025.9.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:b6a14105beafa319d781854cc24acd6341cd600cd02e1659f93ac130b9d2787c", size = 29181058, upload-time = "2025-10-06T09:05:07.723Z" }, + { url = "https://files.pythonhosted.org/packages/88/b5/9501e4bc30d787b8266e5edd8268bec71972af1a7f717c7b837fdc1dd98a/rdkit-2025.9.1-cp313-cp313-manylinux_2_28_aarch64.whl", hash = "sha256:a6fded29ef405c09e48e2fd62c1680948c98f7d34f11ca209fc7690f44cd0f4e", size = 34645507, upload-time = "2025-10-06T09:05:11.493Z" }, + { url = "https://files.pythonhosted.org/packages/bb/bb/c562dc38db3e23c4a22459552c4642c3e6a59dc50ff42ba917c2d7095110/rdkit-2025.9.1-cp313-cp313-manylinux_2_28_x86_64.whl", hash = "sha256:9c6923647f4041df8c39b1aff147b36545e66573f331ce929418176849caebfe", size = 36166073, upload-time = "2025-10-06T09:05:15.796Z" }, + { url = "https://files.pythonhosted.org/packages/d4/01/3ba5d12ce9941767b2c4ab0fa672e2606b1d36b2b7f0798bce846930708b/rdkit-2025.9.1-cp313-cp313-win_amd64.whl", hash = "sha256:5ee6b3f2880a0d62930855ac3d56a21c518d952e9b660f60e2f49d0183e36b9f", size = 23577803, upload-time = "2025-10-06T09:05:19.128Z" }, + { url = "https://files.pythonhosted.org/packages/7d/94/225792e12046b492e5c37671480e58d2bff14035c0d18122fa2b7abca155/rdkit-2025.9.1-cp314-cp314-macosx_10_15_x86_64.whl", hash = "sha256:fa71303ae06d5d7aea6e76a5321a02f83ed4057ca10566269502b511a6ba8c48", size = 31767003, upload-time = "2025-10-06T09:05:22.734Z" }, + { url = "https://files.pythonhosted.org/packages/80/46/b06b28b43d70a83cfd35a1ede3b549d2b2bf9547e898bb95aa84e9ed10e8/rdkit-2025.9.1-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:02c8d361b0e7d0adda46687a82777a34be0b913cbbdacbedb88b7c066262de43", size = 29201272, upload-time = "2025-10-06T09:05:26.205Z" }, + { url = "https://files.pythonhosted.org/packages/ab/80/b9f990de716344e7c09dafaadc1c7397d15e78c02ecb984a2408ca2b3970/rdkit-2025.9.1-cp314-cp314-manylinux_2_28_aarch64.whl", hash = "sha256:3fe0a66acd4065cc379f0d46c9b7c89b3822642a44683311bd269093edbab967", size = 34676950, upload-time = "2025-10-06T09:05:29.88Z" }, + { url = "https://files.pythonhosted.org/packages/fe/76/e1acd4a03f31cf323bd4899062ae954df029d06b1f8794bb1a03dbbf9f5c/rdkit-2025.9.1-cp314-cp314-manylinux_2_28_x86_64.whl", hash = "sha256:1d6c0f30f206067648f567e62e294d98a55bf40c550684f3da54ea061305f5b5", size = 36174898, upload-time = "2025-10-06T09:05:34.096Z" }, + { url = "https://files.pythonhosted.org/packages/d5/0f/39b8fc050830146a011ed86b7c5c738b30fd22fb0532e91984ceef6d00a8/rdkit-2025.9.1-cp314-cp314-win_amd64.whl", hash = "sha256:5393ef7c40c7552d54f569057a4914159b0cd1a634f110e5a884659550f1ff0b", size = 24057571, upload-time = "2025-10-06T09:05:37.368Z" }, +] + [[package]] name = "referencing" version = "0.37.0" @@ -5939,6 +6252,20 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/a9/5c/bfd6bd0bf979426d405cc6e71eceb8701b148b16c21d2dc3c261efc61c7b/sqlparse-0.5.3-py3-none-any.whl", hash = "sha256:cf2196ed3418f3ba5de6af7e82c694a9fbdbfecccdfc72e281548517081f16ca", size = 44415, upload-time = "2024-12-10T12:05:27.824Z" }, ] +[[package]] +name = "stack-data" +version = "0.6.3" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "asttokens" }, + { name = "executing" }, + { name = "pure-eval" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/28/e3/55dcc2cfbc3ca9c29519eb6884dd1415ecb53b0e934862d3559ddcb7e20b/stack_data-0.6.3.tar.gz", hash = 
"sha256:836a778de4fec4dcd1dcd89ed8abff8a221f58308462e1c4aa2a3cf30148f0b9", size = 44707, upload-time = "2023-09-30T13:58:05.479Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/f1/7b/ce1eafaf1a76852e2ec9b22edecf1daa58175c090266e9f6c64afcd81d91/stack_data-0.6.3-py3-none-any.whl", hash = "sha256:d5558e0c25a4cb0853cddad3d77da9891a08cb85dd9f9f91b9f8cd66e511e695", size = 24521, upload-time = "2023-09-30T13:58:03.53Z" }, +] + [[package]] name = "starlette" version = "0.49.3" @@ -6390,6 +6717,25 @@ dependencies = [ { name = "torch", marker = "sys_platform == 'never'" }, ] +[[package]] +name = "tornado" +version = "6.5.2" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/09/ce/1eb500eae19f4648281bb2186927bb062d2438c2e5093d1360391afd2f90/tornado-6.5.2.tar.gz", hash = "sha256:ab53c8f9a0fa351e2c0741284e06c7a45da86afb544133201c5cc8578eb076a0", size = 510821, upload-time = "2025-08-08T18:27:00.78Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/f6/48/6a7529df2c9cc12efd2e8f5dd219516184d703b34c06786809670df5b3bd/tornado-6.5.2-cp39-abi3-macosx_10_9_universal2.whl", hash = "sha256:2436822940d37cde62771cff8774f4f00b3c8024fe482e16ca8387b8a2724db6", size = 442563, upload-time = "2025-08-08T18:26:42.945Z" }, + { url = "https://files.pythonhosted.org/packages/f2/b5/9b575a0ed3e50b00c40b08cbce82eb618229091d09f6d14bce80fc01cb0b/tornado-6.5.2-cp39-abi3-macosx_10_9_x86_64.whl", hash = "sha256:583a52c7aa94ee046854ba81d9ebb6c81ec0fd30386d96f7640c96dad45a03ef", size = 440729, upload-time = "2025-08-08T18:26:44.473Z" }, + { url = "https://files.pythonhosted.org/packages/1b/4e/619174f52b120efcf23633c817fd3fed867c30bff785e2cd5a53a70e483c/tornado-6.5.2-cp39-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b0fe179f28d597deab2842b86ed4060deec7388f1fd9c1b4a41adf8af058907e", size = 444295, upload-time = "2025-08-08T18:26:46.021Z" }, + { url = "https://files.pythonhosted.org/packages/95/fa/87b41709552bbd393c85dd18e4e3499dcd8983f66e7972926db8d96aa065/tornado-6.5.2-cp39-abi3-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:b186e85d1e3536d69583d2298423744740986018e393d0321df7340e71898882", size = 443644, upload-time = "2025-08-08T18:26:47.625Z" }, + { url = "https://files.pythonhosted.org/packages/f9/41/fb15f06e33d7430ca89420283a8762a4e6b8025b800ea51796ab5e6d9559/tornado-6.5.2-cp39-abi3-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e792706668c87709709c18b353da1f7662317b563ff69f00bab83595940c7108", size = 443878, upload-time = "2025-08-08T18:26:50.599Z" }, + { url = "https://files.pythonhosted.org/packages/11/92/fe6d57da897776ad2e01e279170ea8ae726755b045fe5ac73b75357a5a3f/tornado-6.5.2-cp39-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:06ceb1300fd70cb20e43b1ad8aaee0266e69e7ced38fa910ad2e03285009ce7c", size = 444549, upload-time = "2025-08-08T18:26:51.864Z" }, + { url = "https://files.pythonhosted.org/packages/9b/02/c8f4f6c9204526daf3d760f4aa555a7a33ad0e60843eac025ccfd6ff4a93/tornado-6.5.2-cp39-abi3-musllinux_1_2_i686.whl", hash = "sha256:74db443e0f5251be86cbf37929f84d8c20c27a355dd452a5cfa2aada0d001ec4", size = 443973, upload-time = "2025-08-08T18:26:53.625Z" }, + { url = "https://files.pythonhosted.org/packages/ae/2d/f5f5707b655ce2317190183868cd0f6822a1121b4baeae509ceb9590d0bd/tornado-6.5.2-cp39-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:b5e735ab2889d7ed33b32a459cac490eda71a1ba6857b0118de476ab6c366c04", size = 
443954, upload-time = "2025-08-08T18:26:55.072Z" }, + { url = "https://files.pythonhosted.org/packages/e8/59/593bd0f40f7355806bf6573b47b8c22f8e1374c9b6fd03114bd6b7a3dcfd/tornado-6.5.2-cp39-abi3-win32.whl", hash = "sha256:c6f29e94d9b37a95013bb669616352ddb82e3bfe8326fccee50583caebc8a5f0", size = 445023, upload-time = "2025-08-08T18:26:56.677Z" }, + { url = "https://files.pythonhosted.org/packages/c7/2a/f609b420c2f564a748a2d80ebfb2ee02a73ca80223af712fca591386cafb/tornado-6.5.2-cp39-abi3-win_amd64.whl", hash = "sha256:e56a5af51cc30dd2cae649429af65ca2f6571da29504a07995175df14c18f35f", size = 445427, upload-time = "2025-08-08T18:26:57.91Z" }, + { url = "https://files.pythonhosted.org/packages/5e/4f/e1f65e8f8c76d73658b33d33b81eed4322fb5085350e4328d5c956f0c8f9/tornado-6.5.2-cp39-abi3-win_arm64.whl", hash = "sha256:d6c33dc3672e3a1f3618eb63b7ef4683a7688e7b9e6e8f0d9aa5726360a004af", size = 444456, upload-time = "2025-08-08T18:26:59.207Z" }, +] + [[package]] name = "tqdm" version = "4.67.1" @@ -6402,6 +6748,15 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/d0/30/dc54f88dd4a2b5dc8a0279bdd7270e735851848b762aeb1c1184ed1f6b14/tqdm-4.67.1-py3-none-any.whl", hash = "sha256:26445eca388f82e72884e0d580d5464cd801a3ea01e63e5601bdff9ba6a48de2", size = 78540, upload-time = "2024-11-24T20:12:19.698Z" }, ] +[[package]] +name = "traitlets" +version = "5.14.3" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/eb/79/72064e6a701c2183016abbbfedaba506d81e30e232a68c9f0d6f6fcd1574/traitlets-5.14.3.tar.gz", hash = "sha256:9ed0579d3502c94b4b3732ac120375cda96f923114522847de4b3bb98b96b6b7", size = 161621, upload-time = "2024-04-19T11:11:49.746Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/00/c0/8f5d070730d7836adc9c9b6408dec68c6ced86b304a9b26a14df072a6e8c/traitlets-5.14.3-py3-none-any.whl", hash = "sha256:b74e89e397b1ed28cc831db7aea759ba6640cb3de13090ca145426688ff1ac4f", size = 85359, upload-time = "2024-04-19T11:11:46.763Z" }, +] + [[package]] name = "transformer-engine" version = "2.9.0+70f53666" @@ -6550,6 +6905,18 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/1c/a0/5ecfae12d78ea036a746c071e4c13b54b28d641efbba61d2947c73b3e6f9/wandb-0.23.0-py3-none-win_arm64.whl", hash = "sha256:fa0181b02ce4d1993588f4a728d8b73ae487eb3cb341e6ce01c156be7a98ec72", size = 17678649, upload-time = "2025-11-11T21:06:27.289Z" }, ] +[package.optional-dependencies] +media = [ + { name = "bokeh" }, + { name = "imageio" }, + { name = "moviepy" }, + { name = "numpy" }, + { name = "pillow" }, + { name = "plotly" }, + { name = "rdkit" }, + { name = "soundfile" }, +] + [[package]] name = "watchfiles" version = "1.1.1" @@ -7014,6 +7381,15 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/7b/d9/8d95e906764a386a3d3b596f3c68bb63687dfca806373509f51ce8eea81f/xxhash-3.6.0-pp311-pypy311_pp73-win_amd64.whl", hash = "sha256:15e0dac10eb9309508bfc41f7f9deaa7755c69e35af835db9cb10751adebc35d", size = 31565, upload-time = "2025-10-02T14:37:06.966Z" }, ] +[[package]] +name = "xyzservices" +version = "2025.11.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/ee/0f/022795fc1201e7c29e742a509913badb53ce0b38f64b6db859e2f6339da9/xyzservices-2025.11.0.tar.gz", hash = "sha256:2fc72b49502b25023fd71e8f532fb4beddbbf0aa124d90ea25dba44f545e17ce", size = 1135703, upload-time = "2025-11-22T11:31:51.82Z" } +wheels = [ + { url = 
"https://files.pythonhosted.org/packages/ef/5c/2c189d18d495dd0fa3f27ccc60762bbc787eed95b9b0147266e72bb76585/xyzservices-2025.11.0-py3-none-any.whl", hash = "sha256:de66a7599a8d6dad63980b77defd1d8f5a5a9cb5fc8774ea1c6e89ca7c2a3d2f", size = 93916, upload-time = "2025-11-22T11:31:50.525Z" }, +] + [[package]] name = "yarl" version = "1.22.0"