from datetime import timedelta
from typing import List, Optional, Tuple, Union

import torch
from accelerate import Accelerator, DistributedType, InitProcessGroupKwargs
from accelerate.state import AcceleratorState
from lmms_eval.api.model import lmms
from lmms_eval.models.video_llava import VideoLLaVA as VL
from loguru import logger
from transformers import (AutoConfig, AutoModelForCausalLM, AutoTokenizer,
                          GenerationConfig, VideoLlavaForConditionalGeneration,
                          VideoLlavaProcessor)

from llmc.utils.registry_factory import MODEL_REGISTRY

from .llama import Llama


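# llmc-side wrapper: loads the HF Video-LLaVA checkpoint and exposes its
# language model and vision modules to the compression pipeline.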
@MODEL_REGISTRY
class VideoLLaVA(Llama):
    def __init__(self, config, device_map=None, use_cache=False):
        super().__init__(config, device_map, use_cache)

    def build_model(self):
        self.vlm_model_config = AutoConfig.from_pretrained(
            self.model_path, trust_remote_code=True
        )
        if not self.use_cache:
            self.vlm_model_config.text_config.use_cache = False
        logger.info(f'self.vlm_model_config : {self.vlm_model_config}')
        self.vlm_model = VideoLlavaForConditionalGeneration.from_pretrained(
            self.model_path,
            config=self.vlm_model_config,
            torch_dtype=self.torch_dtype,
            low_cpu_mem_usage=True,
        )
        self.eval_name = 'VideoLLaVAHfEval'
        self.mm_model = self.vlm_model
        logger.info(f'self.vlm_model : {self.vlm_model}')
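        # Expose the sub-modules separately; the language model becomes
        # `self.model`, matching what the base `Llama` wrapper expects to work on.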
        self.video_tower = self.vlm_model.video_tower
        self.image_tower = self.vlm_model.image_tower
        self.vision_projector = self.vlm_model.multi_modal_projector
        self.model = self.vlm_model.language_model
        self.model_config = self.vlm_model_config.text_config
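        # Multimodal metadata (token sequence lengths, special-token indices and
        # vision-feature selection) used by the token-pruning algorithms.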
        self.pruning_config = {
            'is_video_model': True,
            'image_token_length': self.vlm_model_config.image_seq_length,
            'video_token_length': self.vlm_model_config.video_seq_length,
            'select_layer': self.vlm_model_config.vision_feature_layer,
            'select_feature': self.vlm_model_config.vision_feature_select_strategy,
            'image_token_index': self.vlm_model_config.image_token_index,
            'video_token_index': self.vlm_model_config.video_token_index,
        }


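# lmms-eval-side wrapper: reuses lmms_eval's Video-LLaVA adapter but takes the
# already-compressed model as `llmc_model`; registered under the name stored in
# `eval_name` above.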
@MODEL_REGISTRY
class VideoLLaVAHfEval(VL):
    def __init__(
        self,
        llmc_model,
        pretrained: str = 'LanguageBind/Video-LLaVA-7B-hf',
        truncation: Optional[bool] = True,
        device: Optional[str] = 'cuda:0',
        dtype: Optional[Union[str, torch.dtype]] = 'auto',
        batch_size: Optional[Union[int, str]] = 1,
        trust_remote_code: Optional[bool] = False,
        revision=None,
        attn_implementation=(
            'sdpa' if torch.__version__ > '2.1.2' else 'eager'
        ),
        # inference implementation for attention, can be "sdpa", "eager", "flash_attention_2".
        # Seems FA2 is not effective during inference:
        # https://discuss.huggingface.co/t/flash-attention-has-no-effect-on-inference/73453/5
        device_map='cuda:0',
        conv_template='llava_v1',
        use_cache=True,
        truncate_context=False,
        num_frames: int = 8,
        # whether to truncate the context in generation,
        # set it False for LLaVA-1.6
        **kwargs,
    ) -> None:
        lmms.__init__(self)
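        # Bind each accelerate process to its local GPU; a single process with
        # device_map='auto' falls back to the user-supplied `device`.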
        accelerator_kwargs = InitProcessGroupKwargs(timeout=timedelta(weeks=52))
        accelerator = Accelerator(kwargs_handlers=[accelerator_kwargs])
        if accelerator.num_processes > 1:
            self._device = torch.device(f'cuda:{accelerator.local_process_index}')
            self.device_map = f'cuda:{accelerator.local_process_index}'
        elif accelerator.num_processes == 1 and device_map == 'auto':
            self._device = torch.device(device)
            self.device_map = device_map
        else:
            self._device = torch.device(f'cuda:{accelerator.local_process_index}')
            self.device_map = f'cuda:{accelerator.local_process_index}'

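        # Wire in the compressed model directly; only the processor (tokenizer
        # plus video preprocessing) comes from the `pretrained` hub repo.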
        self.pretrained = pretrained
        self._model = llmc_model.cuda()
        self._processor = VideoLlavaProcessor.from_pretrained(pretrained)
        self.prompt = 'USER: <video>{}? ASSISTANT:'
        self.num_frames = num_frames
        assert num_frames == 8, 'num_frames must be 8'
        # self.model_name = get_model_name_from_path(pretrained)
        # self._tokenizer, self._model, self.processor,
        # self._max_length = load_pretrained_model(pretrained,
        # None, self.model_name, device_map=self.device_map)
        # self.video_processor = self.processor["video"]
        self._config = self._model.config
        self.model.eval()
        self.model.tie_weights()
        self.truncation = truncation
        self.batch_size_per_gpu = int(batch_size)
        self.conv_template = conv_template
        self.use_cache = use_cache
        self.truncate_context = truncate_context
        # assert self.batch_size_per_gpu == 1,
        # "Llava currently does not support batched generation.
        # See https://github.com/haotian-liu/LLaVA/issues/754. HF Llava also has this issue."
        if accelerator.num_processes > 1:
            assert accelerator.distributed_type in [
                DistributedType.FSDP,
                DistributedType.MULTI_GPU,
                DistributedType.DEEPSPEED,
            ], 'Unsupported distributed type provided. Only DDP, FSDP and DeepSpeed are supported.'
            if accelerator.distributed_type == DistributedType.DEEPSPEED:
                kwargs = {
                    'train_micro_batch_size_per_gpu': self.batch_size_per_gpu,
                    'train_batch_size': self.batch_size_per_gpu
                    * accelerator.num_processes,
                }
                AcceleratorState().deepspeed_plugin.deepspeed_config_process(
                    must_match=True, **kwargs
                )
                logger.info(
                    'Detected that you are using DistributedType.DEEPSPEED. ' +
                    'Make sure you run `accelerate config` and set zero stage to 0'
                )
            if (
                accelerator.distributed_type == DistributedType.FSDP
                or accelerator.distributed_type == DistributedType.DEEPSPEED
            ):
                self._model = accelerator.prepare(self.model)
            else:
                self._model = accelerator.prepare_model(
                    self.model, evaluation_mode=True
                )
            self.accelerator = accelerator
            if self.accelerator.is_local_main_process:
                logger.info(
                    f'Using {accelerator.num_processes} devices with data parallelism'
                )
            self._rank = self.accelerator.local_process_index
            self._world_size = self.accelerator.num_processes
        elif accelerator.num_processes == 1 and device_map == 'auto':
            logger.info(
                f'Using {accelerator.num_processes} devices with tensor parallelism'
            )
            self._rank = 0
            self._world_size = 1
        else:
            logger.info(f'Using single device: {self._device}')
            self.model.to(self._device)
            self._rank = 0
            self._world_size = 1
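# ---------------------------------------------------------------------------
# Usage sketch (illustrative only, not taken from this diff): a typical llmc
# run looks the wrapper up in MODEL_REGISTRY by name, builds it, compresses
# `model.model` (the language model), and then hands the resulting `vlm_model`
# to `VideoLLaVAHfEval` as `llmc_model` for lmms-eval evaluation. The
# dict-style registry lookup and the `model_cfg` object are assumptions.
#
#   model = MODEL_REGISTRY['VideoLLaVA'](model_cfg, device_map=None, use_cache=False)
#   model.build_model()
#   eval_wrapper = VideoLLaVAHfEval(llmc_model=model.vlm_model, batch_size=1)
# ---------------------------------------------------------------------------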