Skip to content

Commit 0680e07

Browse files
committed
style: fix linting issues with black and isort
1 parent: fee4800 · commit: 0680e07

File tree

4 files changed: +5 −14 lines changed

examples/nanovlm/nanovlm_train.sh

Lines changed: 1 addition & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -1,9 +1,5 @@
11
#!/bin/bash
2-
export PYTHONPATH=/mnt/afs/niuyuwei/Job/lmms-engine/src:$PYTHONPATH
3-
export WANDB_DISABLED=true
4-
export WANDB_MODE=disabled
5-
6-
DATASET_PATH="./data/llava_next.yaml"
2+
DATASET_PATH="/path/to/dataset.yaml"
73
PROCESSOR_NAME="LMMs-Lab-Speedrun/NanoVLM_Init"
84
MODEL_PATH="LMMs-Lab-Speedrun/NanoVLM_Init"
95
ATTN_IMPLEMENTATION="flash_attention_2"

src/lmms_engine/datasets/processor/nanovlm_processor.py

Lines changed: 1 addition & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -94,9 +94,7 @@ def get_qwen_template_labels(
9494
image_start_from = 0
9595
video_start_from = 0
9696
if add_system_prompt and hf_messages[0]["role"] != "system":
97-
input_id += self._apply_chat_template(
98-
[{"role": "system", "content": system_message}], tokenize=True
99-
)
97+
input_id += self._apply_chat_template([{"role": "system", "content": system_message}], tokenize=True)
10098
target += [-100] * len(input_id)
10199

102100
for message in hf_messages:

src/lmms_engine/models/nanovlm/configuration_nanovlm.py

Lines changed: 1 addition & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -42,9 +42,7 @@ def __init__(
4242
self.projector_hidden_size = projector_hidden_size
4343
self.projector_num_layers = int(projector_num_layers)
4444
self.projector_hidden_act = projector_hidden_act
45-
self.vision_feature_dim = int(
46-
vision_feature_dim or getattr(self.vision_config, "hidden_size", 1152)
47-
)
45+
self.vision_feature_dim = int(vision_feature_dim or getattr(self.vision_config, "hidden_size", 1152))
4846
self.image_token_count = int(image_token_count)
4947
self.vocab_size = getattr(self.text_config, "vocab_size", None)
5048
super().__init__(**kwargs)

src/lmms_engine/models/nanovlm/modeling_nanovlm.py

Lines changed: 2 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -6,6 +6,7 @@
66
from transformers.activations import ACT2FN
77
from transformers.generation import GenerationMixin
88
from transformers.modeling_utils import PreTrainedModel
9+
910
from .configuration_nanovlm import NanovlmConfig
1011

1112

@@ -101,9 +102,7 @@ def set_output_embeddings(self, new_embeddings):
101102
return self.language_model.set_output_embeddings(new_embeddings)
102103

103104
def resize_token_embeddings(self, new_num_tokens: int, pad_to_multiple_of: Optional[int] = None):
104-
return self.language_model.resize_token_embeddings(
105-
new_num_tokens, pad_to_multiple_of=pad_to_multiple_of
106-
)
105+
return self.language_model.resize_token_embeddings(new_num_tokens, pad_to_multiple_of=pad_to_multiple_of)
107106

108107
def forward(
109108
self,

0 commit comments

Comments (0)