Skip to content

Commit 2f5e324

Browse files
[NFC] Remove redundant torch.no_grad in models and pipelines (vllm-project#854)
Signed-off-by: yuanheng <jonathan.zhaoyh@gmail.com>
1 parent 16be03b commit 2f5e324

File tree

9 files changed

+0
-14
lines changed

9 files changed

+0
-14
lines changed

vllm_omni/diffusion/models/bagel/bagel_transformer.py

Lines changed: 0 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -714,7 +714,6 @@ def prepare_prompts(self, curr_kvlens, curr_rope, prompts, tokenizer, new_token_
714714

715715
return generation_input, newlens, new_rope
716716

717-
@torch.no_grad
718717
def forward_cache_update_text(
719718
self,
720719
past_key_values: NaiveCache,
@@ -821,7 +820,6 @@ def prepare_vae_images(self, curr_kvlens, curr_rope, images, transforms, new_tok
821820

822821
return generation_input, newlens, new_rope
823822

824-
@torch.no_grad
825823
def forward_cache_update_vae(
826824
self,
827825
vae_model,
@@ -946,7 +944,6 @@ def prepare_vit_images(self, curr_kvlens, curr_rope, images, transforms, new_tok
946944

947945
return generation_input, newlens, new_rope
948946

949-
@torch.no_grad
950947
def forward_cache_update_vit(
951948
self,
952949
past_key_values: NaiveCache,
@@ -1064,7 +1061,6 @@ def prepare_input(self, curr_kvlens, curr_rope, image_sizes, new_token_ids=None)
10641061
def prepare_vae_latent(self, curr_kvlens, curr_rope, image_sizes, new_token_ids):
10651062
return self.prepare_input(curr_kvlens, curr_rope, image_sizes, new_token_ids)
10661063

1067-
@torch.no_grad
10681064
def generate_image(
10691065
self,
10701066
packed_text_ids: torch.LongTensor,
@@ -1121,7 +1117,6 @@ def generate_image(
11211117
unpacked_latent = x_t.split((packed_seqlens - 2).tolist())
11221118
return unpacked_latent
11231119

1124-
@torch.no_grad
11251120
def _forward_flow(
11261121
self,
11271122
x_t: torch.Tensor,

vllm_omni/diffusion/models/flux2_klein/pipeline_flux2_klein.py

Lines changed: 0 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -644,7 +644,6 @@ def current_timestep(self):
644644
def interrupt(self):
645645
return self._interrupt
646646

647-
@torch.no_grad()
648647
def forward(
649648
self,
650649
req: OmniDiffusionRequest,

vllm_omni/diffusion/models/ovis_image/pipeline_ovis_image.py

Lines changed: 0 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -516,7 +516,6 @@ def current_timestep(self):
516516
def interrupt(self):
517517
return self._interrupt
518518

519-
@torch.no_grad()
520519
def forward(
521520
self,
522521
req: OmniDiffusionRequest,

vllm_omni/diffusion/models/stable_audio/pipeline_stable_audio.py

Lines changed: 0 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -347,7 +347,6 @@ def prepare_latents(
347347
latents = latents * self.scheduler.init_noise_sigma
348348
return latents
349349

350-
@torch.no_grad()
351350
def forward(
352351
self,
353352
req: OmniDiffusionRequest,

vllm_omni/diffusion/models/wan2_2/pipeline_wan2_2.py

Lines changed: 0 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -278,7 +278,6 @@ def num_timesteps(self):
278278
def current_timestep(self):
279279
return self._current_timestep
280280

281-
@torch.no_grad()
282281
def forward(
283282
self,
284283
req: OmniDiffusionRequest,

vllm_omni/diffusion/models/wan2_2/pipeline_wan2_2_i2v.py

Lines changed: 0 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -254,7 +254,6 @@ def encode_image(
254254
image_embeds = self.image_encoder(pixel_values, output_hidden_states=True)
255255
return image_embeds.hidden_states[-2]
256256

257-
@torch.no_grad()
258257
def forward(
259258
self,
260259
req: OmniDiffusionRequest,

vllm_omni/diffusion/models/wan2_2/pipeline_wan2_2_ti2v.py

Lines changed: 0 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -193,7 +193,6 @@ def num_timesteps(self):
193193
def current_timestep(self):
194194
return self._current_timestep
195195

196-
@torch.no_grad()
197196
def forward(
198197
self,
199198
req: OmniDiffusionRequest,

vllm_omni/diffusion/models/z_image/pipeline_z_image.py

Lines changed: 0 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -312,7 +312,6 @@ def num_timesteps(self):
312312
def interrupt(self):
313313
return self._interrupt
314314

315-
@torch.no_grad()
316315
def forward(
317316
self,
318317
req: OmniDiffusionRequest,

vllm_omni/model_executor/models/qwen2_5_omni/qwen2_5_omni_token2wav.py

Lines changed: 0 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -1258,7 +1258,6 @@ def forward(
12581258

12591259
return output
12601260

1261-
@torch.no_grad()
12621261
def sample(
12631262
self,
12641263
conditioning_vector,
@@ -1333,7 +1332,6 @@ def ode_function(time_step, hidden_states):
13331332
generated_mel_spectrogram = generated_waveform.permute(0, 2, 1)
13341333
return generated_mel_spectrogram
13351334

1336-
@torch.no_grad()
13371335
def fast_block_sample(
13381336
self,
13391337
conditioning_vector: torch.Tensor,

0 commit comments

Comments (0)