Skip to content

Commit e3f67db

Browse files
patrickvonplaten authored and Jimmy committed
Correct sigmas cpu settings (huggingface#6708)
1 parent 7fbd405 commit e3f67db

14 files changed

+28
-28
lines changed

src/diffusers/schedulers/scheduling_consistency_models.py

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -98,7 +98,7 @@ def __init__(
9898
self.custom_timesteps = False
9999
self.is_scale_input_called = False
100100
self._step_index = None
101-
self.sigmas.to("cpu") # to avoid too much CPU/GPU communication
101+
self.sigmas = self.sigmas.to("cpu") # to avoid too much CPU/GPU communication
102102

103103
def index_for_timestep(self, timestep, schedule_timesteps=None):
104104
if schedule_timesteps is None:
@@ -231,7 +231,7 @@ def set_timesteps(
231231
self.timesteps = torch.from_numpy(timesteps).to(device=device)
232232

233233
self._step_index = None
234-
self.sigmas.to("cpu") # to avoid too much CPU/GPU communication
234+
self.sigmas = self.sigmas.to("cpu") # to avoid too much CPU/GPU communication
235235

236236
# Modified _convert_to_karras implementation that takes in ramp as argument
237237
def _convert_to_karras(self, ramp):

src/diffusers/schedulers/scheduling_deis_multistep.py

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -187,7 +187,7 @@ def __init__(
187187
self.model_outputs = [None] * solver_order
188188
self.lower_order_nums = 0
189189
self._step_index = None
190-
self.sigmas.to("cpu") # to avoid too much CPU/GPU communication
190+
self.sigmas = self.sigmas.to("cpu") # to avoid too much CPU/GPU communication
191191

192192
@property
193193
def step_index(self):
@@ -255,7 +255,7 @@ def set_timesteps(self, num_inference_steps: int, device: Union[str, torch.devic
255255

256256
# add an index counter for schedulers that allow duplicated timesteps
257257
self._step_index = None
258-
self.sigmas.to("cpu") # to avoid too much CPU/GPU communication
258+
self.sigmas = self.sigmas.to("cpu") # to avoid too much CPU/GPU communication
259259

260260
# Copied from diffusers.schedulers.scheduling_ddpm.DDPMScheduler._threshold_sample
261261
def _threshold_sample(self, sample: torch.FloatTensor) -> torch.FloatTensor:

src/diffusers/schedulers/scheduling_dpmsolver_multistep.py

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -227,7 +227,7 @@ def __init__(
227227
self.model_outputs = [None] * solver_order
228228
self.lower_order_nums = 0
229229
self._step_index = None
230-
self.sigmas.to("cpu") # to avoid too much CPU/GPU communication
230+
self.sigmas = self.sigmas.to("cpu") # to avoid too much CPU/GPU communication
231231

232232
@property
233233
def step_index(self):
@@ -311,7 +311,7 @@ def set_timesteps(self, num_inference_steps: int = None, device: Union[str, torc
311311

312312
# add an index counter for schedulers that allow duplicated timesteps
313313
self._step_index = None
314-
self.sigmas.to("cpu") # to avoid too much CPU/GPU communication
314+
self.sigmas = self.sigmas.to("cpu") # to avoid too much CPU/GPU communication
315315

316316
# Copied from diffusers.schedulers.scheduling_ddpm.DDPMScheduler._threshold_sample
317317
def _threshold_sample(self, sample: torch.FloatTensor) -> torch.FloatTensor:

src/diffusers/schedulers/scheduling_dpmsolver_multistep_inverse.py

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -213,7 +213,7 @@ def __init__(
213213
self.model_outputs = [None] * solver_order
214214
self.lower_order_nums = 0
215215
self._step_index = None
216-
self.sigmas.to("cpu") # to avoid too much CPU/GPU communication
216+
self.sigmas = self.sigmas.to("cpu") # to avoid too much CPU/GPU communication
217217
self.use_karras_sigmas = use_karras_sigmas
218218

219219
@property
@@ -294,7 +294,7 @@ def set_timesteps(self, num_inference_steps: int = None, device: Union[str, torc
294294

295295
# add an index counter for schedulers that allow duplicated timesteps
296296
self._step_index = None
297-
self.sigmas.to("cpu") # to avoid too much CPU/GPU communication
297+
self.sigmas = self.sigmas.to("cpu") # to avoid too much CPU/GPU communication
298298

299299
# Copied from diffusers.schedulers.scheduling_ddpm.DDPMScheduler._threshold_sample
300300
def _threshold_sample(self, sample: torch.FloatTensor) -> torch.FloatTensor:

src/diffusers/schedulers/scheduling_dpmsolver_sde.py

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -198,7 +198,7 @@ def __init__(
198198
self.noise_sampler = None
199199
self.noise_sampler_seed = noise_sampler_seed
200200
self._step_index = None
201-
self.sigmas.to("cpu") # to avoid too much CPU/GPU communication
201+
self.sigmas = self.sigmas.to("cpu") # to avoid too much CPU/GPU communication
202202

203203
# Copied from diffusers.schedulers.scheduling_heun_discrete.HeunDiscreteScheduler.index_for_timestep
204204
def index_for_timestep(self, timestep, schedule_timesteps=None):
@@ -348,7 +348,7 @@ def set_timesteps(
348348
self.mid_point_sigma = None
349349

350350
self._step_index = None
351-
self.sigmas.to("cpu") # to avoid too much CPU/GPU communication
351+
self.sigmas = self.sigmas.to("cpu") # to avoid too much CPU/GPU communication
352352
self.noise_sampler = None
353353

354354
# for exp beta schedules, such as the one for `pipeline_shap_e.py`

src/diffusers/schedulers/scheduling_dpmsolver_singlestep.py

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -210,7 +210,7 @@ def __init__(
210210
self.sample = None
211211
self.order_list = self.get_order_list(num_train_timesteps)
212212
self._step_index = None
213-
self.sigmas.to("cpu") # to avoid too much CPU/GPU communication
213+
self.sigmas = self.sigmas.to("cpu") # to avoid too much CPU/GPU communication
214214

215215
def get_order_list(self, num_inference_steps: int) -> List[int]:
216216
"""
@@ -315,7 +315,7 @@ def set_timesteps(self, num_inference_steps: int, device: Union[str, torch.devic
315315

316316
# add an index counter for schedulers that allow duplicated timesteps
317317
self._step_index = None
318-
self.sigmas.to("cpu") # to avoid too much CPU/GPU communication
318+
self.sigmas = self.sigmas.to("cpu") # to avoid too much CPU/GPU communication
319319

320320
# Copied from diffusers.schedulers.scheduling_ddpm.DDPMScheduler._threshold_sample
321321
def _threshold_sample(self, sample: torch.FloatTensor) -> torch.FloatTensor:

src/diffusers/schedulers/scheduling_euler_ancestral_discrete.py

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -216,7 +216,7 @@ def __init__(
216216
self.is_scale_input_called = False
217217

218218
self._step_index = None
219-
self.sigmas.to("cpu") # to avoid too much CPU/GPU communication
219+
self.sigmas = self.sigmas.to("cpu") # to avoid too much CPU/GPU communication
220220

221221
@property
222222
def init_noise_sigma(self):
@@ -300,7 +300,7 @@ def set_timesteps(self, num_inference_steps: int, device: Union[str, torch.devic
300300

301301
self.timesteps = torch.from_numpy(timesteps).to(device=device)
302302
self._step_index = None
303-
self.sigmas.to("cpu") # to avoid too much CPU/GPU communication
303+
self.sigmas = self.sigmas.to("cpu") # to avoid too much CPU/GPU communication
304304

305305
# Copied from diffusers.schedulers.scheduling_euler_discrete.EulerDiscreteScheduler._init_step_index
306306
def _init_step_index(self, timestep):

src/diffusers/schedulers/scheduling_euler_discrete.py

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -237,7 +237,7 @@ def __init__(
237237
self.use_karras_sigmas = use_karras_sigmas
238238

239239
self._step_index = None
240-
self.sigmas.to("cpu") # to avoid too much CPU/GPU communication
240+
self.sigmas = self.sigmas.to("cpu") # to avoid too much CPU/GPU communication
241241

242242
@property
243243
def init_noise_sigma(self):
@@ -342,7 +342,7 @@ def set_timesteps(self, num_inference_steps: int, device: Union[str, torch.devic
342342

343343
self.sigmas = torch.cat([sigmas, torch.zeros(1, device=sigmas.device)])
344344
self._step_index = None
345-
self.sigmas.to("cpu") # to avoid too much CPU/GPU communication
345+
self.sigmas = self.sigmas.to("cpu") # to avoid too much CPU/GPU communication
346346

347347
def _sigma_to_t(self, sigma, log_sigmas):
348348
# get log sigma

src/diffusers/schedulers/scheduling_heun_discrete.py

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -148,7 +148,7 @@ def __init__(
148148
self.use_karras_sigmas = use_karras_sigmas
149149

150150
self._step_index = None
151-
self.sigmas.to("cpu") # to avoid too much CPU/GPU communication
151+
self.sigmas = self.sigmas.to("cpu") # to avoid too much CPU/GPU communication
152152

153153
def index_for_timestep(self, timestep, schedule_timesteps=None):
154154
if schedule_timesteps is None:
@@ -270,7 +270,7 @@ def set_timesteps(
270270
self.dt = None
271271

272272
self._step_index = None
273-
self.sigmas.to("cpu") # to avoid too much CPU/GPU communication
273+
self.sigmas = self.sigmas.to("cpu") # to avoid too much CPU/GPU communication
274274

275275
# (YiYi Notes: keep this for now since we are keeping add_noise function which use index_for_timestep)
276276
# for exp beta schedules, such as the one for `pipeline_shap_e.py`

src/diffusers/schedulers/scheduling_k_dpm_2_ancestral_discrete.py

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -140,7 +140,7 @@ def __init__(
140140
# set all values
141141
self.set_timesteps(num_train_timesteps, None, num_train_timesteps)
142142
self._step_index = None
143-
self.sigmas.to("cpu") # to avoid too much CPU/GPU communication
143+
self.sigmas = self.sigmas.to("cpu") # to avoid too much CPU/GPU communication
144144

145145
# Copied from diffusers.schedulers.scheduling_heun_discrete.HeunDiscreteScheduler.index_for_timestep
146146
def index_for_timestep(self, timestep, schedule_timesteps=None):
@@ -300,7 +300,7 @@ def set_timesteps(
300300
self._index_counter = defaultdict(int)
301301

302302
self._step_index = None
303-
self.sigmas.to("cpu") # to avoid too much CPU/GPU communication
303+
self.sigmas = self.sigmas.to("cpu") # to avoid too much CPU/GPU communication
304304

305305
# Copied from diffusers.schedulers.scheduling_euler_discrete.EulerDiscreteScheduler._sigma_to_t
306306
def _sigma_to_t(self, sigma, log_sigmas):

0 commit comments

Comments (0)