@@ -202,7 +202,7 @@ def test_flux_the_last_ben(self):
         self.pipeline.load_lora_weights("TheLastBen/Jon_Snow_Flux_LoRA", weight_name="jon_snow.safetensors")
         self.pipeline.fuse_lora()
         self.pipeline.unload_lora_weights()
-        self.pipeline = self.pipeline.to("cuda")
+        self.pipeline = self.pipeline.to(torch_device)

         prompt = "jon snow eating pizza with ketchup"

@@ -227,7 +227,7 @@ def test_flux_kohya(self):
         # Instead of calling `enable_model_cpu_offload()`, we do a cuda placement here because the CI
         # run supports it. We have about 34GB RAM in the CI runner which kills the test when run with
         # `enable_model_cpu_offload()`.
-        self.pipeline = self.pipeline.to("cuda")
+        self.pipeline = self.pipeline.to(torch_device)

         prompt = "The cat with a brain slug earring"
         out = self.pipeline(
@@ -249,7 +249,7 @@ def test_flux_kohya_with_text_encoder(self):
         self.pipeline.load_lora_weights("cocktailpeanut/optimus", weight_name="optimus.safetensors")
         self.pipeline.fuse_lora()
         self.pipeline.unload_lora_weights()
-        self.pipeline = self.pipeline.to("cuda")
+        self.pipeline = self.pipeline.to(torch_device)

         prompt = "optimus is cleaning the house with broomstick"
         out = self.pipeline(
@@ -271,7 +271,7 @@ def test_flux_xlabs(self):
         self.pipeline.load_lora_weights("XLabs-AI/flux-lora-collection", weight_name="disney_lora.safetensors")
         self.pipeline.fuse_lora()
         self.pipeline.unload_lora_weights()
-        self.pipeline = self.pipeline.to("cuda")
+        self.pipeline = self.pipeline.to(torch_device)

         prompt = "A blue jay standing on a large basket of rainbow macarons, disney style"

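Note on the change: torch_device here is the device string used throughout the diffusers test suite (typically imported from diffusers.utils.testing_utils; import path assumed), so hardcoding "cuda" is replaced with whatever accelerator the runner actually has. A minimal sketch of how such a constant is commonly resolved, not the exact library implementation:

    # Sketch only: resolve a device string similar to diffusers' torch_device.
    import torch

    if torch.cuda.is_available():
        torch_device = "cuda"
    elif torch.backends.mps.is_available():
        torch_device = "mps"
    else:
        torch_device = "cpu"

    # It is a plain device string, so it drops into the same call the diff touches:
    # self.pipeline = self.pipeline.to(torch_device)

This also keeps the intent of the in-test comment above: direct device placement is used instead of enable_model_cpu_offload() because the CI runner's RAM cannot handle offloading, and torch_device simply makes that placement portable across runners.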