|  | 
| 68 | 68 | logger = get_logger(__name__) | 
| 69 | 69 | logger.warning( | 
| 70 | 70 |     "`diffusers.utils.testing_utils` is deprecated and will be removed in a future version. " | 
| 71 |  | -    "Please use `diffusers.utils.torch_utils` instead. " | 
|  | 71 | +    "Determinism and device backend utilities have been moved to `diffusers.utils.torch_utils`. " | 
| 72 | 72 | ) | 
| 73 | 73 | _required_peft_version = is_peft_available() and version.parse( | 
| 74 | 74 |     version.parse(importlib.metadata.version("peft")).base_version | 
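
For downstream code that still imports these helpers from `diffusers.utils.testing_utils`, the warning added above asks for a one-line import change. A minimal migration sketch, assuming the helper names shown in this diff are exposed under the same names in `diffusers.utils.torch_utils`:

```python
# Deprecated location (still works for now, but emits the warning added above):
# from diffusers.utils.testing_utils import enable_full_determinism, backend_empty_cache

# New location for determinism and device-backend helpers:
from diffusers.utils.torch_utils import enable_full_determinism, backend_empty_cache

enable_full_determinism()      # force deterministic PyTorch/cuDNN behavior
backend_empty_cache("cuda")    # release cached allocator memory on the given backend
```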
| @@ -804,10 +804,9 @@ def export_to_ply(mesh, output_ply_path: str = None): | 
| 804 | 804 |                 f.write(format.pack(*vertex)) | 
| 805 | 805 | 
 | 
| 806 | 806 |         if faces is not None: | 
| 807 | 807 |             format = struct.Struct("<B3I") | 
| 808 | 808 |             for tri in faces.tolist(): | 
| 809 | 809 |                 f.write(format.pack(len(tri), *tri)) | 
| 810 |  | - | 
| 811 | 810 |     return output_ply_path | 
| 812 | 811 | 
 | 
| 813 | 812 | 
 | 
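
Note that the face `struct.Struct` has to be created before the loop that calls `format.pack(...)`; otherwise the loop would still be using the vertex layout from the section above and `pack` would raise `struct.error`. For reference, a small standalone illustration of what the `"<B3I"` layout packs per PLY face record (one count byte followed by three little-endian uint32 vertex indices):

```python
import struct

face_format = struct.Struct("<B3I")    # 1 unsigned byte (vertex count) + 3 x uint32 indices
tri = [0, 1, 2]                        # one triangle, as indices into the vertex list
record = face_format.pack(len(tri), *tri)
assert len(record) == 13               # 1 + 3 * 4 bytes, no padding with "<"
```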
| @@ -1147,23 +1146,23 @@ def enable_full_determinism(): | 
| 1147 | 1146 |     Helper function for reproducible behavior during distributed training. See | 
| 1148 | 1147 |     - https://pytorch.org/docs/stable/notes/randomness.html for pytorch | 
| 1149 | 1148 |     """ | 
| 1150 |  | -    #  Enable PyTorch deterministic mode. This potentially requires either the environment | 
| 1151 |  | -    #  variable 'CUDA_LAUNCH_BLOCKING' or 'CUBLAS_WORKSPACE_CONFIG' to be set, | 
| 1152 |  | -    # depending on the CUDA version, so we set them both here | 
| 1153 |  | -    os.environ["CUDA_LAUNCH_BLOCKING"] = "1" | 
| 1154 |  | -    os.environ["CUBLAS_WORKSPACE_CONFIG"] = ":16:8" | 
| 1155 |  | -    torch.use_deterministic_algorithms(True) | 
|  | 1149 | +    from .torch_utils import enable_full_determinism as _enable_full_determinism | 
| 1156 | 1150 | 
 | 
| 1157 |  | -    # Enable CUDNN deterministic mode | 
| 1158 |  | -    torch.backends.cudnn.deterministic = True | 
| 1159 |  | -    torch.backends.cudnn.benchmark = False | 
| 1160 |  | -    torch.backends.cuda.matmul.allow_tf32 = False | 
|  | 1151 | +    logger.warning( | 
|  | 1152 | +        "enable_full_determinism has been moved to diffusers.utils.torch_utils. " | 
|  | 1153 | +        "Importing from diffusers.utils.testing_utils is deprecated and will be removed in a future version." | 
|  | 1154 | +    ) | 
|  | 1155 | +    return _enable_full_determinism() | 
| 1161 | 1156 | 
 | 
| 1162 | 1157 | 
 | 
| 1163 | 1158 | def disable_full_determinism(): | 
| 1164 |  | -    os.environ["CUDA_LAUNCH_BLOCKING"] = "0" | 
| 1165 |  | -    os.environ["CUBLAS_WORKSPACE_CONFIG"] = "" | 
| 1166 |  | -    torch.use_deterministic_algorithms(False) | 
|  | 1159 | +    from .torch_utils import disable_full_determinism as _disable_full_determinism | 
|  | 1160 | + | 
|  | 1161 | +    logger.warning( | 
|  | 1162 | +        "disable_full_determinism has been moved to diffusers.utils.torch_utils. " | 
|  | 1163 | +        "Importing from diffusers.utils.testing_utils is deprecated and will be removed in a future version." | 
|  | 1164 | +    ) | 
|  | 1165 | +    return _disable_full_determinism() | 
| 1167 | 1166 | 
 | 
| 1168 | 1167 | 
 | 
| 1169 | 1168 | # Utils for custom and alternative accelerator devices | 
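
The body removed above now lives in `diffusers.utils.torch_utils`; the shim kept here only warns and delegates. Based on the removed lines, the relocated helper is expected to keep applying roughly the same settings:

```python
import os
import torch

# Environment variables required for deterministic CUDA/cuBLAS kernels
# (which one is needed depends on the CUDA version, so both are set):
os.environ["CUDA_LAUNCH_BLOCKING"] = "1"
os.environ["CUBLAS_WORKSPACE_CONFIG"] = ":16:8"
torch.use_deterministic_algorithms(True)

# cuDNN / matmul determinism flags
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
torch.backends.cuda.matmul.allow_tf32 = False
```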
| @@ -1285,43 +1284,85 @@ def _device_agnostic_dispatch(device: str, dispatch_table: Dict[str, Callable], | 
| 1285 | 1284 | 
 | 
| 1286 | 1285 | # These are callables which automatically dispatch the function specific to the accelerator | 
| 1287 | 1286 | def backend_manual_seed(device: str, seed: int): | 
| 1288 |  | -    return _device_agnostic_dispatch(device, BACKEND_MANUAL_SEED, seed) | 
|  | 1287 | +    from .torch_utils import backend_manual_seed as _backend_manual_seed | 
|  | 1288 | + | 
|  | 1289 | +    logger.warning( | 
|  | 1290 | +        "backend_manual_seed has been moved to diffusers.utils.torch_utils. " | 
|  | 1291 | +        "diffusers.utils.testing_utils is deprecated and will be removed in a future version." | 
|  | 1292 | +    ) | 
|  | 1293 | +    return _backend_manual_seed(device, seed) | 
| 1289 | 1294 | 
 | 
| 1290 | 1295 | 
 | 
| 1291 | 1296 | def backend_synchronize(device: str): | 
| 1292 |  | -    return _device_agnostic_dispatch(device, BACKEND_SYNCHRONIZE) | 
|  | 1297 | +    from .torch_utils import backend_synchronize as _backend_synchronize | 
|  | 1298 | + | 
|  | 1299 | +    logger.warning( | 
|  | 1300 | +        "backend_synchronize has been moved to diffusers.utils.torch_utils. " | 
|  | 1301 | +        "diffusers.utils.testing_utils is deprecated and will be removed in a future version." | 
|  | 1302 | +    ) | 
|  | 1303 | +    return _backend_synchronize(device) | 
| 1293 | 1304 | 
 | 
| 1294 | 1305 | 
 | 
| 1295 | 1306 | def backend_empty_cache(device: str): | 
| 1296 |  | -    return _device_agnostic_dispatch(device, BACKEND_EMPTY_CACHE) | 
|  | 1307 | +    from .torch_utils import backend_empty_cache as _backend_empty_cache | 
|  | 1308 | + | 
|  | 1309 | +    logger.warning( | 
|  | 1310 | +        "backend_empty_cache has been moved to diffusers.utils.torch_utils. " | 
|  | 1311 | +        "diffusers.utils.testing_utils is deprecated and will be removed in a future version." | 
|  | 1312 | +    ) | 
|  | 1313 | +    return _backend_empty_cache(device) | 
| 1297 | 1314 | 
 | 
| 1298 | 1315 | 
 | 
| 1299 | 1316 | def backend_device_count(device: str): | 
| 1300 |  | -    return _device_agnostic_dispatch(device, BACKEND_DEVICE_COUNT) | 
|  | 1317 | +    from .torch_utils import backend_device_count as _backend_device_count | 
|  | 1318 | + | 
|  | 1319 | +    logger.warning( | 
|  | 1320 | +        "backend_device_count has been moved to diffusers.utils.torch_utils. " | 
|  | 1321 | +        "diffusers.utils.testing_utils is deprecated and will be removed in a future version." | 
|  | 1322 | +    ) | 
|  | 1323 | +    return _backend_device_count(device) | 
| 1301 | 1324 | 
 | 
| 1302 | 1325 | 
 | 
| 1303 | 1326 | def backend_reset_peak_memory_stats(device: str): | 
| 1304 |  | -    return _device_agnostic_dispatch(device, BACKEND_RESET_PEAK_MEMORY_STATS) | 
|  | 1327 | +    from .torch_utils import backend_reset_peak_memory_stats as _backend_reset_peak_memory_stats | 
|  | 1328 | + | 
|  | 1329 | +    logger.warning( | 
|  | 1330 | +        "backend_reset_peak_memory_stats has been moved to diffusers.utils.torch_utils. " | 
|  | 1331 | +        "diffusers.utils.testing_utils is deprecated and will be removed in a future version." | 
|  | 1332 | +    ) | 
|  | 1333 | +    return _backend_reset_peak_memory_stats(device) | 
| 1305 | 1334 | 
 | 
| 1306 | 1335 | 
 | 
| 1307 | 1336 | def backend_reset_max_memory_allocated(device: str): | 
| 1308 |  | -    return _device_agnostic_dispatch(device, BACKEND_RESET_MAX_MEMORY_ALLOCATED) | 
|  | 1337 | +    from .torch_utils import backend_reset_max_memory_allocated as _backend_reset_max_memory_allocated | 
|  | 1338 | + | 
|  | 1339 | +    logger.warning( | 
|  | 1340 | +        "backend_reset_max_memory_allocated has been moved to diffusers.utils.torch_utils. " | 
|  | 1341 | +        "diffusers.utils.testing_utils is deprecated and will be removed in a future version." | 
|  | 1342 | +    ) | 
|  | 1343 | +    return _backend_reset_max_memory_allocated(device) | 
| 1309 | 1344 | 
 | 
| 1310 | 1345 | 
 | 
| 1311 | 1346 | def backend_max_memory_allocated(device: str): | 
| 1312 |  | -    return _device_agnostic_dispatch(device, BACKEND_MAX_MEMORY_ALLOCATED) | 
|  | 1347 | +    from .torch_utils import backend_max_memory_allocated as _backend_max_memory_allocated | 
|  | 1348 | + | 
|  | 1349 | +    logger.warning( | 
|  | 1350 | +        "backend_max_memory_allocated has been moved to diffusers.utils.torch_utils. " | 
|  | 1351 | +        "diffusers.utils.testing_utils is deprecated and will be removed in a future version." | 
|  | 1352 | +    ) | 
|  | 1353 | +    return _backend_max_memory_allocated(device) | 
| 1313 | 1354 | 
 | 
| 1314 | 1355 | 
 | 
| 1315 | 1356 | # These are callables which return boolean behaviour flags and can be used to specify some | 
| 1316 | 1357 | # device agnostic alternative where the feature is unsupported. | 
| 1317 | 1358 | def backend_supports_training(device: str): | 
| 1318 |  | -    if not is_torch_available(): | 
| 1319 |  | -        return False | 
| 1320 |  | - | 
| 1321 |  | -    if device not in BACKEND_SUPPORTS_TRAINING: | 
| 1322 |  | -        device = "default" | 
|  | 1359 | +    from .torch_utils import backend_supports_training as _backend_supports_training | 
| 1323 | 1360 | 
 | 
| 1324 |  | -    return BACKEND_SUPPORTS_TRAINING[device] | 
|  | 1361 | +    logger.warning( | 
|  | 1362 | +        "backend_supports_training has been moved to diffusers.utils.torch_utils. " | 
|  | 1363 | +        "diffusers.utils.testing_utils is deprecated and will be removed in a future version." | 
|  | 1364 | +    ) | 
|  | 1365 | +    return _backend_supports_training(device) | 
| 1325 | 1366 | 
 | 
| 1326 | 1367 | 
 | 
| 1327 | 1368 | # Guard for when Torch is not available | 
|  | 
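
All of the `backend_*` helpers above follow the same pattern: a local import from `diffusers.utils.torch_utils`, a deprecation warning, and a straight delegation. A hedged usage sketch of the relocated helpers, assuming they are exposed under the same names in `torch_utils` as these shims imply:

```python
from diffusers.utils.torch_utils import (
    backend_manual_seed,
    backend_synchronize,
    backend_max_memory_allocated,
)

device = "cuda"  # or another accelerator backend, e.g. "xpu" or "mps"

backend_manual_seed(device, 0)        # seed the RNG for the selected backend
# ... run the code under test ...
backend_synchronize(device)           # wait for queued kernels before measuring
peak_bytes = backend_max_memory_allocated(device)
print(f"peak memory allocated: {peak_bytes} bytes")
```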