@@ -242,6 +242,26 @@ Example::
Trainer(accelerator=MyOwnAcc())
+.. note::
+
+    If the ``devices`` flag is not defined, it defaults to ``"auto"`` and the number of devices is obtained
+    from the accelerator's ``auto_device_count``.
+
+    .. code-block:: python
+
+        # This is part of the built-in `GPUAccelerator`
+        class GPUAccelerator(Accelerator):
+            """Accelerator for GPU devices."""
+
+            @staticmethod
+            def auto_device_count() -> int:
+                """Get the devices when set to auto."""
+                return torch.cuda.device_count()
+
+
+        # Training with the GPU Accelerator, using the total number of GPUs available on the system
+        Trainer(accelerator="gpu")
+
.. warning:: Passing training strategies (e.g., ``"ddp"``) to ``accelerator`` has been deprecated in v1.5.0
    and will be removed in v1.7.0. Please use the ``strategy`` argument instead.
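For a user-defined accelerator such as the ``MyOwnAcc`` shown above, the same hook decides what ``devices="auto"`` resolves to. The block below is a minimal sketch rather than part of this patch: the ``MyOwnAcc`` body and its CPU-core-based count are assumptions, and any other methods required by ``Accelerator`` are omitted.

.. code-block:: python

    import os

    from pytorch_lightning.accelerators import Accelerator


    class MyOwnAcc(Accelerator):
        """Hypothetical custom accelerator, mirroring the built-in example above."""

        @staticmethod
        def auto_device_count() -> int:
            """Assumed device count for ``devices="auto"``: one per CPU core (illustrative only)."""
            return os.cpu_count() or 1

        # Any other methods required by ``Accelerator`` are omitted here; a real subclass
        # would implement them before being passed as ``Trainer(accelerator=MyOwnAcc())``.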
@@ -580,6 +600,26 @@ based on the accelerator type (``"cpu", "gpu", "tpu", "ipu", "auto"``).
# Training with IPU Accelerator using 4 ipus
trainer = Trainer(devices="auto", accelerator="ipu")
+.. note::
+
+    If the ``devices`` flag is not defined, it defaults to ``"auto"`` and the number of devices is obtained
+    from the accelerator's ``auto_device_count``.
+
+    .. code-block:: python
+
+        # This is part of the built-in `GPUAccelerator`
+        class GPUAccelerator(Accelerator):
+            """Accelerator for GPU devices."""
+
+            @staticmethod
+            def auto_device_count() -> int:
+                """Get the devices when set to auto."""
+                return torch.cuda.device_count()
+
+
+        # Training with the GPU Accelerator, using the total number of GPUs available on the system
+        Trainer(accelerator="gpu")
+
enable_checkpointing
^^^^^^^^^^^^^^^^^^^^