@@ -1177,27 +1177,38 @@ Properties
callback_metrics
****************

- The metrics available to callbacks. These are automatically set when you log via ``self.log``.
+ The metrics available to callbacks.

- .. code-block:: python
+ This includes metrics logged via :meth:`~lightning.pytorch.core.module.LightningModule.log`.

-     def training_step(self, batch, batch_idx):
-         self.log("a_val", 2)
+ .. code-block:: python

+     def training_step(self, batch, batch_idx):
+         self.log("a_val", 2.0)


    callback_metrics = trainer.callback_metrics
-     assert callback_metrics["a_val"] == 2
+     assert callback_metrics["a_val"] == 2.0
- current_epoch
- *************
+ logged_metrics
+ **************

- The number of epochs run.
+ The metrics sent to the loggers.

- .. code-block:: python
+ This includes metrics logged via :meth:`~lightning.pytorch.core.module.LightningModule.log` with the
+ :paramref:`~lightning.pytorch.core.module.LightningModule.log.logger` argument set.

-     if trainer.current_epoch >= 10:
-         ...
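+
+ For example:
+
+ .. code-block:: python
+
+     def training_step(self, batch, batch_idx):
+         self.log("a_val", 2.0, logger=True)
+
+
+     logged_metrics = trainer.logged_metrics
+     assert logged_metrics["a_val"] == 2.0
+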
+ progress_bar_metrics
+ ********************
+
+ The metrics sent to the progress bar.
+
+ This includes metrics logged via :meth:`~lightning.pytorch.core.module.LightningModule.log` with the
+ :paramref:`~lightning.pytorch.core.module.LightningModule.log.prog_bar` argument set.
+
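+ For example:
+
+ .. code-block:: python
+
+     def training_step(self, batch, batch_idx):
+         self.log("a_val", 2.0, prog_bar=True)
+
+
+     progress_bar_metrics = trainer.progress_bar_metrics
+     assert progress_bar_metrics["a_val"] == 2.0
+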
+ current_epoch
+ *************
+
+ The current epoch, updated after the epoch end hooks are run.
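+
+ For example:
+
+ .. code-block:: python
+
+     if trainer.current_epoch >= 10:
+         ...
+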
datamodule
**********
@@ -1211,64 +1222,33 @@ The current datamodule, which is used by the trainer.
is_last_batch
*************

- Whether trainer is executing last batch in the current epoch.
-
- .. code-block:: python
-
-     if trainer.is_last_batch:
-         ...
+ Whether the trainer is executing the last batch.
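+
+ For example:
+
+ .. code-block:: python
+
+     if trainer.is_last_batch:
+         ...
+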
global_step
***********

The number of optimizer steps taken (does not reset each epoch).
- This includes multiple optimizers (if enabled).
-
- .. code-block:: python
-
-     if trainer.global_step >= 100:
-         ...
+ This includes multiple optimizers (if enabled).
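+
+ For example:
+
+ .. code-block:: python
+
+     if trainer.global_step >= 100:
+         ...
+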
logger
*******

- The current logger being used. Here's an example using tensorboard:
-
- .. code-block:: python
-
-     logger = trainer.logger
-     tensorboard = logger.experiment
-
+ The first :class:`~lightning.pytorch.loggers.logger.Logger` being used.
loggers
********

- The list of loggers currently being used by the Trainer.
+ The list of :class:`~lightning.pytorch.loggers.logger.Logger` instances used.

.. code-block:: python

-     # List of Logger objects
-     loggers = trainer.loggers
-     for logger in loggers:
+     for logger in trainer.loggers:
        logger.log_metrics({"foo": 1.0})
-
- logged_metrics
- **************
-
- The metrics sent to the logger (visualizer).
-
- .. code-block:: python
-
-     def training_step(self, batch, batch_idx):
-         self.log("a_val", 2, logger=True)
-
-
-     logged_metrics = trainer.logged_metrics
-     assert logged_metrics["a_val"] == 2
-
log_dir
*******
+

The directory for the current experiment. Use this to save images to, etc...

.. code-block:: python
@@ -1277,49 +1257,32 @@ The directory for the current experiment. Use this to save images to, etc...
    img = ...
    save_img(img, self.trainer.log_dir)
-
-
is_global_zero
**************

- Whether this process is the global zero in multi-node training
+ Whether this process is the global zero in multi-node training.

.. code-block:: python

    def training_step(self, batch, batch_idx):
        if self.trainer.is_global_zero:
            print("in node 0, accelerator 0")
- progress_bar_metrics
- ********************
-
- The metrics sent to the progress bar.
-
- .. code-block:: python
-
-     def training_step(self, batch, batch_idx):
-         self.log("a_val", 2, prog_bar=True)
-
-
-     progress_bar_metrics = trainer.progress_bar_metrics
-     assert progress_bar_metrics["a_val"] == 2
-
-
- predict_dataloaders
- *******************
-
- The current predict dataloaders of the trainer.
- Note that property returns a list of predict dataloaders.
-
- .. code-block:: python
+ estimated_stepping_batches
+ **************************

-     used_predict_dataloaders = trainer.predict_dataloaders
+ The estimated number of batches that will call ``optimizer.step()`` during training.

+ This accounts for gradient accumulation and the current trainer configuration. This might set up your training
+ dataloader if it hadn't been set up already.

- estimated_stepping_batches
- **************************
+ .. code-block:: python

- Check out :meth:`~lightning.pytorch.trainer.trainer.Trainer.estimated_stepping_batches`.
+     def configure_optimizers(self):
+         optimizer = ...
+         stepping_batches = self.trainer.estimated_stepping_batches
+         scheduler = torch.optim.lr_scheduler.OneCycleLR(optimizer, max_lr=1e-3, total_steps=stepping_batches)
+         return [optimizer], [scheduler]
state
*****
@@ -1397,35 +1360,47 @@ both conditions are met. If any of these arguments is not set, it won't be consi
    trainer.fit(model)

- train_dataloader
- ****************
+ num_training_batches
+ ********************

- The current train dataloader of the trainer.
+ The number of training batches that will be used during ``trainer.fit()``.

- .. code-block:: python
+ num_sanity_val_batches
+ **********************

-     used_train_dataloader = trainer.train_dataloader
+ The number of validation batches that will be used during the sanity-checking part of ``trainer.fit()``.
1372
1373
+ num_val_batches
1374
+ ***************
1409
1375
1410
- test_dataloaders
1376
+ The number of validation batches that will be used during ``trainer.fit() `` or ``trainer.validate() ``.
1377
+
1378
+ num_test_batches
1411
1379
****************
1412
1380
1413
- The current test dataloaders of the trainer.
1414
- Note that property returns a list of test dataloaders.
1381
+ The number of test batches that will be used during ``trainer.test() ``.
1415
1382
1383
+ num_predict_batches
1384
+ *******************
1416
1385
1417
- .. code-block :: python
1386
+ The number of prediction batches that will be used during ``trainer.predict() ``.
1387
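+
+ For example, a minimal sketch of reading one of these counts, assuming the dataloaders have already been set up
+ (e.g. inside a hook):
+
+ .. code-block:: python
+
+     def on_train_start(self):
+         # available once the training dataloader has been set up
+         print(self.trainer.num_training_batches)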
+
+ train_dataloader
+ ****************

-     used_test_dataloaders = trainer.test_dataloaders
+ The training dataloader(s) used during ``trainer.fit()``.
val_dataloaders
***************

+ The validation dataloader(s) used during ``trainer.fit()`` or ``trainer.validate()``.
- The current val dataloaders of the trainer.
- Note that property returns a list of val dataloaders.

+ test_dataloaders
+ ****************

+ The test dataloader(s) used during ``trainer.test()``.

- .. code-block:: python
+ predict_dataloaders
+ *******************

-     used_val_dataloaders = trainer.val_dataloaders
+ The prediction dataloader(s) used during ``trainer.predict()``.
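+
+ For example:
+
+ .. code-block:: python
+
+     used_train_dataloader = trainer.train_dataloader
+     used_val_dataloaders = trainer.val_dataloaders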