@@ -170,7 +170,7 @@ To enable full Nebula compatibility with PyTorch-based training scripts, modify
170
170
# # List all checkpoints
171
171
ckpts = nm.list_checkpoints()
172
172
# # Get Latest checkpoint path
173
- latest_ckpt_path = ml .get_latest_checkpoint_path(" checkpoint" , persisted_storage_path)
173
+ latest_ckpt_path = nm.get_latest_checkpoint_path("checkpoint", persisted_storage_path)
174
174
```
175
175
176
176
# [Using DeepSpeed](#tab/DEEPSPEED)
@@ -205,16 +205,16 @@ latest_ckpt_path = ml.get_latest_checkpoint_path("checkpoint", persisted_storage
205
205
config_params["persistent_storage_path"] = "<YOUR STORAGE PATH>"
206
206
config_params["persistent_time_interval"] = 10
207
207
208
- nebula_checkpoint_callback = ml .NebulaCallback(
208
+ nebula_checkpoint_callback = nm.NebulaCallback(
209
209
** ** , # Original ModelCheckpoint params
210
210
config_params=config_params, # customize the config of init nebula
211
211
)
212
212
```
213
213
214
- Next, add `ml .NebulaCheckpointIO()` as a plugin to your `Trainer` , and modify the `trainer.save_checkpoint()` storage parameters as shown:
214
+ Next, add `nm.NebulaCheckpointIO()` as a plugin to your `Trainer`, and modify the `trainer.save_checkpoint()` storage parameters as shown:
215
215
216
216
```python
217
- trainer = Trainer(plugins = [ml .NebulaCheckpointIO()], # add NebulaCheckpointIO as a plugin
217
+ trainer = Trainer(plugins = [nm .NebulaCheckpointIO()], # add NebulaCheckpointIO as a plugin
218
218
callbacks=[nebula_checkpoint_callback]) # use NebulaCallback as a plugin
219
219
```
220
220
0 commit comments