diff --git a/examples/demo-upload-download.py b/examples/demo-upload-download.py
index e00756e..f5979b0 100644
--- a/examples/demo-upload-download.py
+++ b/examples/demo-upload-download.py
@@ -12,11 +12,11 @@
 # Upload the model checkpoint
 litmodels.upload_model(
     "./boring-checkpoint.pt",
-    "jirka/kaggle/boring-model",
+    "lightning-ai/jirka/lit-boring-model",
 )
 
 # Download the model checkpoint
-model_path = litmodels.download_model("jirka/kaggle/boring-model", download_dir="./my-models")
+model_path = litmodels.download_model("lightning-ai/jirka/lit-boring-model", download_dir="./my-models")
 print(f"Model downloaded to {model_path}")
 
 # Load the model checkpoint
diff --git a/examples/resume-lightning-training.py b/examples/resume-lightning-training.py
index 427bb0d..3a06e5a 100644
--- a/examples/resume-lightning-training.py
+++ b/examples/resume-lightning-training.py
@@ -2,31 +2,24 @@
 This example demonstrates how to resume training of a model using the `download_model` function.
 """
-import torch.utils.data as data
-import torchvision as tv
+import os
+
 from lightning import Trainer
+from lightning.pytorch.demos.boring_classes import BoringModel
 from litmodels import download_model
-from sample_model import LitAutoEncoder
 
 # Define the model name - this should be unique to your model
 # The format is <organization>/<teamspace>/<model-name>:<version>
-MY_MODEL_NAME = "jirka/kaggle/lit-auto-encoder-callback:latest"
+MY_MODEL_NAME = "lightning-ai/jirka/lit-boring-callback:latest"
 
 if __name__ == "__main__":
-    dataset = tv.datasets.MNIST(".", download=True, transform=tv.transforms.ToTensor())
-    train, val = data.random_split(dataset, [55000, 5000])
-
-    model_path = download_model(name=MY_MODEL_NAME, download_dir="my_models")
+    model_files = download_model(name=MY_MODEL_NAME, download_dir="my_models")
+    model_path = os.path.join("my_models", model_files[0])
     print(f"model: {model_path}")
 
-    # autoencoder = LitAutoEncoder.load_from_checkpoint(checkpoint_path=model_path)
-    trainer = Trainer(
-        max_epochs=4,
-    )
+    trainer = Trainer(max_epochs=4)
     trainer.fit(
-        LitAutoEncoder(),
-        data.DataLoader(train, batch_size=256),
-        data.DataLoader(val, batch_size=256),
+        BoringModel(),
         ckpt_path=model_path,
     )
diff --git a/examples/sample_model.py b/examples/sample_model.py
deleted file mode 100644
index 664a284..0000000
--- a/examples/sample_model.py
+++ /dev/null
@@ -1,28 +0,0 @@
-import torch
-import torch.nn as nn
-from lightning import LightningModule
-from torch.nn.functional import mse_loss
-
-
-class LitAutoEncoder(LightningModule):
-    def __init__(self):
-        super().__init__()
-        self.encoder = nn.Sequential(nn.Linear(28 * 28, 128), nn.ReLU(), nn.Linear(128, 3))
-        self.decoder = nn.Sequential(nn.Linear(3, 128), nn.ReLU(), nn.Linear(128, 28 * 28))
-
-    def forward(self, x):
-        # in lightning, forward defines the prediction/inference actions
-        return self.encoder(x)
-
-    def training_step(self, batch, batch_idx):
-        # training_step defines the train loop. It is independent of forward
-        x, _ = batch
-        x = x.view(x.size(0), -1)
-        z = self.encoder(x)
-        x_hat = self.decoder(z)
-        loss = mse_loss(x_hat, x)
-        self.log("train_loss", loss)
-        return loss
-
-    def configure_optimizers(self):
-        return torch.optim.AdamW(self.parameters(), lr=1e-4)
diff --git a/examples/train-model-and-simple-save.py b/examples/train-model-and-simple-save.py
index 3dee83b..27af953 100644
--- a/examples/train-model-and-simple-save.py
+++ b/examples/train-model-and-simple-save.py
@@ -2,29 +2,18 @@
 This example demonstrates how to train a model and upload it to the cloud using the `upload_model` function.
 """
-import torch.utils.data as data
-import torchvision as tv
 from lightning import Trainer
+from lightning.pytorch.demos.boring_classes import BoringModel
 from litmodels import upload_model
-from sample_model import LitAutoEncoder
 
 # Define the model name - this should be unique to your model
 # The format is <organization>/<teamspace>/<model-name>
-MY_MODEL_NAME = "jirka/kaggle/lit-auto-encoder-simple"
+MY_MODEL_NAME = "lightning-ai/jirka/lit-boring-simple"
 
 if __name__ == "__main__":
-    dataset = tv.datasets.MNIST(".", download=True, transform=tv.transforms.ToTensor())
-    train, val = data.random_split(dataset, [55000, 5000])
-
-    autoencoder = LitAutoEncoder()
-
     trainer = Trainer(max_epochs=2)
-    trainer.fit(
-        autoencoder,
-        data.DataLoader(train, batch_size=256),
-        data.DataLoader(val, batch_size=256),
-    )
+    trainer.fit(BoringModel())
 
     checkpoint_path = getattr(trainer.checkpoint_callback, "best_model_path")
     print(f"best: {checkpoint_path}")
     upload_model(model=checkpoint_path, name=MY_MODEL_NAME)
diff --git a/examples/train-model-with-lightning-callback.py b/examples/train-model-with-lightning-callback.py
index 100ce9c..d8db785 100644
--- a/examples/train-model-with-lightning-callback.py
+++ b/examples/train-model-with-lightning-callback.py
@@ -2,29 +2,18 @@
 Train a model with a Lightning callback that uploads the best model to the cloud after each epoch.
 """
-import torch.utils.data as data
-import torchvision as tv
 from lightning import Trainer
-from litmodels.integrations import LitModelCheckpoint
-from sample_model import LitAutoEncoder
+from lightning.pytorch.demos.boring_classes import BoringModel
+from litmodels.integrations import LightningModelCheckpoint
 
 # Define the model name - this should be unique to your model
 # The format is <organization>/<teamspace>/<model-name>
-MY_MODEL_NAME = "lightning-ai/jirka/lit-auto-encoder-callback"
+MY_MODEL_NAME = "lightning-ai/jirka/lit-boring-callback"
 
 if __name__ == "__main__":
-    dataset = tv.datasets.MNIST(".", download=True, transform=tv.transforms.ToTensor())
-    train, val = data.random_split(dataset, [55000, 5000])
-
-    autoencoder = LitAutoEncoder()
-
     trainer = Trainer(
         max_epochs=2,
-        callbacks=LitModelCheckpoint(model_name=MY_MODEL_NAME),
-    )
-    trainer.fit(
-        autoencoder,
-        data.DataLoader(train, batch_size=256),
-        data.DataLoader(val, batch_size=256),
+        callbacks=LightningModelCheckpoint(model_name=MY_MODEL_NAME),
     )
+    trainer.fit(BoringModel())
diff --git a/examples/train-model-with-lightning-logger.py b/examples/train-model-with-lightning-logger.py
index 6c3fba9..b179000 100644
--- a/examples/train-model-with-lightning-logger.py
+++ b/examples/train-model-with-lightning-logger.py
@@ -7,53 +7,19 @@
 """
-import os
-
-from lightning import LightningModule, Trainer
+from lightning import Trainer
+from lightning.pytorch.demos.boring_classes import BoringModel
 from litlogger import LightningLogger
-from psutil import cpu_count
-from torch import nn, optim
-from torch.utils.data import DataLoader
-from torchvision.datasets import MNIST
-from torchvision.transforms import ToTensor
-
-
-class LitAutoEncoder(LightningModule):
-    def __init__(self, lr=1e-3, inp_size=28):
-        super().__init__()
-        self.encoder = nn.Sequential(nn.Linear(inp_size * inp_size, 64), nn.ReLU(), nn.Linear(64, 3))
-        self.decoder = nn.Sequential(nn.Linear(3, 64), nn.ReLU(), nn.Linear(64, inp_size * inp_size))
-        self.lr = lr
-        self.save_hyperparameters()
+
+class DemoModel(BoringModel):
     def training_step(self, batch, batch_idx):
-        x, y = batch
-        x = x.view(x.size(0), -1)
-        z = self.encoder(x)
-        x_hat = self.decoder(z)
-        loss = nn.functional.mse_loss(x_hat, x)
-        # log metrics
-        self.log("train_loss", loss)
-        return loss
-
-    def configure_optimizers(self):
-        return optim.Adam(self.parameters(), lr=self.lr)
+        output = super().training_step(batch, batch_idx)
+        self.log("train_loss", output["loss"])
+        return output
 
 
 if __name__ == "__main__":
-    # init the autoencoder
-    autoencoder = LitAutoEncoder(lr=1e-3, inp_size=28)
-
-    # setup data
-    train_loader = DataLoader(
-        dataset=MNIST(os.getcwd(), download=True, transform=ToTensor()),
-        batch_size=32,
-        shuffle=True,
-        num_workers=cpu_count(),
-        persistent_workers=True,
-    )
-
     # configure the logger
     lit_logger = LightningLogger(log_model=True)
@@ -61,4 +27,4 @@ def configure_optimizers(self):
 
     # init the trainer
     trainer = Trainer(max_epochs=5, logger=lit_logger)
 
     # train the model
-    trainer.fit(model=autoencoder, train_dataloaders=train_loader)
+    trainer.fit(model=DemoModel())