4 changes: 2 additions & 2 deletions examples/demo-upload-download.py

```diff
@@ -12,11 +12,11 @@
 # Upload the model checkpoint
 litmodels.upload_model(
     "./boring-checkpoint.pt",
-    "jirka/kaggle/boring-model",
+    "lightning-ai/jirka/lit-boring-model",
 )

 # Download the model checkpoint
-model_path = litmodels.download_model("jirka/kaggle/boring-model", download_dir="./my-models")
+model_path = litmodels.download_model("lightning-ai/jirka/lit-boring-model", download_dir="./my-models")
 print(f"Model downloaded to {model_path}")

 # Load the model checkpoint
```
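For context, a minimal sketch of the round trip this example performs after the rename, assuming litmodels is installed and the caller is authenticated; the teamspace path would be your own `<organization>/<teamspace>/<model-name>` in practice:

```python
import litmodels
import torch
from lightning.pytorch.demos.boring_classes import BoringModel

# Create a checkpoint so there is something to upload (illustrative only).
torch.save(BoringModel().state_dict(), "./boring-checkpoint.pt")

# Upload, then pull the same model back into a local directory.
litmodels.upload_model("./boring-checkpoint.pt", "lightning-ai/jirka/lit-boring-model")
model_path = litmodels.download_model("lightning-ai/jirka/lit-boring-model", download_dir="./my-models")
print(f"Model downloaded to {model_path}")
```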
23 changes: 8 additions & 15 deletions examples/resume-lightning-training.py

```diff
@@ -2,31 +2,24 @@
 This example demonstrates how to resume training of a model using the `download_model` function.
 """

-import torch.utils.data as data
-import torchvision as tv
+import os

 from lightning import Trainer
+from lightning.pytorch.demos.boring_classes import BoringModel
 from litmodels import download_model
-from sample_model import LitAutoEncoder

 # Define the model name - this should be unique to your model
 # The format is <organization>/<teamspace>/<model-name>:<model-version>
-MY_MODEL_NAME = "jirka/kaggle/lit-auto-encoder-callback:latest"
+MY_MODEL_NAME = "lightning-ai/jirka/lit-boring-callback:latest"


 if __name__ == "__main__":
-    dataset = tv.datasets.MNIST(".", download=True, transform=tv.transforms.ToTensor())
-    train, val = data.random_split(dataset, [55000, 5000])
-
-    model_path = download_model(name=MY_MODEL_NAME, download_dir="my_models")
+    model_files = download_model(name=MY_MODEL_NAME, download_dir="my_models")
+    model_path = os.path.join("my_models", model_files[0])
     print(f"model: {model_path}")
-    # autoencoder = LitAutoEncoder.load_from_checkpoint(checkpoint_path=model_path)

-    trainer = Trainer(
-        max_epochs=4,
-    )
+    trainer = Trainer(max_epochs=4)
     trainer.fit(
-        LitAutoEncoder(),
-        data.DataLoader(train, batch_size=256),
-        data.DataLoader(val, batch_size=256),
+        BoringModel(),
         ckpt_path=model_path,
     )
```
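One judgment call worth noting: `download_model` now returns a list of file names relative to `download_dir`, so `model_files[0]` silently depends on list order. A hedged sketch of a more explicit selection, assuming the model version contains exactly one `.ckpt` file:

```python
import os

from litmodels import download_model

MY_MODEL_NAME = "lightning-ai/jirka/lit-boring-callback:latest"

# download_model returns file names relative to download_dir;
# pick the checkpoint explicitly rather than relying on list order.
model_files = download_model(name=MY_MODEL_NAME, download_dir="my_models")
ckpt_files = [f for f in model_files if f.endswith(".ckpt")]
assert len(ckpt_files) == 1, f"expected exactly one checkpoint, got {model_files}"
model_path = os.path.join("my_models", ckpt_files[0])
```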
28 changes: 0 additions & 28 deletions examples/sample_model.py

This file was deleted.

17 changes: 3 additions & 14 deletions examples/train-model-and-simple-save.py

```diff
@@ -2,29 +2,18 @@
 This example demonstrates how to train a model and upload it to the cloud using the `upload_model` function.
 """

-import torch.utils.data as data
-import torchvision as tv
 from lightning import Trainer
+from lightning.pytorch.demos.boring_classes import BoringModel
 from litmodels import upload_model
-from sample_model import LitAutoEncoder

 # Define the model name - this should be unique to your model
 # The format is <organization>/<teamspace>/<model-name>
-MY_MODEL_NAME = "jirka/kaggle/lit-auto-encoder-simple"
+MY_MODEL_NAME = "lightning-ai/jirka/lit-boring-simple"


 if __name__ == "__main__":
-    dataset = tv.datasets.MNIST(".", download=True, transform=tv.transforms.ToTensor())
-    train, val = data.random_split(dataset, [55000, 5000])
-
-    autoencoder = LitAutoEncoder()
-
     trainer = Trainer(max_epochs=2)
-    trainer.fit(
-        autoencoder,
-        data.DataLoader(train, batch_size=256),
-        data.DataLoader(val, batch_size=256),
-    )
+    trainer.fit(BoringModel())
     checkpoint_path = getattr(trainer.checkpoint_callback, "best_model_path")
     print(f"best: {checkpoint_path}")
     upload_model(model=checkpoint_path, name=MY_MODEL_NAME)
```
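The `getattr(trainer.checkpoint_callback, "best_model_path")` lookup assumes a checkpoint callback exists and has already saved something. A minimal defensive sketch of the same flow, assuming the script setup above:

```python
from lightning import Trainer
from lightning.pytorch.demos.boring_classes import BoringModel
from litmodels import upload_model

MY_MODEL_NAME = "lightning-ai/jirka/lit-boring-simple"

trainer = Trainer(max_epochs=2)
trainer.fit(BoringModel())

# Guard against a missing best_model_path: it can be None or empty when
# checkpointing is disabled or no checkpoint has been written yet.
ckpt_cb = trainer.checkpoint_callback
checkpoint_path = ckpt_cb.best_model_path if ckpt_cb else ""
if checkpoint_path:
    upload_model(model=checkpoint_path, name=MY_MODEL_NAME)
else:
    print("No checkpoint produced; nothing to upload.")
```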
21 changes: 5 additions & 16 deletions examples/train-model-with-lightning-callback.py

```diff
@@ -2,29 +2,18 @@
 Train a model with a Lightning callback that uploads the best model to the cloud after each epoch.
 """

-import torch.utils.data as data
-import torchvision as tv
 from lightning import Trainer
-from litmodels.integrations import LitModelCheckpoint
-from sample_model import LitAutoEncoder
+from lightning.pytorch.demos.boring_classes import BoringModel
+from litmodels.integrations import LightningModelCheckpoint

 # Define the model name - this should be unique to your model
 # The format is <organization>/<teamspace>/<model-name>
-MY_MODEL_NAME = "lightning-ai/jirka/lit-auto-encoder-callback"
+MY_MODEL_NAME = "lightning-ai/jirka/lit-boring-callback"


 if __name__ == "__main__":
-    dataset = tv.datasets.MNIST(".", download=True, transform=tv.transforms.ToTensor())
-    train, val = data.random_split(dataset, [55000, 5000])
-
-    autoencoder = LitAutoEncoder()
-
     trainer = Trainer(
         max_epochs=2,
-        callbacks=LitModelCheckpoint(model_name=MY_MODEL_NAME),
+        callbacks=LightningModelCheckpoint(model_name=MY_MODEL_NAME),
     )
-    trainer.fit(
-        autoencoder,
-        data.DataLoader(train, batch_size=256),
-        data.DataLoader(val, batch_size=256),
-    )
+    trainer.fit(BoringModel())
```
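This callback uploads under the same model name that the resume example fetches with a `:latest` tag. A small sketch of that pairing, assuming the callback publishes each checkpoint as a new version of the model:

```python
from litmodels import download_model

# The callback above uploads "lightning-ai/jirka/lit-boring-callback";
# the resume script pulls the newest version via the ":latest" tag.
model_files = download_model(
    name="lightning-ai/jirka/lit-boring-callback:latest",
    download_dir="my_models",
)
print(model_files)
```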
48 changes: 7 additions & 41 deletions examples/train-model-with-lightning-logger.py

```diff
@@ -7,58 +7,24 @@

 """

-import os
-
-from lightning import LightningModule, Trainer
+from lightning import Trainer
+from lightning.pytorch.demos.boring_classes import BoringModel
 from litlogger import LightningLogger
-from psutil import cpu_count
-from torch import nn, optim
-from torch.utils.data import DataLoader
-from torchvision.datasets import MNIST
-from torchvision.transforms import ToTensor


-class LitAutoEncoder(LightningModule):
-    def __init__(self, lr=1e-3, inp_size=28):
-        super().__init__()
-
-        self.encoder = nn.Sequential(nn.Linear(inp_size * inp_size, 64), nn.ReLU(), nn.Linear(64, 3))
-        self.decoder = nn.Sequential(nn.Linear(3, 64), nn.ReLU(), nn.Linear(64, inp_size * inp_size))
-        self.lr = lr
-        self.save_hyperparameters()
-
+class DemoModel(BoringModel):
     def training_step(self, batch, batch_idx):
-        x, y = batch
-        x = x.view(x.size(0), -1)
-        z = self.encoder(x)
-        x_hat = self.decoder(z)
-        loss = nn.functional.mse_loss(x_hat, x)
-        # log metrics
-        self.log("train_loss", loss)
-        return loss
-
-    def configure_optimizers(self):
-        return optim.Adam(self.parameters(), lr=self.lr)
+        output = super().training_step(batch, batch_idx)
+        self.log("train_loss", output["loss"])
+        return output


 if __name__ == "__main__":
-    # init the autoencoder
-    autoencoder = LitAutoEncoder(lr=1e-3, inp_size=28)
-
-    # setup data
-    train_loader = DataLoader(
-        dataset=MNIST(os.getcwd(), download=True, transform=ToTensor()),
-        batch_size=32,
-        shuffle=True,
-        num_workers=cpu_count(),
-        persistent_workers=True,
-    )
-
     # configure the logger
     lit_logger = LightningLogger(log_model=True)

     # pass logger to the Trainer
     trainer = Trainer(max_epochs=5, logger=lit_logger)

     # train the model
-    trainer.fit(model=autoencoder, train_dataloaders=train_loader)
+    trainer.fit(model=DemoModel())
```
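The override relies on `BoringModel.training_step` returning a dict with a `"loss"` entry. A quick sanity sketch of that assumption, runnable outside a Trainer (the return shape may differ across Lightning versions):

```python
from lightning.pytorch.demos.boring_classes import BoringModel

# BoringModel ships its own random dataloader, so we can exercise
# training_step directly and inspect what it returns.
model = BoringModel()
batch = next(iter(model.train_dataloader()))
output = model.training_step(batch, 0)
print(output["loss"])  # a scalar tensor, the entry DemoModel logs
```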