@@ -62,35 +62,78 @@ Use LitLogger for any use case (training, inference, agents, etc).
 
 Add LitLogger to any training framework: PyTorch, Jax, TensorFlow, NumPy, SKLearn, etc.
 
+<div align='center'>
+
+<img alt="LitLogger" src="https://github.com/user-attachments/assets/50d9a2f7-17d0-4448-ad21-6be600ab53fc" width="800px" style="max-width: 100%;">
+
+&nbsp;
+</div>
+
 ```python
-import litlogger
+import os
+
+import torch
+import torch.nn as nn
+import torch.optim as optim
+from litlogger import LightningLogger
+
+# define a simple neural network
+class SimpleModel(nn.Module):
+    def __init__(self):
+        super().__init__()
+        self.linear = nn.Linear(10, 1)
+
+    def forward(self, x):
+        return self.linear(x)
+
+def train():
+    # initialize LightningLogger with experiment metadata
+    logger = LightningLogger(metadata={"task": "model_training", "model_name": "SimpleModel"})
+
+    # hyperparameters
+    num_epochs = 10
+    learning_rate = 0.01
+
+    # model, loss, and optimizer
+    model = SimpleModel()
+    criterion = nn.MSELoss()
+    optimizer = optim.SGD(model.parameters(), lr=learning_rate)
+
+    # dummy data
+    X_train = torch.randn(100, 10)
+    y_train = torch.randn(100, 1)
+
+    # training loop
+    for epoch in range(num_epochs):
+        optimizer.zero_grad()
+        outputs = model(X_train)
+        loss = criterion(outputs, y_train)
+        loss.backward()
+        optimizer.step()
+
+        # log training loss
+        logger.log_metrics({"train_loss": loss.item()}, step=epoch)
+        print(f"Epoch [{epoch + 1}/{num_epochs}], Loss: {loss.item():.4f}")
+
+    # log the trained model
+    logger.log_model(model)
+    print("model logged.")
+
+    # create a dummy artifact file and log it
+    with open("model_config.txt", "w") as f:
+        f.write(f"learning_rate: {learning_rate}\n")
+        f.write(f"num_epochs: {num_epochs}\n")
+    logger.log_model_artifact("model_config.txt")
+    print("model config artifact logged.")
+
+    # clean up the dummy artifact file after logging
+    os.remove("model_config.txt")
 
-# Initialize experiment with name and metadata
-litlogger.init(
-    name="my-experiment",
-    metadata={
-        "learning_rate": "0.001",
-        "batch_size": "32",
-        "model": "resnet50",
-    },
-)
-
-# Simulate a training loop
-for step in range(100):
-    loss = 1.0 / (step + 1)
-    accuracy = min(0.95, step / 100.0)
-
-    # Log metrics
-    litlogger.log(
-        {"train/loss": loss, "train/accuracy": accuracy},
-        step=step,
-    )
-
-# Optionally log a file
-litlogger.log_file("/path/to/config.txt")
-
-# Finalize the experiment
-litlogger.finalize()
+    # finalize the logger when training is done
+    logger.finalize()
+    print("training complete and logger finalized.")
+
+if __name__ == "__main__":
+    train()
 ```
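+
+The same logger calls carry over to non-PyTorch frameworks. Below is a minimal scikit-learn sketch; it is an illustration that assumes only the `LightningLogger` methods used above (`log_metrics`, `log_model_artifact`, `finalize`), not a scikit-learn-specific API.
+
+```python
+import pickle
+
+import numpy as np
+from sklearn.linear_model import Ridge
+from sklearn.metrics import mean_squared_error
+from litlogger import LightningLogger
+
+logger = LightningLogger(metadata={"task": "model_training", "model_name": "Ridge"})
+
+# dummy data
+X = np.random.randn(100, 10)
+y = np.random.randn(100)
+
+# fit the model and log a training metric
+model = Ridge(alpha=1.0).fit(X, y)
+mse = mean_squared_error(y, model.predict(X))
+logger.log_metrics({"train_mse": mse}, step=0)
+
+# serialize the fitted model and log it as an artifact
+with open("ridge_model.pkl", "wb") as f:
+    pickle.dump(model, f)
+logger.log_model_artifact("ridge_model.pkl")
+
+logger.finalize()
+```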
 </details>
 
@@ -193,6 +236,13 @@ trainer.fit(LoggingBoringModel(), BoringDataModule())
 <summary>Example: Long-running experiment simulator</summary>
 This is a fun example that simulates a long model training run.
 
+<div align='center'>
+
+<img alt="LitLogger" src="https://github.com/user-attachments/assets/fd15aa32-2b56-4324-81b6-c87c86db8a3b" width="800px" style="max-width: 100%;">
+
+&nbsp;
+</div>
+
 ```python
 import random
 from time import sleep
@@ -205,7 +255,7 @@ litlogger.init(name="loss-simulator")
 current_loss = 0.09
 
 # Total number of steps
-total_steps = 1000000
+total_steps = 10000
 
 for i in range(total_steps):
     if (i + 1) % 5 == 0: