Skip to content

Commit 85c2cdc

Browse files
Shorna AlamShorna Alam
authored and committed
finished alterations to lab 2
1 parent 015a74f commit 85c2cdc

File tree

4 files changed

+530
-184
lines changed

4 files changed

+530
-184
lines changed

lab2/Part1_MNIST.ipynb

Lines changed: 16 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -61,12 +61,13 @@
6161
},
6262
"outputs": [],
6363
"source": [
64-
"# Import Tensorflow 2.0\n",
64+
"#Import Comet\n",
6565
"%pip install comet_ml\n",
6666
"import comet_ml\n",
6767
"comet_ml.init(project_name=\"6.s191lab2.1.1\")\n",
6868
"comet_model_1 = comet_ml.Experiment()\n",
6969
"\n",
70+
"# Import Tensorflow 2.0\n",
7071
"%tensorflow_version 2.x\n",
7172
"import tensorflow as tf \n",
7273
"\n",
@@ -276,7 +277,8 @@
276277
"BATCH_SIZE = 64\n",
277278
"EPOCHS = 5\n",
278279
"\n",
279-
"model.fit(train_images, train_labels, batch_size=BATCH_SIZE, epochs=EPOCHS)"
280+
"model.fit(train_images, train_labels, batch_size=BATCH_SIZE, epochs=EPOCHS)\n",
281+
"comet_model_1.end()"
280282
]
281283
},
282284
{
@@ -419,6 +421,9 @@
419421
},
420422
"outputs": [],
421423
"source": [
424+
"comet_ml.init(project_name=\"6.s191lab2.1.2\")\n",
425+
"comet_model_2 = comet_ml.Experiment()\n",
426+
"\n",
422427
"'''TODO: Define the compile operation with your optimizer and learning rate of choice'''\n",
423428
"cnn_model.compile(optimizer='''TODO''', loss='''TODO''', metrics=['accuracy']) # TODO"
424429
]
@@ -441,7 +446,8 @@
441446
"outputs": [],
442447
"source": [
443448
"'''TODO: Use model.fit to train the CNN model, with the same batch_size and number of epochs previously used.'''\n",
444-
"cnn_model.fit('''TODO''')"
449+
"cnn_model.fit('''TODO''')\n",
450+
"comet_model_2.end() "
445451
]
446452
},
447453
{
@@ -652,6 +658,9 @@
652658
"plotter = mdl.util.PeriodicPlotter(sec=2, xlabel='Iterations', ylabel='Loss', scale='semilogy')\n",
653659
"optimizer = tf.keras.optimizers.SGD(learning_rate=1e-2) # define our optimizer\n",
654660
"\n",
661+
"comet_ml.init(project_name=\"6.s191lab2.1.3\")\n",
662+
"comet_model_3 = comet_ml.Experiment()\n",
663+
"\n",
655664
"if hasattr(tqdm, '_instances'): tqdm._instances.clear() # clear if it exists\n",
656665
"\n",
657666
"for idx in tqdm(range(0, train_images.shape[0], batch_size)):\n",
@@ -666,6 +675,7 @@
666675
"\n",
667676
" #'''TODO: compute the categorical cross entropy loss\n",
668677
" loss_value = tf.keras.backend.sparse_categorical_crossentropy('''TODO''', '''TODO''') # TODO\n",
678+
" comet_model_3.log_metric(\"loss\", loss_value.numpy().mean(), step=idx)\n",
669679
"\n",
670680
" loss_history.append(loss_value.numpy().mean()) # append the loss to the loss_history record\n",
671681
" plotter.plot(loss_history.get())\n",
@@ -674,7 +684,9 @@
674684
" '''TODO: Use the tape to compute the gradient against all parameters in the CNN model.\n",
675685
" Use cnn_model.trainable_variables to access these parameters.''' \n",
676686
" grads = # TODO\n",
677-
" optimizer.apply_gradients(zip(grads, cnn_model.trainable_variables))\n"
687+
" optimizer.apply_gradients(zip(grads, cnn_model.trainable_variables))\n",
688+
"\n",
689+
"comet_model_3.end()\n"
678690
]
679691
},
680692
{

lab2/Part2_FaceDetection.ipynb

Lines changed: 16 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -75,6 +75,12 @@
7575
},
7676
"outputs": [],
7777
"source": [
78+
"# Import Comet\n",
79+
"!pip install comet_ml\n",
80+
"import comet_ml\n",
81+
"comet_ml.init(project_name=\"6.s191lab2.2.1\")\n",
82+
"comet_model_1 = comet_ml.Experiment()\n",
83+
"\n",
7884
"# Import Tensorflow 2.0\n",
7985
"%tensorflow_version 2.x\n",
8086
"import tensorflow as tf\n",
@@ -300,9 +306,11 @@
300306
" x, y = loader.get_batch(batch_size)\n",
301307
" loss = standard_train_step(x, y)\n",
302308
"\n",
309+
" comet_model_1.log_metric(\"loss\", loss.numpy().mean(), idx)\n",
303310
" # Record the loss and plot the evolution of the loss as a function of training\n",
304311
" loss_history.append(loss.numpy().mean())\n",
305-
" plotter.plot(loss_history.get())"
312+
" plotter.plot(loss_history.get())\n",
313+
"comet_model_1.end()"
306314
]
307315
},
308316
{
@@ -756,6 +764,9 @@
756764
"source": [
757765
"### Training the SS-VAE ###\n",
758766
"\n",
767+
"comet_ml.init(project_name=\"6.s191lab2.2.2\")\n",
768+
"comet_model_2 = comet_ml.Experiment()\n",
769+
"\n",
759770
"# Hyperparameters\n",
760771
"batch_size = 32\n",
761772
"learning_rate = 5e-4\n",
@@ -806,10 +817,13 @@
806817
" (x, y) = loader.get_batch(batch_size)\n",
807818
" # loss optimization\n",
808819
" loss = ss_vae_train_step(x, y)\n",
820+
" comet_model_2.log_metric(\"loss\", loss, step=j)\n",
809821
" \n",
810822
" # plot the progress every 200 steps\n",
811823
" if j % 500 == 0: \n",
812-
" mdl.util.plot_sample(x, y, ss_vae)"
824+
" mdl.util.plot_sample(x, y, ss_vae)\n",
825+
" \n",
826+
"comet_model_2.end()"
813827
]
814828
},
815829
{

lab2/solutions/Part1_MNIST_Solution.ipynb

Lines changed: 178 additions & 150 deletions
Large diffs are not rendered by default.

lab2/solutions/Part2_FaceDetection_Solution.ipynb

Lines changed: 320 additions & 28 deletions
Large diffs are not rendered by default.

0 commit comments

Comments (0)