
Commit 1fec053

final facedetection solution
1 parent 7dac460 commit 1fec053

File tree

1 file changed: +20 −13 lines


lab2/solutions/Part2_FaceDetection_Solution.ipynb

Lines changed: 20 additions & 13 deletions
@@ -168,8 +168,8 @@
 "face_images = images[np.where(labels==1)[0]]\n",
 "not_face_images = images[np.where(labels==0)[0]]\n",
 "\n",
-"idx_face = 25 #@param {type:\"slider\", min:0, max:50, step:1}\n",
-"idx_not_face = 33 #@param {type:\"slider\", min:0, max:50, step:1}\n",
+"idx_face = 19 #@param {type:\"slider\", min:0, max:50, step:1}\n",
+"idx_not_face = 8 #@param {type:\"slider\", min:0, max:50, step:1}\n",
 "\n",
 "plt.figure(figsize=(8,4))\n",
 "plt.subplot(1, 2, 1)\n",
@@ -773,7 +773,7 @@
 "\n",
 "# SS-VAE needs slightly more epochs to train since its more complex than \n",
 "# the standard classifier so we use 6 instead of 2\n",
-"num_epochs = 6 \n",
+"num_epochs = 6\n",
 "\n",
 "# instantiate a new SS-VAE model and optimizer\n",
 "ss_vae = SS_VAE(latent_dim)\n",
@@ -793,7 +793,7 @@
 " # loss, class_loss = ss_vae_loss_function('''TODO arguments''') # TODO\n",
 " \n",
 " '''TODO: use the GradientTape.gradient method to compute the gradients.\n",
-" Hint: this is with respect to the trainable_variables of the dbvae.'''\n",
+" Hint: this is with respect to the trainable_variables of the SS_VAE.'''\n",
 " grads = tape.gradient(loss, ss_vae.trainable_variables)\n",
 " # grads = tape.gradient('''TODO''', '''TODO''') # TODO\n",
 "\n",
@@ -821,7 +821,7 @@
 " \n",
 " # plot the progress every 200 steps\n",
 " if j % 500 == 0: \n",
-" mdl.util.plot_sample(x, y, dbvae)"
+" mdl.util.plot_sample(x, y, ss_vae)"
 ]
 },
 {
@@ -859,7 +859,7 @@
 "\n",
 "#### **TODO: Analysis and reflection**\n",
 "\n",
-"Complete the analysis in the code block below. Write short answers to the following questions and include them in your Debiasing Faces Lab submission to complete the `TODO`s!\n",
+"Complete the analysis in the code block below. Write short (~1 sentence) answers to the following questions and include them in your Debiasing Faces Lab submission to complete the `TODO`s!\n",
 "\n",
 "1. What, if any, trends do you observe comparing the samples with the highest and lowest reconstruction loss?\n",
 "2. Based on these observations, which features seemed harder to learn for the VAE?\n",
@@ -877,10 +877,12 @@
 },
 "outputs": [],
 "source": [
+"### Linking model performance to uncertainty and bias\n",
+"\n",
 "# Load a random sample of 5000 faces from our dataset and compute the model performance on them\n",
 "(x, y) = loader.get_batch(5000, only_faces=True)\n",
-"y_logit, z_mean, z_logsigma, x_recon = dbvae(x)\n",
-"loss, class_loss, vae_loss = debiasing_loss_function(x, x_recon, y, y_logit, z_mean, z_logsigma)\n",
+"y_logit, z_mean, z_logsigma, x_recon = ss_vae(x)\n",
+"loss, class_loss, vae_loss = ss_vae_loss_function(x, x_recon, y, y_logit, z_mean, z_logsigma)\n",
 "\n",
 "# Sort the results by the vae loss scores\n",
 "vae_loss = vae_loss.numpy()\n",
@@ -913,7 +915,7 @@
 "\n",
 "#### **TODO: Analysis and reflection**\n",
 "\n",
-"Complete the analysis in the code blocks below. Carefully inspect the different latent variables and their corresponding frequency distributions. Write short answers to the following questions and include them in your Debiasing Faces Lab submission to complete the `TODO`s!\n",
+"Complete the analysis in the code blocks below. Carefully inspect the different latent variables and their corresponding frequency distributions. Write short (~1 sentence) answers to the following questions and include them in your Debiasing Faces Lab submission to complete the `TODO`s!\n",
 "\n",
 "1. Pick two latent variables and describe what semantic meaning they reflect. Include screenshots of the realizations and probability distribution for the latent variables you select.\n",
 "2. For the latent variables selected, what can you tell about which features are under- or over-represented in the data? What might this tell us about how the model is biased?\n",
@@ -924,8 +926,10 @@
 {
 "cell_type": "code",
 "source": [
+"### Inspect different latent features\n",
+"\n",
 "#@title Change the sliders to inspect different latent features! { run: \"auto\" }\n",
-"idx_latent = 25 #@param {type:\"slider\", min:0, max:31, step:1}\n",
+"idx_latent = 8 #@param {type:\"slider\", min:0, max:31, step:1}\n",
 "num_steps = 15\n",
 "\n",
 "# Extract all latent samples from the desired dimension\n",
@@ -947,15 +951,16 @@
 " latent = baseline_latent.numpy()\n",
 " latent[0, idx_latent] = step\n",
 " # Decode the reconstruction and store\n",
-" recons.append(dbvae.decode(latent)[0])\n",
+" recons.append(ss_vae.decode(latent)[0])\n",
 "\n",
 "# Visualize all of the reconstructions!\n",
 "ax[1].imshow(mdl.util.create_grid_of_images(recons, (1, num_steps)))\n",
 "ax[1].set_xlabel(\"Latent step\")\n",
 "ax[1].set_ylabel(\"Visualization\");\n"
 ],
 "metadata": {
-"id": "8qcR9uvfCJku"
+"id": "8qcR9uvfCJku",
+"cellView": "form"
 },
 "execution_count": null,
 "outputs": []
@@ -973,6 +978,8 @@
 {
 "cell_type": "code",
 "source": [
+"### Accuracy vs. density in latent space\n",
+"\n",
 "# Loop through every latent dimension\n",
 "avg_logit_per_bin = []\n",
 "for idx_latent in range(latent_dim): \n",
@@ -997,7 +1004,7 @@
 "accuracy_per_latent = (accuracy_per_latent - accuracy_per_latent.min()) / np.ptp(accuracy_per_latent)\n",
 "\n",
 "# Plot the results\n",
-"plt.plot(np.linspace(start, end, num_steps+1), accuracy_per_latent,'-o')\n",
+"plt.plot(np.linspace(np.min(z_mean), np.max(z_mean), num_steps+1), accuracy_per_latent,'-o')\n",
 "plt.xlabel(\"Latent step\")\n",
 "plt.ylabel(\"Relative accuracy\")"
 ],
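
The last hunk only shows the ends of this cell, but the underlying idea is: for each latent dimension, bin the samples by their `z_mean` value, measure the classifier's confidence per bin, and compare performance against how densely populated each region of latent space is. A rough sketch of that binning step, assuming `z_mean`, `y_logit`, and `latent_dim` from earlier cells are in scope (the notebook's exact aggregation may differ):

import numpy as np
import tensorflow as tf
import matplotlib.pyplot as plt

z = np.asarray(z_mean)                          # (num_samples, latent_dim) encodings
probs = tf.sigmoid(y_logit).numpy().ravel()     # classifier confidence per face sample

num_steps = 15
bins = np.linspace(z.min(), z.max(), num_steps + 1)

avg_logit_per_bin = []
for idx_latent in range(latent_dim):
    which_bin = np.digitize(z[:, idx_latent], bins)
    # Mean classifier confidence within each bin along this latent dimension
    per_bin = [probs[which_bin == b].mean() if np.any(which_bin == b) else np.nan
               for b in range(1, num_steps + 2)]
    avg_logit_per_bin.append(per_bin)

# Average over dimensions and rescale to [0, 1] for a relative-accuracy curve
accuracy_per_latent = np.nanmean(avg_logit_per_bin, axis=0)
accuracy_per_latent = (accuracy_per_latent - np.nanmin(accuracy_per_latent)) / (
    np.nanmax(accuracy_per_latent) - np.nanmin(accuracy_per_latent))

plt.plot(np.linspace(z.min(), z.max(), num_steps + 1), accuracy_per_latent, "-o")
plt.xlabel("Latent step")
plt.ylabel("Relative accuracy")
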
