Commit 1f44f17

Update exercise12_HyperParameterTuning.ipynb

1 parent: f87e51f

File tree

1 file changed: +12, -10 lines

exercise12_HyperParameterTuning.ipynb

Lines changed: 12 additions & 10 deletions
@@ -63,8 +63,8 @@
 "from sklearn.model_selection import train_test_split\n",
 "from sklearn.preprocessing import OneHotEncoder, LabelBinarizer\n",
 "import tensorflow as tf\n",
-"import tensorflow.keras as keras\n",
-"import tensorflow.keras.backend as K\n",
+"from tensorflow import keras\n",
+"from keras import backend as K\n",
 "import time\n",
 "\n",
 "print(\n",
@@ -119,7 +119,9 @@
 "print(root_logdir)\n",
 "print(kt_logdir) # folder for keras tuner results\n",
 "print(tf_kt_logdir) # folder for TF checkpoints while keras tuning\n",
-"print(tf_logdir) # folder for TF checkpoint for best model training"
+"print(tf_logdir) # folder for TF checkpoint for best model training\n",
+"\n",
+"os.makedirs(tf_logdir, exist_ok=True)"
 ]
 },
 {
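
The added `os.makedirs` call ensures the checkpoint folder exists before the first save into it. A minimal sketch, with a hypothetical path in place of the one the notebook derives:

    import os

    tf_logdir = "logs/tf_best_model"  # hypothetical; the notebook builds its own path
    os.makedirs(tf_logdir, exist_ok=True)  # creates parents too; no error if it already exists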
@@ -157,7 +159,7 @@
 " flip_y=1e-2,\n",
 " random_state=None,\n",
 ")\n",
-"encoder = OneHotEncoder(sparse=False)\n",
+"encoder = OneHotEncoder(sparse_output=False)\n",
 "# we encode as one-hot for TF model\n",
 "Y = encoder.fit_transform(Y.reshape(-1, 1))\n",
 "\n",
@@ -209,7 +211,7 @@
 "def build_model(hp): # with hyper parameter ranges\n",
 " model = keras.Sequential()\n",
 " # input layer\n",
-" model.add(keras.Input(shape=nx))\n",
+" model.add(keras.Input(shape=(nx, )))\n",
 " # hidden layers\n",
 " for layer in range(hp.Int(\"no_layers\", 1, 4)):\n",
 " model.add(\n",
@@ -260,7 +262,7 @@
 "model = build_model(kt.HyperParameters())\n",
 "hptuner = kt.RandomSearch(\n",
 " hypermodel=build_model,\n",
-" objective=\"val_categorical_accuracy\", # check performance on val data!\n",
+" objective='val_loss', # check performance on val data!\n",
 " max_trials=max_trials,\n",
 " executions_per_trial=executions_per_trial,\n",
 " overwrite=True,\n",
@@ -319,7 +321,7 @@
 "# we might check the best XX models in detail\n",
 "# for didactical purpose we choose only the very best one, located in [0]:\n",
 "model = hptuner.get_best_models(num_models=1)[0]\n",
-"model.save(tf_logdir + \"/best_model\")"
+"model.save(tf_logdir + \"/best_model.keras\")"
 ]
 },
 {
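
This change, like the two save/load changes in the hunks below, reflects that Keras 3 requires an explicit file extension when saving a whole model; a bare path (the old TF SavedModel directory default) raises an error. A minimal save/load round trip:

    from tensorflow import keras

    model = keras.Sequential([keras.Input(shape=(4,)), keras.layers.Dense(2)])
    model.save("best_model.keras")  # extension required; ".h5" selects legacy HDF5
    restored = keras.models.load_model("best_model.keras")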
@@ -356,7 +358,7 @@
 "outputs": [],
 "source": [
 "# load best model and reset weights\n",
-"model = keras.models.load_model(tf_logdir + \"/best_model\")\n",
+"model = keras.models.load_model(tf_logdir + \"/best_model.keras\")\n",
 "reset_weights(model) # start training from scratch\n",
 "print(model.summary())"
 ]
@@ -391,7 +393,7 @@
 " callbacks=[earlystopping_cb, tensorboard_cb],\n",
 " verbose=verbose,\n",
 ")\n",
-"model.save(tf_logdir + \"/trained_best_model\")\n",
+"model.save(tf_logdir + \"/trained_best_model.keras\")\n",
 "print(model.summary())"
 ]
 },
@@ -484,7 +486,7 @@
 "name": "python",
 "nbconvert_exporter": "python",
 "pygments_lexer": "ipython3",
-"version": "3.10.6"
+"version": "3.12.3"
 }
 },
 "nbformat": 4,
