-
Notifications
You must be signed in to change notification settings - Fork 6
Closed
Description
I recently encountered a TypeError when using the inner_cv function from the library. The error suggests that the convert_to_metrics_dict() function is receiving an unexpected number of arguments.
TypeError: convert_to_metrics_dict() takes 2 positional arguments but 3 were given
Upon investigating, I noticed a recent commit titled "Fix: convert_to_metrics_dict no longer accepts a third argument" that seems to have removed the third argument from the function. I am unsure what exactly I am doing wrong here.
from keras_tuner import (
HyperParameters,
BayesianOptimization,
RandomSearch,
Objective,
)
from keras_tuner_cv.outer_cv import OuterCV
from keras_tuner_cv.inner_cv import inner_cv
from keras_tuner_cv.utils import pd_inner_cv_get_result
import numpy as np
from sklearn.model_selection import TimeSeriesSplit
import tensorflow as tf
def search_cnn_lstm_model(hp: HyperParameters):
    """Build and compile a CNN-LSTM binary classifier for KerasTuner.

    NOTE(review): reads the module-global ``X_train`` for the input shape —
    assumes it is assigned before the tuner invokes this builder.

    Args:
        hp: KerasTuner hyperparameter container used to sample the search space.

    Returns:
        A compiled ``tf.keras.Sequential`` model.
    """
    # Sample the search space.
    learning_rate = hp.Float("lr", min_value=1e-4, max_value=1e-2, sampling="LOG")
    use_batch_norm = hp.Boolean("use_batch_norm", default=False)
    conv_filters = hp.Int("conv_filters", min_value=32, max_value=128, step=16)
    conv_kernel_size = hp.Int("conv_kernel_size", min_value=3, max_value=7, step=2)
    lstm_units = hp.Int("lstm_units", min_value=32, max_value=256, step=8)
    dense_units = hp.Int("dense_units", min_value=16, max_value=128, step=4)
    add_dense_layer = hp.Boolean("add_dense_layer", default=False)
    dense_dropout = hp.Float("dense_dropout", min_value=0.0, max_value=0.5, step=0.05)

    layers = tf.keras.layers
    model = tf.keras.models.Sequential()

    # Convolutional front end over the (timesteps, features) input windows.
    model.add(
        layers.Conv1D(
            filters=conv_filters,
            kernel_size=conv_kernel_size,
            activation="relu",
            input_shape=(X_train.shape[1], X_train.shape[2]),
        )
    )
    model.add(layers.MaxPooling1D(pool_size=2))
    model.add(layers.LSTM(lstm_units, return_sequences=False))
    if use_batch_norm:
        model.add(layers.BatchNormalization())
    # NOTE(review): original indentation was lost in transit; the dropout layer
    # is grouped with the optional dense layer here — confirm against the
    # author's intent.
    if add_dense_layer:
        model.add(layers.Dense(dense_units))
        model.add(layers.Dropout(dense_dropout))
    model.add(layers.Flatten())
    model.add(layers.Dense(units=1, activation="sigmoid"))

    model.compile(
        optimizer=tf.keras.optimizers.Adam(learning_rate=learning_rate),
        loss=tf.keras.losses.BinaryCrossentropy(),
        metrics=[tf.keras.metrics.BinaryAccuracy()],
    )

    # Side effect: writes an architecture diagram each time the tuner builds
    # a model (same file path every call, so it is repeatedly overwritten).
    tf.keras.utils.plot_model(
        model,
        to_file="model/architecture_plots/cnn_lstm_model.png",
        show_shapes=True,
        show_layer_activations=True,
    )
    return model
# Materialize the train / test / prediction splits from 21-step sequences.
X_train, X_test, y_train, y_test, X_predict = get_train_and_val_sets(
    sequence_length=21
)
# Wrap BayesianOptimization with inner cross-validation over time-series folds.
# NOTE(review): ``max_trials`` is not defined anywhere in this snippet —
# confirm it is assigned earlier in the full script.
tuner = inner_cv(BayesianOptimization)(
    search_cnn_lstm_model,
    TimeSeriesSplit(n_splits=2),
    # Alternative objective kept for reference:
    # objective=Objective("val_binary_accuracy", direction="max"),
    objective="val_loss",
    max_trials=max_trials,
    executions_per_trial=2,
    seed=42,
    save_output=True,
    save_history=True,
    directory="tmp/tb",
    project_name="cnn_lstm_vl_innercv",
)
# Run the hyperparameter search; each fit early-stops once validation
# binary accuracy has not improved for 10 epochs.
tuner.search(
    X_train,
    y_train,
    validation_data=(X_test, y_test),
    epochs=120,
    batch_size=72,
    shuffle=False,
    verbose=True,
    callbacks=[
        tf.keras.callbacks.EarlyStopping(
            monitor="val_binary_accuracy", patience=10, mode="max"
        ),
    ],
)
Metadata
Assignees
Labels
No labels