
Commit b800cf7

Update phishing_email_detection_gpt2.py
Restore the best model yet.
1 parent 048eb1b · commit b800cf7

File tree: 1 file changed (+4, -6 lines)


phishing_email_detection_gpt2.py

Lines changed: 4 additions & 6 deletions
@@ -85,7 +85,7 @@
 
 """### A custom GPT2 encoder layer for text embedding"""
 
-""" un - string out
+
 class GPT2Layer(tf.keras.layers.Layer):
 
     def __init__(self, max_seq_length, **kwargs):
@@ -183,7 +183,6 @@ def from_config(cls, config):
 hy_df = pd.DataFrame(history.history)
 print(hy_df)
 
-""" # end un - string out
 
 ### Cerebros model:
 
@@ -274,8 +273,8 @@ def from_config(cls, config):
 learning_rate = 0.0000511065
 epochs = 15 # [1, 100]
 batch_size = 20
-minimum_levels = 4
-maximum_levels = 4 # [3,7]
+minimum_levels = 2
+maximum_levels = 3 # [3,7]
 
 minimum_units_per_level = 4
 maximum_units_per_level = 8
@@ -353,8 +352,7 @@ def from_config(cls, config):
 cerebros_time_per_model = cerebros_time_all_models_min / models_tried
 
 print(f"Cerebros trained {models_tried} models FROM A COLD START in ONLY {cerebros_time_all_models_min} min. Cerebros took only {cerebros_time_per_model} minutes on average per model.")
-# Un-comment out the next line
-# print(f"GPT2 took {gpt_time_on_one_model_min} just to FINE TUNE one PRE - TRAINED model. Although this is a small scale test, this shows the advantage of scaling in ON timing VS ON**2 timing.")
+print(f"GPT2 took {gpt_time_on_one_model_min} just to FINE TUNE one PRE - TRAINED model. Although this is a small scale test, this shows the advantage of scaling in ON timing VS ON**2 timing.")
 
 
 print(f'Cerebros best accuracy achieved is {result}')
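
The first two hunks delete the """ un - string out and """ # end un - string out markers, i.e. the triple-quoted string that had been disabling the GPT-2 fine-tuning baseline; removing them puts the GPT2Layer definition and its training run back into effect, consistent with the commit message about restoring the best model. For orientation, here is a minimal sketch of what such a layer could look like. Only the class name, the __init__(self, max_seq_length, **kwargs) signature, and from_config(cls, config) appear in this diff; the KerasNLP preset name, the mean pooling, and every other detail below are assumptions for illustration, not the file's actual implementation.

# Illustrative sketch only; names outside this commit's diff are assumptions.
import tensorflow as tf
import keras_nlp  # assumed dependency providing the GPT-2 backbone


class GPT2Layer(tf.keras.layers.Layer):
    """Wraps a pre-trained GPT-2 backbone so it can serve as a text-embedding layer."""

    def __init__(self, max_seq_length, **kwargs):
        super().__init__(**kwargs)
        self.max_seq_length = max_seq_length
        # Tokenizer/packer and backbone loaded from a KerasNLP preset (preset name assumed).
        self.preprocessor = keras_nlp.models.GPT2Preprocessor.from_preset(
            "gpt2_base_en", sequence_length=max_seq_length
        )
        self.backbone = keras_nlp.models.GPT2Backbone.from_preset("gpt2_base_en")

    def call(self, inputs):
        # inputs: a batch of raw email strings.
        # The backbone returns per-token activations of shape (batch, seq, hidden);
        # mean pooling collapses them into one fixed-size embedding per email.
        tokens = self.preprocessor(inputs)
        sequence = self.backbone(tokens)
        return tf.reduce_mean(sequence, axis=1)

    def get_config(self):
        config = super().get_config()
        config.update({"max_seq_length": self.max_seq_length})
        return config

    @classmethod
    def from_config(cls, config):
        return cls(**config)

Mean pooling is only one plausible way to reduce the per-token GPT-2 activations to a single vector; the actual script may pool differently or pass the full sequence to downstream layers. The remaining hunks narrow the Cerebros search space (minimum_levels and maximum_levels drop from 4/4 to 2 and 3) and re-enable the print comparing GPT-2 fine-tuning time with Cerebros' per-model search time.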
