
Commit 404436c

committed: included new functionalities in documentation
1 parent 016e08e commit 404436c

File tree: 1 file changed, +146 -1 lines changed

README.md

Lines changed: 146 additions & 1 deletion
```diff
@@ -362,8 +362,13 @@ predictor = HybridUni(
 
 predictor.create_cnnrnn(
     optimizer: str = 'adam',
+    optimizer_args: dict = None,
     loss: str = 'mean_squared_error',
     metrics: str = 'mean_squared_error',
+    conv_block_one: int = 1,
+    conv_block_two: int = 1,
+    rnn_block_one: int = 1,
+    rnn_block_two: int = 1,
     layer_config =
     {
         'layer0': (64, 1, 'relu', 0.0, 0.0), # (filter_size, kernel_size, activation, regularization, dropout)
@@ -376,8 +381,13 @@ predictor.create_cnnrnn(
 
 predictor.create_cnnlstm(
     optimizer: str = 'adam',
+    optimizer_args: dict = None,
     loss: str = 'mean_squared_error',
     metrics: str = 'mean_squared_error',
+    conv_block_one: int = 1,
+    conv_block_two: int = 1,
+    lstm_block_one: int = 1,
+    lstm_block_two: int = 1,
     layer_config =
     {
         'layer0': (64, 1, 'relu', 0.0, 0.0), # (filter_size, kernel_size, activation, regularization, dropout)
@@ -390,8 +400,13 @@ predictor.create_cnnlstm(
 
 predictor.create_cnngru(
     optimizer: str = 'adam',
+    optimizer_args: dict = None,
     loss: str = 'mean_squared_error',
     metrics: str = 'mean_squared_error',
+    conv_block_one: int = 1,
+    conv_block_two: int = 1,
+    gru_block_one: int = 1,
+    gru_block_two: int = 1,
     layer_config =
     {
         'layer0': (64, 1, 'relu', 0.0, 0.0), # (filter_size, kernel_size, activation, regularization, dropout)
@@ -404,8 +419,13 @@ predictor.create_cnngru(
 
 predictor.create_cnnbirnn(
     optimizer: str = 'adam',
+    optimizer_args: dict = None,
     loss: str = 'mean_squared_error',
     metrics: str = 'mean_squared_error',
+    conv_block_one: int = 1,
+    conv_block_two: int = 1,
+    birnn_block_one: int = 1,
+    rnn_block_one: int = 1,
     layer_config =
     {
         'layer0': (64, 1, 'relu', 0.0, 0.0), # (filter_size, kernel_size, activation, regularization, dropout)
@@ -418,8 +438,13 @@ predictor.create_cnnbirnn(
 
 predictor.create_cnnbilstm(
     optimizer: str = 'adam',
+    optimizer_args: dict = None,
     loss: str = 'mean_squared_error',
     metrics: str = 'mean_squared_error',
+    conv_block_one: int = 1,
+    conv_block_two: int = 1,
+    bilstm_block_one: int = 1,
+    lstm_block_one: int = 1,
     layer_config =
     {
         'layer0': (64, 1, 'relu', 0.0, 0.0), # (filter_size, kernel_size, activation, regularization, dropout)
@@ -432,8 +457,13 @@ predictor.create_cnnbilstm(
 
 predictor.create_cnnbigru(
     optimizer: str = 'adam',
+    optimizer_args: dict = None,
     loss: str = 'mean_squared_error',
     metrics: str = 'mean_squared_error',
+    conv_block_one: int = 1,
+    conv_block_two: int = 1,
+    bigru_block_one: int = 1,
+    gru_block_one: int = 1,
     layer_config =
     {
         'layer0': (64, 1, 'relu', 0.0, 0.0), # (filter_size, kernel_size, activation, regularization, dropout)
```
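A minimal usage sketch of the extended hybrid univariate builders may help readers of the README. The snippet below assumes `predictor` is an already constructed `HybridUni` instance, that `optimizer_args` holds keyword arguments forwarded to the chosen Keras optimizer (the `learning_rate` key is an assumption, not something this diff specifies), and that the `*_block_*` arguments control how often the corresponding block is stacked.

```python
# Hypothetical sketch of the extended create_cnnlstm call shown above.
# Assumes `predictor` is a constructed HybridUni instance and that the contents
# of optimizer_args are passed through to the Keras optimizer.
predictor.create_cnnlstm(
    optimizer='adam',
    optimizer_args={'learning_rate': 0.001},  # assumed optimizer keyword argument
    loss='mean_squared_error',
    metrics='mean_squared_error',
    conv_block_one=2,   # stack the first convolutional block twice
    conv_block_two=1,
    lstm_block_one=1,
    lstm_block_two=1,
    # layer_config omitted: the README default shown above is used
)
```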
```diff
@@ -491,8 +521,12 @@ predictor = PureMulti(steps_past: int, steps_future: int, data = DataFrame(), fe
 
 predictor.create_mlp(
     optimizer: str = 'adam',
+    optimizer_args: dict = None,
     loss: str = 'mean_squared_error',
     metrics: str = 'mean_squared_error',
+    dense_block_one: int = 1,
+    dense_block_two: int = 1,
+    dense_block_three: int = 1,
     layer_config: dict =
     {
         'layer0': (50, 'relu', 0.0, 0.0), # (neurons, activation, regularization, dropout)
@@ -503,8 +537,12 @@ predictor.create_mlp(
 
 predictor.create_rnn(
     optimizer: str = 'adam',
+    optimizer_args: dict = None,
     loss: str = 'mean_squared_error',
     metrics: str = 'mean_squared_error',
+    rnn_block_one: int = 1,
+    rnn_block_two: int = 1,
+    rnn_block_three: int = 1,
     layer_config: dict =
     {
         'layer0': (40, 'relu', 0.0, 0.0), # (neurons, activation, regularization, dropout)
@@ -515,8 +553,12 @@ predictor.create_rnn(
 
 predictor.create_lstm(
     optimizer: str = 'adam',
+    optimizer_args: dict = None,
     loss: str = 'mean_squared_error',
     metrics: str = 'mean_squared_error',
+    lstm_block_one: int = 1,
+    lstm_block_two: int = 1,
+    lstm_block_three: int = 1,
     layer_config: dict =
     {
         'layer0': (40, 'relu', 0.0, 0.0), # (neurons, activation, regularization, dropout)
@@ -527,8 +569,12 @@ predictor.create_lstm(
 
 predictor.create_gru(
     optimizer: str = 'adam',
+    optimizer_args: dict = None,
     loss: str = 'mean_squared_error',
     metrics: str = 'mean_squared_error',
+    gru_block_one: int = 1,
+    gru_block_two: int = 1,
+    gru_block_three: int = 1,
     layer_config: dict =
     {
         'layer0': (40, 'relu', 0.0, 0.0), # (neurons, activation, regularization, dropout)
@@ -539,8 +585,12 @@ predictor.create_gru(
 
 predictor.create_cnn(
     optimizer: str = 'adam',
+    optimizer_args: dict = None,
     loss: str = 'mean_squared_error',
     metrics: str = 'mean_squared_error',
+    conv_block_one: int = 1,
+    conv_block_two: int = 1,
+    dense_block_one: int = 1,
     layer_config: dict =
     {
         'layer0': (64, 1, 'relu', 0.0, 0.0), # (filter_size, kernel_size, activation, regularization, dropout)
@@ -552,8 +602,11 @@ predictor.create_cnn(
 
 predictor.create_birnn(
     optimizer: str = 'adam',
+    optimizer_args: dict = None,
     loss: str = 'mean_squared_error',
     metrics: str = 'mean_squared_error',
+    birnn_block_one: int = 1,
+    rnn_block_one: int = 1,
     layer_config: dict =
     {
         'layer0': (50, 'relu', 0.0, 0.0), # (neurons, activation, regularization, dropout)
@@ -563,8 +616,11 @@ predictor.create_birnn(
 
 predictor.create_bilstm(
     optimizer: str = 'adam',
+    optimizer_args: dict = None,
     loss: str = 'mean_squared_error',
     metrics: str = 'mean_squared_error',
+    bilstm_block_one: int = 1,
+    lstm_block_one: int = 1,
     layer_config: dict =
     {
         'layer0': (50, 'relu', 0.0, 0.0), # (neurons, activation, regularization, dropout)
@@ -574,8 +630,11 @@ predictor.create_bilstm(
 
 predictor.create_bigru(
     optimizer: str = 'adam',
+    optimizer_args: dict = None,
     loss: str = 'mean_squared_error',
     metrics: str = 'mean_squared_error',
+    bigru_block_one: int = 1,
+    gru_block_one: int = 1,
     layer_config: dict =
     {
         'layer0': (50, 'relu', 0.0, 0.0), # (neurons, activation, regularization, dropout)
@@ -585,8 +644,13 @@ predictor.create_bigru(
 
 predictor.create_encdec_rnn(
     optimizer: str = 'adam',
+    optimizer_args: dict = None,
     loss: str = 'mean_squared_error',
     metrics: str = 'mean_squared_error',
+    enc_rnn_block_one: int = 1,
+    enc_rnn_block_two: int = 1,
+    dec_rnn_block_one: int = 1,
+    dec_rnn_block_two: int = 1,
     layer_config: dict =
     {
         'layer0': (100, 'relu', 0.0, 0.0), # (neurons, activation, regularization, dropout)
@@ -598,8 +662,13 @@ predictor.create_encdec_rnn(
 
 predictor.create_encdec_lstm(
     optimizer: str = 'adam',
+    optimizer_args: dict = None,
     loss: str = 'mean_squared_error',
     metrics: str = 'mean_squared_error',
+    enc_lstm_block_one: int = 1,
+    enc_lstm_block_two: int = 1,
+    dec_lstm_block_one: int = 1,
+    dec_lstm_block_two: int = 1,
     layer_config: dict =
     {
         'layer0': (100, 'relu', 0.0, 0.0), # (neurons, activation, regularization, dropout)
@@ -611,8 +680,13 @@ predictor.create_encdec_lstm(
 
 predictor.create_encdec_cnn(
     optimizer: str = 'adam',
+    optimizer_args: dict = None,
     loss: str = 'mean_squared_error',
     metrics: str = 'mean_squared_error',
+    enc_conv_block_one: int = 1,
+    enc_conv_block_two: int = 1,
+    dec_gru_block_one: int = 1,
+    dec_gru_block_two: int = 1,
     layer_config: dict =
     {
         'layer0': (64, 1, 'relu', 0.0, 0.0), # (filter_size, kernel_size, activation, regularization, dropout)
@@ -625,8 +699,13 @@ predictor.create_encdec_cnn(
 
 predictor.create_encdec_gru(
     optimizer: str = 'adam',
+    optimizer_args: dict = None,
     loss: str = 'mean_squared_error',
     metrics: str = 'mean_squared_error',
+    enc_gru_block_one: int = 1,
+    enc_gru_block_two: int = 1,
+    dec_gru_block_one: int = 1,
+    dec_gru_block_two: int = 1,
     layer_config: dict =
     {
         'layer0': (100, 'relu', 0.0, 0.0), # (neurons, activation, regularization, dropout)
```
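The same pattern applies to the pure multivariate builders. Below is a hedged sketch, again assuming `predictor` is a constructed `PureMulti` instance and that the keys placed in `optimizer_args` must match the selected Keras optimizer (neither assumption is spelled out by this diff).

```python
# Hypothetical sketch of the extended create_lstm call shown above.
# Assumes `predictor` is a constructed PureMulti instance.
predictor.create_lstm(
    optimizer='adam',
    optimizer_args={'learning_rate': 0.01},  # assumed Adam keyword argument
    loss='mean_squared_error',
    metrics='mean_squared_error',
    lstm_block_one=1,
    lstm_block_two=2,   # repeat the second LSTM block
    lstm_block_three=1,
    # layer_config omitted: the README default shown above is used
)
```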
```diff
@@ -680,8 +759,13 @@ predictor = HybridMulti(sub_seq: int, steps_past: int, steps_future: int, data =
 
 predictor.create_cnnrnn(
     optimizer: str = 'adam',
+    optimizer_args: dict = None,
     loss: str = 'mean_squared_error',
     metrics: str = 'mean_squared_error',
+    conv_block_one: int = 1,
+    conv_block_two: int = 1,
+    rnn_block_one: int = 1,
+    rnn_block_two: int = 1,
     layer_config =
     {
         'layer0': (64, 1, 'relu', 0.0, 0.0), # (filter_size, kernel_size, activation, regularization, dropout)
@@ -694,8 +778,13 @@ predictor.create_cnnrnn(
 
 predictor.create_cnnlstm(
     optimizer: str = 'adam',
+    optimizer_args: dict = None,
     loss: str = 'mean_squared_error',
     metrics: str = 'mean_squared_error',
+    conv_block_one: int = 1,
+    conv_block_two: int = 1,
+    lstm_block_one: int = 1,
+    lstm_block_two: int = 1,
     layer_config =
     {
         'layer0': (64, 1, 'relu', 0.0, 0.0), # (filter_size, kernel_size, activation, regularization, dropout)
@@ -708,8 +797,13 @@ predictor.create_cnnlstm(
 
 predictor.create_cnngru(
     optimizer: str = 'adam',
+    optimizer_args: dict = None,
     loss: str = 'mean_squared_error',
     metrics: str = 'mean_squared_error',
+    conv_block_one: int = 1,
+    conv_block_two: int = 1,
+    gru_block_one: int = 1,
+    gru_block_two: int = 1,
     layer_config =
     {
         'layer0': (64, 1, 'relu', 0.0, 0.0), # (filter_size, kernel_size, activation, regularization, dropout)
@@ -722,8 +816,13 @@ predictor.create_cnngru(
 
 predictor.create_cnnbirnn(
     optimizer: str = 'adam',
+    optimizer_args: dict = None,
     loss: str = 'mean_squared_error',
     metrics: str = 'mean_squared_error',
+    conv_block_one: int = 1,
+    conv_block_two: int = 1,
+    birnn_block_one: int = 1,
+    rnn_block_one: int = 1,
     layer_config =
     {
         'layer0': (64, 1, 'relu', 0.0, 0.0), # (filter_size, kernel_size, activation, regularization, dropout)
@@ -736,8 +835,13 @@ predictor.create_cnnbirnn(
 
 predictor.create_cnnbilstm(
     optimizer: str = 'adam',
+    optimizer_args: dict = None,
     loss: str = 'mean_squared_error',
     metrics: str = 'mean_squared_error',
+    conv_block_one: int = 1,
+    conv_block_two: int = 1,
+    bilstm_block_one: int = 1,
+    lstm_block_one: int = 1,
     layer_config =
     {
         'layer0': (64, 1, 'relu', 0.0, 0.0), # (filter_size, kernel_size, activation, regularization, dropout)
@@ -750,8 +854,13 @@ predictor.create_cnnbilstm(
 
 predictor.create_cnnbigru(
     optimizer: str = 'adam',
+    optimizer_args: dict = None,
     loss: str = 'mean_squared_error',
     metrics: str = 'mean_squared_error',
+    conv_block_one: int = 1,
+    conv_block_two: int = 1,
+    bigru_block_one: int = 1,
+    gru_block_one: int = 1,
     layer_config =
     {
         'layer0': (64, 1, 'relu', 0.0, 0.0), # (filter_size, kernel_size, activation, regularization, dropout)
```
```diff
@@ -855,6 +964,14 @@ predictor = OptimizePureUni(steps_past=5, steps_future=10, data=data, scale='sta
         'layer2': (2, 'relu')
     }
 ],
+optimizer_args_range = [
+    {
+        'learning_rate': 0.02,
+    },
+    {
+        'learning_rate': 0.0001,
+    }
+]
 optimization_target='minimize', n_trials = 2)
 def create_fit_model(predictor: object, *args, **kwargs):
     # use optimizable create_fit_xxx method
@@ -912,6 +1029,14 @@ predictor = OptimizeHybridUni(sub_seq = 2, steps_past = 10, steps_future = 5, da
         'layer4': (10, 'relu')
     }
 ],
+optimizer_args_range = [
+    {
+        'learning_rate': 0.02,
+    },
+    {
+        'learning_rate': 0.0001,
+    }
+]
 optimization_target='minimize', n_trials = 2)
 def create_fit_model(predictor: object, *args, **kwargs):
     return predictor.create_fit_cnnlstm(*args, **kwargs)
@@ -965,6 +1090,14 @@ predictor = OptimizePureMulti(
         'layer2': (20, 'sigmoid')
     }
 ],
+optimizer_args_range = [
+    {
+        'learning_rate': 0.02,
+    },
+    {
+        'learning_rate': 0.0001,
+    }
+]
 optimization_target='minimize', n_trials = 3)
 def create_fit_model(predictor: object, *args, **kwargs):
     return predictor.create_fit_lstm(*args, **kwargs)
@@ -1027,6 +1160,14 @@ predictor = OptimizeHybridMulti(
         'layer4': (5, 'relu')
     }
 ],
+optimizer_args_range = [
+    {
+        'learning_rate': 0.02,
+    },
+    {
+        'learning_rate': 0.0001,
+    }
+]
 optimization_target='minimize', n_trials = 3)
 def create_fit_model(predictor: object, *args, **kwargs):
     return predictor.create_fit_cnnlstm(*args, **kwargs)
```
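Reading the added `optimizer_args_range` values in these optimization examples, the two dictionaries appear to bound the search interval for each optimizer argument (here a learning rate searched between 0.0001 and 0.02 across the configured trials). This reading is an assumption based on the values shown, not something the diff states explicitly.

```python
# Assumed interpretation of optimizer_args_range: one dictionary per end of the
# search interval for each optimizer argument explored during the trials.
optimizer_args_range = [
    {'learning_rate': 0.02},    # assumed upper bound of the search interval
    {'learning_rate': 0.0001},  # assumed lower bound of the search interval
]
```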
```diff
@@ -1061,7 +1202,11 @@ predictor = OptimizePureMulti(...)
     {...},
     {...}
 ],
-...)
+optimizer_args_range = [
+    {...},
+    {...},
+]
+optimization_target = '...', n_trials = x)
 def create_fit_model(predictor: object, *args, **kwargs): # seeker harness
     return predictor.create_fit_xxx(*args, **kwargs)
 
```
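To make the generic seeker-harness template above concrete: the earlier `OptimizePureMulti` example fills the `create_fit_xxx` placeholder with `create_fit_lstm`, and any of the other optimizable builders could be substituted the same way.

```python
# Seeker harness from the OptimizePureMulti example above: the optimizer calls
# this function with sampled arguments (layer_config, optimizer_args, block
# counts) and it forwards them to one concrete builder; create_fit_lstm is used
# here, but any create_fit_* method shown earlier could take its place.
def create_fit_model(predictor: object, *args, **kwargs):
    return predictor.create_fit_lstm(*args, **kwargs)
```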