Commit c3dee15

committed
Changed main files
1 parent 23a8edc commit c3dee15

4 files changed: +131 additions, -98 deletions

model/main/hybrid_mlnn.py

Lines changed: 97 additions & 65 deletions
@@ -1,106 +1,138 @@
-from model.optimizer.swarm import BFO, PSO
-from model.optimizer.evolutionary import GA, DE, CRO
+#!/usr/bin/env python
+# ------------------------------------------------------------------------------------------------------%
+# Created by "Thieu Nguyen" at 18:10, 06/04/2020 %
+# %
+
+# Homepage: https://www.researchgate.net/profile/Thieu_Nguyen6 %
+# Github: https://github.com/thieunguyen5991 %
+# -------------------------------------------------------------------------------------------------------%
+
+from mealpy.swarm_based import PSO, BFO
+from mealpy.evolutionary_based import GA, DE, CRO
 from model.root.hybrid.root_hybrid_mlnn import RootHybridMlnn
 
 
 class GaMlnn(RootHybridMlnn):
     def __init__(self, root_base_paras=None, root_hybrid_paras=None, ga_paras=None):
         RootHybridMlnn.__init__(self, root_base_paras, root_hybrid_paras)
-        self.ga_paras = ga_paras
-        self.filename = "GA_MLNN-sliding_{}-nets_{}-ga_{}".format(root_base_paras["sliding"],
-            [root_hybrid_paras["epoch"], root_hybrid_paras["activations"], root_hybrid_paras["hidden_size"],
-             root_hybrid_paras["train_valid_rate"]], ga_paras)
+        self.epoch = ga_paras["epoch"]
+        self.pop_size = ga_paras["pop_size"]
+        self.pc = ga_paras["pc"]
+        self.pm = ga_paras["pm"]
+        self.filename = "GA_MLNN-sliding_{}-{}".format(root_base_paras["sliding"], root_hybrid_paras["paras_name"])
 
     def _training__(self):
-        ga = GA.BaseGA(root_algo_paras=self.root_algo_paras, ga_paras = self.ga_paras)
-        self.solution, self.loss_train = ga._train__()
-
+        md = GA.BaseGA(self._objective_function__, self.problem_size, self.domain_range, self.print_train, self.epoch, self.pop_size, self.pc, self.pm)
+        self.solution, self.best_fit, self.loss_train = md._train__()
 
 
 class DeMlnn(RootHybridMlnn):
     def __init__(self, root_base_paras=None, root_hybrid_paras=None, de_paras=None):
         RootHybridMlnn.__init__(self, root_base_paras, root_hybrid_paras)
-        self.de_paras = de_paras
-        self.filename = "DE_MLNN-sliding_{}-nets_{}-de_{}".format(root_base_paras["sliding"],
-            [root_hybrid_paras["epoch"], root_hybrid_paras["activations"], root_hybrid_paras["hidden_size"],
-             root_hybrid_paras["train_valid_rate"]], de_paras)
+        self.epoch = de_paras["epoch"]
+        self.pop_size = de_paras["pop_size"]
+        self.wf = de_paras["wf"]
+        self.cr = de_paras["cr"]
+        self.filename = "DE_MLNN-sliding_{}-{}".format(root_base_paras["sliding"], root_hybrid_paras["paras_name"])
 
     def _training__(self):
-        md = DE.BaseDE(root_algo_paras=self.root_algo_paras, de_paras = self.de_paras)
-        self.solution, self.loss_train = md._train__()
-
+        md = DE.BaseDE(self._objective_function__, self.problem_size, self.domain_range, self.print_train, self.epoch, self.pop_size, self.wf, self.cr)
+        self.solution, self.best_fit, self.loss_train = md._train__()
 
 
-class CroMlnn(RootHybridMlnn):
-    def __init__(self, root_base_paras=None, root_hybrid_paras=None, cro_paras=None):
+class PsoMlnn(RootHybridMlnn):
+    def __init__(self, root_base_paras=None, root_hybrid_paras=None, pso_paras=None):
         RootHybridMlnn.__init__(self, root_base_paras, root_hybrid_paras)
-        self.cro_paras = cro_paras
-        self.filename = "CRO_MLNN-sliding_{}-nets_{}-cro_{}".format(root_base_paras["sliding"],
-            [root_hybrid_paras["epoch"], root_hybrid_paras["activations"], root_hybrid_paras["hidden_size"],
-             root_hybrid_paras["train_valid_rate"]], cro_paras)
+        self.epoch = pso_paras["epoch"]
+        self.pop_size = pso_paras["pop_size"]
+        self.c1 = pso_paras["c_minmax"][0]
+        self.c2 = pso_paras["c_minmax"][1]
+        self.w_min = pso_paras["w_minmax"][0]
+        self.w_max = pso_paras["w_minmax"][1]
+        self.filename = "PSO_MLNN-sliding_{}-{}".format(root_base_paras["sliding"], root_hybrid_paras["paras_name"])
 
     def _training__(self):
-        cro = CRO.BaseCRO(root_algo_paras=self.root_algo_paras, cro_paras = self.cro_paras)
-        self.solution, self.loss_train = cro._train__()
+        md = PSO.BasePSO(self._objective_function__, self.problem_size, self.domain_range, self.print_train,
+                         self.epoch, self.pop_size, self.c1, self.c2, self.w_min, self.w_max)
+        self.solution, self.best_fit, self.loss_train = md._train__()
 
 
-
-class OCroMlnn(RootHybridMlnn):
-    def __init__(self, root_base_paras=None, root_hybrid_paras=None, cro_paras=None, ocro_paras=None):
+class BfoMlnn(RootHybridMlnn):
+    def __init__(self, root_base_paras=None, root_hybrid_paras=None, bfo_paras=None):
         RootHybridMlnn.__init__(self, root_base_paras, root_hybrid_paras)
-        self.cro_paras = cro_paras
-        self.ocro_paras = ocro_paras
-        self.filename = "OCRO_MLNN-sliding_{}-nets_{}-cro_para_{}-ocro_{}".format(root_base_paras["sliding"],
-            [root_hybrid_paras["epoch"], root_hybrid_paras["activations"], root_hybrid_paras["hidden_size"],
-             root_hybrid_paras["train_valid_rate"]], cro_paras, ocro_paras)
+        self.pop_size = bfo_paras["pop_size"]
+        self.Ci = bfo_paras["Ci"]
+        self.Ped = bfo_paras["Ped"]
+        self.Ns = bfo_paras["Ns"]
+        self.Ned = bfo_paras["Ned"]
+        self.Nre = bfo_paras["Nre"]
+        self.Nc = bfo_paras["Nc"]
+        self.attract_repels = bfo_paras["attract_repels"]
+        self.filename = "BFO_MLNN-sliding_{}-{}".format(root_base_paras["sliding"], root_hybrid_paras["paras_name"])
 
     def _training__(self):
-        cro = CRO.OCRO(root_algo_paras=self.root_algo_paras, cro_paras = self.cro_paras, ocro_paras=self.ocro_paras)
-        self.solution, self.loss_train = cro._train__()
-
-
-
-
-
+        md = BFO.BaseBFO(self._objective_function__, self.problem_size, self.domain_range, self.print_train,
+                         self.pop_size, self.Ci, self.Ped, self.Ns, self.Ned, self.Nre, self.Nc, self.attract_repels)
+        self.solution, self.best_fit, self.loss_train = md._train__()
 
 
-
-class PsoMlnn(RootHybridMlnn):
-    def __init__(self, root_base_paras=None, root_hybrid_paras=None, pso_paras=None):
+class ABfoLSMlnn(RootHybridMlnn):
+    def __init__(self, root_base_paras=None, root_hybrid_paras=None, abfols_paras=None):
         RootHybridMlnn.__init__(self, root_base_paras, root_hybrid_paras)
-        self.pso_paras = pso_paras
-        self.filename = "PSO_MLNN-sliding_{}-nets_{}-pso_{}".format(root_base_paras["sliding"],
-            [root_hybrid_paras["epoch"], root_hybrid_paras["activations"], root_hybrid_paras["hidden_size"],
-             root_hybrid_paras["train_valid_rate"]], pso_paras)
+        self.epoch = abfols_paras["epoch"]
+        self.pop_size = abfols_paras["pop_size"]
+        self.Ci = abfols_paras["Ci"]
+        self.Ped = abfols_paras["Ped"]
+        self.Ns = abfols_paras["Ns"]
+        self.N_minmax = abfols_paras["N_minmax"]
+        self.filename = "ABFOLS_MLNN-sliding_{}-{}".format(root_base_paras["sliding"], root_hybrid_paras["paras_name"])
 
     def _training__(self):
-        pso = PSO.BasePSO(root_algo_paras=self.root_algo_paras, pso_paras = self.pso_paras)
-        self.solution, self.loss_train = pso._train__()
+        md = BFO.ABFOLS(self._objective_function__, self.problem_size, self.domain_range, self.print_train,
+                        self.epoch, self.pop_size, self.Ci, self.Ped, self.Ns, self.N_minmax)
+        self.solution, self.best_fit, self.loss_train = md._train__()
 
-class BfoMlnn(RootHybridMlnn):
-    def __init__(self, root_base_paras=None, root_hybrid_paras=None, bfo_paras=None):
+
+class CroMlnn(RootHybridMlnn):
+    def __init__(self, root_base_paras=None, root_hybrid_paras=None, cro_paras=None):
         RootHybridMlnn.__init__(self, root_base_paras, root_hybrid_paras)
-        self.bfo_paras = bfo_paras
-        self.filename = "BFO_MLNN-sliding_{}-nets_{}-bfo_{}".format(root_base_paras["sliding"],
-            [root_hybrid_paras["epoch"], root_hybrid_paras["activations"], root_hybrid_paras["hidden_size"],
-             root_hybrid_paras["train_valid_rate"]], bfo_paras)
+        self.epoch = cro_paras["epoch"]
+        self.pop_size = cro_paras["pop_size"]
+        self.po = cro_paras["po"]
+        self.Fb = cro_paras["Fb"]
+        self.Fa = cro_paras["Fa"]
+        self.Fd = cro_paras["Fd"]
+        self.Pd = cro_paras["Pd"]
+        self.G = cro_paras["G"]
+        self.GCR = cro_paras["GCR"]
+        self.k = cro_paras["k"]
+        self.filename = "CRO_MLNN-sliding_{}-{}".format(root_base_paras["sliding"], root_hybrid_paras["paras_name"])
 
     def _training__(self):
-        md = BFO.BaseBFO(root_algo_paras=self.root_algo_paras, bfo_paras = self.bfo_paras)
-        self.solution, self.loss_train = md._train__()
-
+        md = CRO.BaseCRO(self._objective_function__, self.problem_size, self.domain_range, self.print_train,
+                         self.epoch, self.pop_size, self.po, self.Fb, self.Fa, self.Fd, self.Pd, self.G, self.GCR, self.k)
+        self.solution, self.best_fit, self.loss_train = md._train__()
 
 
-class ABfoLSMlnn(RootHybridMlnn):
-    def __init__(self, root_base_paras=None, root_hybrid_paras=None, abfols_paras=None):
+class OCroMlnn(RootHybridMlnn):
+    def __init__(self, root_base_paras=None, root_hybrid_paras=None, ocro_paras=None):
         RootHybridMlnn.__init__(self, root_base_paras, root_hybrid_paras)
-        self.abfols_paras = abfols_paras
-        self.filename = "ABfoLS_MLNN-sliding_{}-nets_{}-abfols_{}".format(root_base_paras["sliding"],
-            [root_hybrid_paras["epoch"], root_hybrid_paras["activations"], root_hybrid_paras["hidden_size"],
-             root_hybrid_paras["train_valid_rate"]], abfols_paras)
+        self.epoch = ocro_paras["epoch"]
+        self.pop_size = ocro_paras["pop_size"]
+        self.po = ocro_paras["po"]
+        self.Fb = ocro_paras["Fb"]
+        self.Fa = ocro_paras["Fa"]
+        self.Fd = ocro_paras["Fd"]
+        self.Pd = ocro_paras["Pd"]
+        self.G = ocro_paras["G"]
+        self.GCR = ocro_paras["GCR"]
+        self.k = ocro_paras["k"]
+        self.restart_count = ocro_paras["restart_count"]
+        self.filename = "OCRO_MLNN-sliding_{}-{}".format(root_base_paras["sliding"], root_hybrid_paras["paras_name"])
 
     def _training__(self):
-        md = BFO.ABFOLS(root_algo_paras=self.root_algo_paras, abfols_paras=self.abfols_paras)
-        self.solution, self.loss_train = md._train__()
+        md = CRO.OCRO(self._objective_function__, self.problem_size, self.domain_range, self.print_train,
+                      self.epoch, self.pop_size, self.po, self.Fb, self.Fa, self.Fd, self.Pd, self.G, self.GCR, self.k, self.restart_count)
+        self.solution, self.best_fit, self.loss_train = md._train__()
 
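Note: the refactor above swaps the in-repo optimizers for the mealpy package and changes _train__() to return (solution, best_fit, loss_train). A minimal standalone sketch of the new call pattern, assuming the 2020-era mealpy API used in this diff and a toy sphere objective standing in for the network's training loss (all concrete values are illustrative, not from this commit):

    import numpy as np
    from mealpy.evolutionary_based import GA

    def objective_function(solution):
        # toy stand-in for self._objective_function__ (sphere function)
        return np.sum(np.array(solution) ** 2)

    problem_size = 30          # illustrative; in the model this is the number of encoded network weights
    domain_range = (-1, 1)     # illustrative search range
    print_train = True
    epoch, pop_size, pc, pm = 100, 50, 0.95, 0.025   # illustrative values for the ga_paras keys

    md = GA.BaseGA(objective_function, problem_size, domain_range, print_train, epoch, pop_size, pc, pm)
    solution, best_fit, loss_train = md._train__()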

model/main/traditional_ffnn.py

Lines changed: 12 additions & 3 deletions
@@ -1,11 +1,21 @@
+#!/usr/bin/env python
+# ------------------------------------------------------------------------------------------------------%
+# Created by "Thieu Nguyen" at 01:51, 29/03/2020 %
+# %
+
+# Homepage: https://www.researchgate.net/profile/Thieu_Nguyen6 %
+# Github: https://github.com/thieunguyen5991 %
+# -------------------------------------------------------------------------------------------------------%
+
 from keras.models import Sequential
 from keras.layers import Dense
 from model.root.traditional.root_mlnn import RootMlnn
 
+
 class Mlnn1HL(RootMlnn):
     def __init__(self, root_base_paras=None, root_mlnn_paras=None):
         RootMlnn.__init__(self, root_base_paras, root_mlnn_paras)
-        self.filename = "MLNN-1H-sliding_{}-net_para_{}".format(root_base_paras["sliding"], root_mlnn_paras)
+        self.filename = "MLNN-1H-sliding_{}-{}".format(root_base_paras["sliding"], root_mlnn_paras["paras_name"])
 
     def _training__(self):
         self.model = Sequential()
@@ -19,7 +29,7 @@ def _training__(self):
 class Mlnn2HL(RootMlnn):
     def __init__(self, root_base_paras=None, root_mlnn_paras=None):
         RootMlnn.__init__(self, root_base_paras, root_mlnn_paras)
-        self.filename = "MLNN-2H-sliding_{}-net_para_{}".format(root_base_paras["sliding"], root_mlnn_paras)
+        self.filename = "MLNN-2H-sliding_{}-{}".format(root_base_paras["sliding"], root_mlnn_paras["paras_name"])
 
     def _training__(self):
         self.model = Sequential()
@@ -29,4 +39,3 @@ def _training__(self):
         self.model.compile(loss=self.loss, optimizer=self.optimizer)
         ml = self.model.fit(self.X_train, self.y_train, epochs=self.epoch, batch_size=self.batch_size, verbose=self.print_train)
         self.loss_train = ml.history["loss"]
-
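The only functional change in this file is the filename template, which now reads a single "paras_name" string from the parameter dict instead of dumping the whole dict. A quick illustration of the resulting name, with assumed values (only the keys shown are read by the new template):

    root_base_paras = {"sliding": 3}                          # assumed value
    root_mlnn_paras = {"paras_name": "epoch_500-hidden_10"}   # assumed value
    filename = "MLNN-1H-sliding_{}-{}".format(root_base_paras["sliding"], root_mlnn_paras["paras_name"])
    print(filename)   # MLNN-1H-sliding_3-epoch_500-hidden_10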

model/main/traditional_rnn.py

Lines changed: 21 additions & 29 deletions
@@ -1,12 +1,21 @@
+#!/usr/bin/env python
+# ------------------------------------------------------------------------------------------------------%
+# Created by "Thieu Nguyen" at 02:51, 29/03/2020 %
+# %
+
+# Homepage: https://www.researchgate.net/profile/Thieu_Nguyen6 %
+# Github: https://github.com/thieunguyen5991 %
+# -------------------------------------------------------------------------------------------------------%
+
 from keras.models import Sequential
 from keras.layers import Dense, LSTM, GRU, Dropout
 from model.root.traditional.root_rnn import RootRnn
 
+
 class Rnn1HL(RootRnn):
     def __init__(self, root_base_paras=None, root_rnn_paras=None):
         RootRnn.__init__(self, root_base_paras, root_rnn_paras)
-        self.filename = "RNN-1HL-sliding_{}-net_para_{}".format(root_base_paras["sliding"], [self.hidden_sizes, self.epoch,
-                                                                self.batch_size, self.learning_rate, self.activations, self.optimizer, self.loss])
+        self.filename = "RNN-1HL-sliding_{}-{}".format(root_base_paras["sliding"], root_rnn_paras["paras_name"])
 
     def _training__(self):
         # The RNN architecture
@@ -22,8 +31,7 @@ def _training__(self):
 class Rnn2HL(RootRnn):
     def __init__(self, root_base_paras=None, root_rnn_paras=None):
         RootRnn.__init__(self, root_base_paras, root_rnn_paras)
-        self.filename = "RNN-2HL-sliding_{}-net_para_{}".format(root_base_paras["sliding"], [self.hidden_sizes, self.epoch,
-                                                                self.batch_size, self.learning_rate, self.activations, self.optimizer, self.loss])
+        self.filename = "RNN-2HL-sliding_{}-{}".format(root_base_paras["sliding"], root_rnn_paras["paras_name"])
 
     def _training__(self):
         # The RNN architecture
@@ -38,13 +46,11 @@ def _training__(self):
         self.loss_train = ml.history["loss"]
 
 
-
-
 class Lstm1HL(RootRnn):
     def __init__(self, root_base_paras=None, root_rnn_paras=None):
         RootRnn.__init__(self, root_base_paras, root_rnn_paras)
-        self.filename = "LSTM-1HL-sliding_{}-net_para_{}".format(root_base_paras["sliding"], [self.hidden_sizes, self.epoch,
-                                                                 self.batch_size, self.learning_rate, self.activations, self.optimizer, self.loss])
+        self.filename = "LSTM-1HL-sliding_{}-{}".format(root_base_paras["sliding"], root_rnn_paras["paras_name"])
+
     def _training__(self):
         # The LSTM architecture
         self.model = Sequential()
@@ -55,12 +61,11 @@ def _training__(self):
         self.loss_train = ml.history["loss"]
 
 
-
 class Lstm2HL(RootRnn):
     def __init__(self, root_base_paras=None, root_rnn_paras=None):
         RootRnn.__init__(self, root_base_paras, root_rnn_paras)
-        self.filename = "LSTM-2HL-sliding_{}-net_para_{}".format(root_base_paras["sliding"], [self.hidden_sizes, self.epoch,
-                                                                 self.batch_size, self.learning_rate, self.activations, self.optimizer, self.loss])
+        self.filename = "LSTM-2HL-sliding_{}-{}".format(root_base_paras["sliding"], root_rnn_paras["paras_name"])
+
     def _training__(self):
         # The LSTM architecture
         self.model = Sequential()
@@ -72,14 +77,11 @@ def _training__(self):
         self.loss_train = ml.history["loss"]
 
 
-
-
-
 class Gru1HL(RootRnn):
     def __init__(self, root_base_paras=None, root_rnn_paras=None):
         RootRnn.__init__(self, root_base_paras, root_rnn_paras)
-        self.filename = "GRU-1HL-sliding_{}-net_para_{}".format(root_base_paras["sliding"], [self.hidden_sizes, self.epoch,
-                                                                self.batch_size, self.learning_rate, self.activations, self.optimizer, self.loss])
+        self.filename = "GRU-1HL-sliding_{}-{}".format(root_base_paras["sliding"], root_rnn_paras["paras_name"])
+
     def _training__(self):
         # The GRU architecture
         self.model = Sequential()
@@ -91,29 +93,19 @@ def _training__(self):
         self.loss_train = ml.history["loss"]
 
 
-
 class Gru2HL(RootRnn):
     def __init__(self, root_base_paras=None, root_rnn_paras=None):
         RootRnn.__init__(self, root_base_paras, root_rnn_paras)
-        self.filename = "GRU-2HL-sliding_{}-net_para_{}".format(root_base_paras["sliding"], [self.hidden_sizes, self.epoch,
-                                                                self.batch_size, self.learning_rate, self.activations, self.optimizer, self.loss])
+        self.filename = "GRU-2HL-sliding_{}-{}".format(root_base_paras["sliding"], root_rnn_paras["paras_name"])
+
     def _training__(self):
         # The GRU architecture
         self.model = Sequential()
         self.model.add(GRU(units=self.hidden_sizes[0], return_sequences=True, input_shape=(self.X_train.shape[1], 1), activation=self.activations[0]))
         self.model.add(Dropout(self.dropouts[0]))
-        self.model.add(GRU(units=self.hidden_sizes[1], activation=self.activations[1]))
+        self.model.add(GRU(units=self.hidden_sizes[1], activation=self.activations[1]))
         self.model.add(Dropout(self.dropouts[1]))
         self.model.add(Dense(units=1, activation=self.activations[2]))
         self.model.compile(loss=self.loss, optimizer=self.optimizer)
         ml = self.model.fit(self.X_train, self.y_train, epochs=self.epoch, batch_size=self.batch_size, verbose=self.print_train)
         self.loss_train = ml.history["loss"]
-
-
-
-
-
-
-
-
-
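For reference, the Gru2HL layer stack touched in the hunk above can be exercised on its own. A self-contained sketch with made-up hyperparameters and random data (only the layer stack mirrors the diff; every value, the loss, and the optimizer are assumptions):

    import numpy as np
    from keras.models import Sequential
    from keras.layers import Dense, GRU, Dropout

    hidden_sizes, activations, dropouts = [16, 8], ["tanh", "tanh", "sigmoid"], [0.2, 0.2]
    X_train = np.random.rand(32, 5, 1)    # (samples, sliding window length, 1 feature)
    y_train = np.random.rand(32, 1)

    model = Sequential()
    model.add(GRU(units=hidden_sizes[0], return_sequences=True, input_shape=(X_train.shape[1], 1), activation=activations[0]))
    model.add(Dropout(dropouts[0]))
    model.add(GRU(units=hidden_sizes[1], activation=activations[1]))
    model.add(Dropout(dropouts[1]))
    model.add(Dense(units=1, activation=activations[2]))
    model.compile(loss="mse", optimizer="adam")
    ml = model.fit(X_train, y_train, epochs=2, batch_size=8, verbose=0)
    loss_train = ml.history["loss"]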

model/root/hybrid/root_hybrid_mlnn.py

Lines changed: 1 addition & 1 deletion
@@ -14,7 +14,7 @@
 import utils.MathUtil as my_math
 
 
-class RootHybridMlp(RootBase):
+class RootHybridMlnn(RootBase):
     """
     This is root of all hybrid multi-layer neural network (meta-heuristics + MLNN)
     """

0 commit comments
