66from config import *
77
88
9- def fine_tune_model ():
9+ def Xception ():
1010 from keras .applications .xception import Xception
1111 from keras .models import Model
1212 from keras .layers import Dense , GlobalAveragePooling2D
@@ -47,7 +47,7 @@ def fine_tune_model():
4747 # layer.trainable = True
4848
4949
50- def fine_tune_inceptionresnet_v2 ():
50+ def IceptionResnet_V2 ():
5151 from keras .applications .inception_resnet_v2 import InceptionResNetV2
5252
5353 from keras .models import Model
@@ -69,7 +69,73 @@ def fine_tune_inceptionresnet_v2():
6969 # first: train only the top layers (which were randomly initialized)
7070 # i.e. freeze all convolutional Xception layers
7171 for layer in base_model .layers :
72- layer .trainable = False
72+ layer .trainable = True
73+ RMS = optimizers .RMSprop (lr = 0.001 , decay = 1e-7 )
74+ model .compile (optimizer = RMS , loss = 'categorical_crossentropy' , metrics = ['accuracy' ])
75+ # compile the model (should be done *after* setting layers to non-trainable)
76+ # model.compile(optimizer='rmsprop', loss='categorical_crossentropy', metrics=['accuracy'])
77+ # model.compile(optimizer=SGD(lr=0.0001, momentum=0.9), loss='categorical_crossentropy', metrics=['accuracy'])
78+ model .summary ()
79+
80+ return model
81+
82+
def InceptionV3():
    """Build an InceptionV3-based classifier fine-tuned end to end.

    Loads the ImageNet-pretrained InceptionV3 backbone without its top,
    attaches a global-average-pooling layer, a 2048-unit ReLU layer and a
    softmax head with `num_classes` outputs (``num_classes`` and
    ``input_image_shape`` come from the star-imported config module),
    marks every backbone layer trainable, compiles with RMSprop and
    returns the compiled model.
    """
    from keras.applications.inception_v3 import InceptionV3
    from keras.layers import Dense, GlobalAveragePooling2D
    from keras.models import Model

    # Pretrained convolutional base; classification top is rebuilt below.
    backbone = InceptionV3(weights='imagenet',
                           include_top=False,
                           input_shape=input_image_shape)

    # Randomly initialized classification head on the pooled features.
    pooled = GlobalAveragePooling2D()(backbone.output)
    hidden = Dense(2048, activation='relu')(pooled)
    scores = Dense(num_classes, activation='softmax')(hidden)

    model = Model(inputs=backbone.input, outputs=scores)

    # Fine-tune the whole network, not just the new head.
    for base_layer in backbone.layers:
        base_layer.trainable = True

    rms = optimizers.RMSprop(lr=0.001, decay=1e-7)
    model.compile(optimizer=rms,
                  loss='categorical_crossentropy',
                  metrics=['accuracy'])
    model.summary()

    return model
114+
115+
116+ def ResNet50 ():
117+ from keras .applications .resnet50 import ResNet50
118+
119+ from keras .models import Model
120+ from keras .layers import Dense , GlobalAveragePooling2D
121+ # create the base pre-trained model
122+ # base_model = InceptionV3(weights='imagenet', include_top=False)
123+ base_model = ResNet50 (weights = 'imagenet' , include_top = False , input_shape = input_image_shape )
124+
125+ # add a global spatial average pooling layer
126+ x = base_model .output
127+ x = GlobalAveragePooling2D ()(x )
128+ # let's add a fully-connected layer
129+ x = Dense (2048 , activation = 'relu' )(x )
130+ # and a logistic layer -- let's say we have num_classes classes
131+ predictions = Dense (num_classes , activation = 'softmax' )(x )
132+ #
133+ # # this is the model we will train
134+ model = Model (inputs = base_model .input , outputs = predictions )
135+ # first: train only the top layers (which were randomly initialized)
136+ # i.e. freeze all convolutional Xception layers
137+ for layer in base_model .layers :
138+ layer .trainable = True
73139 RMS = optimizers .RMSprop (lr = 0.001 , decay = 1e-7 )
74140 model .compile (optimizer = RMS , loss = 'categorical_crossentropy' , metrics = ['accuracy' ])
75141 # compile the model (should be done *after* setting layers to non-trainable)
0 commit comments