# Copyright 2021 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""VGG16 model for Keras.

Adapted from tf.keras.applications.vgg16.VGG16().

Related papers/blogs:
- https://arxiv.org/abs/1409.1556
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import tensorflow as tf

layers = tf.keras.layers

def _gen_l2_regularizer(use_l2_regularizer=True, l2_weight_decay=1e-4):
  """Builds the kernel regularizer shared by the Conv/Dense layers.

  Args:
    use_l2_regularizer: whether any regularizer should be created at all.
    l2_weight_decay: L2 penalty coefficient.

  Returns:
    A `tf.keras.regularizers.L2` instance, or None when disabled.
  """
  if not use_l2_regularizer:
    return None
  return tf.keras.regularizers.L2(l2_weight_decay)
def vgg16(num_classes,
          batch_size=None,
          use_l2_regularizer=True,
          batch_norm_decay=0.9,
          batch_norm_epsilon=1e-5):
  """Instantiates the VGG16 architecture with batch normalization.

  Args:
    num_classes: `int` number of classes for image classification.
    batch_size: Size of the batches for each step.
    use_l2_regularizer: whether to use L2 regularizer on Conv/Dense layers.
    batch_norm_decay: Momentum of batch norm layers.
    batch_norm_epsilon: Epsilon of batch norm layers.

  Returns:
    A Keras model instance.
  """
  input_shape = (224, 224, 3)
  img_input = layers.Input(shape=input_shape, batch_size=batch_size)
  x = img_input

  if tf.keras.backend.image_data_format() == 'channels_first':
    # Inputs arrive channels_last; move channels to the front so the conv
    # stack can run in channels_first layout.
    # NOTE(review): this segment was elided in the source diff — confirm the
    # original transpose against the upstream reference model.
    x = layers.Permute((3, 1, 2))(x)
    bn_axis = 1
  else:  # channels_last
    bn_axis = 3

  def conv_bn_relu(inputs, filters, conv_name, bn_name):
    """One Conv2D(3x3, same) -> BatchNorm -> ReLU unit."""
    out = layers.Conv2D(
        filters, (3, 3),
        padding='same',
        kernel_regularizer=_gen_l2_regularizer(use_l2_regularizer),
        name=conv_name)(inputs)
    out = layers.BatchNormalization(
        axis=bn_axis,
        momentum=batch_norm_decay,
        epsilon=batch_norm_epsilon,
        name=bn_name)(out)
    return layers.Activation('relu')(out)

  # VGG16 conv trunk: (filters, number of conv layers) per block.  The BN
  # layer index runs 1..13 across all conv layers (bn_conv1 ... bn_conv13),
  # matching the names visible in the original straight-line code.
  vgg_blocks = [(64, 2), (128, 2), (256, 3), (512, 3), (512, 3)]
  bn_index = 1
  for block_index, (filters, num_convs) in enumerate(vgg_blocks, start=1):
    for conv_index in range(1, num_convs + 1):
      x = conv_bn_relu(
          x, filters,
          conv_name='block%d_conv%d' % (block_index, conv_index),
          bn_name='bn_conv%d' % bn_index)
      bn_index += 1
    x = layers.MaxPooling2D(
        (2, 2), strides=(2, 2), name='block%d_pool' % block_index)(x)

  # Classifier head: two 4096-wide FC layers with dropout, then the logits.
  x = layers.Flatten(name='flatten')(x)
  x = layers.Dense(
      4096,
      kernel_regularizer=_gen_l2_regularizer(use_l2_regularizer),
      name='fc1')(x)
  x = layers.Activation('relu')(x)
  x = layers.Dropout(0.5)(x)
  x = layers.Dense(
      4096,
      kernel_regularizer=_gen_l2_regularizer(use_l2_regularizer),
      name='fc2')(x)
  x = layers.Activation('relu')(x)
  x = layers.Dropout(0.5)(x)
  x = layers.Dense(
      num_classes,
      kernel_regularizer=_gen_l2_regularizer(use_l2_regularizer),
      name='fc1000')(x)

  # A softmax that is followed by the model loss cannot be done in float16
  # due to numeric issues, so force float32 for the final activation.
  # NOTE(review): this line was elided in the source diff — reconstructed
  # from the upstream TF reference model; confirm.
  x = layers.Activation('softmax', dtype='float32')(x)

  # Create model.
  return tf.keras.Model(img_input, x, name='vgg16')