from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import tensorflow as tf

layers = tf.keras.layers


def _gen_l2_regularizer(use_l2_regularizer=True, l2_weight_decay=1e-4):
  """Returns an L2 kernel regularizer, or None if regularization is disabled."""
  return tf.keras.regularizers.L2(
      l2_weight_decay) if use_l2_regularizer else None


def vgg16(num_classes,
          batch_size=None,
          use_l2_regularizer=True,
          batch_norm_decay=0.9,
          batch_norm_epsilon=1e-5):
  """Builds VGG16 with batch normalization after each convolution.

  Args:
    num_classes: Number of output classes for the final dense layer.
    batch_size: Optional static batch size for the input layer.
    use_l2_regularizer: Whether to apply L2 weight decay to kernels.
    batch_norm_decay: Momentum for the batch norm moving averages.
    batch_norm_epsilon: Epsilon added to the variance for numerical stability.

  Returns:
    A `tf.keras.Model` instance.
  """
  input_shape = (224, 224, 3)
  img_input = layers.Input(shape=input_shape, batch_size=batch_size)

  x = img_input

  # The input is always fed as NHWC; if the backend expects channels_first,
  # transpose to NCHW and point batch norm at the channel axis accordingly.
  if tf.keras.backend.image_data_format() == 'channels_first':
    x = layers.Permute((3, 1, 2))(x)
    bn_axis = 1
  else:  # channels_last
    bn_axis = 3
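
  # The five convolutional blocks below follow the standard VGG16 layout:
  # 2x64, 2x128, 3x256, 3x512 and 3x512 filters, each block ending in 2x2
  # max pooling. Batch normalization after every convolution is an addition
  # relative to the original VGG paper.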

  # Block 1
  x = layers.Conv2D(64, (3, 3),
                    padding='same',
                    kernel_regularizer=_gen_l2_regularizer(use_l2_regularizer),
                    name='block1_conv1')(x)
  x = layers.BatchNormalization(
      axis=bn_axis,
      momentum=batch_norm_decay,
      epsilon=batch_norm_epsilon,
      name='bn_conv1')(x)
  x = layers.Activation('relu')(x)
  x = layers.Conv2D(64, (3, 3),
                    padding='same',
                    kernel_regularizer=_gen_l2_regularizer(use_l2_regularizer),
                    name='block1_conv2')(x)
  x = layers.BatchNormalization(
      axis=bn_axis,
      momentum=batch_norm_decay,
      epsilon=batch_norm_epsilon,
      name='bn_conv2')(x)
  x = layers.Activation('relu')(x)
  x = layers.MaxPooling2D((2, 2), strides=(2, 2), name='block1_pool')(x)

  # Block 2
  x = layers.Conv2D(128, (3, 3),
                    padding='same',
                    kernel_regularizer=_gen_l2_regularizer(use_l2_regularizer),
                    name='block2_conv1')(x)
  x = layers.BatchNormalization(
      axis=bn_axis,
      momentum=batch_norm_decay,
      epsilon=batch_norm_epsilon,
      name='bn_conv3')(x)
  x = layers.Activation('relu')(x)
  x = layers.Conv2D(128, (3, 3),
                    padding='same',
                    kernel_regularizer=_gen_l2_regularizer(use_l2_regularizer),
                    name='block2_conv2')(x)
  x = layers.BatchNormalization(
      axis=bn_axis,
      momentum=batch_norm_decay,
      epsilon=batch_norm_epsilon,
      name='bn_conv4')(x)
  x = layers.Activation('relu')(x)
  x = layers.MaxPooling2D((2, 2), strides=(2, 2), name='block2_pool')(x)

  # Block 3
  x = layers.Conv2D(256, (3, 3),
                    padding='same',
                    kernel_regularizer=_gen_l2_regularizer(use_l2_regularizer),
                    name='block3_conv1')(x)
  x = layers.BatchNormalization(
      axis=bn_axis,
      momentum=batch_norm_decay,
      epsilon=batch_norm_epsilon,
      name='bn_conv5')(x)
  x = layers.Activation('relu')(x)
  x = layers.Conv2D(256, (3, 3),
                    padding='same',
                    kernel_regularizer=_gen_l2_regularizer(use_l2_regularizer),
                    name='block3_conv2')(x)
  x = layers.BatchNormalization(
      axis=bn_axis,
      momentum=batch_norm_decay,
      epsilon=batch_norm_epsilon,
      name='bn_conv6')(x)
  x = layers.Activation('relu')(x)
  x = layers.Conv2D(256, (3, 3),
                    padding='same',
                    kernel_regularizer=_gen_l2_regularizer(use_l2_regularizer),
                    name='block3_conv3')(x)
  x = layers.BatchNormalization(
      axis=bn_axis,
      momentum=batch_norm_decay,
      epsilon=batch_norm_epsilon,
      name='bn_conv7')(x)
  x = layers.Activation('relu')(x)
  x = layers.MaxPooling2D((2, 2), strides=(2, 2), name='block3_pool')(x)

  # Block 4
  x = layers.Conv2D(512, (3, 3),
                    padding='same',
                    kernel_regularizer=_gen_l2_regularizer(use_l2_regularizer),
                    name='block4_conv1')(x)
  x = layers.BatchNormalization(
      axis=bn_axis,
      momentum=batch_norm_decay,
      epsilon=batch_norm_epsilon,
      name='bn_conv8')(x)
  x = layers.Activation('relu')(x)
  x = layers.Conv2D(512, (3, 3),
                    padding='same',
                    kernel_regularizer=_gen_l2_regularizer(use_l2_regularizer),
                    name='block4_conv2')(x)
  x = layers.BatchNormalization(
      axis=bn_axis,
      momentum=batch_norm_decay,
      epsilon=batch_norm_epsilon,
      name='bn_conv9')(x)
  x = layers.Activation('relu')(x)
  x = layers.Conv2D(512, (3, 3),
                    padding='same',
                    kernel_regularizer=_gen_l2_regularizer(use_l2_regularizer),
                    name='block4_conv3')(x)
  x = layers.BatchNormalization(
      axis=bn_axis,
      momentum=batch_norm_decay,
      epsilon=batch_norm_epsilon,
      name='bn_conv10')(x)
  x = layers.Activation('relu')(x)
  x = layers.MaxPooling2D((2, 2), strides=(2, 2), name='block4_pool')(x)

  # Block 5
  x = layers.Conv2D(512, (3, 3),
                    padding='same',
                    kernel_regularizer=_gen_l2_regularizer(use_l2_regularizer),
                    name='block5_conv1')(x)
  x = layers.BatchNormalization(
      axis=bn_axis,
      momentum=batch_norm_decay,
      epsilon=batch_norm_epsilon,
      name='bn_conv11')(x)
  x = layers.Activation('relu')(x)
  x = layers.Conv2D(512, (3, 3),
                    padding='same',
                    kernel_regularizer=_gen_l2_regularizer(use_l2_regularizer),
                    name='block5_conv2')(x)
  x = layers.BatchNormalization(
      axis=bn_axis,
      momentum=batch_norm_decay,
      epsilon=batch_norm_epsilon,
      name='bn_conv12')(x)
  x = layers.Activation('relu')(x)
  x = layers.Conv2D(512, (3, 3),
                    padding='same',
                    kernel_regularizer=_gen_l2_regularizer(use_l2_regularizer),
                    name='block5_conv3')(x)
  x = layers.BatchNormalization(
      axis=bn_axis,
      momentum=batch_norm_decay,
      epsilon=batch_norm_epsilon,
      name='bn_conv13')(x)
  x = layers.Activation('relu')(x)
  x = layers.MaxPooling2D((2, 2), strides=(2, 2), name='block5_pool')(x)

  # Classification head: two fully connected layers with dropout, then logits.
  x = layers.Flatten(name='flatten')(x)
  x = layers.Dense(4096,
                   kernel_regularizer=_gen_l2_regularizer(use_l2_regularizer),
                   name='fc1')(x)
  x = layers.Activation('relu')(x)
  x = layers.Dropout(0.5)(x)
  x = layers.Dense(4096,
                   kernel_regularizer=_gen_l2_regularizer(use_l2_regularizer),
                   name='fc2')(x)
  x = layers.Activation('relu')(x)
  x = layers.Dropout(0.5)(x)
  x = layers.Dense(num_classes,
                   kernel_regularizer=_gen_l2_regularizer(use_l2_regularizer),
                   name='fc1000')(x)

  # A softmax that is followed by the model loss cannot be computed in
  # float16 due to numeric issues, so we force the output dtype to float32.
  x = layers.Activation('softmax', dtype='float32')(x)

  # Create model.
  return tf.keras.Model(img_input, x, name='vgg16')
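

# The sketch below is a hypothetical usage example, not part of the original
# file: it builds the model under a mixed-precision policy to show why the
# final softmax above is pinned to float32 (activations run in float16, but
# the output stays float32 so the loss is numerically stable).
if __name__ == '__main__':
  tf.keras.mixed_precision.set_global_policy('mixed_float16')
  model = vgg16(num_classes=1000, batch_size=32)
  model.compile(
      optimizer=tf.keras.optimizers.SGD(learning_rate=0.01, momentum=0.9),
      loss='sparse_categorical_crossentropy',
      metrics=['accuracy'])
  model.summary()
  print(model.output.dtype)  # float32, despite the mixed_float16 policy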