model.py (forked from maples1993/Cats_vs_Dogs)
# TensorFlow 1.x code: tf.contrib was removed in TensorFlow 2.x, so this file
# requires a 1.x release (layers.flatten below comes from tf.contrib.layers).
import tensorflow as tf
import tensorflow.contrib.layers as layers

def inference(images, n_classes):
    """Forward pass: two conv/pool/LRN blocks, two fully connected layers,
    and a final linear layer that returns raw (unscaled) logits.

    Args:
        images: 4-D float tensor, [batch_size, height, width, 3].
        n_classes: number of output classes.

    Returns:
        2-D float tensor of logits, [batch_size, n_classes].
    """
    # conv1, shape = [kernel_size, kernel_size, channels, number of kernels]
    with tf.variable_scope("conv1") as scope:
        weights = tf.get_variable("weights",
                                  shape=[3, 3, 3, 16],
                                  dtype=tf.float32,
                                  initializer=tf.truncated_normal_initializer(stddev=0.1, dtype=tf.float32))
        biases = tf.get_variable("biases",
                                 shape=[16],
                                 dtype=tf.float32,
                                 initializer=tf.constant_initializer(0.1))
        conv = tf.nn.conv2d(images, weights, strides=[1, 1, 1, 1], padding="SAME")
        pre_activation = tf.nn.bias_add(conv, biases)
        conv1 = tf.nn.relu(pre_activation, name="conv1")

    # pool1 and norm1 (local response normalization)
    with tf.variable_scope("pooling1_lrn") as scope:
        pool1 = tf.nn.max_pool(conv1, ksize=[1, 3, 3, 1], strides=[1, 2, 2, 1],
                               padding="SAME", name="pooling1")
        norm1 = tf.nn.lrn(pool1, depth_radius=4, bias=1.0, alpha=0.001 / 9.0,
                          beta=0.75, name="norm1")

    # conv2
    with tf.variable_scope("conv2") as scope:
        weights = tf.get_variable("weights",
                                  shape=[3, 3, 16, 16],
                                  dtype=tf.float32,
                                  initializer=tf.truncated_normal_initializer(stddev=0.1, dtype=tf.float32))
        biases = tf.get_variable("biases",
                                 shape=[16],
                                 dtype=tf.float32,
                                 initializer=tf.constant_initializer(0.1))
        conv = tf.nn.conv2d(norm1, weights, strides=[1, 1, 1, 1], padding="SAME")
        pre_activation = tf.nn.bias_add(conv, biases)
        conv2 = tf.nn.relu(pre_activation, name="conv2")

    # pool2 and norm2
    with tf.variable_scope("pooling2_lrn") as scope:
        pool2 = tf.nn.max_pool(conv2, ksize=[1, 3, 3, 1], strides=[1, 2, 2, 1],
                               padding="SAME", name="pooling2")
        norm2 = tf.nn.lrn(pool2, depth_radius=4, bias=1.0, alpha=0.001 / 9.0,
                          beta=0.75, name="norm2")

    # fully connected layer 1
    with tf.variable_scope("fc1") as scope:
        reshape = layers.flatten(norm2)
        dim = reshape.get_shape()[1].value
        weights = tf.get_variable("weights",
                                  shape=[dim, 128],
                                  dtype=tf.float32,
                                  initializer=tf.truncated_normal_initializer(stddev=0.005, dtype=tf.float32))
        biases = tf.get_variable("biases",
                                 shape=[128],
                                 dtype=tf.float32,
                                 initializer=tf.constant_initializer(0.1))
        fc1 = tf.nn.relu(tf.matmul(reshape, weights) + biases, name="fc1")

    # fully connected layer 2
    with tf.variable_scope("fc2") as scope:
        weights = tf.get_variable("weights",
                                  shape=[128, 128],
                                  dtype=tf.float32,
                                  initializer=tf.truncated_normal_initializer(stddev=0.005, dtype=tf.float32))
        biases = tf.get_variable("biases",
                                 shape=[128],
                                 dtype=tf.float32,
                                 initializer=tf.constant_initializer(0.1))
        fc2 = tf.nn.relu(tf.matmul(fc1, weights) + biases, name="fc2")

    # linear output layer (logits)
    with tf.variable_scope("softmax_linear") as scope:
        weights = tf.get_variable("weights",
                                  shape=[128, n_classes],
                                  dtype=tf.float32,
                                  initializer=tf.truncated_normal_initializer(stddev=0.005, dtype=tf.float32))
        biases = tf.get_variable("biases",
                                 shape=[n_classes],
                                 dtype=tf.float32,
                                 initializer=tf.constant_initializer(0.1))
        softmax_linear = tf.add(tf.matmul(fc2, weights), biases, name="softmax_linear")
        # softmax_linear = tf.nn.softmax(softmax_linear)
    return softmax_linear
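
# Note (added): inference deliberately returns raw logits. The softmax above is
# left commented out because tf.nn.sparse_softmax_cross_entropy_with_logits
# (used in losses below) applies softmax internally, which is numerically more
# stable than computing softmax and cross-entropy as two separate ops.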

def losses(logits, labels):
    """Mean sparse softmax cross-entropy between logits and integer labels.

    `labels` are class indices in [0, n_classes), not one-hot vectors.
    """
    with tf.variable_scope("loss"):
        cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits,
                                                                       labels=labels)
        loss = tf.reduce_mean(cross_entropy)
    return loss

def evaluation(logits, labels):
    """Fraction of examples whose top-1 prediction matches the label."""
    with tf.variable_scope("accuracy"):
        correct = tf.nn.in_top_k(logits, labels, 1)
        correct = tf.cast(correct, tf.float16)  # cast bools to floats before averaging
        accuracy = tf.reduce_mean(correct)
    return accuracy
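
# --- Usage sketch (added; not part of the original file) ---
# A minimal illustration of how inference, losses, and evaluation wire together
# in a TF 1.x training step. The 208x208 input size, batch size, class count,
# optimizer, and learning rate are assumptions for demonstration only, not
# values taken from this repository.
if __name__ == "__main__":
    import numpy as np

    x = tf.placeholder(tf.float32, shape=[None, 208, 208, 3])  # image batch (assumed size)
    y = tf.placeholder(tf.int64, shape=[None])                 # integer class labels

    logits = inference(x, n_classes=2)
    loss = losses(logits, y)
    acc = evaluation(logits, y)
    train_op = tf.train.AdamOptimizer(learning_rate=1e-4).minimize(loss)

    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        # One step on random data, just to show the feed/run pattern.
        batch_x = np.random.rand(4, 208, 208, 3).astype(np.float32)
        batch_y = np.random.randint(0, 2, size=4).astype(np.int64)
        _, l, a = sess.run([train_op, loss, acc],
                           feed_dict={x: batch_x, y: batch_y})
        print("loss %.4f, accuracy %.4f" % (l, a))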