Commit 423feb3: use yapf
1 parent: e5b99be
8 files changed, +52 -36 lines changed
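
Every hunk in this commit is a mechanical formatting change: yapf rewraps the long `from tensorlayer.layers import (...)` blocks, normalizes spacing inside call arguments, and inserts the blank lines PEP 8 expects around top-level definitions. A minimal sketch of reproducing one such rewrite through yapf's Python API follows; the inline style options (pep8 base, 120-character column limit, dedented closing brackets) are an assumption inferred from the output in the hunks below, not the repository's actual yapf configuration.

    from yapf.yapflib.yapf_api import FormatCode  # yapf's public formatting API

    # One of the pre-commit import blocks from the first file below.
    src = (
        "from tensorlayer.layers import (BinaryConv2d, BinaryDense, Conv2d, Dense,\n"
        "                                Flatten, Input, LocalResponseNorm, MaxPool2d,\n"
        "                                Sign)\n"
    )

    # Assumed style options; the repo's real config may differ.
    formatted, changed = FormatCode(
        src,
        style_config='{based_on_style: pep8, column_limit: 120, dedent_closing_brackets: true}',
    )
    print(changed)    # True when yapf rewrote the input
    print(formatted)  # the rewrapped import; with these options it should match the '+' lines below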

examples/quantized_net/tutorial_binarynet_cifar10.py renamed to examples/quantized_net/tutorial_binarynet_cifar10_tfrecord.py

Lines changed: 7 additions & 4 deletions
@@ -45,9 +45,9 @@
 import numpy as np
 import tensorflow as tf
 import tensorlayer as tl
-from tensorlayer.layers import (BinaryConv2d, BinaryDense, Conv2d, Dense,
-                                Flatten, Input, LocalResponseNorm, MaxPool2d,
-                                Sign)
+from tensorlayer.layers import (
+    BinaryConv2d, BinaryDense, Conv2d, Dense, Flatten, Input, LocalResponseNorm, MaxPool2d, Sign
+)
 from tensorlayer.models import Model
 
 tl.logging.set_verbosity(tl.logging.DEBUG)
@@ -78,6 +78,7 @@ def binary_model(input_shape, n_classes):
     net = Model(inputs=in_net, outputs=net, name='binarynet')
     return net
 
+
 # training settings
 net = binary_model([None, 24, 24, 3], n_classes=10)
 batch_size = 128
@@ -92,6 +93,7 @@ def binary_model(input_shape, n_classes):
 optimizer = tf.optimizers.Adam(learning_rate)
 cost = tl.cost.cross_entropy
 
+
 def generator_train():
     inputs = X_train
     targets = y_train
@@ -137,7 +139,6 @@ def _map_fn_test(img, target):
     return img, target
 
 
-
 def _train_step(network, X_batch, y_batch, cost, train_op=tf.optimizers.Adam(learning_rate=0.0001), acc=None):
     with tf.GradientTape() as tape:
         y_pred = network(X_batch)
@@ -150,9 +151,11 @@ def _train_step(network, X_batch, y_batch, cost, train_op=tf.optimizers.Adam(learning_rate=0.0001), acc=None):
     else:
         return _loss, None
 
+
 def accuracy(_logits, y_batch):
     return np.mean(np.equal(np.argmax(_logits, 1), y_batch))
 
+
 # dataset API and augmentation
 train_ds = tf.data.Dataset.from_generator(
     generator_train, output_types=(tf.float32, tf.int32)
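
The hunk above stops at the head of the tf.data input pipeline. For orientation, here is a minimal sketch of how a from_generator pipeline of this shape is typically completed; the loader call, augmentation steps, and buffer sizes are illustrative assumptions, not necessarily the tutorial's exact values.

    import tensorflow as tf
    import tensorlayer as tl

    # CIFAR-10 arrays that generator_train iterates over.
    X_train, y_train, X_test, y_test = tl.files.load_cifar10_dataset(shape=(-1, 32, 32, 3))


    def generator_train():
        for img, target in zip(X_train, y_train):
            yield img, target


    def _map_fn_train(img, target):
        # Placeholder augmentation: random 24x24 crop, horizontal flip, standardize.
        img = tf.image.random_crop(img, [24, 24, 3])
        img = tf.image.random_flip_left_right(img)
        img = tf.image.per_image_standardization(img)
        return img, target


    train_ds = tf.data.Dataset.from_generator(
        generator_train, output_types=(tf.float32, tf.int32)
    )
    train_ds = train_ds.map(_map_fn_train, num_parallel_calls=4)
    train_ds = train_ds.shuffle(buffer_size=4096).batch(128)
    train_ds = train_ds.prefetch(buffer_size=tf.data.experimental.AUTOTUNE)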

examples/quantized_net/tutorial_binarynet_mnist_cnn.py

Lines changed: 5 additions & 5 deletions
@@ -6,8 +6,7 @@
 import numpy as np
 import tensorflow as tf
 import tensorlayer as tl
-from tensorlayer.layers import (BatchNorm, BinaryConv2d, BinaryDense, Flatten,
-                                Input, MaxPool2d, Sign)
+from tensorlayer.layers import (BatchNorm, BinaryConv2d, BinaryDense, Flatten, Input, MaxPool2d, Sign)
 from tensorlayer.models import Model
 
 tl.logging.set_verbosity(tl.logging.DEBUG)
@@ -16,10 +15,11 @@
 
 batch_size = 128
 
+
 def model(inputs_shape, n_class=10):
     # In BNN, all the layers inputs are binary, with the exception of the first layer.
     # ref: https://github.com/itayhubara/BinaryNet.tf/blob/master/models/BNN_cifar10.py
-    net_in = Input(inputs_shape,name='input')
+    net_in = Input(inputs_shape, name='input')
     net = BinaryConv2d(32, (5, 5), (1, 1), padding='SAME', b_init=None, name='bcnn1')(net_in)
     net = MaxPool2d((2, 2), (2, 2), padding='SAME', name='pool1')(net)
     net = BatchNorm(act=tl.act.htanh, name='bn1')(net)
@@ -53,17 +53,18 @@ def _train_step(network, X_batch, y_batch, cost, train_op=tf.optimizers.Adam(learning_rate=0.0001), acc=None):
     else:
         return _loss, None
 
+
 def accuracy(_logits, y_batch):
     return np.mean(np.equal(np.argmax(_logits, 1), y_batch))
 
+
 n_epoch = 200
 print_freq = 5
 
 net = model([None, 28, 28, 1])
 train_op = tf.optimizers.Adam(learning_rate=0.0001)
 cost = tl.cost.cross_entropy
 
-
 for epoch in range(n_epoch):
     start_time = time.time()
     train_loss, train_acc, n_batch = 0, 0, 0
@@ -93,7 +94,6 @@ def accuracy(_logits, y_batch):
         print(" val loss: {}".format(val_loss / val_batch))
         print(" val acc: {}".format(val_acc / val_batch))
 
-
 net.test()
 test_loss, test_acc, n_test_batch = 0, 0, 0
 for X_test_a, y_test_a in tl.iterate.minibatches(X_test, y_test, batch_size, shuffle=True):
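
The `_train_step`/`accuracy` pair touched here recurs in most files of this commit, but the hunks only expose its first and last lines. Below is a plausible reconstruction of the whole helper; the two gradient lines in the middle are inferred from standard TF2/TensorLayer practice and are not visible in this diff.

    import numpy as np
    import tensorflow as tf


    def _train_step(network, X_batch, y_batch, cost, train_op=tf.optimizers.Adam(learning_rate=0.0001), acc=None):
        with tf.GradientTape() as tape:
            y_pred = network(X_batch)
            _loss = cost(y_pred, y_batch)
        # Inferred middle section: backprop through the network's weights.
        grad = tape.gradient(_loss, network.trainable_weights)
        train_op.apply_gradients(zip(grad, network.trainable_weights))
        if acc is not None:
            return _loss, acc(y_pred, y_batch)
        else:
            return _loss, None


    def accuracy(_logits, y_batch):
        # Fraction of predictions matching the integer labels.
        return np.mean(np.equal(np.argmax(_logits, 1), y_batch))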

examples/quantized_net/tutorial_dorefanet_cifar10.py renamed to examples/quantized_net/tutorial_dorefanet_cifar10_tfrecord.py

Lines changed: 5 additions & 3 deletions
@@ -45,8 +45,7 @@
 import numpy as np
 import tensorflow as tf
 import tensorlayer as tl
-from tensorlayer.layers import (Conv2d, Dense, DorefaConv2d, DorefaDense,
-                                Flatten, Input, LocalResponseNorm, MaxPool2d)
+from tensorlayer.layers import (Conv2d, Dense, DorefaConv2d, DorefaDense, Flatten, Input, LocalResponseNorm, MaxPool2d)
 from tensorlayer.models import Model
 
 tl.logging.set_verbosity(tl.logging.DEBUG)
@@ -72,6 +71,7 @@ def dorefanet_model(input_shape, n_classes):
     net = Model(inputs=in_net, outputs=net, name='dorefanet')
     return net
 
+
 # training settings
 net = dorefanet_model([None, 24, 24, 3], n_classes=10)
 batch_size = 128
@@ -86,6 +86,7 @@ def dorefanet_model(input_shape, n_classes):
 # optimizer = tf.optimizers.SGD(learning_rate)
 cost = tl.cost.cross_entropy
 
+
 def generator_train():
     inputs = X_train
     targets = y_train
@@ -131,7 +132,6 @@ def _map_fn_test(img, target):
     return img, target
 
 
-
 def _train_step(network, X_batch, y_batch, cost, train_op=tf.optimizers.Adam(learning_rate=0.0001), acc=None):
     with tf.GradientTape() as tape:
         y_pred = network(X_batch)
@@ -144,9 +144,11 @@ def _train_step(network, X_batch, y_batch, cost, train_op=tf.optimizers.Adam(learning_rate=0.0001), acc=None):
     else:
         return _loss, None
 
+
 def accuracy(_logits, y_batch):
     return np.mean(np.equal(np.argmax(_logits, 1), y_batch))
 
+
 # dataset API and augmentation
 train_ds = tf.data.Dataset.from_generator(
     generator_train, output_types=(tf.float32, tf.int32)

examples/quantized_net/tutorial_dorefanet_mnist_cnn.py

Lines changed: 4 additions & 4 deletions
@@ -6,8 +6,7 @@
 import numpy as np
 import tensorflow as tf
 import tensorlayer as tl
-from tensorlayer.layers import (BatchNorm, Dense, DorefaConv2d, DorefaDense,
-                                Flatten, Input, MaxPool2d)
+from tensorlayer.layers import (BatchNorm, Dense, DorefaConv2d, DorefaDense, Flatten, Input, MaxPool2d)
 from tensorlayer.models import Model
 
 tl.logging.set_verbosity(tl.logging.DEBUG)
@@ -16,6 +15,7 @@
 
 batch_size = 128
 
+
 def model(inputs_shape, n_class=10):
     in_net = Input(inputs_shape, name='input')
     net = DorefaConv2d(1, 3, 32, (5, 5), (1, 1), padding='SAME', b_init=None, name='bcnn1')(in_net)
@@ -48,17 +48,18 @@ def _train_step(network, X_batch, y_batch, cost, train_op=tf.optimizers.Adam(learning_rate=0.0001), acc=None):
     else:
         return _loss, None
 
+
 def accuracy(_logits, y_batch):
     return np.mean(np.equal(np.argmax(_logits, 1), y_batch))
 
+
 n_epoch = 200
 print_freq = 5
 
 net = model([None, 28, 28, 1])
 train_op = tf.optimizers.Adam(learning_rate=0.0001)
 cost = tl.cost.cross_entropy
 
-
 for epoch in range(n_epoch):
     start_time = time.time()
     train_loss, train_acc, n_batch = 0, 0, 0
@@ -88,7 +89,6 @@ def accuracy(_logits, y_batch):
         print(" val loss: {}".format(val_loss / val_batch))
         print(" val acc: {}".format(val_acc / val_batch))
 
-
 net.test()
 test_loss, test_acc, n_test_batch = 0, 0, 0
 for X_test_a, y_test_a in tl.iterate.minibatches(X_test, y_test, batch_size, shuffle=True):

examples/quantized_net/tutorial_quanconv_cifar10.py

Lines changed: 5 additions & 3 deletions
@@ -44,8 +44,7 @@
 import numpy as np
 import tensorflow as tf
 import tensorlayer as tl
-from tensorlayer.layers import (Dense, Flatten, Input, MaxPool2d,
-                                QuanConv2dWithBN, QuanDense)
+from tensorlayer.layers import (Dense, Flatten, Input, MaxPool2d, QuanConv2dWithBN, QuanDense)
 from tensorlayer.models import Model
 
 tl.logging.set_verbosity(tl.logging.DEBUG)
@@ -68,6 +67,7 @@ def model(input_shape, n_classes, bitW, bitA):
     net = Model(inputs=in_net, outputs=net, name='dorefanet')
     return net
 
+
 # training settings
 bitW = 8
 bitA = 8
@@ -83,6 +83,7 @@ def model(input_shape, n_classes, bitW, bitA):
 optimizer = tf.optimizers.Adam(learning_rate)
 cost = tl.cost.cross_entropy
 
+
 def generator_train():
     inputs = X_train
     targets = y_train
@@ -128,7 +129,6 @@ def _map_fn_test(img, target):
     return img, target
 
 
-
 def _train_step(network, X_batch, y_batch, cost, train_op=tf.optimizers.Adam(learning_rate=0.0001), acc=None):
     with tf.GradientTape() as tape:
         y_pred = network(X_batch)
@@ -141,9 +141,11 @@ def _train_step(network, X_batch, y_batch, cost, train_op=tf.optimizers.Adam(learning_rate=0.0001), acc=None):
     else:
         return _loss, None
 
+
 def accuracy(_logits, y_batch):
     return np.mean(np.equal(np.argmax(_logits, 1), y_batch))
 
+
 # dataset API and augmentation
 train_ds = tf.data.Dataset.from_generator(
     generator_train, output_types=(tf.float32, tf.int32)

examples/quantized_net/tutorial_quanconv_mnist.py

Lines changed: 15 additions & 9 deletions
@@ -6,27 +6,31 @@
 import numpy as np
 import tensorflow as tf
 import tensorlayer as tl
-from tensorlayer.layers import (Dense, Dropout, Flatten, Input, MaxPool2d,
-                                QuanConv2d, QuanConv2dWithBN, QuanDense,
-                                QuanDenseLayerWithBN)
+from tensorlayer.layers import (
+    Dense, Dropout, Flatten, Input, MaxPool2d, QuanConv2d, QuanConv2dWithBN, QuanDense, QuanDenseLayerWithBN
+)
 from tensorlayer.models import Model
 
 tl.logging.set_verbosity(tl.logging.DEBUG)
 
-
 X_train, y_train, X_val, y_val, X_test, y_test = tl.files.load_mnist_dataset(shape=(-1, 28, 28, 1))
 # X_train, y_train, X_test, y_test = tl.files.load_cropped_svhn(include_extra=False)
 
 batch_size = 128
 
+
 def model(inputs_shape, n_class=10):
     net_in = Input(inputs_shape, name="input")
 
-    net = QuanConv2dWithBN(n_filter=32, filter_size=(5,5),strides=(1,1),padding='SAME',act=tl.nn.relu, name='qconvbn1')(net_in)
-    net = MaxPool2d(filter_size=(2,2),strides=(2,2),padding='SAME',name='pool1')(net)
+    net = QuanConv2dWithBN(
+        n_filter=32, filter_size=(5, 5), strides=(1, 1), padding='SAME', act=tl.nn.relu, name='qconvbn1'
+    )(net_in)
+    net = MaxPool2d(filter_size=(2, 2), strides=(2, 2), padding='SAME', name='pool1')(net)
 
-    net = QuanConv2dWithBN(n_filter=64, filter_size=(5,5),strides=(1,1),padding='SAME',act=tl.nn.relu, name='qconvbn2')(net)
-    net = MaxPool2d(filter_size=(2,2),strides=(2,2),padding='SAME',name='pool2')(net)
+    net = QuanConv2dWithBN(
+        n_filter=64, filter_size=(5, 5), strides=(1, 1), padding='SAME', act=tl.nn.relu, name='qconvbn2'
+    )(net)
+    net = MaxPool2d(filter_size=(2, 2), strides=(2, 2), padding='SAME', name='pool2')(net)
 
     net = Flatten(name='ft')(net)
 
@@ -42,6 +46,7 @@ def model(inputs_shape, n_class=10):
     net = Model(inputs=net_in, outputs=net, name='quan')
     return net
 
+
 def _train_step(network, X_batch, y_batch, cost, train_op=tf.optimizers.Adam(learning_rate=0.0001), acc=None):
     with tf.GradientTape() as tape:
         y_pred = network(X_batch)
@@ -54,9 +59,11 @@ def _train_step(network, X_batch, y_batch, cost, train_op=tf.optimizers.Adam(learning_rate=0.0001), acc=None):
     else:
         return _loss, None
 
+
 def accuracy(_logits, y_batch):
     return np.mean(np.equal(np.argmax(_logits, 1), y_batch))
 
+
 n_epoch = 200
 print_freq = 1
 
@@ -97,7 +104,6 @@ def accuracy(_logits, y_batch):
         print(" val loss: {}".format(val_loss / n_eval))
         print(" val acc: {}".format(val_acc / n_eval))
 
-
 # net.eval()
 test_loss, test_acc, n_test_batch = 0, 0, 0
 for X_test_a, y_test_a in tl.iterate.minibatches(X_test, y_test, batch_size, shuffle=True):
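
The tail of this file runs a held-out evaluation pass over tl.iterate.minibatches, as the last hunk shows. A sketch of how that loop plausibly continues, reusing the net, batch_size, X_test/y_test, and accuracy names from the fragments above; the loss bookkeeping and print formatting are illustrative, not the file's exact code.

    test_loss, test_acc, n_test_batch = 0, 0, 0
    # net.eval()  # kept commented here, mirroring the hunk above
    for X_test_a, y_test_a in tl.iterate.minibatches(X_test, y_test, batch_size, shuffle=True):
        _logits = net(X_test_a)  # forward pass on one test minibatch
        test_loss += tl.cost.cross_entropy(_logits, y_test_a, name='test_loss').numpy()
        test_acc += accuracy(_logits, y_test_a)
        n_test_batch += 1
    print(" test loss: {}".format(test_loss / n_test_batch))
    print(" test acc: {}".format(test_acc / n_test_batch))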

examples/quantized_net/tutorial_ternaryweight_cifar10.py renamed to examples/quantized_net/tutorial_ternaryweight_cifar10_tfrecord.py

Lines changed: 7 additions & 4 deletions
@@ -44,9 +44,9 @@
 import numpy as np
 import tensorflow as tf
 import tensorlayer as tl
-from tensorlayer.layers import (Conv2d, Dense, Flatten, Input,
-                                LocalResponseNorm, MaxPool2d, TernaryConv2d,
-                                TernaryDense)
+from tensorlayer.layers import (
+    Conv2d, Dense, Flatten, Input, LocalResponseNorm, MaxPool2d, TernaryConv2d, TernaryDense
+)
 from tensorlayer.models import Model
 
 tl.logging.set_verbosity(tl.logging.DEBUG)
@@ -76,6 +76,7 @@ def model(input_shape, n_classes):
     net = Model(inputs=in_net, outputs=net, name='dorefanet')
     return net
 
+
 # training settings
 bitW = 8
 bitA = 8
@@ -91,6 +92,7 @@ def model(input_shape, n_classes):
 optimizer = tf.optimizers.Adam(learning_rate)
 cost = tl.cost.cross_entropy
 
+
 def generator_train():
     inputs = X_train
     targets = y_train
@@ -136,7 +138,6 @@ def _map_fn_test(img, target):
     return img, target
 
 
-
 def _train_step(network, X_batch, y_batch, cost, train_op=tf.optimizers.Adam(learning_rate=0.0001), acc=None):
     with tf.GradientTape() as tape:
         y_pred = network(X_batch)
@@ -149,9 +150,11 @@ def _train_step(network, X_batch, y_batch, cost, train_op=tf.optimizers.Adam(learning_rate=0.0001), acc=None):
     else:
         return _loss, None
 
+
 def accuracy(_logits, y_batch):
     return np.mean(np.equal(np.argmax(_logits, 1), y_batch))
 
+
 # dataset API and augmentation
 train_ds = tf.data.Dataset.from_generator(
     generator_train, output_types=(tf.float32, tf.int32)
