Skip to content

Commit 006d6a7

Browse files
committed
Update format
1 parent f302ad1 commit 006d6a7

File tree

2 files changed

+7
-20
lines changed

2 files changed

+7
-20
lines changed

tensorlayer/layers/convolution/quan_conv_bn.py

Lines changed: 6 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -1,4 +1,4 @@
1-
#!/usr/bin/env python3
1+
#! /usr/bin/python
22
# -*- coding: utf-8 -*-
33

44
import numpy as np
@@ -50,10 +50,6 @@ class QuanConv2dWithBN(Layer):
5050
The bits of this layer's parameter
5151
bitA : int
5252
The bits of the output of previous layer
53-
epsilon : float
54-
Epsilon.
55-
is_train : boolean
56-
Is being used for training or inference.
5753
use_gemm : boolean
5854
If True, use gemm instead of ``tf.matmul`` for inferencing. (TODO).
5955
W_init : initializer
@@ -62,6 +58,10 @@ class QuanConv2dWithBN(Layer):
6258
The arguments for the weight matrix initializer.
6359
data_format : str
6460
"NHWC" or "NCHW", default is "NHWC".
61+
dilation_rate : tuple of int
62+
Specifying the dilation rate to use for dilated convolution.
63+
in_channels : int
64+
The number of in channels.
6565
name : str
6666
A unique layer name.
6767
@@ -98,7 +98,6 @@ def __init__(
9898
name='quan_cnn2d_bn',
9999
):
100100
super(QuanConv2dWithBN, self).__init__(act=act, name=name)
101-
# self.prev_layer = prev_layer
102101
self.n_filter = n_filter
103102
self.filter_size = filter_size
104103
self.strides = strides
@@ -133,7 +132,7 @@ def __repr__(self):
133132
actstr = self.act.__name__ if self.act is not None else 'No Activation'
134133
s = (
135134
'{classname}(in_channels={in_channels}, out_channels={n_filter}, kernel_size={filter_size}'
136-
', strides={strides}, padding={padding}'+ actstr
135+
', strides={strides}, padding={padding}' + actstr
137136
)
138137
if self.dilation_rate != (1, ) * len(self.dilation_rate):
139138
s += ', dilation={dilation_rate}'

tensorlayer/layers/dense/quan_dense_bn.py

Lines changed: 1 addition & 13 deletions
Original file line numberDiff line numberDiff line change
@@ -42,18 +42,6 @@ class QuanDenseLayerWithBN(Layer):
4242
The bits of this layer's parameter
4343
bitA : int
4444
The bits of the output of previous layer
45-
decay : float
46-
A decay factor for `ExponentialMovingAverage`.
47-
Suggest to use a large value for large dataset.
48-
epsilon : float
49-
Epsilon.
50-
is_train : boolean
51-
Is being used for training or inference.
52-
beta_init : initializer or None
53-
The initializer for initializing beta, if None, skip beta.
54-
Usually you should not skip beta unless you know what happened.
55-
gamma_init : initializer or None
56-
The initializer for initializing gamma, if None, skip gamma.
5745
use_gemm : boolean
5846
If True, use gemm instead of ``tf.matmul`` for inferencing. (TODO).
5947
W_init : initializer
@@ -146,7 +134,7 @@ def build(self, inputs_shape):
146134
self.moving_variance = self._get_weights(
147135
"moving_variacne",
148136
shape=para_bn_shape,
149-
init=tf.initializers.constant(1.0),
137+
init=tl.initializers.constant(1.0),
150138
trainable=False)
151139

152140
def forward(self, inputs):

0 commit comments

Comments
 (0)