
Commit c628d24

update yapf

1 parent b4df42c


48 files changed: +675, -680 lines
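Judging by the commit message and by the paired -/+ lines below, which contain identical code, this commit appears to be a formatting-only pass from re-running yapf over the package: the hunks adjust indentation and call spacing rather than behaviour. As a rough sketch of how such a pass can be reproduced with yapf's Python API (the style options below are illustrative assumptions, not the settings TensorLayer actually uses, and the tuple return is the API documented for yapf versions of this era):

    # Hypothetical example: re-format one of the touched signatures with yapf.
    # The style string is an assumption; the project's real style lives in its repo config.
    from yapf.yapflib.yapf_api import FormatCode

    source = (
        "def sequence_loss_by_example(\n"
        "        logits, targets, weights, average_across_timesteps=True,\n"
        "        softmax_loss_function=None, name=None\n"
        "):\n"
        "    pass\n"
    )

    formatted, changed = FormatCode(
        source, style_config='{based_on_style: pep8, column_limit: 120, continuation_indent_width: 4}'
    )
    print(changed)    # True if yapf altered the layout
    print(formatted)  # the re-indented signature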

tensorlayer/cost.py

Lines changed: 2 additions & 2 deletions

@@ -374,7 +374,7 @@ def iou_coe(output, target, threshold=0.5, axis=(1, 2, 3), smooth=1e-5):


 def sequence_loss_by_example(
-    logits, targets, weights, average_across_timesteps=True, softmax_loss_function=None, name=None
+    logits, targets, weights, average_across_timesteps=True, softmax_loss_function=None, name=None
 ):
     """Weighted cross-entropy loss for a sequence of logits (per example). see original tensorflow code :
     <https://github.com/tensorflow/tensorflow/blob/master/tensorflow/contrib/legacy_seq2seq/python/ops/seq2seq.py#L1057>

@@ -782,7 +782,7 @@ def mn_i(weights, name='maxnorm_i_regularizer'):


 def huber_loss(
-    output, target, is_mean=True, delta=1.0, dynamichuber=False, reverse=False, axis=-1, epsilon=0.00001, name=None
+    output, target, is_mean=True, delta=1.0, dynamichuber=False, reverse=False, axis=-1, epsilon=0.00001, name=None
 ):
     """Huber Loss operation, see ``https://en.wikipedia.org/wiki/Huber_loss`` .
     Reverse Huber Loss operation, see ''https://statweb.stanford.edu/~owen/reports/hhu.pdf''.
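For reference on the `huber_loss` signature touched above: the standard Huber loss is quadratic for small residuals and linear for large ones, switching at `delta` (the reverse/BerHu variant from the linked report roughly swaps the two regimes). A small standalone NumPy sketch of the plain Huber form, as an illustration only, independent of TensorLayer's implementation:

    import numpy as np

    def huber(residual, delta=1.0):
        # 0.5 * r**2 where |r| <= delta, otherwise delta * (|r| - 0.5 * delta).
        abs_r = np.abs(residual)
        return np.where(abs_r <= delta, 0.5 * abs_r**2, delta * (abs_r - 0.5 * delta))

    print(huber(np.array([0.3, 2.0])))  # 0.045 (quadratic regime) and 1.5 (linear regime)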

tensorlayer/db.py

Lines changed: 2 additions & 2 deletions

@@ -48,7 +48,7 @@ class TensorHub(object):

     # @deprecated_alias(db_name='dbname', user_name='username', end_support_version=2.1)
     def __init__(
-        self, ip='localhost', port=27017, dbname='dbname', username='None', password='password', project_name=None
+        self, ip='localhost', port=27017, dbname='dbname', username='None', password='password', project_name=None
     ):
         self.ip = ip
         self.port = port

@@ -640,7 +640,7 @@ def run_top_task(self, task_name=None, sort=None, **kwargs):
         logging.info("[Database] Start Task: key: {} sort: {} push time: {}".format(task_name, sort, _datetime))
         _script = _script.decode('utf-8')
         with tf.Graph().as_default():  # # as graph: # clear all TF graphs
-            exec (_script, globals())
+            exec(_script, globals())

         # set status to finished
         _ = self.db.Task.find_one_and_update({'_id': _id}, {'$set': {'status': 'finished'}})
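The visible change in the second hunk drops the space in `exec (_script, globals())`: in Python 3, `exec` is an ordinary built-in function rather than a statement, so yapf formats the call like any other function call. A tiny standalone illustration:

    # Run a code string in an isolated namespace, as a plain function call.
    namespace = {}
    exec("result = 2 + 3", namespace)
    print(namespace["result"])  # 5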

tensorlayer/distributed.py

Lines changed: 6 additions & 6 deletions

@@ -94,9 +94,9 @@ class Trainer(object):
     """

     def __init__(
-        self, training_dataset, build_training_func, optimizer, optimizer_args, batch_size=32, prefetch_size=None,
-        checkpoint_dir=None, scaling_learning_rate=True, log_step_size=1, validation_dataset=None,
-        build_validation_func=None, max_iteration=float('inf')
+        self, training_dataset, build_training_func, optimizer, optimizer_args, batch_size=32, prefetch_size=None,
+        checkpoint_dir=None, scaling_learning_rate=True, log_step_size=1, validation_dataset=None,
+        build_validation_func=None, max_iteration=float('inf')
     ):
         # Initialize Horovod.
         hvd.init()

@@ -395,9 +395,9 @@ def create_task_spec_def():

 @deprecated(date="2018-10-30", instructions="Using the TensorLayer distributed trainer.")
 def create_distributed_session(
-    task_spec=None, checkpoint_dir=None, scaffold=None, hooks=None, chief_only_hooks=None, save_checkpoint_secs=600,
-    save_summaries_steps=object(), save_summaries_secs=object(), config=None, stop_grace_period_secs=120,
-    log_step_count_steps=100
+    task_spec=None, checkpoint_dir=None, scaffold=None, hooks=None, chief_only_hooks=None, save_checkpoint_secs=600,
+    save_summaries_steps=object(), save_summaries_secs=object(), config=None, stop_grace_period_secs=120,
+    log_step_count_steps=100
 ):
     """Creates a distributed session.

tensorlayer/files/dataset_loaders/imdb_dataset.py

Lines changed: 2 additions & 2 deletions

@@ -13,8 +13,8 @@


 def load_imdb_dataset(
-    path='data', nb_words=None, skip_top=0, maxlen=None, test_split=0.2, seed=113, start_char=1, oov_char=2,
-    index_from=3
+    path='data', nb_words=None, skip_top=0, maxlen=None, test_split=0.2, seed=113, start_char=1, oov_char=2,
+    index_from=3
 ):
     """Load IMDB dataset.

tensorlayer/files/utils.py

Lines changed: 2 additions & 2 deletions

@@ -839,8 +839,8 @@ def load_matt_mahoney_text8_dataset(path='data'):


 def load_imdb_dataset(
-    path='data', nb_words=None, skip_top=0, maxlen=None, test_split=0.2, seed=113, start_char=1, oov_char=2,
-    index_from=3
+    path='data', nb_words=None, skip_top=0, maxlen=None, test_split=0.2, seed=113, start_char=1, oov_char=2,
+    index_from=3
 ):
     """Load IMDB dataset.

tensorlayer/layers/activation.py

Lines changed: 15 additions & 15 deletions

@@ -54,11 +54,11 @@ class PRelu(Layer):
     """

     def __init__(
-        self,
-        channel_shared=False,
-        in_channels=None,
-        a_init=truncated_normal(mean=0.0, stddev=0.05),
-        name=None  # "prelu"
+        self,
+        channel_shared=False,
+        in_channels=None,
+        a_init=truncated_normal(mean=0.0, stddev=0.05),
+        name=None  # "prelu"
     ):

         super(PRelu, self).__init__(name)

@@ -141,11 +141,11 @@ class PRelu6(Layer):
     """

     def __init__(
-        self,
-        channel_shared=False,
-        in_channels=None,
-        a_init=truncated_normal(mean=0.0, stddev=0.05),
-        name=None  # "prelu6"
+        self,
+        channel_shared=False,
+        in_channels=None,
+        a_init=truncated_normal(mean=0.0, stddev=0.05),
+        name=None  # "prelu6"
     ):

         super(PRelu6, self).__init__(name)

@@ -229,11 +229,11 @@ class PTRelu6(Layer):
     """

     def __init__(
-        self,
-        channel_shared=False,
-        in_channels=None,
-        a_init=truncated_normal(mean=0.0, stddev=0.05),
-        name=None  # "ptrelu6"
+        self,
+        channel_shared=False,
+        in_channels=None,
+        a_init=truncated_normal(mean=0.0, stddev=0.05),
+        name=None  # "ptrelu6"
     ):

         super(PTRelu6, self).__init__(name)
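As context for the PRelu/PRelu6/PTRelu6 classes touched above (not part of this diff): a PRelu-type activation passes positive inputs through and multiplies negative inputs by a learned slope, which is what the `a_init` initializer above seeds. A tiny NumPy sketch of that rule, as an illustration only rather than TensorLayer's implementation:

    import numpy as np

    def prelu(x, a=0.05):
        # x where x > 0, a * x otherwise; `a` stands in for the layer's learnable slope.
        return np.where(x > 0, x, a * x)

    print(prelu(np.array([-2.0, 3.0])))  # scales the negative value to -0.1, keeps 3.0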

tensorlayer/layers/convolution/binary_conv.py

Lines changed: 13 additions & 13 deletions

@@ -61,19 +61,19 @@ class BinaryConv2d(Layer):
     """

     def __init__(
-        self,
-        n_filter=32,
-        filter_size=(3, 3),
-        strides=(1, 1),
-        act=None,
-        padding='SAME',
-        use_gemm=False,
-        data_format="channels_last",
-        dilation_rate=(1, 1),
-        W_init=tl.initializers.truncated_normal(stddev=0.02),
-        b_init=tl.initializers.constant(value=0.0),
-        in_channels=None,
-        name=None  # 'binary_cnn2d',
+        self,
+        n_filter=32,
+        filter_size=(3, 3),
+        strides=(1, 1),
+        act=None,
+        padding='SAME',
+        use_gemm=False,
+        data_format="channels_last",
+        dilation_rate=(1, 1),
+        W_init=tl.initializers.truncated_normal(stddev=0.02),
+        b_init=tl.initializers.constant(value=0.0),
+        in_channels=None,
+        name=None  # 'binary_cnn2d',
     ):
         super().__init__(name, act=act)
         self.n_filter = n_filter

tensorlayer/layers/convolution/deformable_conv.py

Lines changed: 11 additions & 11 deletions

@@ -71,17 +71,17 @@ class DeformableConv2d(Layer):

     # @deprecated_alias(layer='prev_layer', end_support_version=1.9) # TODO remove this line for the 1.9 release
     def __init__(
-        self,
-        offset_layer=None,
-        # shape=(3, 3, 1, 100),
-        n_filter=32,
-        filter_size=(3, 3),
-        act=None,
-        padding='SAME',
-        W_init=tl.initializers.truncated_normal(stddev=0.02),
-        b_init=tl.initializers.constant(value=0.0),
-        in_channels=None,
-        name=None  # 'deformable_conv_2d',
+        self,
+        offset_layer=None,
+        # shape=(3, 3, 1, 100),
+        n_filter=32,
+        filter_size=(3, 3),
+        act=None,
+        padding='SAME',
+        W_init=tl.initializers.truncated_normal(stddev=0.02),
+        b_init=tl.initializers.constant(value=0.0),
+        in_channels=None,
+        name=None  # 'deformable_conv_2d',
     ):
         super().__init__(name, act=act)

tensorlayer/layers/convolution/depthwise_conv.py

Lines changed: 12 additions & 12 deletions

@@ -69,18 +69,18 @@ class DepthwiseConv2d(Layer):

     # https://zhuanlan.zhihu.com/p/31551004 https://github.com/xiaohu2015/DeepLearning_tutorials/blob/master/CNNs/MobileNet.py
     def __init__(
-        self,
-        filter_size=(3, 3),
-        strides=(1, 1),
-        act=None,
-        padding='SAME',
-        data_format='channels_last',
-        dilation_rate=(1, 1),
-        depth_multiplier=1,
-        W_init=tl.initializers.truncated_normal(stddev=0.02),
-        b_init=tl.initializers.constant(value=0.0),
-        in_channels=None,
-        name=None  # 'depthwise_conv2d'
+        self,
+        filter_size=(3, 3),
+        strides=(1, 1),
+        act=None,
+        padding='SAME',
+        data_format='channels_last',
+        dilation_rate=(1, 1),
+        depth_multiplier=1,
+        W_init=tl.initializers.truncated_normal(stddev=0.02),
+        b_init=tl.initializers.constant(value=0.0),
+        in_channels=None,
+        name=None  # 'depthwise_conv2d'
     ):
         super().__init__(name, act=act)
         self.filter_size = filter_size

tensorlayer/layers/convolution/dorefa_conv.py

Lines changed: 15 additions & 15 deletions

@@ -65,21 +65,21 @@ class DorefaConv2d(Layer):
     """

     def __init__(
-        self,
-        bitW=1,
-        bitA=3,
-        n_filter=32,
-        filter_size=(3, 3),
-        strides=(1, 1),
-        act=None,
-        padding='SAME',
-        use_gemm=False,
-        data_format="channels_last",
-        dilation_rate=(1, 1),
-        W_init=tl.initializers.truncated_normal(stddev=0.02),
-        b_init=tl.initializers.constant(value=0.0),
-        in_channels=None,
-        name=None  # 'dorefa_cnn2d',
+        self,
+        bitW=1,
+        bitA=3,
+        n_filter=32,
+        filter_size=(3, 3),
+        strides=(1, 1),
+        act=None,
+        padding='SAME',
+        use_gemm=False,
+        data_format="channels_last",
+        dilation_rate=(1, 1),
+        W_init=tl.initializers.truncated_normal(stddev=0.02),
+        b_init=tl.initializers.constant(value=0.0),
+        in_channels=None,
+        name=None  # 'dorefa_cnn2d',
     ):
         super().__init__(name, act=act)
         self.bitW = bitW
