Skip to content

Commit e50c763

Browse files
committed
update parameter name
1 parent 0dba212 commit e50c763

Some content is hidden

Large commits have some content hidden by default. Use the search box below to find content that may be hidden.

61 files changed: +710 additions, −717 deletions

docs/modules/nn.rst

Lines changed: 18 additions & 18 deletions
Original file line numberDiff line numberDiff line change
@@ -23,10 +23,10 @@ Layer list
2323
Embedding
2424
AverageEmbedding
2525

26-
Dense
26+
Linear
2727
Dropout
2828
GaussianNoise
29-
DropconnectDense
29+
DropconnectLinear
3030

3131
UpSampling2d
3232
DownSampling2d
@@ -105,11 +105,11 @@ Layer list
105105
UnStack
106106

107107
Scale
108-
BinaryDense
108+
BinaryLinear
109109
BinaryConv2d
110-
TernaryDense
110+
TernaryLinear
111111
TernaryConv2d
112-
DorefaDense
112+
DorefaLinear
113113
DorefaConv2d
114114

115115
MaskedConv3d
@@ -249,19 +249,19 @@ MaskedConv3d
249249

250250

251251
.. -----------------------------------------------------------
252-
.. Dense Layers
252+
.. Linear Layers
253253
.. -----------------------------------------------------------
254254
255-
Dense Layers
255+
Linear Layers
256256
-------------
257257

258-
Dense Layer
258+
Linear Layer
259259
^^^^^^^^^^^^^^^^^^^^^^^^^^
260-
.. autoclass:: Dense
260+
.. autoclass:: Linear
261261

262-
Drop Connect Dense Layer
262+
Drop Connect Linear Layer
263263
^^^^^^^^^^^^^^^^^^^^^^^^^^
264-
.. autoclass:: DropconnectDense
264+
.. autoclass:: DropconnectLinear
265265

266266

267267
.. -----------------------------------------------------------
@@ -479,9 +479,9 @@ Scale
479479
^^^^^^^^^^^^^^
480480
.. autoclass:: Scale
481481

482-
Binary Dense Layer
482+
Binary Linear Layer
483483
^^^^^^^^^^^^^^^^^^^^^^^^^^
484-
.. autoclass:: BinaryDense
484+
.. autoclass:: BinaryLinear
485485

486486
Binary (De)Convolutions
487487
^^^^^^^^^^^^^^^^^^^^^^^^^^
@@ -490,12 +490,12 @@ BinaryConv2d
490490
"""""""""""""""""""""
491491
.. autoclass:: BinaryConv2d
492492

493-
Ternary Dense Layer
493+
Ternary Linear Layer
494494
^^^^^^^^^^^^^^^^^^^^^^^^^^
495495

496-
TernaryDense
496+
TernaryLinear
497497
"""""""""""""""""""""
498-
.. autoclass:: TernaryDense
498+
.. autoclass:: TernaryLinear
499499

500500
Ternary Convolutions
501501
^^^^^^^^^^^^^^^^^^^^^^^^^^
@@ -504,9 +504,9 @@ TernaryConv2d
504504
"""""""""""""""""""""
505505
.. autoclass:: TernaryConv2d
506506

507-
DorefaDense
507+
DorefaLinear
508508
"""""""""""""""""""""
509-
.. autoclass:: DorefaDense
509+
.. autoclass:: DorefaLinear
510510

511511
DoReFa Convolutions
512512
^^^^^^^^^^^^^^^^^^^^^^^^^^

examples/basic_tutorials/automatic_inference_input_shape.py

Lines changed: 6 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -19,21 +19,21 @@ class CustomModel(Module):
1919
def __init__(self):
2020
super(CustomModel, self).__init__()
2121
self.dropout1 = Dropout(p=0.2)
22-
self.dense1 = Linear(out_features=800)
22+
self.linear1 = Linear(out_features=800)
2323
self.batchnorm = BatchNorm1d(act=tlx.ReLU)
2424
self.dropout2 = Dropout(p=0.2)
25-
self.dense2 = Linear(out_features=800, act=tlx.ReLU)
25+
self.linear2 = Linear(out_features=800, act=tlx.ReLU)
2626
self.dropout3 = Dropout(p=0.2)
27-
self.dense3 = Linear(out_features=10, act=tlx.ReLU)
27+
self.linear3 = Linear(out_features=10, act=tlx.ReLU)
2828

2929
def forward(self, x, foo=None):
3030
z = self.dropout1(x)
31-
z = self.dense1(z)
31+
z = self.linear1(z)
3232
z = self.batchnorm(z)
3333
z = self.dropout2(z)
34-
z = self.dense2(z)
34+
z = self.linear2(z)
3535
z = self.dropout3(z)
36-
out = self.dense3(z)
36+
out = self.linear3(z)
3737
if foo is not None:
3838
out = tlx.relu(out)
3939
return out

examples/basic_tutorials/cifar10_cnn.py

Lines changed: 24 additions & 24 deletions
Original file line numberDiff line numberDiff line change
@@ -42,9 +42,9 @@ def __init__(self):
4242
self.maxpool2 = MaxPool2d((3, 3), (2, 2), padding='SAME', name='pool2')
4343

4444
self.flatten = Flatten(name='flatten')
45-
self.dense1 = Linear(384, act=tlx.ReLU, W_init=W_init2, b_init=b_init2, name='dense1relu', in_features=2304)
46-
self.dense2 = Linear(192, act=tlx.ReLU, W_init=W_init2, b_init=b_init2, name='dense2relu', in_features=384)
47-
self.dense3 = Linear(10, act=None, W_init=W_init2, name='output', in_features=192)
45+
self.linear1 = Linear(384, act=tlx.ReLU, W_init=W_init2, b_init=b_init2, name='linear1relu', in_features=2304)
46+
self.linear2 = Linear(192, act=tlx.ReLU, W_init=W_init2, b_init=b_init2, name='linear2relu', in_features=384)
47+
self.linear3 = Linear(10, act=None, W_init=W_init2, name='output', in_features=192)
4848

4949
def forward(self, x):
5050
z = self.conv1(x)
@@ -53,9 +53,9 @@ def forward(self, x):
5353
z = self.conv2(z)
5454
z = self.maxpool2(z)
5555
z = self.flatten(z)
56-
z = self.dense1(z)
57-
z = self.dense2(z)
58-
z = self.dense3(z)
56+
z = self.linear1(z)
57+
z = self.linear2(z)
58+
z = self.linear3(z)
5959
return z
6060

6161

@@ -188,9 +188,9 @@ def forward(self, data, label):
188188
# self.maxpool2 = MaxPool2d((3, 3), (2, 2), padding='SAME', name='pool2')
189189
#
190190
# self.flatten = Flatten(name='flatten')
191-
# self.dense1 = Linear(384, act=tlx.ReLU, W_init=W_init2, b_init=b_init2, name='dense1relu', in_channels=2304)
192-
# self.dense2 = Linear(192, act=tlx.ReLU, W_init=W_init2, b_init=b_init2, name='dense2relu', in_channels=384)
193-
# self.dense3 = Linear(10, act=None, W_init=W_init2, name='output', in_channels=192)
191+
# self.linear1 = Linear(384, act=tlx.ReLU, W_init=W_init2, b_init=b_init2, name='linear1relu', in_channels=2304)
192+
# self.linear2 = Linear(192, act=tlx.ReLU, W_init=W_init2, b_init=b_init2, name='linear2relu', in_channels=384)
193+
# self.linear3 = Linear(10, act=None, W_init=W_init2, name='output', in_channels=192)
194194
#
195195
# def forward(self, x):
196196
# z = self.conv1(x)
@@ -199,9 +199,9 @@ def forward(self, data, label):
199199
# z = self.conv2(z)
200200
# z = self.maxpool2(z)
201201
# z = self.flatten(z)
202-
# z = self.dense1(z)
203-
# z = self.dense2(z)
204-
# z = self.dense3(z)
202+
# z = self.linear1(z)
203+
# z = self.linear2(z)
204+
# z = self.linear3(z)
205205
# return z
206206
#
207207
#
@@ -374,9 +374,9 @@ def forward(self, data, label):
374374
# self.maxpool2 = MaxPool2d((3, 3), (2, 2), name='pool2', data_format='channels_first')
375375
#
376376
# self.flatten = Flatten(name='flatten')
377-
# self.dense1 = Linear(120, act=tlx.ReLU, name='dense1relu', in_channels=512)
378-
# self.dense2 = Linear(84, act=tlx.ReLU, name='dense2relu', in_channels=120)
379-
# self.dense3 = Linear(10, act=None, name='output', in_channels=84)
377+
# self.linear1 = Linear(120, act=tlx.ReLU, name='linear1relu', in_channels=512)
378+
# self.linear2 = Linear(84, act=tlx.ReLU, name='linear2relu', in_channels=120)
379+
# self.linear3 = Linear(10, act=None, name='output', in_channels=84)
380380
#
381381
# def forward(self, x):
382382
# z = self.conv1(x)
@@ -385,9 +385,9 @@ def forward(self, data, label):
385385
# z = self.conv2(z)
386386
# z = self.maxpool2(z)
387387
# z = self.flatten(z)
388-
# z = self.dense1(z)
389-
# z = self.dense2(z)
390-
# z = self.dense3(z)
388+
# z = self.linear1(z)
389+
# z = self.linear2(z)
390+
# z = self.linear3(z)
391391
# return z
392392
#
393393
#
@@ -519,9 +519,9 @@ def forward(self, data, label):
519519
# self.maxpool2 = MaxPool2d((3, 3), (2, 2), padding='SAME', name='pool2')
520520
#
521521
# self.flatten = Flatten(name='flatten')
522-
# self.dense1 = Linear(384, act=tlx.ReLU, W_init=W_init2, b_init=b_init2, name='dense1relu', in_channels=2304)
523-
# self.dense2 = Linear(192, act=tlx.ReLU, W_init=W_init2, b_init=b_init2, name='dense2relu', in_channels=384)
524-
# self.dense3 = Linear(10, act=None, W_init=W_init2, name='output', in_channels=192)
522+
# self.linear1 = Linear(384, act=tlx.ReLU, W_init=W_init2, b_init=b_init2, name='linear1relu', in_channels=2304)
523+
# self.linear2 = Linear(192, act=tlx.ReLU, W_init=W_init2, b_init=b_init2, name='linear2relu', in_channels=384)
524+
# self.linear3 = Linear(10, act=None, W_init=W_init2, name='output', in_channels=192)
525525
#
526526
# def forward(self, x):
527527
# z = self.conv1(x)
@@ -531,9 +531,9 @@ def forward(self, data, label):
531531
# z = self.bn2(z)
532532
# z = self.maxpool2(z)
533533
# z = self.flatten(z)
534-
# z = self.dense1(z)
535-
# z = self.dense2(z)
536-
# z = self.dense3(z)
534+
# z = self.linear1(z)
535+
# z = self.linear2(z)
536+
# z = self.linear3(z)
537537
# return z
538538
#
539539
#

examples/basic_tutorials/gradient_clip_mixed_tensorflow.py

Lines changed: 6 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -21,14 +21,14 @@ class CustomModel(Module):
2121

2222
def __init__(self):
2323
super(CustomModel, self).__init__()
24-
self.dense1 = Linear(out_features=800, in_features=784)
25-
self.dense2 = Linear(out_features=800, act=tlx.ReLU, in_features=800)
26-
self.dense3 = Linear(out_features=10, act=tlx.ReLU, in_features=800)
24+
self.linear1 = Linear(out_features=800, in_features=784)
25+
self.linear2 = Linear(out_features=800, act=tlx.ReLU, in_features=800)
26+
self.linear3 = Linear(out_features=10, act=tlx.ReLU, in_features=800)
2727

2828
def forward(self, x, foo=None):
29-
z = self.dense1(x)
30-
z = self.dense2(z)
31-
out = self.dense3(z)
29+
z = self.linear1(x)
30+
z = self.linear2(z)
31+
out = self.linear3(z)
3232
if foo is not None:
3333
out = tlx.relu(out)
3434
return out

examples/basic_tutorials/imdb_LSTM_simple.py

Lines changed: 4 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -43,15 +43,15 @@ def __init__(self):
4343
super(ImdbNet, self).__init__()
4444
self.embedding = Embedding(vocabulary_size=vocab_size, embedding_size=64)
4545
self.lstm = LSTM(input_size=64, hidden_size=64)
46-
self.dense1 = Linear(in_features=64, out_features=64, act=tlx.ReLU)
47-
self.dense2 = Linear(in_features=64, out_features=2)
46+
self.linear1 = Linear(in_features=64, out_features=64, act=tlx.ReLU)
47+
self.linear2 = Linear(in_features=64, out_features=2)
4848

4949
def forward(self, x):
5050
x = self.embedding(x)
5151
x, _ = self.lstm(x)
5252
x = tlx.reduce_mean(x, axis=1)
53-
x = self.dense1(x)
54-
x = self.dense2(x)
53+
x = self.linear1(x)
54+
x = self.linear2(x)
5555
return x
5656

5757

examples/basic_tutorials/mnist_SequentialLayer.py

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -11,9 +11,9 @@
1111
from tensorlayerx.dataflow import Dataset
1212

1313
layer_list = []
14-
layer_list.append(Linear(out_features=800, act=tlx.ReLU, in_features=784, name='Dense1'))
15-
layer_list.append(Linear(out_features=800, act=tlx.ReLU, in_features=800, name='Dense2'))
16-
layer_list.append(Linear(out_features=10, act=tlx.ReLU, in_features=800, name='Dense3'))
14+
layer_list.append(Linear(out_features=800, act=tlx.ReLU, in_features=784, name='linear1'))
15+
layer_list.append(Linear(out_features=800, act=tlx.ReLU, in_features=800, name='linear2'))
16+
layer_list.append(Linear(out_features=10, act=tlx.ReLU, in_features=800, name='linear3'))
1717
MLP = SequentialLayer(layer_list)
1818

1919
X_train, y_train, X_val, y_val, X_test, y_test = tlx.files.load_mnist_dataset(shape=(-1, 784))

examples/basic_tutorials/mnist_mlp.py

Lines changed: 27 additions & 27 deletions
Original file line numberDiff line numberDiff line change
@@ -35,20 +35,20 @@ class CustomModel(Module):
3535

3636
def __init__(self):
3737
super(CustomModel, self).__init__()
38-
self.dropout1 = Dropout(keep=0.8)
39-
self.dense1 = Linear(out_features=800, act=tlx.ReLU, in_features=784)
40-
self.dropout2 = Dropout(keep=0.8)
41-
self.dense2 = Linear(out_features=800, act=tlx.ReLU, in_features=800)
42-
self.dropout3 = Dropout(keep=0.8)
43-
self.dense3 = Linear(out_features=10, act=tlx.ReLU, in_features=800)
38+
self.dropout1 = Dropout(p=0.8)
39+
self.linear1 = Linear(out_features=800, act=tlx.ReLU, in_features=784)
40+
self.dropout2 = Dropout(p=0.8)
41+
self.linear2 = Linear(out_features=800, act=tlx.ReLU, in_features=800)
42+
self.dropout3 = Dropout(p=0.8)
43+
self.linear3 = Linear(out_features=10, act=tlx.ReLU, in_features=800)
4444

4545
def forward(self, x, foo=None):
4646
z = self.dropout1(x)
47-
z = self.dense1(z)
47+
z = self.linear1(z)
4848
z = self.dropout2(z)
49-
z = self.dense2(z)
49+
z = self.linear2(z)
5050
z = self.dropout3(z)
51-
out = self.dense3(z)
51+
out = self.linear3(z)
5252
if foo is not None:
5353
out = tlx.relu(out)
5454
return out
@@ -91,21 +91,21 @@ def forward(self, x, foo=None):
9191
# def __init__(self):
9292
# super(CustomModel, self).__init__()
9393
# self.dropout1 = Dropout(keep=0.8)
94-
# self.dense1 = Linear(out_features=800, in_features=784)
94+
# self.linear1 = Linear(out_features=800, in_features=784)
9595
# self.batchnorm = BatchNorm1d(act=tlx.ReLU, num_features=800)
9696
# self.dropout2 = Dropout(keep=0.8)
97-
# self.dense2 = Linear(out_features=800, act=tlx.ReLU, in_features=800)
97+
# self.linear2 = Linear(out_features=800, act=tlx.ReLU, in_features=800)
9898
# self.dropout3 = Dropout(keep=0.8)
99-
# self.dense3 = Linear(out_features=10, act=tlx.ReLU, in_features=800)
99+
# self.linear3 = Linear(out_features=10, act=tlx.ReLU, in_features=800)
100100
#
101101
# def forward(self, x, foo=None):
102102
# z = self.dropout1(x)
103-
# z = self.dense1(z)
103+
# z = self.linear1(z)
104104
# z = self.batchnorm(z)
105105
# z = self.dropout2(z)
106-
# z = self.dense2(z)
106+
# z = self.linear2(z)
107107
# z = self.dropout3(z)
108-
# out = self.dense3(z)
108+
# out = self.linear3(z)
109109
# if foo is not None:
110110
# out = tlx.relu(out)
111111
# return out
@@ -187,14 +187,14 @@ def forward(self, x, foo=None):
187187
#
188188
# def __init__(self):
189189
# super(MLP, self).__init__()
190-
# self.dense1 = Linear(out_features=800, act=tlx.ReLU, in_features=784)
191-
# self.dense2 = Linear(out_features=800, act=tlx.ReLU, in_features=800)
192-
# self.dense3 = Linear(out_features=10, act=tlx.ReLU, in_features=800)
190+
# self.linear1 = Linear(out_features=800, act=tlx.ReLU, in_features=784)
191+
# self.linear2 = Linear(out_features=800, act=tlx.ReLU, in_features=800)
192+
# self.linear3 = Linear(out_features=10, act=tlx.ReLU, in_features=800)
193193
#
194194
# def forward(self, x):
195-
# z = self.dense1(x)
196-
# z = self.dense2(z)
197-
# out = self.dense3(z)
195+
# z = self.linear1(x)
196+
# z = self.linear2(z)
197+
# out = self.linear3(z)
198198
# return out
199199
#
200200
#
@@ -375,15 +375,15 @@ def forward(self, x, foo=None):
375375
# def __init__(self):
376376
# super(NeuralNetwork, self).__init__()
377377
# self.flatten = nn.Flatten()
378-
# self.dense1 = Linear(in_features=28 * 28, out_features=512)
379-
# self.dense2 = Linear(in_features=512, out_features=512)
380-
# self.dense3 = Linear(in_features=512, out_features=10)
378+
# self.linear1 = Linear(in_features=28 * 28, out_features=512)
379+
# self.linear2 = Linear(in_features=512, out_features=512)
380+
# self.linear3 = Linear(in_features=512, out_features=10)
381381
#
382382
# def forward(self, x):
383383
# x = self.flatten(x)
384-
# x = self.dense1(x)
385-
# x = self.dense2(x)
386-
# x = self.dense3(x)
384+
# x = self.linear1(x)
385+
# x = self.linear2(x)
386+
# x = self.linear3(x)
387387
# return x
388388
#
389389
#

0 commit comments

Comments (0)