
Commit ed56fb9

Merge branch 'main' of github.com:tensorlayer/TensorLayerX into main

2 parents 25e65ab + 09b2a23

File tree: 5 files changed, +119 −105 lines changed

README.md

Lines changed: 19 additions & 14 deletions
@@ -38,12 +38,12 @@ Compare to TensorLayer version:
 
 # Resources
 
-- [Examples](https://github.com/tensorlayer/TensorLayerX/tree/main/examples) for tutorials
-- OpenIVA an easy-to-use product-level deployment framework
-- [TLXZoo](https://github.com/tensorlayer/TLXZoo) pretrained models/backbones
-- TLXCV a bunch of Computer Vision applications
-- TLXNLP a bunch of Natural Language Processing applications
-- TLXRL a bunch of Reinforcement Learning applications, check [RLZoo](https://github.com/tensorlayer/RLzoo) for the old version
+- [Examples](https://github.com/tensorlayer/TensorLayerX/tree/main/examples) for tutorials
+- OpenIVA an easy-to-use product-level deployment framework
+- [TLXZoo](https://github.com/tensorlayer/TLXZoo) pretrained models/backbones🚧
+- TLXCV a bunch of Computer Vision applications🚧
+- TLXNLP a bunch of Natural Language Processing applications🚧
+- TLXRL a bunch of Reinforcement Learning applications, check [RLZoo](https://github.com/tensorlayer/RLzoo) for the old version
 
 More resources can be found [here](https://github.com/tensorlayer)
 

@@ -70,8 +70,10 @@ For more installation instructions, please refer to [Installtion](https://tensor
 You can immediately use tensorlayerx to define a model, using your favourite framework in the background, like so:
 ```python
 import os
-os.environ['TL_BACKEND'] = 'tensorflow' # change to any framework!
-
+os.environ['TL_BACKEND'] = 'tensorflow' # Just modify this line, easily change to any framework!
+#os.environ['TL_BACKEND'] = 'mindspore'
+#os.environ['TL_BACKEND'] = 'paddle'
+#os.environ['TL_BACKEND'] = 'torch'
 import tensorlayerx as tlx
 from tensorlayerx.nn import Module
 from tensorlayerx.nn import Linear
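As the new comment lines emphasise, the backend is chosen once through the `TL_BACKEND` environment variable, which is read when `tensorlayerx` is first imported. A minimal sketch of the switch in action (the tensor values are illustrative):

```python
# Minimal sketch: TL_BACKEND must be set before tensorlayerx is imported,
# since the backend is resolved at import time.
import os
os.environ['TL_BACKEND'] = 'torch'  # or 'tensorflow', 'mindspore', 'paddle'

import tensorlayerx as tlx

x = tlx.ops.convert_to_tensor([[1.0, -2.0, 3.0]])
print(tlx.relu(x))  # the op now runs on the selected backend
```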
@@ -80,14 +82,14 @@ class CustomModel(Module):
     def __init__(self):
         super(CustomModel, self).__init__()
 
-        self.dense1 = Linear(out_features=800, act=tlx.ReLU, in_features=784)
-        self.dense2 = Linear(out_features=800, act=tlx.ReLU, in_features=800)
-        self.dense3 = Linear(out_features=10, act=None, in_features=800)
+        self.linear1 = Linear(out_features=800, act=tlx.ReLU, in_features=784)
+        self.linear2 = Linear(out_features=800, act=tlx.ReLU, in_features=800)
+        self.linear3 = Linear(out_features=10, act=None, in_features=800)
 
     def forward(self, x, foo=False):
-        z = self.dense1(x)
-        z = self.dense2(z)
-        out = self.dense3(z)
+        z = self.linear1(x)
+        z = self.linear2(z)
+        out = self.linear3(z)
         if foo:
             out = tlx.softmax(out)
         return out
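The rename from `dense*` to `linear*` attributes is purely cosmetic; the model is used as before. A short usage sketch (the batch size, `set_eval` call, and dummy input are illustrative, assuming the usual TensorLayerX `Module` API):

```python
# Illustrative only: instantiate the model above and run one forward pass.
model = CustomModel()
model.set_eval()              # switch to inference mode (assumed Module API)
x = tlx.nn.Input([8, 784])    # dummy batch of 8 flattened 28x28 images
out = model(x, foo=True)      # foo=True applies tlx.softmax to the logits
print(tlx.get_tensor_shape(out))  # [8, 10]
```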
@@ -102,6 +104,9 @@ TensorLayerX has extensive documentation for both beginners and professionals.
 [![English Documentation](https://img.shields.io/badge/documentation-english-blue.svg)](https://tensorlayerx.readthedocs.io/en/latest/)
 
 
+# Contributing
+Join our community as a code contributor, find out more in our [Contributing](https://tensorlayerx.readthedocs.io/en/latest/user/contributing.html) guide!
+
 # Contact
 
 

docs/user/get_start_advance.rst

Lines changed: 73 additions & 65 deletions
@@ -18,36 +18,47 @@ The fully-connected layer is `a = f(x*W+b)`, the most simple implementation is a
     import tensorlayerx as tlx
     from tensorlayerx.nn import Module
 
-    class Dense(Module):
-        """The :class:`Dense` class is a fully connected layer.
+    class Linear(Module):
+        """The :class:`Linear` class is a fully connected layer.
 
        Parameters
        ----------
-       n_units : int
+       out_features : int
            The number of units of this layer.
        act : activation function
            The activation function of this layer.
+       W_init : initializer or str
+           The initializer for the weight matrix.
+       b_init : initializer or None or str
+           The initializer for the bias vector. If None, skip biases.
+       in_features: int
+           The number of channels of the previous layer.
+           If None, it will be automatically detected when the layer is forwarded for the first time.
        name : None or str
            A unique layer name. If None, a unique name will be automatically generated.
        """
 
        def __init__(
-           self,
-           n_units,       # the number of units/channels of this layer
-           act=None,      # None: no activation, tlx.relu or 'relu': ReLU ...
-           name=None,     # the name of this layer (optional)
-           in_channels = None
+           self,
+           out_features,
+           act=None,
+           W_init='truncated_normal',
+           b_init='constant',
+           in_features=None,
+           name=None,  # 'linear',
        ):
-           super(Dense, self).__init__(name, act=act)  # auto naming, dense_1, dense_2 ...
-           self.n_units = n_units
-           self.in_channels = in_channels
+           super(Linear, self).__init__(name, act=act)  # auto naming, linear_1, linear_2 ...
+           self.out_features = out_features
+           self.in_features = in_features
+           self.W_init = self.str_to_init(W_init)
+           self.b_init = self.str_to_init(b_init)
            self.build()
            self._built = True
 
        def build(self):  # initialize the model weights here
-           shape = [self.in_channels, self.n_units]
+           shape = [self.in_features, self.out_features]
            self.W = self._get_weights("weights", shape=tuple(shape), init=self.W_init)
-           self.b = self._get_weights("biases", shape=(self.n_units, ), init=self.b_init)
+           self.b = self._get_weights("biases", shape=(self.out_features, ), init=self.b_init)
 
        def forward(self, inputs):  # call function
            z = tlx.matmul(inputs, self.W) + self.b
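A quick sketch of how this simplified layer could be exercised (illustrative; since this version builds its weights eagerly in `__init__`, `in_features` must be supplied up front):

```python
# Illustrative check of the simplified Linear above.
layer = Linear(out_features=8, act=tlx.ReLU, in_features=4)
x = tlx.nn.Input([2, 4])        # dummy batch: 2 samples, 4 features
y = layer(x)                    # computes act(x @ W + b)
print(tlx.get_tensor_shape(y))  # [2, 8]
```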
@@ -60,36 +71,32 @@ The full implementation is as follow, which supports both automatic inference in
 .. code-block:: python
 
 
-    class Dense(Module):
-        """The :class:`Dense` class is a fully connected layer.
+    class Linear(Module):
+        """The :class:`Linear` class is a fully connected layer.
 
        Parameters
        ----------
-       n_units : int
+       out_features : int
            The number of units of this layer.
        act : activation function
            The activation function of this layer.
-       W_init : initializer
+       W_init : initializer or str
            The initializer for the weight matrix.
-       b_init : initializer or None
+       b_init : initializer or None or str
            The initializer for the bias vector. If None, skip biases.
-       in_channels: int
+       in_features: int
            The number of channels of the previous layer.
            If None, it will be automatically detected when the layer is forwarded for the first time.
        name : None or str
            A unique layer name. If None, a unique name will be automatically generated.
 
        Examples
        --------
-       With TensorLayerX
+       With TensorLayerx
 
        >>> net = tlx.nn.Input([100, 50], name='input')
-       >>> dense = tlx.nn.Dense(n_units=800, act=tlx.ReLU, in_channels=50, name='dense_1')
-       >>> print(dense)
-       Dense(n_units=800, relu, in_channels='50', name='dense_1')
-       >>> tensor = tl.layers.Dense(n_units=800, act=tlx.ReLU, name='dense_2')(net)
-       >>> print(tensor)
-       Tensor([...], shape=(100, 800), dtype=float32)
+       >>> linear = tlx.nn.Linear(out_features=800, act=tlx.ReLU, in_features=50, name='linear_1')
+       >>> tensor = tlx.nn.Linear(out_features=800, act=tlx.ReLU, name='linear_2')(net)
 
        Notes
        -----
@@ -99,67 +106,67 @@ The full implementation is as follow, which supports both automatic inference in
 
        def __init__(
            self,
-           n_units,
+           out_features,
            act=None,
-           W_init=tlx.nn.initializers.truncated_normal(stddev=0.05),
-           b_init=tlx.nn.initializers.constant(value=0.0),
-           in_channels=None,
-           name=None,  # 'dense',
+           W_init='truncated_normal',
+           b_init='constant',
+           in_features=None,
+           name=None,  # 'linear',
        ):
 
-           super(Dense, self).__init__(name, act=act)
+           super(Linear, self).__init__(name, act=act)
 
-           self.n_units = n_units
-           self.W_init = W_init
-           self.b_init = b_init
-           self.in_channels = in_channels
+           self.out_features = out_features
+           self.W_init = self.str_to_init(W_init)
+           self.b_init = self.str_to_init(b_init)
+           self.in_features = in_features
 
-           if self.in_channels is not None:
-               self.build(self.in_channels)
+           if self.in_features is not None:
+               self.build(self.in_features)
                self._built = True
 
            logging.info(
-               "Dense %s: %d %s" %
-               (self.name, self.n_units, self.act.__class__.__name__ if self.act is not None else 'No Activation')
+               "Linear %s: %d %s" %
+               (self.name, self.out_features, self.act.__class__.__name__ if self.act is not None else 'No Activation')
            )
 
        def __repr__(self):
            actstr = self.act.__class__.__name__ if self.act is not None else 'No Activation'
-           s = ('{classname}(n_units={n_units}, ' + actstr)
-           if self.in_channels is not None:
-               s += ', in_channels=\'{in_channels}\''
+           s = ('{classname}(out_features={out_features}, ' + actstr)
+           if self.in_features is not None:
+               s += ', in_features=\'{in_features}\''
            if self.name is not None:
                s += ', name=\'{name}\''
            s += ')'
            return s.format(classname=self.__class__.__name__, **self.__dict__)
 
        def build(self, inputs_shape):
-           if self.in_channels is None and len(inputs_shape) != 2:
-               raise AssertionError("The input dimension must be rank 2, please reshape or flatten it")
-           if self.in_channels:
-               shape = [self.in_channels, self.n_units]
+           if self.in_features is None and len(inputs_shape) < 2:
+               raise AssertionError("The dimension of input should not be less than 2")
+           if self.in_features:
+               shape = [self.in_features, self.out_features]
            else:
-               self.in_channels = inputs_shape[1]
-               shape = [inputs_shape[1], self.n_units]
+               self.in_features = inputs_shape[-1]
+               shape = [self.in_features, self.out_features]
 
            self.W = self._get_weights("weights", shape=tuple(shape), init=self.W_init)
 
            self.b_init_flag = False
            if self.b_init:
-               self.b = self._get_weights("biases", shape=(self.n_units, ), init=self.b_init)
+               self.b = self._get_weights("biases", shape=(self.out_features, ), init=self.b_init)
                self.b_init_flag = True
-               self.bias_add = tlx.BiasAdd()
+               self.bias_add = tlx.ops.BiasAdd(data_format='NHWC')
 
            self.act_init_flag = False
            if self.act:
                self.act_init_flag = True
 
-           self.matmul = tlx.MatMul()
+           self.matmul = tlx.ops.MatMul()
 
        def forward(self, inputs):
            if self._forward_state == False:
                if self._built == False:
-                   self.build(tl.get_tensor_shape(inputs))
+                   self.build(tlx.get_tensor_shape(inputs))
                    self._built = True
                self._forward_state = True
 
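The behaviour this full version adds, and which the rename preserves, is deferred building: when `in_features` is omitted, `build()` runs on the first forward call and reads the width off the input tensor. A sketch mirroring the docstring example above:

```python
# Illustrative: in_features is inferred on the first forward pass.
net = tlx.nn.Input([100, 50], name='input')
linear = tlx.nn.Linear(out_features=800, act=tlx.ReLU, name='linear_2')
tensor = linear(net)        # build() runs here with inputs_shape [100, 50]
print(linear.in_features)   # 50, taken from inputs_shape[-1]
```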
@@ -171,6 +178,7 @@ The full implementation is as follow, which supports both automatic inference in
            return z
 
 
+
 Layers with train/test modes
 ------------------------------
 
@@ -181,13 +189,12 @@ We use Dropout as an example here:
     class Dropout(Module):
        """
        The :class:`Dropout` class is a noise layer which randomly set some
-       activations to zero according to a keeping probability.
+       activations to zero according to a probability.
 
        Parameters
        ----------
-       keep : float
-           The keeping probability.
-           The lower the probability it is, the more activations are set to zero.
+       p : float
+           probability of an element to be zeroed. Default: 0.5
        seed : int or None
            The seed for random dropout.
        name : None or str
@@ -196,29 +203,29 @@ We use Dropout as an example here:
        Examples
        --------
        >>> net = tlx.nn.Input([10, 200])
-       >>> net = tlx.nn.Dropout(keep=0.2)(net)
+       >>> net = tlx.nn.Dropout(p=0.2)(net)
 
        """
 
-       def __init__(self, keep, seed=0, name=None):  #"dropout"):
+       def __init__(self, p=0.5, seed=0, name=None):  #"dropout"):
            super(Dropout, self).__init__(name)
-           self.keep = keep
+           self.p = p
            self.seed = seed
 
            self.build()
            self._built = True
 
-           logging.info("Dropout %s: keep: %f " % (self.name, self.keep))
+           logging.info("Dropout %s: p: %f " % (self.name, self.p))
 
        def __repr__(self):
-           s = ('{classname}(keep={keep}')
+           s = ('{classname}(p={p}')
            if self.name is not None:
                s += ', name=\'{name}\''
            s += ')'
            return s.format(classname=self.__class__.__name__, **self.__dict__)
 
        def build(self, inputs_shape=None):
-           self.dropout = tlx.ops.Dropout(keep=self.keep, seed=self.seed)
+           self.dropout = tlx.ops.Dropout(p=self.p, seed=self.seed)
 
        def forward(self, inputs):
            if self.is_train:
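Because `forward` branches on `self.is_train`, the layer behaves differently in the two modes. A small sketch (assuming the standard `set_train()` / `set_eval()` toggles on `Module`):

```python
# Illustrative: Dropout only zeroes activations in training mode.
net = tlx.nn.Input([10, 200])
drop = tlx.nn.Dropout(p=0.2)

drop.set_train()     # is_train = True: roughly 20% of elements are zeroed
y_train = drop(net)

drop.set_eval()      # is_train = False: the input passes through unchanged
y_eval = drop(net)
```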
@@ -242,7 +249,8 @@ Get entire CNN
     from examples.model_zoo import vgg16
 
     vgg = vgg16(pretrained=True)
-    img = tlx.utils.visualize.read_image('data/tiger.jpeg')
-    img = tlx.utils.prepro.imresize(img, (224, 224)).astype(tlx.float32) / 255
+    img = tlx.vision.load_image('data/tiger.jpeg')
+    img = tlx.utils.functional.resize(img, (224, 224), method='bilinear')
+    img = tlx.ops.convert_to_tensor(img, dtype = 'float32') / 255.
     output = vgg(img, is_train=False)
 
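For context, a hedged sketch of the post-processing that typically follows this snippet; `tlx.argmax` here is illustrative and not part of the commit:

```python
# Illustrative: reduce the VGG16 output to a predicted class index.
probs = tlx.softmax(output)        # logits -> class probabilities
pred = tlx.argmax(probs, axis=-1)  # most likely ImageNet class (assumed op)
print(pred)
```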
