Commit 33cee36

add roll and logsoftmax

1 parent 241c094 commit 33cee36

File tree

10 files changed: +145 -5 lines changed

docs/index.rst

Lines changed: 1 addition & 1 deletion

@@ -25,7 +25,7 @@ User Guide
 ------------
 
 The TensorLayerX user guide explains how to install TensorFlow, CUDA and cuDNN,
-how to build and train neural networks using TensorLayer3, and how to contribute
+how to build and train neural networks using TensorLayerX, and how to contribute
 to the library as a developer.
 
 .. toctree::

docs/modules/activation.rst

Lines changed: 4 additions & 0 deletions

@@ -47,6 +47,7 @@ activation list
    Sigmoid
    Softmax
    Mish
+   LogSoftmax
 
 TensorLayerX Activations
 --------------------------------

@@ -119,6 +120,9 @@ Softmax
 ---------
 .. autoclass:: Softmax
 
+LogSoftmax
+----------
+.. autoclass:: LogSoftmax
 
 Parametric activation
 ------------------------------

docs/modules/ops.rst

Lines changed: 11 additions & 1 deletion

@@ -121,6 +121,8 @@ API - Operations
    set_device
    get_device
    to_device
+   roll
+   logsoftmax
 
 TensorLayerX Tensor Operations
 --------------------------------

@@ -583,4 +585,12 @@ get_device
 
 to_device
 ^^^^^^^^^^^^^^^^^^^^^^^
-.. autofunction:: to_device
+.. autofunction:: to_device
+
+roll
+^^^^^^^^^^^^^^^^^^^^^^^
+.. autofunction:: roll
+
+logsoftmax
+^^^^^^^^^^^^^^^^^^^^^^^
+.. autofunction:: logsoftmax

tensorlayerx/backend/__init__.py

Lines changed: 2 additions & 0 deletions

@@ -152,6 +152,8 @@
 from .ops import get_device
 from .ops import scatter_update
 from .ops import to_device
+from .ops import logsoftmax
+from .ops import roll
 # dtype
 from .ops import (
     DType, float16, float32, float64, int8, int16, int32, int64, uint8, uint16, uint32, uint64, bool, complex64,

tensorlayerx/backend/ops/__init__.py

Lines changed: 2 additions & 0 deletions

@@ -204,6 +204,8 @@
 from .load_backend import get_device
 from .load_backend import scatter_update
 from .load_backend import to_device
+from .load_backend import roll
+from .load_backend import logsoftmax
 # dtype
 from .load_backend import (
     DType, float16, float32, float64, int8, int16, int32, int64, uint8, uint16, uint32, uint64, bool, complex64,

tensorlayerx/backend/ops/mindspore_backend.py

Lines changed: 10 additions & 0 deletions

@@ -1880,3 +1880,13 @@ def get_device():
 def to_device(tensor, device = 'GPU', id = 0):
 
     return tensor
+
+
+def roll(input, shifts, dims=None):
+    return msnp.roll(input, shifts, dims)
+
+
+def logsoftmax(input, dim=None):
+    if dim is None:
+        dim = -1
+    log_softmax = P.LogSoftmax(dim)
+    return log_softmax(input)

tensorlayerx/backend/ops/paddle_backend.py

Lines changed: 10 additions & 1 deletion

@@ -1909,4 +1909,13 @@ def to_device(tensor, device = 'GPU', id = 0):
     if device == 'GPU':
         return paddle.to_tensor(tensor, place=paddle.CUDAPlace(id))
     if device == 'CPU':
-        return paddle.to_tensor(tensor, place=paddle.CPUPlace())
+        return paddle.to_tensor(tensor, place=paddle.CPUPlace())
+
+
+def roll(input, shifts, dims=None):
+    return paddle.roll(input, shifts, dims)
+
+
+def logsoftmax(input, dim=None):
+    if dim is None:
+        dim = -1
+    return F.log_softmax(input, dim)

tensorlayerx/backend/ops/tensorflow_backend.py

Lines changed: 55 additions & 0 deletions

@@ -3980,3 +3980,58 @@ def to_device(tensor, device = 'GPU', id = 0):
         return tensor
     with tf.device("/" + device.upper()+':'+str(id)):
         return tf.identity(tensor)
+
+
+def roll(input, shifts, dims=None):
+    """Roll the tensor input along the given dimension(s).
+
+    Elements that are shifted beyond the last position are re-introduced at the
+    first position. If dims is None, the tensor will be flattened before rolling
+    and then restored to the original shape.
+
+    Parameters
+    ----------
+    input : tensor
+        The input tensor.
+    shifts : int or tuple
+        The number of places by which the elements of the tensor are shifted.
+        If shifts is a tuple, dims must be a tuple of the same size, and each
+        dimension will be rolled by the corresponding value.
+    dims : int or tuple
+        Axis along which to roll.
+
+    Examples
+    --------
+    >>> import tensorlayerx as tlx
+    >>> x = tlx.ops.ones((5, 6))
+    >>> x = tlx.ops.roll(x, shifts=2)
+
+    """
+    if dims is None:
+        raw_shape = input.shape
+        shape = 1
+        for d in input.shape:
+            shape *= d
+        input = tf.reshape(input, [1, shape])
+        output = tf.roll(input, shifts, 1)
+        output = tf.reshape(output, raw_shape)
+        return output
+    return tf.roll(input, shifts, dims)
+
+
+def logsoftmax(input, dim=None):
+    """Applies a softmax followed by a logarithm.
+
+    Parameters
+    ----------
+    input : Tensor
+        The input tensor.
+    dim : int
+        A dimension along which LogSoftmax will be computed.
+
+    Examples
+    --------
+    >>> import tensorlayerx as tlx
+    >>> import numpy as np
+    >>> x = tlx.ops.convert_to_tensor(np.random.random((3, 4)))
+    >>> x = tlx.ops.logsoftmax(x, dim=1)
+
+    """
+    return tf.nn.log_softmax(input, dim)
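The dims=None branch above mirrors torch.roll: flatten, roll the flat tensor, then restore the original shape. A minimal sketch of both paths (our own check, not part of the commit; assumes a standard TensorFlow 2.x install, expected outputs worked out by hand):

import tensorflow as tf

x = tf.constant([[0, 1, 2],
                 [3, 4, 5]])

# dims given: roll each row along axis 1 by one position.
print(tf.roll(x, shift=1, axis=1).numpy())  # [[2 0 1], [5 3 4]]

# dims=None path: flatten to [0 1 2 3 4 5], roll by 2 -> [4 5 0 1 2 3],
# then reshape back to (2, 3).
flat = tf.reshape(x, [1, 6])
rolled = tf.reshape(tf.roll(flat, shift=2, axis=1), x.shape)
print(rolled.numpy())  # [[4 5 0], [1 2 3]]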

tensorlayerx/backend/ops/torch_backend.py

Lines changed: 10 additions & 1 deletion

@@ -1726,4 +1726,13 @@ def to_device(tensor, device='GPU', id=0):
     if device == 'gpu':
         device = 'cuda' + ':' + str(id)
     tensor = tensor.detach().to(device)
-    return tensor
+    return tensor
+
+
+def roll(input, shifts, dims=None):
+    return torch.roll(input, shifts, dims)
+
+
+def logsoftmax(input, dim=None):
+    return F.log_softmax(input, dim)
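All four backend wrappers delegate to a fused log-softmax rather than computing log(softmax(x)) in two steps, because the fused form is numerically stable for large logits. As a reference for what they compute, here is a NumPy-only sketch (logsoftmax_ref is our illustrative name, not a library function):

import numpy as np

def logsoftmax_ref(x, dim=-1):
    # logsoftmax(x)_i = x_i - logsumexp(x), shifted by max(x) for stability.
    m = np.max(x, axis=dim, keepdims=True)
    lse = m + np.log(np.sum(np.exp(x - m), axis=dim, keepdims=True))
    return x - lse

x = np.array([[1.0, 2.0, 3.0]])
print(logsoftmax_ref(x))                    # [[-2.4076 -1.4076 -0.4076]]
print(np.log(np.exp(x) / np.exp(x).sum()))  # same values, but overflows for large x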

tensorlayerx/nn/layers/activation.py

Lines changed: 40 additions & 1 deletion

@@ -7,7 +7,7 @@
 
 __all__ = [
     'ELU', 'PRelu', 'PRelu6', 'PTRelu6', 'ReLU', 'ReLU6', 'Softplus', 'LeakyReLU', 'LeakyReLU6', 'LeakyTwiceRelu6',
-    'Ramp', 'Swish', 'HardTanh', 'Mish', 'Tanh', 'Sigmoid', 'Softmax'
+    'Ramp', 'Swish', 'HardTanh', 'Mish', 'Tanh', 'Sigmoid', 'Softmax', 'LogSoftmax'
 ]
 
 
@@ -914,3 +914,42 @@ def forward(self, x):
         self._add_node(x, outputs)
         self._nodes_fixed = True
         return outputs
+
+
+class LogSoftmax(Module):
+    r"""Applies a softmax followed by a logarithm.
+
+    .. math::
+        \text{LogSoftmax}(x_{i}) = \log\left(\frac{\exp(x_i)}{\sum_j \exp(x_j)}\right)
+
+    Parameters
+    ----------
+    x : Tensor
+        The input tensor.
+    dim : int
+        A dimension along which LogSoftmax will be computed.
+
+    Examples
+    --------
+    >>> net = tlx.nn.Input([10, 200])
+    >>> net = tlx.nn.LogSoftmax()(net)
+
+    Returns
+    -------
+    Tensor
+        A ``Tensor`` of the same type as ``x``.
+
+    """
+
+    def __init__(self, dim=None):
+        super(LogSoftmax, self).__init__()
+        self.dim = dim
+        self._built = True
+
+    def forward(self, x):
+        outputs = tlx.ops.logsoftmax(x, self.dim)
+
+        if not self._nodes_fixed and self._build_graph:
+            self._add_node(x, outputs)
+            self._nodes_fixed = True
+        return outputs
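With the layer exported in __all__ and the op wired through tlx.ops, LogSoftmax drops into a network like any other activation. A minimal usage sketch (our own example; assumes a TensorFlow backend is installed and selected via the TL_BACKEND environment variable):

import os
os.environ['TL_BACKEND'] = 'tensorflow'  # any installed backend should work

import tensorlayerx as tlx

x = tlx.nn.Input([10, 200])           # dummy batch: 10 samples, 200 features
out = tlx.nn.LogSoftmax(dim=-1)(x)    # log-probabilities along the last axis

# Sanity check: exp(logsoftmax) sums to 1 along the reduced axis.
print(tlx.ops.reduce_sum(tlx.ops.exp(out), axis=-1))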
