@@ -3980,3 +3980,58 @@ def to_device(tensor, device = 'GPU', id = 0):
         return tensor
     with tf.device("/" + device.upper() + ':' + str(id)):
         return tf.identity(tensor)
+
+def roll(input, shifts, dims=None):
+    """Roll the tensor input along the given dimension(s).
+    Elements that are shifted beyond the last position are re-introduced at the first position.
+    If dims is None, the tensor will be flattened before rolling and then restored to the original shape.
+
+    Parameters
+    ----------
+    input : tensor
+        the input tensor.
+    shifts : int or tuple
+        The number of places by which the elements of the tensor are shifted.
+        If shifts is a tuple, dims must be a tuple of the same size, and each dimension will be rolled by the corresponding value.
+    dims : int or tuple
+        Axis or axes along which to roll. Default is None.
+
+    Examples
+    ---------
+    >>> import tensorlayerx as tlx
+    >>> x = tlx.ops.ones((5,6))
+    >>> x = tlx.ops.roll(x, shifts=2)
+
+    """
+    if dims is None:
+        raw_shape = input.shape
+        shape = 1
+        for d in input.shape:
+            shape *= d
+        input = tf.reshape(input, [1, shape])
+        output = tf.roll(input, shifts, 1)
+        output = tf.reshape(output, raw_shape)
+        return output
+    return tf.roll(input, shifts, dims)
+
+def logsoftmax(input, dim=None):
+    """Applies a softmax followed by a logarithm.
+
+    Parameters
+    ----------
+    input : Tensor
+        the input tensor.
+    dim : int
+        A dimension along which LogSoftmax will be computed.
+
+
+    Examples
+    ---------
+    >>> import tensorlayerx as tlx
+    >>> import numpy as np
+    >>> x = tlx.ops.convert_to_tensor(np.random.random((3,4)))
+    >>> x = tlx.ops.logsoftmax(x, dim=1)
+
+    """
+
+    return tf.nn.log_softmax(input, dim)
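
A quick sanity check for the new `roll` op (a minimal sketch, not part of the commit, assuming the TensorFlow backend in eager mode and the `tlx.ops.*` names used in the docstring examples above): the dims=None path should match numpy.roll, which likewise flattens the input and restores its shape.

```python
import numpy as np
import tensorlayerx as tlx

x_np = np.arange(12, dtype=np.float32).reshape(3, 4)
x = tlx.ops.convert_to_tensor(x_np)

# dims=None: flatten, roll by 2, restore the original (3, 4) shape.
y = tlx.ops.roll(x, shifts=2)
np.testing.assert_allclose(y.numpy(), np.roll(x_np, 2))

# dims given: roll along axis 1 only.
y1 = tlx.ops.roll(x, shifts=1, dims=1)
np.testing.assert_allclose(y1.numpy(), np.roll(x_np, 1, axis=1))
```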
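A similar sketch for `logsoftmax` (again not part of the commit, and assuming the op is exported as `tlx.ops.logsoftmax` as in its docstring example): the result should equal `log(softmax(x))` computed explicitly, up to floating-point error.

```python
import numpy as np
import tensorflow as tf
import tensorlayerx as tlx

x = tlx.ops.convert_to_tensor(np.random.random((3, 4)).astype(np.float32))

out = tlx.ops.logsoftmax(x, dim=1)           # tf.nn.log_softmax under the hood
ref = tf.math.log(tf.nn.softmax(x, axis=1))  # explicit log(softmax(x)) reference

np.testing.assert_allclose(out.numpy(), ref.numpy(), rtol=1e-5)

# Exponentiating the log-probabilities should give rows that sum to 1.
np.testing.assert_allclose(np.exp(out.numpy()).sum(axis=1), np.ones(3), rtol=1e-5)
```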