Commit b21a6be

Merge pull request #495 from pinpom/softsign

SINGA-475 add SoftSign operator

2 parents: 59711c8 + 609dc8b
File tree

2 files changed: +62 −1 lines

  python/singa/autograd.py
  test/python/test_operation.py
python/singa/autograd.py

Lines changed: 22 additions & 0 deletions
@@ -1830,6 +1830,28 @@ def leakyrelu(x, a=0.01):
     return LeakyRelu(a)(x)[0]
 
 
+class SoftSign(Operation):
+    def __init__(self):
+        super(SoftSign, self).__init__()
+
+    def forward(self, x):
+        # y = x / (1 + np.abs(x))
+        if training:
+            self.input = x
+        x1 = singa.AddFloat(singa.Abs(x), 1.0)
+        y = singa.__div__(x, x1)
+        return y
+
+    def backward(self, dy):
+        # dy/dx = 1 / (1 + |x|)^2
+        dx = singa.AddFloat(singa.Abs(self.input), 1.0)
+        dx = singa.PowFloat(singa.Square(dx), -1.0)
+        dx = singa.__mul__(dy, dx)
+        return dx
+
+
+def softsign(x):
+    return SoftSign()(x)[0]
+
 
 class Sqrt(Operation):
     def __init__(self):
         super(Sqrt, self).__init__()
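For reference, the math the new operator implements, written as a minimal standalone NumPy sketch; the helper names softsign_forward and softsign_backward are illustrative, not part of the SINGA API:

import numpy as np

def softsign_forward(x):
    # y = x / (1 + |x|), elementwise; output is bounded in (-1, 1)
    return x / (1.0 + np.absolute(x))

def softsign_backward(x, dy):
    # d/dx softsign(x) = 1 / (1 + |x|)^2, so backward scales dy elementwise
    return dy / np.square(1.0 + np.absolute(x))

Note that the operator caches self.input only when the autograd training flag is set, so the forward pass holds no extra state during inference.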

test/python/test_operation.py

Lines changed: 40 additions & 1 deletion
@@ -816,14 +816,53 @@ def test_Sub_gpu(self):
 
         result = autograd.sub(x0, x1)
         dx0, dx1 = result.creator.backward(dy.data)
-
         DX0 = np.multiply(DY, 1.0)
         DX1 = np.multiply(DY, -1.0)
 
         np.testing.assert_array_almost_equal(tensor.to_numpy(result), XT, decimal=5)
         np.testing.assert_array_almost_equal(tensor.to_numpy(tensor.from_raw_tensor(dx0)), DX0, decimal=5)
         np.testing.assert_array_almost_equal(tensor.to_numpy(tensor.from_raw_tensor(dx1)), DX1, decimal=5)
 
+    def test_SoftSign_cpu(self):
+        # y = x / (1 + np.abs(x))
+        X = np.array([0.8, -1.2, 3.3, -3.6, -0.5, 0.5]).reshape(3, 2).astype(np.float32)
+        XT = X / (1 + np.absolute(X))
+        DY = np.ones((3, 2), dtype=np.float32)
+
+        x = tensor.from_numpy(X)
+        dy = tensor.from_numpy(DY)
+        x.to_device(cpu_dev)
+        dy.to_device(cpu_dev)
+
+        result = autograd.softsign(x)
+        dx = result.creator.backward(dy.data)
+
+        # analytic gradient: dy/dx = 1 / (|x| + 1)^2
+        G = 1.0 / np.square(np.absolute(X) + 1.0)
+        DX = np.multiply(G, DY)
+
+        np.testing.assert_array_almost_equal(tensor.to_numpy(result), XT, decimal=5)
+        np.testing.assert_array_almost_equal(tensor.to_numpy(tensor.from_raw_tensor(dx)), DX, decimal=5)
+
+    def test_SoftSign_gpu(self):
+        # y = x / (1 + np.abs(x))
+        X = np.array([0.8, -1.2, 3.3, -3.6, -0.5, 0.5]).reshape(3, 2).astype(np.float32)
+        XT = X / (1 + np.absolute(X))
+        DY = np.ones((3, 2), dtype=np.float32)
+
+        x = tensor.from_numpy(X)
+        dy = tensor.from_numpy(DY)
+        x.to_device(gpu_dev)
+        dy.to_device(gpu_dev)
+
+        result = autograd.softsign(x)
+        dx = result.creator.backward(dy.data)
+
+        G = 1.0 / np.square(np.absolute(X) + 1.0)
+        DX = np.multiply(G, DY)
+
+        np.testing.assert_array_almost_equal(tensor.to_numpy(result), XT, decimal=5)
+        np.testing.assert_array_almost_equal(tensor.to_numpy(tensor.from_raw_tensor(dx)), DX, decimal=5)
+
     def test_SoftPlus_cpu(self):
         # y = np.log(np.exp(x) + 1)
         X = np.array([0.8, -1.2, 3.3, -3.6, -0.5, 0.5]).reshape(3, 2).astype(np.float32)
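As a sanity check on the gradient formula hard-coded in these tests, G = 1 / (|x| + 1)^2, here is a hedged finite-difference sketch in plain NumPy; it is independent of SINGA, and numeric_grad is an illustrative helper, not project code:

import numpy as np

def softsign(x):
    return x / (1.0 + np.absolute(x))

def numeric_grad(x, eps=1e-4):
    # central-difference approximation of d softsign / dx, elementwise
    return (softsign(x + eps) - softsign(x - eps)) / (2.0 * eps)

X = np.array([0.8, -1.2, 3.3, -3.6, -0.5, 0.5]).reshape(3, 2)
G = 1.0 / np.square(np.absolute(X) + 1.0)  # analytic gradient used by the tests
np.testing.assert_array_almost_equal(numeric_grad(X), G, decimal=5)

The central difference agrees with the analytic form to well below the tests' decimal=5 tolerance, provided no element of X sits exactly at 0, where the kink in |x| degrades the finite-difference accuracy.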
