
Commit bb452e4

Merge branch 'master' into pow
2 parents c722d61 + b21a6be commit bb452e4

File tree: 2 files changed, +125 −1 lines changed

python/singa/autograd.py

Lines changed: 45 additions & 0 deletions
```diff
@@ -1829,6 +1829,7 @@ def backward(self, dy):
 def leakyrelu(x, a=0.01):
     return LeakyRelu(a)(x)[0]
 
+
 class Pow(Operation):
     def __init__(self):
         super(Pow, self).__init__()
```
```diff
@@ -1850,6 +1851,28 @@ def backward(self, dy):
 def pow(a, b):
     return Pow()(a,b)[0]
 
+class SoftSign(Operation):
+    def __init__(self):
+        super(SoftSign, self).__init__()
+
+    def forward(self, x):
+        # y = x / (1 + np.abs(x))
+        if training:
+            self.input = x
+        x1 = singa.AddFloat(singa.Abs(x), 1.0)
+        y = singa.__div__(x, x1)
+
+        return y
+
+    def backward(self, dy):
+        dx = singa.AddFloat(singa.Abs(self.input), 1.0)
+        dx = singa.PowFloat(singa.Square(dx), -1.0)
+        dx = singa.__mul__(dy, dx)
+        return dx
+
+def softsign(x):
+    return SoftSign()(x)[0]
+
 class Sqrt(Operation):
     def __init__(self):
         super(Sqrt, self).__init__()
```
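
For reference, SoftSign computes y = x / (1 + |x|), and its derivative is 1 / (1 + |x|)^2, which is exactly what `backward` assembles from `Abs`, `AddFloat`, `Square`, and `PowFloat`. A minimal NumPy sketch (not part of this commit) that checks the analytic gradient against a central finite difference:

```python
import numpy as np

def softsign(x):
    # forward: y = x / (1 + |x|)
    return x / (1.0 + np.abs(x))

def softsign_grad(x):
    # analytic derivative: 1 / (1 + |x|)^2, mirroring SoftSign.backward
    return 1.0 / np.square(np.abs(x) + 1.0)

x = np.array([0.8, -1.2, 3.3, -3.6, -0.5, 0.5])
eps = 1e-6
numeric = (softsign(x + eps) - softsign(x - eps)) / (2.0 * eps)
np.testing.assert_allclose(softsign_grad(x), numeric, rtol=1e-4)
```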
```diff
@@ -1867,6 +1890,28 @@ def backward(self, dy):
 
 def sqrt(x):
     return Sqrt()(x)[0]
+
+class SoftPlus(Operation):
+    def __init__(self):
+        super(SoftPlus, self).__init__()
+
+    def forward(self, x):
+        # f(x) = ln(exp(x) + 1)
+        if training:
+            self.input = x
+        x1 = singa.AddFloat(singa.Exp(x), 1.0)
+        y = singa.Log(x1)
+        return y
+
+    def backward(self, dy):
+        dx = singa.Exp(singa.MultFloat(self.input, -1.0))
+        dx = singa.PowFloat(singa.AddFloat(dx, 1.0), -1.0)
+        dx = singa.__mul__(dy, dx)
+        return dx
+
+
+def softplus(x):
+    return SoftPlus()(x)[0]
 
 class Sub(Operation):
     def __init__(self):
```
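
Likewise, SoftPlus computes y = ln(1 + e^x); its derivative is the logistic sigmoid 1 / (1 + e^(-x)), which `backward` builds as (1 + e^(-x))^(-1) from `Exp`, `MultFloat`, `AddFloat`, and `PowFloat`. A small NumPy sketch of the same identity; `np.logaddexp(0, x)` is used here only as an overflow-safe way to evaluate ln(1 + e^x) in the reference, not something this commit does:

```python
import numpy as np

def softplus(x):
    # forward: ln(1 + e^x); logaddexp(0, x) is the overflow-safe form
    return np.logaddexp(0.0, x)

def softplus_grad(x):
    # analytic derivative: 1 / (1 + e^(-x)), mirroring SoftPlus.backward
    return 1.0 / (1.0 + np.exp(-x))

x = np.array([0.8, -1.2, 3.3, -3.6, -0.5, 0.5])
eps = 1e-6
numeric = (softplus(x + eps) - softplus(x - eps)) / (2.0 * eps)
np.testing.assert_allclose(softplus_grad(x), numeric, rtol=1e-4)
```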

test/python/test_operation.py

Lines changed: 80 additions & 1 deletion
```diff
@@ -817,7 +817,6 @@ def test_Sub_gpu(self):
 
         result = autograd.sub(x0, x1)
         dx0, dx1 = result.creator.backward(dy.data)
-
         DX0 = np.multiply(DY, 1.0)
         DX1 = np.multiply(DY, -1.0)
 
```
```diff
@@ -875,6 +874,86 @@ def test_Pow_gpu(self):
         np.testing.assert_array_almost_equal(tensor.to_numpy(tensor.from_raw_tensor(dx0)), DX0, decimal=4)
         np.testing.assert_array_almost_equal(tensor.to_numpy(tensor.from_raw_tensor(dx1)), DX1, decimal=4)
 
+    def test_SoftSign_cpu(self):
+        # y = x / (1 + np.abs(x))
+        X = np.array([0.8, -1.2, 3.3, -3.6, -0.5, 0.5]).reshape(3, 2).astype(np.float32)
+        XT = X / (1 + np.absolute(X))
+        DY = np.ones((3, 2), dtype=np.float32)
+
+        x = tensor.from_numpy(X)
+        dy = tensor.from_numpy(DY)
+        x.to_device(cpu_dev)
+        dy.to_device(cpu_dev)
+
+        result = autograd.softsign(x)
+        dx = result.creator.backward(dy.data)
+
+        G = 1.0 / np.square(np.absolute(X) + 1.0)
+        DX = np.multiply(G, DY)
+
+        np.testing.assert_array_almost_equal(tensor.to_numpy(result), XT, decimal=5)
+        np.testing.assert_array_almost_equal(tensor.to_numpy(tensor.from_raw_tensor(dx)), DX, decimal=5)
+
+    def test_SoftSign_gpu(self):
+        # y = x / (1 + np.abs(x))
+        X = np.array([0.8, -1.2, 3.3, -3.6, -0.5, 0.5]).reshape(3, 2).astype(np.float32)
+        XT = X / (1 + np.absolute(X))
+        DY = np.ones((3, 2), dtype=np.float32)
+
+        x = tensor.from_numpy(X)
+        dy = tensor.from_numpy(DY)
+        x.to_device(gpu_dev)
+        dy.to_device(gpu_dev)
+
+        result = autograd.softsign(x)
+        dx = result.creator.backward(dy.data)
+
+        G = 1.0 / np.square(np.absolute(X) + 1.0)
+        DX = np.multiply(G, DY)
+
+        np.testing.assert_array_almost_equal(tensor.to_numpy(result), XT, decimal=5)
+        np.testing.assert_array_almost_equal(tensor.to_numpy(tensor.from_raw_tensor(dx)), DX, decimal=5)
+
+    def test_SoftPlus_cpu(self):
+        # y = np.log(np.exp(x) + 1)
+        X = np.array([0.8, -1.2, 3.3, -3.6, -0.5, 0.5]).reshape(3, 2).astype(np.float32)
+        XT = np.log(np.exp(X) + 1)
+        DY = np.ones((3, 2), dtype=np.float32)
+
+        x = tensor.from_numpy(X)
+        dy = tensor.from_numpy(DY)
+        x.to_device(cpu_dev)
+        dy.to_device(cpu_dev)
+
+        result = autograd.softplus(x)
+        dx = result.creator.backward(dy.data)
+        # dx = 1 / (1 + exp(-x))
+        G = 1.0 / (1.0 + np.exp(-X))
+        DX = np.multiply(G, DY)
+
+        np.testing.assert_array_almost_equal(tensor.to_numpy(result), XT, decimal=5)
+        np.testing.assert_array_almost_equal(tensor.to_numpy(tensor.from_raw_tensor(dx)), DX, decimal=5)
+
+    def test_SoftPlus_gpu(self):
+        # y = np.log(np.exp(x) + 1)
+        X = np.array([0.8, -1.2, 3.3, -3.6, -0.5, 0.5]).reshape(3, 2).astype(np.float32)
+        XT = np.log(np.exp(X) + 1)
+        DY = np.ones((3, 2), dtype=np.float32)
+
+        x = tensor.from_numpy(X)
+        dy = tensor.from_numpy(DY)
+        x.to_device(gpu_dev)
+        dy.to_device(gpu_dev)
+
+        result = autograd.softplus(x)
+        dx = result.creator.backward(dy.data)
+        # dx = 1 / (1 + exp(-x))
+        G = 1.0 / (1.0 + np.exp(-X))
+        DX = np.multiply(G, DY)
+
+        np.testing.assert_array_almost_equal(tensor.to_numpy(result), XT, decimal=5)
+        np.testing.assert_array_almost_equal(tensor.to_numpy(tensor.from_raw_tensor(dx)), DX, decimal=5)
+
     def test_Sqrt_cpu(self):
         X = np.array([0.1,1.0,0.4,4.0,0.9,9.0]).reshape(3,2).astype(np.float32)
         XT = np.sqrt(X)
```
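
Outside the unittest harness, the new ops follow the same pattern the tests use. A standalone usage sketch, assuming `device.get_default_device()` is available (the tests rely on module-level `cpu_dev`/`gpu_dev` fixtures instead); setting `autograd.training = True` makes `forward` cache its input for `backward`:

```python
import numpy as np
from singa import autograd, device, tensor

autograd.training = True  # forward() caches inputs only in training mode

dev = device.get_default_device()  # assumption; tests use cpu_dev / gpu_dev
x = tensor.from_numpy(np.array([[0.8, -1.2], [3.3, -3.6]], dtype=np.float32))
x.to_device(dev)

y = autograd.softsign(x)  # x / (1 + |x|)
z = autograd.softplus(x)  # ln(1 + e^x)
print(tensor.to_numpy(y))
print(tensor.to_numpy(z))
```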
