Skip to content

Commit a57f081

Browse files
authored
remove linalg api in paddle.__init__ (#36112)
Remove recently added linalg APIs from `paddle.__init__`; add a `name` argument to some new linalg API interfaces.
1 parent 5f168af commit a57f081

File tree

4 files changed

+29
-36
lines changed

4 files changed

+29
-36
lines changed

python/paddle/__init__.py

Lines changed: 0 additions & 9 deletions
Original file line numberDiff line numberDiff line change
@@ -94,20 +94,12 @@
9494
from .tensor.linalg import norm # noqa: F401
9595
from .tensor.linalg import transpose # noqa: F401
9696
from .tensor.linalg import dist # noqa: F401
97-
from .tensor.linalg import cond # noqa: F401
9897
from .tensor.linalg import t # noqa: F401
9998
from .tensor.linalg import cross # noqa: F401
10099
from .tensor.linalg import cholesky # noqa: F401
101100
from .tensor.linalg import bmm # noqa: F401
102101
from .tensor.linalg import histogram # noqa: F401
103102
from .tensor.linalg import mv # noqa: F401
104-
from .tensor.linalg import det # noqa: F401
105-
from .tensor.linalg import slogdet # noqa: F401
106-
from .tensor.linalg import multi_dot # noqa: F401
107-
from .tensor.linalg import matrix_power # noqa: F401
108-
from .tensor.linalg import svd # noqa: F401
109-
from .tensor.linalg import pinv # noqa: F401
110-
from .tensor.linalg import solve # noqa: F401
111103
from .tensor.logic import equal # noqa: F401
112104
from .tensor.logic import greater_equal # noqa: F401
113105
from .tensor.logic import greater_than # noqa: F401
@@ -506,7 +498,6 @@
506498
'stack',
507499
'sqrt',
508500
'cholesky',
509-
'matrix_power',
510501
'randperm',
511502
'linspace',
512503
'reshape',

python/paddle/fluid/tests/unittests/test_linalg_cond.py

Lines changed: 8 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -28,7 +28,7 @@ def test_static_assert_true(self, x_list, p_list):
2828
for x in x_list:
2929
with static.program_guard(static.Program(), static.Program()):
3030
input_data = static.data("X", shape=x.shape, dtype=x.dtype)
31-
output = paddle.cond(input_data, p)
31+
output = paddle.linalg.cond(input_data, p)
3232
exe = static.Executor()
3333
result = exe.run(feed={"X": x}, fetch_list=[output])
3434
expected_output = np.linalg.cond(x, p)
@@ -39,7 +39,7 @@ def test_dygraph_assert_true(self, x_list, p_list):
3939
for p in p_list:
4040
for x in x_list:
4141
input_tensor = paddle.to_tensor(x)
42-
output = paddle.cond(input_tensor, p)
42+
output = paddle.linalg.cond(input_tensor, p)
4343
expected_output = np.linalg.cond(x, p)
4444
self.assertTrue(np.allclose(output, expected_output))
4545

@@ -103,12 +103,12 @@ def test_dygraph_api_error(self):
103103
for p in p_list_error:
104104
for x in (x_list_n_n + x_list_m_n):
105105
x_tensor = paddle.to_tensor(x)
106-
self.assertRaises(ValueError, paddle.cond, x_tensor, p)
106+
self.assertRaises(ValueError, paddle.linalg.cond, x_tensor, p)
107107

108108
for p in p_list_n_n:
109109
for x in x_list_m_n:
110110
x_tensor = paddle.to_tensor(x)
111-
self.assertRaises(ValueError, paddle.cond, x_tensor, p)
111+
self.assertRaises(ValueError, paddle.linalg.cond, x_tensor, p)
112112

113113
def test_static_api_error(self):
114114
paddle.enable_static()
@@ -119,13 +119,13 @@ def test_static_api_error(self):
119119
for x in (x_list_n_n + x_list_m_n):
120120
with static.program_guard(static.Program(), static.Program()):
121121
x_data = static.data("X", shape=x.shape, dtype=x.dtype)
122-
self.assertRaises(ValueError, paddle.cond, x_data, p)
122+
self.assertRaises(ValueError, paddle.linalg.cond, x_data, p)
123123

124124
for p in p_list_n_n:
125125
for x in x_list_m_n:
126126
with static.program_guard(static.Program(), static.Program()):
127127
x_data = static.data("X", shape=x.shape, dtype=x.dtype)
128-
self.assertRaises(ValueError, paddle.cond, x_data, p)
128+
self.assertRaises(ValueError, paddle.linalg.cond, x_data, p)
129129

130130
# it's not supported when input is an empty tensor in static mode
131131
def test_static_empty_input_error(self):
@@ -136,13 +136,13 @@ def test_static_empty_input_error(self):
136136
for x in x_list_n_n:
137137
with static.program_guard(static.Program(), static.Program()):
138138
x_data = static.data("X", shape=x.shape, dtype=x.dtype)
139-
self.assertRaises(ValueError, paddle.cond, x_data, p)
139+
self.assertRaises(ValueError, paddle.linalg.cond, x_data, p)
140140

141141
for p in (p_list_n_n + p_list_m_n):
142142
for x in x_list_n_n:
143143
with static.program_guard(static.Program(), static.Program()):
144144
x_data = static.data("X", shape=x.shape, dtype=x.dtype)
145-
self.assertRaises(ValueError, paddle.cond, x_data, p)
145+
self.assertRaises(ValueError, paddle.linalg.cond, x_data, p)
146146

147147

148148
class TestCondEmptyTensorInput(unittest.TestCase):

python/paddle/fluid/tests/unittests/test_multi_dot_op.py

Lines changed: 10 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -198,32 +198,34 @@ def test_errors(self):
198198
paddle.static.Program()):
199199
# The inputs type of multi_dot must be list matrix.
200200
input1 = 12
201-
self.assertRaises(TypeError, paddle.multi_dot, [input1, input1])
201+
self.assertRaises(TypeError, paddle.linalg.multi_dot,
202+
[input1, input1])
202203

203204
# The inputs dtype of multi_dot must be float64, float64 or float16.
204205
input2 = paddle.static.data(
205206
name='input2', shape=[10, 10], dtype="int32")
206-
self.assertRaises(TypeError, paddle.multi_dot, [input2, input2])
207+
self.assertRaises(TypeError, paddle.linalg.multi_dot,
208+
[input2, input2])
207209

208210
# the number of tensor must be larger than 1
209211
x0 = paddle.static.data(name='x0', shape=[3, 2], dtype="float64")
210-
self.assertRaises(ValueError, paddle.multi_dot, [x0])
212+
self.assertRaises(ValueError, paddle.linalg.multi_dot, [x0])
211213

212214
#the first tensor must be 1D or 2D
213215
x1 = paddle.static.data(name='x1', shape=[3, 2, 3], dtype="float64")
214216
x2 = paddle.static.data(name='x2', shape=[3, 2], dtype="float64")
215-
self.assertRaises(ValueError, paddle.multi_dot, [x1, x2])
217+
self.assertRaises(ValueError, paddle.linalg.multi_dot, [x1, x2])
216218

217219
#the last tensor must be 1D or 2D
218220
x3 = paddle.static.data(name='x3', shape=[3, 2], dtype="float64")
219221
x4 = paddle.static.data(name='x4', shape=[3, 2, 2], dtype="float64")
220-
self.assertRaises(ValueError, paddle.multi_dot, [x3, x4])
222+
self.assertRaises(ValueError, paddle.linalg.multi_dot, [x3, x4])
221223

222224
#the tensor must be 2D, except first and last tensor
223225
x5 = paddle.static.data(name='x5', shape=[3, 2], dtype="float64")
224226
x6 = paddle.static.data(name='x6', shape=[2], dtype="float64")
225227
x7 = paddle.static.data(name='x7', shape=[2, 2], dtype="float64")
226-
self.assertRaises(ValueError, paddle.multi_dot, [x5, x6, x7])
228+
self.assertRaises(ValueError, paddle.linalg.multi_dot, [x5, x6, x7])
227229

228230

229231
class APITestMultiDot(unittest.TestCase):
@@ -232,7 +234,7 @@ def test_out(self):
232234
with paddle.static.program_guard(paddle.static.Program()):
233235
x0 = paddle.static.data(name='x0', shape=[3, 2], dtype="float64")
234236
x1 = paddle.static.data(name='x1', shape=[2, 3], dtype='float64')
235-
result = paddle.multi_dot([x0, x1])
237+
result = paddle.linalg.multi_dot([x0, x1])
236238
exe = paddle.static.Executor(paddle.CPUPlace())
237239
data1 = np.random.rand(3, 2).astype("float64")
238240
data2 = np.random.rand(2, 3).astype("float64")
@@ -254,7 +256,7 @@ def test_dygraph_without_out(self):
254256
input_array2 = np.random.rand(4, 3).astype("float64")
255257
data1 = paddle.to_tensor(input_array1)
256258
data2 = paddle.to_tensor(input_array2)
257-
out = paddle.multi_dot([data1, data2])
259+
out = paddle.linalg.multi_dot([data1, data2])
258260
expected_result = np.linalg.multi_dot([input_array1, input_array2])
259261
self.assertTrue(np.allclose(expected_result, out.numpy()))
260262

python/paddle/tensor/linalg.py

Lines changed: 11 additions & 11 deletions
Original file line numberDiff line numberDiff line change
@@ -448,7 +448,7 @@ def p_matrix_norm(input, porder=1., axis=axis, keepdim=False, name=None):
448448
format(axis))
449449

450450

451-
def dist(x, y, p=2):
451+
def dist(x, y, p=2, name=None):
452452
r"""
453453
454454
This OP returns the p-norm of (x - y). It is not a norm in a strict sense, only as a measure
@@ -1251,7 +1251,7 @@ def bmm(x, y, name=None):
12511251
return out
12521252

12531253

1254-
def histogram(input, bins=100, min=0, max=0):
1254+
def histogram(input, bins=100, min=0, max=0, name=None):
12551255
"""
12561256
Computes the histogram of a tensor. The elements are sorted into equal width bins between min and max.
12571257
If min and max are both zero, the minimum and maximum values of the data are used.
@@ -1351,7 +1351,7 @@ def __check_input(x, vec):
13511351
return out
13521352

13531353

1354-
def det(x):
1354+
def det(x, name=None):
13551355
"""
13561356
Calculates determinant value of a square matrix or batches of square matrices.
13571357
Args:
@@ -1367,7 +1367,7 @@ def det(x):
13671367
13681368
x = paddle.randn([3,3,3])
13691369
1370-
A = paddle.det(x)
1370+
A = paddle.linalg.det(x)
13711371
13721372
print(A)
13731373
@@ -1399,7 +1399,7 @@ def det(x):
13991399
return out
14001400

14011401

1402-
def slogdet(x):
1402+
def slogdet(x, name=None):
14031403
"""
14041404
Calculates the sign and natural logarithm of the absolute value of a square matrix's or batches square matrices' determinant.
14051405
The determinant can be computed with ``sign * exp(logabsdet)
@@ -1422,7 +1422,7 @@ def slogdet(x):
14221422
14231423
x = paddle.randn([3,3,3])
14241424
1425-
A = paddle.slogdet(x)
1425+
A = paddle.linalg.slogdet(x)
14261426
14271427
print(A)
14281428
@@ -1563,17 +1563,17 @@ def matrix_power(x, n, name=None):
15631563
x = paddle.to_tensor([[1, 2, 3],
15641564
[1, 4, 9],
15651565
[1, 8, 27]], dtype='float64')
1566-
print(paddle.matrix_power(x, 2))
1566+
print(paddle.linalg.matrix_power(x, 2))
15671567
# [[6. , 34. , 102.],
15681568
# [14. , 90. , 282.],
15691569
# [36. , 250., 804.]]
15701570
1571-
print(paddle.matrix_power(x, 0))
1571+
print(paddle.linalg.matrix_power(x, 0))
15721572
# [[1., 0., 0.],
15731573
# [0., 1., 0.],
15741574
# [0., 0., 1.]]
15751575
1576-
print(paddle.matrix_power(x, -2))
1576+
print(paddle.linalg.matrix_power(x, -2))
15771577
# [[ 12.91666667, -12.75000000, 2.83333333 ],
15781578
# [-7.66666667 , 8. , -1.83333333 ],
15791579
# [ 1.80555556 , -1.91666667 , 0.44444444 ]]
@@ -1699,7 +1699,7 @@ def multi_dot(x, name=None):
16991699
B_data = np.random.random([4, 5]).astype(np.float32)
17001700
A = paddle.to_tensor(A_data)
17011701
B = paddle.to_tensor(B_data)
1702-
out = paddle.multi_dot([A, B])
1702+
out = paddle.linalg.multi_dot([A, B])
17031703
print(out.numpy().shape)
17041704
# [3, 5]
17051705
@@ -1710,7 +1710,7 @@ def multi_dot(x, name=None):
17101710
A = paddle.to_tensor(A_data)
17111711
B = paddle.to_tensor(B_data)
17121712
C = paddle.to_tensor(C_data)
1713-
out = paddle.multi_dot([A, B, C])
1713+
out = paddle.linalg.multi_dot([A, B, C])
17141714
print(out.numpy().shape)
17151715
# [10, 7]
17161716

0 commit comments

Comments
 (0)