@@ -102,6 +102,8 @@ def dot(a, b, out=None):
102102 C-contiguous. If these conditions are not met, an exception is
103103 raised, instead of attempting to be flexible.
104104
105+ Default: ``None``.
106+
105107 Returns
106108 -------
107109 out : dpnp.ndarray
@@ -207,15 +209,19 @@ def einsum(
207209 These are the arrays for the operation.
208210 out : {dpnp.ndarray, usm_ndarray, None}, optional
209211 If provided, the calculation is done into this array.
212+
213+ Default: ``None``.
210214 dtype : {None, str, dtype object}, optional
211215 If provided, forces the calculation to use the data type specified.
216+
212217 Default: ``None``.
213218 order : {"C", "F", "A", "K"}, optional
214219 Controls the memory layout of the output. ``"C"`` means it should be
215220 C-contiguous. ``"F"`` means it should be F-contiguous, ``"A"`` means
216221 it should be ``"F"`` if the inputs are all ``"F"``, ``"C"`` otherwise.
217222 ``"K"`` means it should be as close to the layout of the inputs as
218223 is possible, including arbitrarily permuted axes.
224+
219225 Default: ``"K"``.
220226 casting : {"no", "equiv", "safe", "same_kind", "unsafe"}, optional
221227 Controls what kind of data casting may occur. Setting this to
@@ -233,12 +239,14 @@ def einsum(
233239 ``"same_kind"``. This is to prevent errors that may occur when data
234240 needs to be converted to `float64`, but the device does not support it.
235241 In such cases, the data is instead converted to `float32`.
242+
236243 Default: ``"same_kind"``.
237244 optimize : {False, True, "greedy", "optimal"}, optional
238245 Controls if intermediate optimization should occur. No optimization
239246 will occur if ``False`` and ``True`` will default to the ``"greedy"``
240247 algorithm. Also accepts an explicit contraction list from the
241248 :obj:`dpnp.einsum_path` function.
249+
242250 Default: ``False``.
243251
244252 Returns
@@ -259,15 +267,15 @@ def einsum(
259267 Examples
260268 --------
261269 >>> import dpnp as np
262- >>> a = np.arange(25).reshape(5,5)
270+ >>> a = np.arange(25).reshape(5, 5)
263271 >>> b = np.arange(5)
264- >>> c = np.arange(6).reshape(2,3)
272+ >>> c = np.arange(6).reshape(2, 3)
265273
266274 Trace of a matrix:
267275
268276 >>> np.einsum("ii", a)
269277 array(60)
270- >>> np.einsum(a, [0,0])
278+ >>> np.einsum(a, [0, 0])
271279 array(60)
272280 >>> np.trace(a)
273281 array(60)
@@ -323,14 +331,14 @@ def einsum(
323331 array(30)
324332 >>> np.einsum(b, [0], b, [0])
325333 array(30)
326- >>> np.inner(b,b)
334+ >>> np.inner(b, b)
327335 array(30)
328336
329337 Matrix vector multiplication:
330338
331339 >>> np.einsum("ij,j", a, b)
332340 array([ 30, 80, 130, 180, 230])
333- >>> np.einsum(a, [0,1], b, [1])
341+ >>> np.einsum(a, [0, 1], b, [1])
334342 array([ 30, 80, 130, 180, 230])
335343 >>> np.dot(a, b)
336344 array([ 30, 80, 130, 180, 230])
@@ -412,22 +420,26 @@ def einsum(
412420 Basic `einsum`: 119 ms ± 26 ms per loop (evaluated on 12th
413421 Gen Intel\u00AE Core\u2122 i7 processor)
414422
415- >>> %timeit np.einsum("ijk,ilm,njm,nlk,abc->",a,a,a,a, a)
423+ >>> %timeit np.einsum("ijk,ilm,njm,nlk,abc->", a, a, a, a, a)
416424
417425 Sub-optimal `einsum`: 32.9 ms ± 5.1 ms per loop
418426
419- >>> %timeit np.einsum("ijk,ilm,njm,nlk,abc->",a,a,a,a,a, optimize="optimal")
427+ >>> %timeit np.einsum(
428+ "ijk,ilm,njm,nlk,abc->", a, a, a, a, a, optimize="optimal"
429+ )
420430
421431 Greedy `einsum`: 28.6 ms ± 4.8 ms per loop
422432
423- >>> %timeit np.einsum("ijk,ilm,njm,nlk,abc->",a,a,a,a,a, optimize="greedy")
433+ >>> %timeit np.einsum(
434+ "ijk,ilm,njm,nlk,abc->", a, a, a, a, a, optimize="greedy"
435+ )
424436
425437 Optimal `einsum`: 26.9 ms ± 6.3 ms per loop
426438
427439 >>> path = np.einsum_path(
428- "ijk,ilm,njm,nlk,abc->",a,a,a,a, a, optimize="optimal"
440+ "ijk,ilm,njm,nlk,abc->", a, a, a, a, a, optimize="optimal"
429441 )[0]
430- >>> %timeit np.einsum("ijk,ilm,njm,nlk,abc->",a,a,a,a, a, optimize=path)
442+ >>> %timeit np.einsum("ijk,ilm,njm,nlk,abc->", a, a, a, a, a, optimize=path)
431443
432444 """
433445
@@ -509,10 +521,9 @@ def einsum_path(*operands, optimize="greedy", einsum_call=False):
509521 Examples
510522 --------
511523 We can begin with a chain dot example. In this case, it is optimal to
512- contract the ``b`` and ``c`` tensors first as represented by the first
513- element of the path ``(1, 2)``. The resulting tensor is added to the end
514- of the contraction and the remaining contraction ``(0, 1)`` is then
515- completed.
524+ contract the `b` and `c` tensors first as represented by the first element
525+ of the path ``(1, 2)``. The resulting tensor is added to the end of the
526+ contraction and the remaining contraction ``(0, 1)`` is then completed.
516527
517528 >>> import dpnp as np
518529 >>> np.random.seed(123)
@@ -622,7 +633,7 @@ def inner(a, b):
622633
623634 # Some multidimensional examples
624635
625- >>> a = np.arange(24).reshape((2,3, 4))
636+ >>> a = np.arange(24).reshape((2, 3, 4))
626637 >>> b = np.arange(4)
627638 >>> c = np.inner(a, b)
628639 >>> c.shape
@@ -631,8 +642,8 @@ def inner(a, b):
631642 array([[ 14, 38, 62],
632643 [86, 110, 134]])
633644
634- >>> a = np.arange(2).reshape((1,1, 2))
635- >>> b = np.arange(6).reshape((3,2))
645+ >>> a = np.arange(2).reshape((1, 1, 2))
646+ >>> b = np.arange(6).reshape((3, 2))
636647 >>> c = np.inner(a, b)
637648 >>> c.shape
638649 (1, 1, 3)
@@ -704,19 +715,19 @@ def kron(a, b):
704715 >>> np.kron(b, a)
705716 array([ 5, 50, 500, ..., 7, 70, 700])
706717
707- >>> np.kron(np.eye(2), np.ones((2,2)))
718+ >>> np.kron(np.eye(2), np.ones((2, 2)))
708719 array([[1., 1., 0., 0.],
709720 [1., 1., 0., 0.],
710721 [0., 0., 1., 1.],
711722 [0., 0., 1., 1.]])
712723
713- >>> a = np.arange(100).reshape((2,5,2, 5))
714- >>> b = np.arange(24).reshape((2,3, 4))
715- >>> c = np.kron(a,b)
724+ >>> a = np.arange(100).reshape((2, 5, 2, 5))
725+ >>> b = np.arange(24).reshape((2, 3, 4))
726+ >>> c = np.kron(a, b)
716727 >>> c.shape
717728 (2, 10, 6, 20)
718- >>> I = (1,3,0, 2)
719- >>> J = (0,2, 1)
729+ >>> I = (1, 3, 0, 2)
730+ >>> J = (0, 2, 1)
720731 >>> J1 = (0,) + J # extend to ndim=4
721732 >>> S1 = (1,) + b.shape
722733 >>> K = tuple(np.array(I) * np.array(S1) + np.array(J1))
@@ -869,7 +880,7 @@ def matmul(
869880
870881 >>> a = np.arange(2 * 2 * 4).reshape((2, 2, 4))
871882 >>> b = np.arange(2 * 2 * 4).reshape((2, 4, 2))
872- >>> np.matmul(a,b).shape
883+ >>> np.matmul(a, b).shape
873884 (2, 2, 2)
874885 >>> np.matmul(a, b)[0, 1, 1]
875886 array(98)
@@ -1042,6 +1053,7 @@ def outer(a, b, out=None):
10421053 Second input vector. Input is flattened if not already 1-dimensional.
10431054 out : {None, dpnp.ndarray, usm_ndarray}, optional
10441055 A location where the result is stored.
1056+
10451057 Default: ``None``.
10461058
10471059 Returns
@@ -1176,9 +1188,9 @@ def tensordot(a, b, axes=2):
11761188 >>> np.tensordot(a, b, 1)
11771189 array([14, 32, 50])
11781190
1179- >>> a = np.arange(60.).reshape(3,4, 5)
1180- >>> b = np.arange(24.).reshape(4,3, 2)
1181- >>> c = np.tensordot(a,b, axes=([1,0],[0,1]))
1191+ >>> a = np.arange(60.).reshape(3, 4, 5)
1192+ >>> b = np.arange(24.).reshape(4, 3, 2)
1193+ >>> c = np.tensordot(a, b, axes=([1, 0], [0, 1]))
11821194 >>> c.shape
11831195 (5, 2)
11841196 >>> c
@@ -1190,12 +1202,12 @@ def tensordot(a, b, axes=2):
11901202
11911203 A slower but equivalent way of computing the same...
11921204
1193- >>> d = np.zeros((5,2))
1205+ >>> d = np.zeros((5, 2))
11941206 >>> for i in range(5):
11951207 ... for j in range(2):
11961208 ... for k in range(3):
11971209 ... for n in range(4):
1198- ... d[i,j] += a[k,n, i] * b[n,k, j]
1210+ ... d[i, j] += a[k, n, i] * b[n, k, j]
11991211 >>> c == d
12001212 array([[ True, True],
12011213 [ True, True],
0 commit comments