Commit e48f7a5

update 2.0 public api in all left files (#33314)
* update 2.0 public api in all left files
* reverse device.py all list; fix some flake8 errors
1 parent: 45f8b9d

31 files changed: +186 −175 lines

python/paddle/__init__.py

Lines changed: 6 additions & 18 deletions
@@ -21,8 +21,7 @@
     import paddle from the source directory; please install paddlepaddle*.whl firstly.'''
 )
 
-import paddle.batch
-batch = batch.batch
+from .batch import batch # noqa: F401
 from .fluid import monkey_patch_variable
 from .fluid.dygraph import monkey_patch_math_varbase
 monkey_patch_variable()
@@ -135,7 +134,6 @@
 from .tensor.manipulation import squeeze_ # noqa: F401
 from .tensor.manipulation import stack # noqa: F401
 from .tensor.manipulation import strided_slice # noqa: F401
-from .tensor.manipulation import transpose # noqa: F401
 from .tensor.manipulation import unique # noqa: F401
 from .tensor.manipulation import unsqueeze # noqa: F401
 from .tensor.manipulation import unsqueeze_ # noqa: F401
@@ -191,7 +189,6 @@
 from .tensor.math import multiply # noqa: F401
 from .tensor.math import add # noqa: F401
 from .tensor.math import subtract # noqa: F401
-from .tensor.math import atan # noqa: F401
 from .tensor.math import logsumexp # noqa: F401
 from .tensor.math import inverse # noqa: F401
 from .tensor.math import log1p # noqa: F401
@@ -244,9 +241,8 @@
 from .framework import load # noqa: F401
 from .framework import DataParallel # noqa: F401
 
-from .framework import set_default_dtype #DEFINE_ALIAS
-from .framework import get_default_dtype #DEFINE_ALIAS
-from .framework import set_grad_enabled #DEFINE_ALIAS
+from .framework import set_default_dtype # noqa: F401
+from .framework import get_default_dtype # noqa: F401
 
 from .tensor.search import index_sample # noqa: F401
 from .tensor.stat import mean # noqa: F401
@@ -281,7 +277,7 @@
 from .tensor.random import check_shape # noqa: F401
 disable_static()
 
-__all__ = [ #noqa
+__all__ = [ # noqa
     'dtype',
     'uint8',
     'int8',
@@ -323,7 +319,6 @@
     'cos',
     'tan',
     'mean',
-    'XPUPlace',
     'mv',
     'in_dynamic_mode',
     'min',
@@ -360,7 +355,6 @@
     'to_tensor',
     'gather_nd',
     'isinf',
-    'set_device',
     'uniform',
     'floor_divide',
     'remainder',
@@ -384,8 +378,6 @@
     'rand',
     'less_equal',
     'triu',
-    'is_compiled_with_cuda',
-    'is_compiled_with_rocm',
     'sin',
     'dist',
     'unbind',
@@ -414,8 +406,6 @@
     'bernoulli',
     'summary',
     'sinh',
-    'is_compiled_with_xpu',
-    'is_compiled_with_npu',
     'round',
     'DataParallel',
     'argmin',
@@ -437,7 +427,6 @@
     'not_equal',
     'sum',
     'tile',
-    'get_device',
     'greater_equal',
     'isfinite',
     'create_parameter',
@@ -470,7 +459,6 @@
     'scatter_nd',
     'set_default_dtype',
     'expand_as',
-    'get_cudnn_version',
     'stack',
     'sqrt',
     'cholesky',
@@ -484,7 +472,6 @@
     'logical_not',
     'add_n',
     'minimum',
-    'ComplexTensor',
     'scatter',
     'scatter_',
     'floor',
@@ -493,5 +480,6 @@
     'log2',
     'log10',
     'concat',
-    'check_shape'
+    'check_shape',
+    'standard_normal'
 ]
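For users, the visible effect of this file is that names such as set_default_dtype and get_default_dtype are still re-exported from the top-level package (the comment just changes from #DEFINE_ALIAS to # noqa: F401), and 'standard_normal' is added to the __all__ literal. A minimal sketch of what stays callable, assuming an installed Paddle 2.x build:

    import paddle

    # Re-exports kept at the top level by this diff:
    paddle.set_default_dtype('float64')
    print(paddle.get_default_dtype())   # 'float64'

    # 'standard_normal' is now listed in this file's __all__ as well.
    x = paddle.standard_normal([2, 3])
    print(x.shape)                      # [2, 3]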

python/paddle/amp/__init__.py

Lines changed: 2 additions & 2 deletions
@@ -12,7 +12,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-from .auto_cast import auto_cast
-from .grad_scaler import GradScaler
+from .auto_cast import auto_cast # noqa: F401
+from .grad_scaler import GradScaler # noqa: F401
 
 __all__ = ['auto_cast', 'GradScaler']

python/paddle/amp/auto_cast.py

Lines changed: 1 addition & 1 deletion
@@ -14,7 +14,7 @@
 
 from paddle.fluid.dygraph.amp import amp_guard
 
-__all__ = ['auto_cast']
+__all__ = []
 
 
 def auto_cast(enable=True, custom_white_list=None, custom_black_list=None):

python/paddle/amp/grad_scaler.py

Lines changed: 1 addition & 1 deletion
@@ -14,7 +14,7 @@
 
 from paddle.fluid.dygraph.amp import AmpScaler
 
-__all__ = ['GradScaler']
+__all__ = []
 
 
 class GradScaler(AmpScaler):
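Both amp submodules now carry an empty __all__, while auto_cast and GradScaler stay reachable through paddle.amp (re-exported above). A minimal mixed-precision training sketch, assuming a CUDA-enabled build (AMP is effectively a no-op on CPU):

    import paddle

    model = paddle.nn.Linear(10, 2)
    opt = paddle.optimizer.SGD(learning_rate=0.01, parameters=model.parameters())
    scaler = paddle.amp.GradScaler(init_loss_scaling=1024)

    data = paddle.rand([4, 10])
    with paddle.amp.auto_cast():        # run the forward pass in fp16 where safe
        loss = model(data).mean()

    scaled = scaler.scale(loss)         # scale the loss to avoid fp16 underflow
    scaled.backward()
    scaler.minimize(opt, scaled)        # unscale gradients and apply the update
    opt.clear_grad()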

python/paddle/autograd/__init__.py

Lines changed: 4 additions & 5 deletions
@@ -12,10 +12,9 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-from ..fluid.dygraph.base import grad #DEFINE_ALIAS
-
-from . import backward_mode
-from .backward_mode import backward
-from .py_layer import PyLayer, PyLayerContext
+from ..fluid.dygraph.base import grad # noqa: F401
+from . import backward_mode # noqa: F401
+from .backward_mode import backward # noqa: F401
+from .py_layer import PyLayer, PyLayerContext # noqa: F401
 
 __all__ = ['grad', 'backward', 'PyLayer', 'PyLayerContext']

python/paddle/autograd/backward_mode.py

Lines changed: 1 addition & 1 deletion
@@ -15,7 +15,7 @@
 from paddle.fluid import core
 from paddle.fluid import framework
 import paddle
-__all__ = ['backward']
+__all__ = []
 
 
 @framework.dygraph_only
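The backward function itself is untouched apart from the emptied __all__; it remains reachable as paddle.autograd.backward. A small sketch of the call, assuming the (tensors, grad_tensors, retain_graph) signature this module defines in Paddle 2.x:

    import paddle

    x = paddle.to_tensor([1.0, 2.0, 3.0], stop_gradient=False)
    y = x * x

    # Accumulate dy/dx into x.grad without calling y.backward() directly.
    paddle.autograd.backward([y], grad_tensors=[paddle.ones_like(y)])
    print(x.grad)   # [2., 4., 6.]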

python/paddle/autograd/py_layer.py

Lines changed: 1 addition & 1 deletion
@@ -15,7 +15,7 @@
 import paddle
 from paddle.fluid.framework import dygraph_only
 from paddle.fluid import core
-__all__ = ['PyLayer', 'PyLayerContext']
+__all__ = []
 
 
 class PyLayerContext(object):
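Likewise, PyLayer and PyLayerContext are still exported via paddle.autograd even though py_layer.py no longer lists them. A custom-op sketch, assuming the forward/backward static-method protocol and the ctx.save_for_backward / ctx.saved_tensor helpers provided by PyLayerContext:

    import paddle
    from paddle.autograd import PyLayer

    class CusTanh(PyLayer):
        @staticmethod
        def forward(ctx, x):
            y = paddle.tanh(x)
            ctx.save_for_backward(y)      # stash tensors needed in backward
            return y

        @staticmethod
        def backward(ctx, dy):
            y, = ctx.saved_tensor()       # retrieve what forward saved
            return dy * (1 - paddle.square(y))

    x = paddle.rand([4], dtype='float32')
    x.stop_gradient = False
    out = CusTanh.apply(x)
    out.sum().backward()
    print(x.grad)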

python/paddle/batch.py

Lines changed: 4 additions & 4 deletions
@@ -12,7 +12,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-__all__ = ['batch']
+__all__ = []
 
 
 def batch(reader, batch_size, drop_last=False):
@@ -35,11 +35,11 @@ def batch(reader, batch_size, drop_last=False):
     Examples:
         .. code-block:: python
 
-            import paddle.fluid as fluid
+            import paddle
             def reader():
                 for i in range(10):
                     yield i
-            batch_reader = fluid.io.batch(reader, batch_size=2)
+            batch_reader = paddle.batch(reader, batch_size=2)
 
             for data in batch_reader():
                 print(data)
@@ -60,7 +60,7 @@ def batch_reader():
            if len(b) == batch_size:
                yield b
                b = []
-       if drop_last == False and len(b) != 0:
+       if drop_last is False and len(b) != 0:
           yield b
 
    # Batch size check
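The docstring above now points at paddle.batch instead of fluid.io.batch. For reference, a short usage sketch that also exercises the drop_last flag touched in the last hunk:

    import paddle

    def reader():
        for i in range(5):
            yield i

    # drop_last=True discards the trailing short batch ([4] here).
    for data in paddle.batch(reader, batch_size=2, drop_last=True)():
        print(data)   # [0, 1] then [2, 3]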

python/paddle/compat.py

Lines changed: 2 additions & 9 deletions
@@ -15,18 +15,11 @@
 import six
 import math
 
-__all__ = [
-    'long_type',
-    'to_text',
-    'to_bytes',
-    'round',
-    'floor_division',
-    'get_exception_message',
-]
+__all__ = []
 
 if six.PY2:
     int_type = int
-    long_type = long
+    long_type = long # noqa: F821
 else:
     int_type = int
     long_type = int

python/paddle/device.py

Lines changed: 15 additions & 21 deletions
@@ -18,21 +18,16 @@
 from paddle.fluid import core
 from paddle.fluid import framework
 from paddle.fluid.dygraph.parallel import ParallelEnv
-from paddle.fluid.framework import is_compiled_with_cuda #DEFINE_ALIAS
-from paddle.fluid.framework import is_compiled_with_rocm #DEFINE_ALIAS
+from paddle.fluid.framework import is_compiled_with_cuda # noqa: F401
+from paddle.fluid.framework import is_compiled_with_rocm # noqa: F401
 
-__all__ = [
+
+__all__ = [ # npqa
     'get_cudnn_version',
     'set_device',
     'get_device',
     'XPUPlace',
-    'is_compiled_with_xpu'
-    # 'cpu_places',
-    # 'CPUPlace',
-    # 'cuda_pinned_places',
-    # 'cuda_places',
-    # 'CUDAPinnedPlace',
-    # 'CUDAPlace',
+    'is_compiled_with_xpu',
     'is_compiled_with_cuda',
     'is_compiled_with_rocm',
     'is_compiled_with_npu'
@@ -68,7 +63,7 @@ def is_compiled_with_xpu():
         .. code-block:: python
 
             import paddle
-            support_xpu = paddle.device.is_compiled_with_xpu()
+            support_xpu = paddle.is_compiled_with_xpu()
     """
     return core.is_compiled_with_xpu()
 
@@ -82,9 +77,10 @@ def XPUPlace(dev_id):
 
     Examples:
         .. code-block:: python
-
+            # required: xpu
+
             import paddle
-            place = paddle.device.XPUPlace(0)
+            place = paddle.XPUPlace(0)
     """
     return core.XPUPlace(dev_id)
 
@@ -127,15 +123,13 @@ def _convert_to_place(device):
         place = core.CPUPlace()
     elif lower_device == 'gpu':
         if not core.is_compiled_with_cuda():
-            raise ValueError(
-                "The device should not be 'gpu', " \
-                "since PaddlePaddle is not compiled with CUDA")
+            raise ValueError("The device should not be 'gpu', "
+                             "since PaddlePaddle is not compiled with CUDA")
         place = core.CUDAPlace(ParallelEnv().dev_id)
     elif lower_device == 'xpu':
         if not core.is_compiled_with_xpu():
-            raise ValueError(
-                "The device should not be 'xpu', " \
-                "since PaddlePaddle is not compiled with XPU")
+            raise ValueError("The device should not be 'xpu', "
+                             "since PaddlePaddle is not compiled with XPU")
         selected_xpus = os.getenv("FLAGS_selected_xpus", "0").split(",")
         device_id = int(selected_xpus[0])
         place = core.XPUPlace(device_id)
@@ -149,7 +143,7 @@ def _convert_to_place(device):
         if avaliable_gpu_device:
             if not core.is_compiled_with_cuda():
                 raise ValueError(
-                    "The device should not be {}, since PaddlePaddle is " \
+                    "The device should not be {}, since PaddlePaddle is "
                     "not compiled with CUDA".format(avaliable_gpu_device))
             device_info_list = device.split(':', 1)
             device_id = device_info_list[1]
@@ -158,7 +152,7 @@ def _convert_to_place(device):
         if avaliable_xpu_device:
             if not core.is_compiled_with_xpu():
                 raise ValueError(
-                    "The device should not be {}, since PaddlePaddle is " \
+                    "The device should not be {}, since PaddlePaddle is "
                     "not compiled with XPU".format(avaliable_xpu_device))
             device_info_list = device.split(':', 1)
            device_id = device_info_list[1]