Skip to content

Commit 45f8b9d

Browse files
authored
update 2.0 public api in vision (#33307)
* update 2.0 public api in vision
* fix some flake8 errors
1 parent 9567cbd commit 45f8b9d

25 files changed

+236
-119
lines changed

python/paddle/hapi/callbacks.py

Lines changed: 5 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -324,7 +324,7 @@ class ProgBarLogger(Callback):
324324
])
325325
train_dataset = MNIST(mode='train', transform=transform)
326326
327-
lenet = paddle.vision.LeNet()
327+
lenet = paddle.vision.models.LeNet()
328328
model = paddle.Model(lenet,
329329
inputs, labels)
330330
@@ -554,7 +554,7 @@ class ModelCheckpoint(Callback):
554554
])
555555
train_dataset = MNIST(mode='train', transform=transform)
556556
557-
lenet = paddle.vision.LeNet()
557+
lenet = paddle.vision.models.LeNet()
558558
model = paddle.Model(lenet,
559559
inputs, labels)
560560
@@ -614,7 +614,7 @@ class LRScheduler(Callback):
614614
])
615615
train_dataset = paddle.vision.datasets.MNIST(mode='train', transform=transform)
616616
617-
lenet = paddle.vision.LeNet()
617+
lenet = paddle.vision.models.LeNet()
618618
model = paddle.Model(lenet,
619619
inputs, labels)
620620
@@ -630,7 +630,7 @@ def make_optimizer(parameters=None):
630630
boundaries=boundaries, values=values)
631631
learning_rate = paddle.optimizer.lr.LinearWarmup(
632632
learning_rate=learning_rate,
633-
warmup_steps=wamup_epochs,
633+
warmup_steps=wamup_steps,
634634
start_lr=base_lr / 5.,
635635
end_lr=base_lr,
636636
verbose=True)
@@ -856,7 +856,7 @@ class VisualDL(Callback):
856856
train_dataset = paddle.vision.datasets.MNIST(mode='train', transform=transform)
857857
eval_dataset = paddle.vision.datasets.MNIST(mode='test', transform=transform)
858858
859-
net = paddle.vision.LeNet()
859+
net = paddle.vision.models.LeNet()
860860
model = paddle.Model(net, inputs, labels)
861861
862862
optim = paddle.optimizer.Adam(0.001, parameters=net.parameters())

python/paddle/hapi/model.py

Lines changed: 34 additions & 25 deletions
Original file line numberDiff line numberDiff line change
@@ -30,20 +30,28 @@
3030
import paddle
3131
from paddle import fluid
3232
from paddle.fluid import core
33-
from paddle.fluid.framework import in_dygraph_mode, Variable, ParamBase, _current_expected_place
34-
from paddle.fluid.framework import in_dygraph_mode, Variable, _get_paddle_place
33+
from paddle.fluid.framework import in_dygraph_mode
34+
from paddle.fluid.framework import Variable
35+
from paddle.fluid.framework import ParamBase
36+
from paddle.fluid.framework import _current_expected_place
37+
from paddle.fluid.framework import _get_paddle_place
3538
from paddle.fluid.framework import _current_expected_place as _get_device
3639
from paddle.fluid.executor import global_scope
3740
from paddle.fluid.io import is_belong_to_optimizer
3841
from paddle.fluid.dygraph.base import to_variable
3942
from paddle.fluid.dygraph.parallel import ParallelEnv
40-
from paddle.fluid.dygraph.dygraph_to_static.program_translator import ProgramTranslator, FunctionSpec
41-
from paddle.fluid.dygraph.io import INFER_MODEL_SUFFIX, INFER_PARAMS_SUFFIX
43+
from paddle.fluid.dygraph.dygraph_to_static.program_translator import ProgramTranslator
44+
from paddle.fluid.dygraph.dygraph_to_static.program_translator import FunctionSpec
45+
from paddle.fluid.dygraph.io import INFER_MODEL_SUFFIX
46+
from paddle.fluid.dygraph.io import INFER_PARAMS_SUFFIX
4247
from paddle.fluid.layers.utils import flatten
4348
from paddle.fluid.layers import collective
4449

45-
from paddle.io import DataLoader, Dataset, DistributedBatchSampler
46-
from paddle.fluid.executor import scope_guard, Executor
50+
from paddle.io import DataLoader
51+
from paddle.io import Dataset
52+
from paddle.io import DistributedBatchSampler
53+
from paddle.fluid.executor import scope_guard
54+
from paddle.fluid.executor import Executor
4755
from paddle.fluid.dygraph.layers import Layer
4856
from paddle.metric import Metric
4957
from paddle.static import InputSpec as Input
@@ -166,7 +174,6 @@ def init_communicator(program, rank, nranks, wait_port, current_endpoint,
166174
name=unique_name.generate('hccl_id'),
167175
persistable=True,
168176
type=core.VarDesc.VarType.RAW)
169-
endpoint_to_index_map = {e: idx for idx, e in enumerate(endpoints)}
170177
block.append_op(
171178
type='c_gen_hccl_id',
172179
inputs={},
@@ -710,10 +717,10 @@ def train_batch(self, inputs, labels=None):
710717
enable=self._amp_level != 'O0', **self._amp_custom_lists):
711718
if self._nranks > 1:
712719
outputs = self.ddp_model.forward(
713-
* [to_variable(x) for x in inputs])
720+
*[to_variable(x) for x in inputs])
714721
else:
715722
outputs = self.model.network.forward(
716-
* [to_variable(x) for x in inputs])
723+
*[to_variable(x) for x in inputs])
717724

718725
losses = self.model._loss(*(to_list(outputs) + labels))
719726
losses = to_list(losses)
@@ -732,7 +739,7 @@ def train_batch(self, inputs, labels=None):
732739
metrics = []
733740
for metric in self.model._metrics:
734741
metric_outs = metric.compute(*(to_list(outputs) + labels))
735-
m = metric.update(* [to_numpy(m) for m in to_list(metric_outs)])
742+
m = metric.update(*[to_numpy(m) for m in to_list(metric_outs)])
736743
metrics.append(m)
737744

738745
return ([to_numpy(l) for l in losses], metrics) \
@@ -746,7 +753,7 @@ def eval_batch(self, inputs, labels=None):
746753
labels = labels or []
747754
labels = [to_variable(l) for l in to_list(labels)]
748755

749-
outputs = self.model.network.forward(* [to_variable(x) for x in inputs])
756+
outputs = self.model.network.forward(*[to_variable(x) for x in inputs])
750757
if self.model._loss:
751758
losses = self.model._loss(*(to_list(outputs) + labels))
752759
losses = to_list(losses)
@@ -777,7 +784,7 @@ def eval_batch(self, inputs, labels=None):
777784
self._merge_count[self.mode + '_batch'] = samples
778785

779786
metric_outs = metric.compute(*(to_list(outputs) + labels))
780-
m = metric.update(* [to_numpy(m) for m in to_list(metric_outs)])
787+
m = metric.update(*[to_numpy(m) for m in to_list(metric_outs)])
781788
metrics.append(m)
782789

783790
if self.model._loss and len(metrics):
@@ -1363,8 +1370,9 @@ def _check_pure_fp16_configs():
13631370
# pure float16 training has some restricts now
13641371
if self._adapter._amp_level == "O2":
13651372
if in_dygraph_mode():
1366-
warnings.warn("Pure float16 training is not supported in dygraph mode now, "\
1367-
"and it will be supported in future version.")
1373+
warnings.warn(
1374+
"Pure float16 training is not supported in dygraph mode now, and it will be supported in future version."
1375+
)
13681376
else:
13691377
# grad clip is not supported in pure fp16 training now
13701378
assert self._optimizer._grad_clip is None, \
@@ -1398,8 +1406,7 @@ def _check_pure_fp16_configs():
13981406

13991407
if 'use_pure_fp16' in amp_configs:
14001408
raise ValueError(
1401-
"''use_pure_fp16' is an invalid parameter, "
1402-
"the level of mixed precision training only depends on 'O1' or 'O2'."
1409+
"'use_pure_fp16' is an invalid parameter, the level of mixed precision training only depends on 'O1' or 'O2'."
14031410
)
14041411

14051412
_check_pure_fp16_configs()
@@ -1427,9 +1434,8 @@ def _check_amp_configs(amp_config_key_set):
14271434
}
14281435
if amp_config_key_set - accepted_param_set:
14291436
raise ValueError(
1430-
"Except for 'level', the keys of 'amp_configs' must be accepted by mixed precision APIs, "
1431-
"but {} could not be recognized.".format(
1432-
tuple(amp_config_key_set - accepted_param_set)))
1437+
"Except for 'level', the keys of 'amp_configs' must be accepted by mixed precision APIs, but {} could not be recognized.".
1438+
format(tuple(amp_config_key_set - accepted_param_set)))
14331439

14341440
if 'use_fp16_guard' in amp_config_key_set:
14351441
if in_dygraph_mode():
@@ -1501,8 +1507,9 @@ def prepare(self, optimizer=None, loss=None, metrics=None,
15011507
self._optimizer = optimizer
15021508
if loss is not None:
15031509
if not isinstance(loss, paddle.nn.Layer) and not callable(loss):
1504-
raise TypeError("'loss' must be sub classes of " \
1505-
"`paddle.nn.Layer` or any callable function.")
1510+
raise TypeError(
1511+
"'loss' must be sub classes of `paddle.nn.Layer` or any callable function."
1512+
)
15061513
self._loss = loss
15071514

15081515
metrics = metrics or []
@@ -2080,7 +2087,7 @@ def summary(self, input_size=None, dtype=None):
20802087
input = InputSpec([None, 1, 28, 28], 'float32', 'image')
20812088
label = InputSpec([None, 1], 'int64', 'label')
20822089
2083-
model = paddle.Model(paddle.vision.LeNet(),
2090+
model = paddle.Model(paddle.vision.models.LeNet(),
20842091
input, label)
20852092
optim = paddle.optimizer.Adam(
20862093
learning_rate=0.001, parameters=model.parameters())
@@ -2122,9 +2129,11 @@ def _verify_spec(self, specs, shapes=None, dtypes=None, is_input=False):
21222129
else:
21232130
out_specs = to_list(specs)
21242131
elif isinstance(specs, dict):
2125-
assert is_input == False
2126-
out_specs = [specs[n] \
2127-
for n in extract_args(self.network.forward) if n != 'self']
2132+
assert is_input is False
2133+
out_specs = [
2134+
specs[n] for n in extract_args(self.network.forward)
2135+
if n != 'self'
2136+
]
21282137
else:
21292138
out_specs = to_list(specs)
21302139
# Note: checks each element has specificed `name`.

python/paddle/metric/metrics.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -222,7 +222,7 @@ class Accuracy(Metric):
222222
transform = T.Compose([T.Transpose(), T.Normalize([127.5], [127.5])])
223223
train_dataset = MNIST(mode='train', transform=transform)
224224
225-
model = paddle.Model(paddle.vision.LeNet(), input, label)
225+
model = paddle.Model(paddle.vision.models.LeNet(), input, label)
226226
optim = paddle.optimizer.Adam(
227227
learning_rate=0.001, parameters=model.parameters())
228228
model.prepare(

python/paddle/tests/test_callback_visualdl.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -55,7 +55,7 @@ def test_visualdl_callback(self):
5555
train_dataset = MnistDataset(mode='train', transform=transform)
5656
eval_dataset = MnistDataset(mode='test', transform=transform)
5757

58-
net = paddle.vision.LeNet()
58+
net = paddle.vision.models.LeNet()
5959
model = paddle.Model(net, inputs, labels)
6060

6161
optim = paddle.optimizer.Adam(0.001, parameters=net.parameters())

python/paddle/vision/__init__.py

Lines changed: 50 additions & 13 deletions
Original file line numberDiff line numberDiff line change
@@ -11,22 +11,59 @@
1111
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
1212
# See the License for the specific language governing permissions and
1313
# limitations under the License.
14+
import paddle
15+
import paddle.nn as nn
16+
from . import models # noqa: F401
17+
from . import transforms # noqa: F401
18+
from . import datasets # noqa: F401
19+
from . import ops # noqa: F401
20+
from .image import set_image_backend # noqa: F401
21+
from .image import get_image_backend # noqa: F401
22+
from .image import image_load # noqa: F401
23+
from .models import LeNet as models_LeNet
24+
import paddle.utils.deprecated as deprecated
1425

15-
from . import models
16-
from .models import *
26+
__all__ = [ #noqa
27+
'set_image_backend', 'get_image_backend', 'image_load'
28+
]
1729

18-
from . import transforms
19-
from .transforms import *
2030

21-
from . import datasets
22-
from .datasets import *
31+
class LeNet(models_LeNet):
32+
"""LeNet model from
33+
`"LeCun Y, Bottou L, Bengio Y, et al. Gradient-based learning applied to document recognition[J]. Proceedings of the IEEE, 1998, 86(11): 2278-2324.`_
2334
24-
from . import image
25-
from .image import *
35+
Args:
36+
num_classes (int): output dim of last fc layer. If num_classes <=0, last fc layer
37+
will not be defined. Default: 10.
2638
27-
from . import ops
39+
Examples:
40+
.. code-block:: python
2841
29-
__all__ = models.__all__ \
30-
+ transforms.__all__ \
31-
+ datasets.__all__ \
32-
+ image.__all__
42+
from paddle.vision.models import LeNet
43+
44+
model = LeNet()
45+
"""
46+
47+
@deprecated(
48+
since="2.0.0",
49+
update_to="paddle.vision.models.LeNet",
50+
level=1,
51+
reason="Please use new API in models, paddle.vision.LeNet will be removed in future"
52+
)
53+
def __init__(self, num_classes=10):
54+
super(LeNet, self).__init__(num_classes=10)
55+
self.num_classes = num_classes
56+
self.features = nn.Sequential(
57+
nn.Conv2D(
58+
1, 6, 3, stride=1, padding=1),
59+
nn.ReLU(),
60+
nn.MaxPool2D(2, 2),
61+
nn.Conv2D(
62+
6, 16, 5, stride=1, padding=0),
63+
nn.ReLU(),
64+
nn.MaxPool2D(2, 2))
65+
66+
if num_classes > 0:
67+
self.fc = nn.Sequential(
68+
nn.Linear(400, 120),
69+
nn.Linear(120, 84), nn.Linear(84, num_classes))

python/paddle/vision/datasets/__init__.py

Lines changed: 18 additions & 16 deletions
Original file line numberDiff line numberDiff line change
@@ -12,20 +12,22 @@
1212
# See the License for the specific language governing permissions and
1313
# limitations under the License.
1414

15-
from . import folder
16-
from . import mnist
17-
from . import flowers
18-
from . import cifar
19-
from . import voc2012
15+
from .folder import DatasetFolder # noqa: F401
16+
from .folder import ImageFolder # noqa: F401
17+
from .mnist import MNIST # noqa: F401
18+
from .mnist import FashionMNIST # noqa: F401
19+
from .flowers import Flowers # noqa: F401
20+
from .cifar import Cifar10 # noqa: F401
21+
from .cifar import Cifar100 # noqa: F401
22+
from .voc2012 import VOC2012 # noqa: F401
2023

21-
from .folder import *
22-
from .mnist import *
23-
from .flowers import *
24-
from .cifar import *
25-
from .voc2012 import *
26-
27-
__all__ = folder.__all__ \
28-
+ mnist.__all__ \
29-
+ flowers.__all__ \
30-
+ cifar.__all__ \
31-
+ voc2012.__all__
24+
__all__ = [ #noqa
25+
'DatasetFolder'
26+
'ImageFolder',
27+
'MNIST',
28+
'FashionMNIST',
29+
'Flowers',
30+
'Cifar10',
31+
'Cifar100',
32+
'VOC2012'
33+
]

python/paddle/vision/datasets/cifar.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -24,7 +24,7 @@
2424
from paddle.io import Dataset
2525
from paddle.dataset.common import _check_exists_and_download
2626

27-
__all__ = ['Cifar10', 'Cifar100']
27+
__all__ = []
2828

2929
URL_PREFIX = 'https://dataset.bj.bcebos.com/cifar/'
3030
CIFAR10_URL = URL_PREFIX + 'cifar-10-python.tar.gz'

python/paddle/vision/datasets/flowers.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -25,7 +25,7 @@
2525
from paddle.utils import try_import
2626
from paddle.dataset.common import _check_exists_and_download
2727

28-
__all__ = ["Flowers"]
28+
__all__ = []
2929

3030
DATA_URL = 'http://paddlemodels.bj.bcebos.com/flowers/102flowers.tgz'
3131
LABEL_URL = 'http://paddlemodels.bj.bcebos.com/flowers/imagelabels.mat'

python/paddle/vision/datasets/folder.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -20,7 +20,7 @@
2020
from paddle.io import Dataset
2121
from paddle.utils import try_import
2222

23-
__all__ = ["DatasetFolder", "ImageFolder"]
23+
__all__ = []
2424

2525

2626
def has_valid_extension(filename, extensions):

python/paddle/vision/datasets/mnist.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -24,7 +24,7 @@
2424
from paddle.io import Dataset
2525
from paddle.dataset.common import _check_exists_and_download
2626

27-
__all__ = ["MNIST", "FashionMNIST"]
27+
__all__ = []
2828

2929

3030
class MNIST(Dataset):

0 commit comments

Comments (0)