import paddle
from paddle import fluid
from paddle.fluid import core
- from paddle.fluid.framework import in_dygraph_mode, Variable, ParamBase, _current_expected_place
- from paddle.fluid.framework import in_dygraph_mode, Variable, _get_paddle_place
+ from paddle.fluid.framework import in_dygraph_mode
+ from paddle.fluid.framework import Variable
+ from paddle.fluid.framework import ParamBase
+ from paddle.fluid.framework import _current_expected_place
+ from paddle.fluid.framework import _get_paddle_place
from paddle.fluid.framework import _current_expected_place as _get_device
from paddle.fluid.executor import global_scope
from paddle.fluid.io import is_belong_to_optimizer
from paddle.fluid.dygraph.base import to_variable
from paddle.fluid.dygraph.parallel import ParallelEnv
- from paddle.fluid.dygraph.dygraph_to_static.program_translator import ProgramTranslator, FunctionSpec
- from paddle.fluid.dygraph.io import INFER_MODEL_SUFFIX, INFER_PARAMS_SUFFIX
+ from paddle.fluid.dygraph.dygraph_to_static.program_translator import ProgramTranslator
+ from paddle.fluid.dygraph.dygraph_to_static.program_translator import FunctionSpec
+ from paddle.fluid.dygraph.io import INFER_MODEL_SUFFIX
+ from paddle.fluid.dygraph.io import INFER_PARAMS_SUFFIX
from paddle.fluid.layers.utils import flatten
from paddle.fluid.layers import collective

- from paddle.io import DataLoader, Dataset, DistributedBatchSampler
- from paddle.fluid.executor import scope_guard, Executor
+ from paddle.io import DataLoader
+ from paddle.io import Dataset
+ from paddle.io import DistributedBatchSampler
+ from paddle.fluid.executor import scope_guard
+ from paddle.fluid.executor import Executor
from paddle.fluid.dygraph.layers import Layer
from paddle.metric import Metric
from paddle.static import InputSpec as Input
@@ -166,7 +174,6 @@ def init_communicator(program, rank, nranks, wait_port, current_endpoint,
name=unique_name.generate('hccl_id'),
persistable=True,
type=core.VarDesc.VarType.RAW)
- endpoint_to_index_map = {e: idx for idx, e in enumerate(endpoints)}
block.append_op(
type='c_gen_hccl_id',
inputs={},
@@ -710,10 +717,10 @@ def train_batch(self, inputs, labels=None):
enable=self._amp_level != 'O0', **self._amp_custom_lists):
if self._nranks > 1:
outputs = self.ddp_model.forward(
- *[to_variable(x) for x in inputs])
+ *[to_variable(x) for x in inputs])
else:
outputs = self.model.network.forward(
- *[to_variable(x) for x in inputs])
+ *[to_variable(x) for x in inputs])

losses = self.model._loss(*(to_list(outputs) + labels))
losses = to_list(losses)
@@ -732,7 +739,7 @@ def train_batch(self, inputs, labels=None):
metrics = []
for metric in self.model._metrics:
metric_outs = metric.compute(*(to_list(outputs) + labels))
- m = metric.update(*[to_numpy(m) for m in to_list(metric_outs)])
+ m = metric.update(*[to_numpy(m) for m in to_list(metric_outs)])
metrics.append(m)

return ([to_numpy(l) for l in losses], metrics) \
@@ -746,7 +753,7 @@ def eval_batch(self, inputs, labels=None):
labels = labels or []
labels = [to_variable(l) for l in to_list(labels)]

- outputs = self.model.network.forward(*[to_variable(x) for x in inputs])
+ outputs = self.model.network.forward(*[to_variable(x) for x in inputs])
if self.model._loss:
losses = self.model._loss(*(to_list(outputs) + labels))
losses = to_list(losses)
@@ -777,7 +784,7 @@ def eval_batch(self, inputs, labels=None):
self._merge_count[self.mode + '_batch'] = samples

metric_outs = metric.compute(*(to_list(outputs) + labels))
- m = metric.update(*[to_numpy(m) for m in to_list(metric_outs)])
+ m = metric.update(*[to_numpy(m) for m in to_list(metric_outs)])
metrics.append(m)

if self.model._loss and len(metrics):
@@ -1363,8 +1370,9 @@ def _check_pure_fp16_configs():
# pure float16 training has some restricts now
if self._adapter._amp_level == "O2":
if in_dygraph_mode():
- warnings.warn("Pure float16 training is not supported in dygraph mode now, " \
- "and it will be supported in future version.")
+ warnings.warn(
+ "Pure float16 training is not supported in dygraph mode now, and it will be supported in future version."
+ )
else:
# grad clip is not supported in pure fp16 training now
assert self._optimizer._grad_clip is None, \
@@ -1398,8 +1406,7 @@ def _check_pure_fp16_configs():
if 'use_pure_fp16' in amp_configs:
raise ValueError(
- "''use_pure_fp16' is an invalid parameter, "
- "the level of mixed precision training only depends on 'O1' or 'O2'."
+ "'use_pure_fp16' is an invalid parameter, the level of mixed precision training only depends on 'O1' or 'O2'."
)

_check_pure_fp16_configs()
@@ -1427,9 +1434,8 @@ def _check_amp_configs(amp_config_key_set):
}
if amp_config_key_set - accepted_param_set:
raise ValueError(
- "Except for 'level', the keys of 'amp_configs' must be accepted by mixed precision APIs, "
- "but {} could not be recognized.".format(
- tuple(amp_config_key_set - accepted_param_set)))
+ "Except for 'level', the keys of 'amp_configs' must be accepted by mixed precision APIs, but {} could not be recognized.".
+ format(tuple(amp_config_key_set - accepted_param_set)))

if 'use_fp16_guard' in amp_config_key_set:
if in_dygraph_mode():
@@ -1501,8 +1507,9 @@ def prepare(self, optimizer=None, loss=None, metrics=None,
self._optimizer = optimizer
if loss is not None:
if not isinstance(loss, paddle.nn.Layer) and not callable(loss):
- raise TypeError("'loss' must be sub classes of " \
- "`paddle.nn.Layer` or any callable function.")
+ raise TypeError(
+ "'loss' must be sub classes of `paddle.nn.Layer` or any callable function."
+ )
self._loss = loss

metrics = metrics or []
@@ -2080,7 +2087,7 @@ def summary(self, input_size=None, dtype=None):
input = InputSpec([None, 1, 28, 28], 'float32', 'image')
label = InputSpec([None, 1], 'int64', 'label')

- model = paddle.Model(paddle.vision.LeNet(),
+ model = paddle.Model(paddle.vision.models.LeNet(),
input, label)
optim = paddle.optimizer.Adam(
learning_rate=0.001, parameters=model.parameters())
@@ -2122,9 +2129,11 @@ def _verify_spec(self, specs, shapes=None, dtypes=None, is_input=False):
else:
out_specs = to_list(specs)
elif isinstance(specs, dict):
- assert is_input == False
- out_specs = [specs[n] \
- for n in extract_args(self.network.forward) if n != 'self']
+ assert is_input is False
+ out_specs = [
+ specs[n] for n in extract_args(self.network.forward)
+ if n != 'self'
+ ]
else:
out_specs = to_list(specs)
# Note: checks each element has specified `name`.