
Commit 5b540a1

Author: Yibing Liu

init custom black white list (#18377) (#18417)
test=release/1.5

1 parent 0a8b69f commit 5b540a1

File tree

5 files changed: +63 −14 lines changed

paddle/fluid/API.spec

Lines changed: 2 additions & 1 deletion
@@ -428,7 +428,8 @@ paddle.fluid.contrib.HDFSClient.upload (ArgSpec(args=['self', 'hdfs_path', 'loca
 paddle.fluid.contrib.multi_download (ArgSpec(args=['client', 'hdfs_path', 'local_path', 'trainer_id', 'trainers', 'multi_processes'], varargs=None, keywords=None, defaults=(5,)), ('document', '100927be598ed8f9eaa1f3ef1b23568a'))
 paddle.fluid.contrib.multi_upload (ArgSpec(args=['client', 'hdfs_path', 'local_path', 'multi_processes', 'overwrite', 'sync'], varargs=None, keywords=None, defaults=(5, False, True)), ('document', '183f34c83d30dbe16e09e8716c41958a'))
 paddle.fluid.contrib.extend_with_decoupled_weight_decay (ArgSpec(args=['base_optimizer'], varargs=None, keywords=None, defaults=None), ('document', 'a1095dfd4ec725747f662d69cd7659d4'))
-paddle.fluid.contrib.mixed_precision.decorate (ArgSpec(args=['optimizer', 'init_loss_scaling', 'incr_every_n_steps', 'decr_every_n_nan_or_inf', 'incr_ratio', 'decr_ratio', 'use_dynamic_loss_scaling'], varargs=None, keywords=None, defaults=(1.0, 1000, 2, 2.0, 0.8, False)), ('document', 'bdb8f9dbb0d94b3957272c53eeee9818'))
+paddle.fluid.contrib.mixed_precision.decorate (ArgSpec(args=['optimizer', 'amp_lists', 'init_loss_scaling', 'incr_every_n_steps', 'decr_every_n_nan_or_inf', 'incr_ratio', 'decr_ratio', 'use_dynamic_loss_scaling'], varargs=None, keywords=None, defaults=(None, 1.0, 1000, 2, 2.0, 0.8, False)), ('document', 'd05e71f5b0bd6d92bb94e70e00b3f9cf'))
+paddle.fluid.contrib.mixed_precision.AutoMixedPrecisionLists.__init__ (ArgSpec(args=['self', 'custom_white_list', 'custom_black_list'], varargs=None, keywords=None, defaults=(None, None)), ('document', '6adf97f83acf6453d4a6a4b1070f3754'))
 paddle.fluid.contrib.fused_elemwise_activation (ArgSpec(args=['x', 'y', 'functor_list', 'axis', 'scale', 'save_intermediate_out'], varargs=None, keywords=None, defaults=(-1, 0.0, True)), ('document', '1c4b247a2858cea8d9d8750693688270'))
 paddle.fluid.contrib.BasicGRUUnit.__init__ (ArgSpec(args=['self', 'name_scope', 'hidden_size', 'param_attr', 'bias_attr', 'gate_activation', 'activation', 'dtype'], varargs=None, keywords=None, defaults=(None, None, None, None, 'float32')), ('document', '6adf97f83acf6453d4a6a4b1070f3754'))
 paddle.fluid.contrib.BasicGRUUnit.add_parameter (ArgSpec(args=['self', 'name', 'parameter'], varargs=None, keywords=None, defaults=None), ('document', 'f35ab374c7d5165c3daf3bd64a5a2ec1'))
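The updated ArgSpec inserts `amp_lists` ahead of the loss-scaling arguments with a default of `None`, so existing call sites remain valid. A minimal sketch of both call styles (the SGD optimizer and the op name are illustrative only, not part of this commit):

import paddle.fluid as fluid
from paddle.fluid.contrib.mixed_precision import decorate, AutoMixedPrecisionLists

optimizer = fluid.optimizer.SGD(learning_rate=0.001)

# Old style still works: amp_lists defaults to None and falls back
# to the pre-defined black/white lists.
mp_optimizer = decorate(optimizer, init_loss_scaling=1.0)

# New style: supply custom lists via the new amp_lists argument.
amp_lists = AutoMixedPrecisionLists(custom_white_list={'pool2d'})
mp_optimizer = decorate(optimizer, amp_lists=amp_lists)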

python/paddle/fluid/contrib/mixed_precision/__init__.py

Lines changed: 2 additions & 0 deletions
@@ -15,5 +15,7 @@
 from __future__ import print_function
 from . import decorator
 from .decorator import *
+from .fp16_lists import AutoMixedPrecisionLists
 
 __all__ = decorator.__all__
+__all__ += fp16_lists.__all__
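A subtlety worth noting: `from .fp16_lists import AutoMixedPrecisionLists` also triggers the import of the `fp16_lists` submodule and binds it as an attribute of the package, which is why the bare name `fp16_lists` resolves in the `__all__ +=` line. After this change both names are re-exported from the package root (an illustrative check, not from the commit):

from paddle.fluid.contrib import mixed_precision

# decorate comes from decorator.__all__; AutoMixedPrecisionLists
# comes from fp16_lists.__all__; both are re-exported here.
assert 'decorate' in mixed_precision.__all__
assert 'AutoMixedPrecisionLists' in mixed_precision.__all__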

python/paddle/fluid/contrib/mixed_precision/decorator.py

Lines changed: 12 additions & 6 deletions
@@ -19,6 +19,7 @@
 from . import fp16_utils
 from .fp16_utils import create_master_params_grads, master_param_to_train_param
 from .fp16_utils import update_loss_scaling, rewrite_program
+from .fp16_lists import AutoMixedPrecisionLists
 
 __all__ = ["decorate"]
 
@@ -34,6 +35,7 @@ class OptimizerWithMixedPrecison(object):
 
     Args:
         optimizer (Optimizer): A common Optimizer object.
+        amp_lists (AutoMixedPrecisionLists): An AutoMixedPrecisionLists object.
         init_loss_scaling (float): The initial loss scaling factor.
         use_dynamic_loss_scaling (bool): Whether to use dynamic loss scaling.
         incr_every_n_steps(int): Increases loss scaling every n consecutive
@@ -48,10 +50,11 @@ class OptimizerWithMixedPrecison(object):
 
     """
 
-    def __init__(self, optimizer, init_loss_scaling, use_dynamic_loss_scaling,
-                 incr_every_n_steps, decr_every_n_nan_or_inf, incr_ratio,
-                 decr_ratio):
+    def __init__(self, optimizer, amp_lists, init_loss_scaling,
+                 use_dynamic_loss_scaling, incr_every_n_steps,
+                 decr_every_n_nan_or_inf, incr_ratio, decr_ratio):
         self._optimizer = optimizer
+        self._amp_lists = amp_lists
         self._param_grads = None
         self._train_program = default_main_program()
         self._startup_prog = default_startup_program()
@@ -120,7 +123,7 @@ def backward(self,
             A list of (param, grad), which is a tuple of a parameter and its
             gradient respectively, and the scaled loss.
         """
-        rewrite_program(self._train_program)
+        rewrite_program(self._train_program, self._amp_lists)
         scaled_loss = loss * self._loss_scaling
         self._param_grads = self._optimizer.backward(
             scaled_loss, startup_program, parameter_list, no_grad_set,
@@ -189,6 +192,7 @@ def minimize(self, loss):
 
 
 def decorate(optimizer,
+             amp_lists=None,
              init_loss_scaling=1.0,
              incr_every_n_steps=1000,
              decr_every_n_nan_or_inf=2,
@@ -200,6 +204,7 @@ def decorate(optimizer,
 
     Args:
         optimizer(Optimizer): A common Optimizer.
+        amp_lists (AutoMixedPrecisionLists): An AutoMixedPrecisionLists object.
         init_loss_scaling(float): The initial loss scaling factor.
         incr_every_n_steps(int): Increases loss scaling every n consecutive
                                  steps with finite gradients.
@@ -227,9 +232,10 @@ def decorate(optimizer,
 
         scaled_loss, _, _ = mp_optimizer.minimize(loss)
     """
-
+    if amp_lists is None:
+        amp_lists = AutoMixedPrecisionLists()
     mp_optimizer = OptimizerWithMixedPrecison(
-        optimizer, init_loss_scaling, use_dynamic_loss_scaling,
+        optimizer, amp_lists, init_loss_scaling, use_dynamic_loss_scaling,
         incr_every_n_steps, decr_every_n_nan_or_inf, incr_ratio, decr_ratio)
 
     return mp_optimizer
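Putting the pieces together, a minimal end-to-end sketch of the new flow (the toy network and the op name passed to `custom_white_list` are illustrative only, not mandated by this commit):

import paddle.fluid as fluid
from paddle.fluid.contrib import mixed_precision

# A toy fp32 network; any program would do.
data = fluid.layers.data(name='x', shape=[1, 28, 28], dtype='float32')
label = fluid.layers.data(name='y', shape=[1], dtype='int64')
probs = fluid.layers.fc(input=data, size=10, act='softmax')
loss = fluid.layers.mean(fluid.layers.cross_entropy(input=probs, label=label))

optimizer = fluid.optimizer.Momentum(learning_rate=0.001, momentum=0.9)

# Promote an op type to the white (fp16) list; 'elementwise_add' is just
# an example of an op a user might choose to promote.
amp_lists = mixed_precision.AutoMixedPrecisionLists(
    custom_white_list={'elementwise_add'})
mp_optimizer = mixed_precision.decorate(optimizer, amp_lists=amp_lists)

# As in the docstring above, minimize() also returns the scaled loss.
scaled_loss, _, _ = mp_optimizer.minimize(loss)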

python/paddle/fluid/contrib/mixed_precision/fp16_lists.py

Lines changed: 41 additions & 0 deletions
@@ -12,6 +12,47 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
+import copy
+
+__all__ = ["AutoMixedPrecisionLists"]
+
+
+class AutoMixedPrecisionLists(object):
+    """
+    AutoMixedPrecisionLists is a class for black/white lists. It can update
+    the pre-defined black and white lists according to users' custom black
+    and white lists. The lists are used by an algorithm that determines an
+    op's execution mode (fp32 or fp16).
+
+    Args:
+        custom_white_list (set): Users' custom white list.
+        custom_black_list (set): Users' custom black list.
+    """
+
+    def __init__(self, custom_white_list=None, custom_black_list=None):
+        self._custom_white_list = custom_white_list
+        self._custom_black_list = custom_black_list
+        self.white_list = copy.copy(white_list)
+        self.black_list = copy.copy(black_list)
+        self.gray_list = copy.copy(gray_list)
+        self._update_list()
+
+    def _update_list(self):
+        """
+        Update the black and white lists according to users' custom lists.
+        """
+        if self._custom_white_list:
+            for op_name in self._custom_white_list:
+                if op_name in self.black_list:
+                    self.black_list.remove(op_name)
+                self.white_list.add(op_name)
+        if self._custom_black_list:
+            for op_name in self._custom_black_list:
+                if op_name in self.white_list:
+                    self.white_list.remove(op_name)
+                self.black_list.add(op_name)
+
+
 # The three sets listed below are changed dynamically. They don't contain all
 # paddle ops currently.
 
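The update rule gives custom entries precedence over the pre-defined sets: a custom white entry is removed from the black list before being added to the white list, and vice versa. A standalone sketch of that behavior, using stand-in default sets since the module-level `white_list`/`black_list`/`gray_list` contents aren't shown in this diff:

import copy

# Stand-in defaults; the real sets live at module level in fp16_lists.py.
white_list = {'mul', 'conv2d'}
black_list = {'exp', 'softmax'}

def update_lists(custom_white=None, custom_black=None):
    """Mimic AutoMixedPrecisionLists._update_list on copies of the defaults."""
    white, black = copy.copy(white_list), copy.copy(black_list)
    for op in (custom_white or set()):
        black.discard(op)   # a custom white entry overrides a default black one
        white.add(op)
    for op in (custom_black or set()):
        white.discard(op)   # a custom black entry overrides a default white one
        black.add(op)
    return white, black

white, black = update_lists(custom_white={'softmax'}, custom_black={'mul'})
assert 'softmax' in white and 'softmax' not in black
assert 'mul' in black and 'mul' not in white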

python/paddle/fluid/contrib/mixed_precision/fp16_utils.py

Lines changed: 6 additions & 7 deletions
@@ -17,7 +17,6 @@
 from ... import core
 from ... import layers
 from ... import framework
-from .fp16_lists import black_list, white_list, gray_list
 
 
 def append_cast_op(i, o, prog):
@@ -218,7 +217,7 @@ def find_true_prev_op(ops, var_name):
     return op
 
 
-def rewrite_program(main_prog):
+def rewrite_program(main_prog, amp_lists):
     """
     Traverse all ops in current block and insert cast op according to
     which set current op belongs to.
@@ -244,11 +243,11 @@ def rewrite_program(main_prog):
     black_op_set = set()
     for i in range(len(ops)):
         op = ops[i]
-        if op.type in black_list:
+        if op.type in amp_lists.black_list:
             black_op_set.add(op)
-        elif op.type in white_list:
+        elif op.type in amp_lists.white_list:
             white_op_set.add(op)
-        elif op.type in op.type in gray_list:
+        elif op.type in amp_lists.gray_list:
             is_black_op = False
             is_white_op = False
             for in_name in op.input_names:
@@ -265,10 +264,10 @@ def rewrite_program(main_prog):
                     prev_op = in_var.op
                     # if it's one of inputs
                     if prev_op in black_op_set or \
-                            prev_op.type in black_list:
+                            prev_op.type in amp_lists.black_list:
                         is_black_op = True
                     if prev_op in white_op_set or \
-                            prev_op.type in white_list:
+                            prev_op.type in amp_lists.white_list:
                         is_white_op = True
             if is_black_op:
                 black_op_set.add(op)
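The gray-list rule is the interesting part: a gray op inherits its fp32/fp16 decision from the ops that produce its inputs, with black taking priority over white. (Incidentally, the `-` side above contained a chained-comparison typo, `op.type in op.type in gray_list`, which this commit fixes in passing.) A standalone sketch of the propagation, with toy stand-ins since Paddle's Program/op machinery isn't reproduced here:

from collections import namedtuple

# Toy stand-ins for Paddle ops: a type name plus the ops producing its inputs.
Op = namedtuple('Op', ['type', 'inputs'])

black_list = {'softmax'}
white_list = {'mul'}
gray_list = {'elementwise_add'}

def classify(ops):
    """Mimic rewrite_program's pass: gray ops follow their input producers."""
    black_op_set, white_op_set = set(), set()
    for op in ops:
        if op.type in black_list:
            black_op_set.add(op)
        elif op.type in white_list:
            white_op_set.add(op)
        elif op.type in gray_list:
            is_black = any(p in black_op_set or p.type in black_list
                           for p in op.inputs)
            is_white = any(p in white_op_set or p.type in white_list
                           for p in op.inputs)
            if is_black:          # black wins, matching the order of checks
                black_op_set.add(op)
            elif is_white:
                white_op_set.add(op)
    return black_op_set, white_op_set

mul = Op('mul', ())
add = Op('elementwise_add', (mul,))   # gray op fed by a white op
blacks, whites = classify([mul, add])
assert add in whites and add not in blacks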
