Skip to content

Commit 4d7eb09

Browse files
committed
add python interface of mkldnn_batch_norm
1 parent ad6b531 commit 4d7eb09

File tree

2 files changed

+22
-11
lines changed

python/paddle/trainer/config_parser.py

Lines changed: 10 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -2420,6 +2420,7 @@ def __init__(self,
24202420
# If not use is_static, even set learning_rate = 0, decay_rate = 0,
24212421
# these paras will change if set average_window in configure.
24222422
use_gpu = bool(int(g_command_config_args.get("use_gpu", 0)))
2423+
use_mkldnn = bool(int(g_command_config_args.get("use_mkldnn", 0)))
24232424
is_shared = True if not use_gpu else False
24242425
for i in xrange(2):
24252426
inputs.append(
@@ -2433,11 +2434,17 @@ def __init__(self,
24332434

24342435
parallel_nn = bool(int(g_command_config_args.get("parallel_nn", 0)))
24352436
cudnn_version = int(g_command_config_args.get("cudnn_version", 0))
2436-
# Automatically select cudnn_batch_norm for GPU and batch_norm for CPU.
2437-
# Also based on cudnn version.
2437+
# Automatically select cudnn_batch_norm for GPU, batch_norm for CPU
2438+
# and mkldnn_batch_norm for MKLDNN. Also based on cudnn version.
2439+
if batch_norm_type == "mkldnn_batch_norm":
2440+
config_assert(use_mkldnn, "mkldnn_batch_norm only support MKLDNN")
24382441
use_cudnn = use_gpu and batch_norm_type != "batch_norm" and \
2442+
not use_mkldnn and batch_norm_type != "mkldnn_batch_norm" and \
24392443
((not parallel_nn) or self.config.device > -1)
2440-
self.layer_type = "cudnn_batch_norm" if use_cudnn else "batch_norm"
2444+
if use_cudnn:
2445+
self.layer_type = "cudnn_batch_norm"
2446+
else:
2447+
self.layer_type = "mkldnn_batch_norm" if use_mkldnn else "batch_norm"
24412448
super(BatchNormLayer, self).__init__(
24422449
name, self.layer_type, 0, inputs=inputs, **xargs)
24432450

python/paddle/trainer_config_helpers/layers.py

Lines changed: 12 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -3014,16 +3014,19 @@ def batch_norm_layer(input,
30143014
:param input: batch normalization input. Better be linear activation.
30153015
Because there is an activation inside batch_normalization.
30163016
:type input: LayerOutput
3017-
:param batch_norm_type: We have batch_norm and cudnn_batch_norm. batch_norm
3018-
supports both CPU and GPU. cudnn_batch_norm requires
3019-
cuDNN version greater or equal to v4 (>=v4). But
3020-
cudnn_batch_norm is faster and needs less memory
3021-
than batch_norm. By default (None), we will
3022-
automaticly select cudnn_batch_norm for GPU and
3023-
batch_norm for CPU. Otherwise, select batch norm
3024-
type based on the specified type. If you use cudnn_batch_norm,
3017+
:param batch_norm_type: We have batch_norm, mkldnn_batch_norm and cudnn_batch_norm.
3018+
batch_norm supports CPU, MKLDNN and GPU. cudnn_batch_norm
3019+
requires cuDNN version greater or equal to v4 (>=v4).
3020+
But cudnn_batch_norm is faster and needs less
3021+
memory than batch_norm. mkldnn_batch_norm requires
3022+
enable use_mkldnn. By default (None), we will
3023+
automaticly select cudnn_batch_norm for GPU,
3024+
mkldnn_batch_norm for MKLDNN and batch_norm for CPU.
3025+
Otherwise, select batch norm type based on the
3026+
specified type. If you use cudnn_batch_norm,
30253027
we suggested you use latest version, such as v5.1.
30263028
:type batch_norm_type: None | string, None or "batch_norm" or "cudnn_batch_norm"
3029+
or "mkldnn_batch_norm"
30273030
:param act: Activation Type. Better be relu. Because batch
30283031
normalization will normalize input near zero.
30293032
:type act: BaseActivation
@@ -3063,6 +3066,7 @@ def batch_norm_layer(input,
30633066
else:
30643067
num_channels = input.size
30653068
assert (batch_norm_type is None) or (batch_norm_type == "batch_norm") or \
3069+
(batch_norm_type == "mkldnn_batch_norm") or \
30663070
(batch_norm_type == "cudnn_batch_norm")
30673071
l = Layer(
30683072
name=name,

0 commit comments

Comments (0)