@@ -3014,16 +3014,19 @@ def batch_norm_layer(input,
3014
3014
:param input: batch normalization input. Better be linear activation.
3015
3015
Because there is an activation inside batch_normalization.
3016
3016
:type input: LayerOutput
3017
- :param batch_norm_type: We have batch_norm and cudnn_batch_norm. batch_norm
3018
- supports both CPU and GPU. cudnn_batch_norm requires
3019
- cuDNN version greater or equal to v4 (>=v4). But
3020
- cudnn_batch_norm is faster and needs less memory
3021
- than batch_norm. By default (None), we will
3022
- automaticly select cudnn_batch_norm for GPU and
3023
- batch_norm for CPU. Otherwise, select batch norm
3024
- type based on the specified type. If you use cudnn_batch_norm,
3017
+ :param batch_norm_type: We have batch_norm, mkldnn_batch_norm and cudnn_batch_norm.
3018
+ batch_norm supports CPU, MKLDNN and GPU. cudnn_batch_norm
3019
+ requires cuDNN version greater or equal to v4 (>=v4).
3020
+ But cudnn_batch_norm is faster and needs less
3021
+ memory than batch_norm. mkldnn_batch_norm requires
3022
+ enable use_mkldnn. By default (None), we will
3023
+                                          automatically select cudnn_batch_norm for GPU,
3024
+ mkldnn_batch_norm for MKLDNN and batch_norm for CPU.
3025
+ Otherwise, select batch norm type based on the
3026
+ specified type. If you use cudnn_batch_norm,
3025
3027
we suggest you use the latest version, such as v5.1.
3026
3028
:type batch_norm_type: None | string, None or "batch_norm" or "cudnn_batch_norm"
3029
+ or "mkldnn_batch_norm"
3027
3030
:param act: Activation Type. Better be relu. Because batch
3028
3031
normalization will normalize input near zero.
3029
3032
:type act: BaseActivation
@@ -3063,6 +3066,7 @@ def batch_norm_layer(input,
3063
3066
else :
3064
3067
num_channels = input .size
3065
3068
assert (batch_norm_type is None ) or (batch_norm_type == "batch_norm" ) or \
3069
+ (batch_norm_type == "mkldnn_batch_norm" ) or \
3066
3070
(batch_norm_type == "cudnn_batch_norm" )
3067
3071
l = Layer (
3068
3072
name = name ,
0 commit comments