 from .. import _logging as logging
 from .core import *
 
+from ..deprecation import deprecated_alias
+
 __all__ = [
     'BinaryDenseLayer',
     'BinaryConv2d',
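For context, the `deprecated_alias` decorator imported here is what lets the old `layer=` keyword keep working until 1.9. Its implementation is not part of this diff; a minimal sketch, assuming it simply renames deprecated keyword arguments and warns, could look like this:

```python
# Hypothetical sketch of ..deprecation.deprecated_alias -- the real
# implementation lives outside this diff; only the call signature
# (layer='prev_layer', end_support_version=1.9) is taken from it.
import functools
import warnings


def deprecated_alias(end_support_version, **aliases):
    def decorator(func):
        @functools.wraps(func)
        def wrapper(*args, **kwargs):
            for old_name, new_name in aliases.items():
                if old_name in kwargs:  # caller used the deprecated keyword
                    warnings.warn(
                        "`%s` is deprecated and will be removed after version %s; use `%s` instead." %
                        (old_name, end_support_version, new_name), DeprecationWarning)
                    kwargs[new_name] = kwargs.pop(old_name)  # forward under the new name
            return func(*args, **kwargs)
        return wrapper
    return decorator
```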
@@ -124,6 +126,7 @@ class BinaryDenseLayer(Layer):
 
     """
 
+    @deprecated_alias(layer='prev_layer', end_support_version=1.9)  # TODO remove this line for the 1.9 release
     def __init__(
             self,
             prev_layer,
@@ -136,13 +139,16 @@ def __init__(
             b_init_args=None,
             name='binary_dense',
     ):
+        super(BinaryDenseLayer, self).__init__(prev_layer=prev_layer, name=name)
+        logging.info("BinaryDenseLayer %s: %d %s" % (name, n_units, act.__name__))
+
+        self.inputs = prev_layer.outputs
+
         if W_init_args is None:
             W_init_args = {}
         if b_init_args is None:
             b_init_args = {}
 
-        Layer.__init__(self, prev_layer=prev_layer, name=name)
-        self.inputs = prev_layer.outputs
         if self.inputs.get_shape().ndims != 2:
             raise Exception("The input dimension must be rank 2, please reshape or flatten it")
 
@@ -151,7 +157,7 @@ def __init__(
 
         n_in = int(self.inputs.get_shape()[-1])
         self.n_units = n_units
-        logging.info("BinaryDenseLayer %s: %d %s" % (self.name, self.n_units, act.__name__))
+
         with tf.variable_scope(name):
             W = tf.get_variable(name='W', shape=(n_in, n_units), initializer=W_init, dtype=LayersConfig.tf_dtype, **W_init_args)
             # W = tl.act.sign(W) # dont update ...
@@ -228,6 +234,7 @@ class BinaryConv2d(Layer):
 
     """
 
+    @deprecated_alias(layer='prev_layer', end_support_version=1.9)  # TODO remove this line for the 1.9 release
     def __init__(
             self,
             prev_layer,
@@ -255,20 +262,20 @@ def __init__(
             # data_format=None,
             name='binary_cnn2d',
     ):
+        super(BinaryConv2d, self).__init__(prev_layer=prev_layer, name=name)
+        logging.info("BinaryConv2d %s: n_filter:%d filter_size:%s strides:%s pad:%s act:%s" % (name, n_filter, str(filter_size), str(strides), padding,
+                                                                                               act.__name__))
+
+        self.inputs = prev_layer.outputs
+
         if W_init_args is None:
             W_init_args = {}
         if b_init_args is None:
             b_init_args = {}
-
-        if use_gemm:
-            raise Exception("TODO. The current version use tf.matmul for inferencing.")
-
-        Layer.__init__(self, prev_layer=prev_layer, name=name)
-        self.inputs = prev_layer.outputs
         if act is None:
             act = tf.identity
-        logging.info("BinaryConv2d %s: n_filter:%d filter_size:%s strides:%s pad:%s act:%s" % (self.name, n_filter, str(filter_size), str(strides), padding,
-                                                                                               act.__name__))
+        if use_gemm:
+            raise Exception("TODO. The current version use tf.matmul for inferencing.")
 
         if len(strides) != 2:
             raise ValueError("len(strides) should be 2.")
@@ -324,6 +331,7 @@ class TernaryDenseLayer(Layer):
 
     """
 
+    @deprecated_alias(layer='prev_layer', end_support_version=1.9)  # TODO remove this line for the 1.9 release
     def __init__(
             self,
             prev_layer,
@@ -336,22 +344,24 @@ def __init__(
             b_init_args=None,
             name='ternary_dense',
     ):
+        super(TernaryDenseLayer, self).__init__(prev_layer=prev_layer, name=name)
+        logging.info("TernaryDenseLayer %s: %d %s" % (name, n_units, act.__name__))
+
+        self.inputs = prev_layer.outputs
+
         if W_init_args is None:
             W_init_args = {}
         if b_init_args is None:
             b_init_args = {}
 
-        Layer.__init__(self, prev_layer=prev_layer, name=name)
-        self.inputs = prev_layer.outputs
         if self.inputs.get_shape().ndims != 2:
             raise Exception("The input dimension must be rank 2, please reshape or flatten it")
-
         if use_gemm:
             raise Exception("TODO. The current version use tf.matmul for inferencing.")
 
         n_in = int(self.inputs.get_shape()[-1])
         self.n_units = n_units
-        logging.info("TernaryDenseLayer %s: %d %s" % (self.name, self.n_units, act.__name__))
+
         with tf.variable_scope(name):
             W = tf.get_variable(name='W', shape=(n_in, n_units), initializer=W_init, dtype=LayersConfig.tf_dtype, **W_init_args)
             # W = tl.act.sign(W) # dont update ...
@@ -430,6 +440,7 @@ class TernaryConv2d(Layer):
 
     """
 
+    @deprecated_alias(layer='prev_layer', end_support_version=1.9)  # TODO remove this line for the 1.9 release
     def __init__(
             self,
             prev_layer,
@@ -457,20 +468,18 @@ def __init__(
             # data_format=None,
             name='ternary_cnn2d',
     ):
+        super(TernaryConv2d, self).__init__(prev_layer=prev_layer, name=name)
+        logging.info("TernaryConv2d %s: n_filter:%d filter_size:%s strides:%s pad:%s act:%s" % (name, n_filter, str(filter_size), str(strides), padding,
+                                                                                                act.__name__))
+
         if W_init_args is None:
             W_init_args = {}
         if b_init_args is None:
             b_init_args = {}
-
-        if use_gemm:
-            raise Exception("TODO. The current version use tf.matmul for inferencing.")
-
-        Layer.__init__(self, prev_layer=prev_layer, name=name)
-        self.inputs = prev_layer.outputs
         if act is None:
             act = tf.identity
-        logging.info("TernaryConv2d %s: n_filter:%d filter_size:%s strides:%s pad:%s act:%s" % (self.name, n_filter, str(filter_size), str(strides), padding,
-                                                                                                act.__name__))
+        if use_gemm:
+            raise Exception("TODO. The current version use tf.matmul for inferencing.")
 
         if len(strides) != 2:
             raise ValueError("len(strides) should be 2.")
@@ -508,7 +517,7 @@ class DorefaDenseLayer(Layer):
 
     Parameters
     ----------
-    layer : :class:`Layer`
+    prev_layer : :class:`Layer`
         Previous layer.
     bitW : int
         The bits of this layer's parameter
@@ -533,6 +542,7 @@ class DorefaDenseLayer(Layer):
 
     """
 
+    @deprecated_alias(layer='prev_layer', end_support_version=1.9)  # TODO remove this line for the 1.9 release
     def __init__(
             self,
             prev_layer,
@@ -547,22 +557,24 @@ def __init__(
             b_init_args=None,
             name='dorefa_dense',
     ):
+        super(DorefaDenseLayer, self).__init__(prev_layer=prev_layer, name=name)
+        logging.info("DorefaDenseLayer %s: %d %s" % (name, n_units, act.__name__))
+
+        self.inputs = prev_layer.outputs
+
         if W_init_args is None:
             W_init_args = {}
         if b_init_args is None:
             b_init_args = {}
 
-        Layer.__init__(self, prev_layer=prev_layer, name=name)
-        self.inputs = prev_layer.outputs
         if self.inputs.get_shape().ndims != 2:
             raise Exception("The input dimension must be rank 2, please reshape or flatten it")
-
         if use_gemm:
             raise Exception("TODO. The current version use tf.matmul for inferencing.")
 
         n_in = int(self.inputs.get_shape()[-1])
         self.n_units = n_units
-        logging.info("DorefaDenseLayer %s: %d %s" % (self.name, self.n_units, act.__name__))
+
         with tf.variable_scope(name):
             W = tf.get_variable(name='W', shape=(n_in, n_units), initializer=W_init, dtype=LayersConfig.tf_dtype, **W_init_args)
             # W = tl.act.sign(W) # dont update ...
@@ -596,7 +608,7 @@ class DorefaConv2d(Layer):
 
     Parameters
     ----------
-    layer : :class:`Layer`
+    prev_layer : :class:`Layer`
         Previous layer.
     bitW : int
         The bits of this layer's parameter
@@ -644,6 +656,7 @@ class DorefaConv2d(Layer):
 
     """
 
+    @deprecated_alias(layer='prev_layer', end_support_version=1.9)  # TODO remove this line for the 1.9 release
     def __init__(
             self,
             prev_layer,
@@ -673,21 +686,22 @@ def __init__(
             # data_format=None,
             name='dorefa_cnn2d',
     ):
+        super(DorefaConv2d, self).__init__(prev_layer=prev_layer, name=name)
+        logging.info("DorefaConv2d %s: n_filter:%d filter_size:%s strides:%s pad:%s act:%s" % (name, n_filter, str(filter_size), str(strides), padding,
+                                                                                               act.__name__))
+
+        self.inputs = prev_layer.outputs
+
         if W_init_args is None:
             W_init_args = {}
         if b_init_args is None:
             b_init_args = {}
+        if act is None:
+            act = tf.identity
 
         if use_gemm:
             raise Exception("TODO. The current version use tf.matmul for inferencing.")
 
-        Layer.__init__(self, prev_layer=prev_layer, name=name)
-        self.inputs = prev_layer.outputs
-        if act is None:
-            act = tf.identity
-        logging.info("DorefaConv2d %s: n_filter:%d filter_size:%s strides:%s pad:%s act:%s" % (self.name, n_filter, str(filter_size), str(strides), padding,
-                                                                                               act.__name__))
-
         if len(strides) != 2:
             raise ValueError("len(strides) should be 2.")
         try:
@@ -720,23 +734,25 @@ class SignLayer(Layer):
 
     Parameters
     ----------
-    layer : :class:`Layer`
+    prev_layer : :class:`Layer`
         Previous layer.
     name : a str
         A unique layer name.
 
     """
 
+    @deprecated_alias(layer='prev_layer', end_support_version=1.9)  # TODO remove this line for the 1.9 release
     def __init__(
             self,
             prev_layer,
             name='sign',
     ):
+        super(SignLayer, self).__init__(prev_layer=prev_layer, name=name)
 
-        Layer.__init__(self, prev_layer=prev_layer, name=name)
         self.inputs = prev_layer.outputs
 
         logging.info("SignLayer %s" % (self.name))
+
         with tf.variable_scope(name):
             # self.outputs = tl.act.sign(self.inputs)
             self.outputs = quantize(self.inputs)
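The `quantize` helper that `SignLayer` calls is defined elsewhere in this module and is untouched by this commit. A hedged sketch of the usual BinaryNet-style implementation it corresponds to: binarize with `sign` on the forward pass, and let gradients pass straight through on the backward pass:

```python
import tensorflow as tf


def quantize(x):
    # Forward: sign(x) in {-1, +1}; backward: treat the op as identity so
    # gradients flow through (the straight-through estimator). This is an
    # assumption about the helper's behavior, not code from this commit.
    with tf.get_default_graph().gradient_override_map({"Sign": "Identity"}):
        return tf.sign(x)
```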
@@ -749,7 +765,7 @@ class ScaleLayer(Layer):
 
     Parameters
     ----------
-    layer : :class:`Layer`
+    prev_layer : :class:`Layer`
         Previous layer.
     init_scale : float
         The initial value for the scale factor.
@@ -758,17 +774,18 @@ class ScaleLayer(Layer):
 
     """
 
+    @deprecated_alias(layer='prev_layer', end_support_version=1.9)  # TODO remove this line for the 1.9 release
     def __init__(
             self,
             prev_layer,
             init_scale=0.05,
             name='scale',
     ):
+        super(ScaleLayer, self).__init__(prev_layer=prev_layer, name=name)
+        logging.info("ScaleLayer %s: init_scale: %f" % (name, init_scale))
 
-        Layer.__init__(self, prev_layer=prev_layer, name=name)
         self.inputs = prev_layer.outputs
 
-        logging.info("ScaleLayer %s: init_scale: %f" % (self.name, init_scale))
         with tf.variable_scope(name):
             # scale = tf.get_variable(name='scale_factor', init, trainable=True, )
             scale = tf.get_variable("scale", shape=[1], initializer=tf.constant_initializer(value=init_scale))
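Taken together, these changes leave call sites working both ways during the deprecation window. A hypothetical usage sketch (placeholder shape and layer names invented for illustration):

```python
import tensorflow as tf
import tensorlayer as tl

x = tf.placeholder(tf.float32, shape=[None, 784])
net = tl.layers.InputLayer(x, name='input')

# New keyword, the supported spelling going forward:
net = tl.layers.BinaryDenseLayer(prev_layer=net, n_units=256, act=tf.nn.relu, name='bd1')

# Old keyword, still accepted until 1.9 but now emits a deprecation warning:
# net = tl.layers.BinaryDenseLayer(layer=net, n_units=256, act=tf.nn.relu, name='bd1')
```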