@@ -737,7 +737,7 @@ def _modify_classifier(
 def get_layer_activations(  # type: ignore
     p_classifier: PyTorchClassifier,
     x: "torch.Tensor",
-    layers: List[Union[int, str]] = None,
+    layers: List[Union[int, str]],
 ) -> Tuple[Dict[str, "torch.Tensor"], List[str]]:
     """
     Return the output of the specified layers for input `x`. `layers` is a list of either layer indices (between 0
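With the default removed, `layers` must now be supplied explicitly at every call site. A hypothetical usage sketch (the classifier, the input shape, and the layer name "conv1" are assumptions for illustration, not part of this commit):

```python
import torch

# Assumption: `classifier` is an ART PyTorchClassifier wrapping a model
# whose layer names include "conv1".
x = torch.rand(8, 3, 32, 32)  # a batch of 8 images

# `layers` no longer has a default; indices and names may be mixed.
outputs, layer_names = get_layer_activations(classifier, x, layers=[0, "conv1"])
print(layer_names)             # e.g. ["conv1", "conv1"] if index 0 is "conv1"
print(outputs["conv1"].shape)  # first dimension is the batch size, 8
```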
@@ -750,21 +750,20 @@ def get_layer_activations(  # type: ignore
     :return: Tuple containing the output dict and a list of layers' names. In the dictionary, each element is a
              layer's output where the first dimension is the batch size corresponding to `x`.
     """
-    import torch
 
     p_classifier.model.train(mode=False)
 
     list_layer_names = []
     for layer in layers:
         if isinstance(layer, six.string_types):
-            if layer not in p_classifier.layer_names:
+            if layer not in p_classifier._layer_names:  # pylint: disable=W0212
                 raise ValueError(f"Layer name {layer} not supported")
             layer_name = layer
             list_layer_names.append(layer_name)
 
         elif isinstance(layer, int):
             layer_index = layer
-            layer_name = p_classifier.layer_names[layer_index]
+            layer_name = p_classifier._layer_names[layer_index]  # pylint: disable=W0212
             list_layer_names.append(layer_name)
 
         else:
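The loop above resolves layer indices and layer names to a uniform list of names. A self-contained toy version of that resolution logic (the layer names are invented):

```python
import six

layer_names = ["conv1", "relu1", "fc"]  # stand-in for p_classifier._layer_names

list_layer_names = []
for layer in [0, "fc"]:  # mixed specifiers, as the function allows
    if isinstance(layer, six.string_types):
        if layer not in layer_names:
            raise ValueError(f"Layer name {layer} not supported")
        list_layer_names.append(layer)
    elif isinstance(layer, int):
        list_layer_names.append(layer_names[layer])

print(list_layer_names)  # ['conv1', 'fc']
```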
@@ -778,7 +777,7 @@ def hook(model, input, output):  # pylint: disable=W0622,W0613
         return hook
 
     if not hasattr(p_classifier, "_features"):
-        p_classifier._features: Dict[str, torch.Tensor] = {}
+        p_classifier._features = {}  # pylint: disable=W0212
     # register forward hooks on the layers of choice
 
     for layer_name in list_layer_names:
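`_features` is filled in by PyTorch forward hooks registered on the selected layers. A self-contained sketch of the same hook pattern in plain PyTorch (the model and the layer name "conv0" are invented for illustration):

```python
from typing import Dict

import torch
import torch.nn as nn

model = nn.Sequential(nn.Conv2d(3, 8, 3), nn.ReLU(), nn.Flatten(), nn.LazyLinear(10))
features: Dict[str, torch.Tensor] = {}

def hook_gen(name):
    # Each hook stores its layer's output under the layer's name,
    # mirroring what the diff's hook() does with p_classifier._features.
    def hook(module, inputs, output):  # signature PyTorch passes to forward hooks
        features[name] = output
    return hook

# Register a forward hook on the layer of choice; the handle can detach it later.
handle = model[0].register_forward_hook(hook_gen("conv0"))
_ = model(torch.rand(4, 3, 32, 32))
print(features["conv0"].shape)  # torch.Size([4, 8, 30, 30])
handle.remove()
```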
@@ -825,7 +824,7 @@ def calculate_lpips_distance(  # type: ignore
     p_classifier: PyTorchClassifier,
     input_1: "torch.Tensor",
     input_2: "torch.Tensor",
-    layers: List[Union[int, str]] = None,
+    layers: List[Union[int, str]],
 ) -> "torch.Tensor":
     """
     Return the LPIPS distance between input_1 and input_2. `layers` is a list of either layer indices (between 0 and
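For context, an LPIPS-style distance compares unit-normalized activations of the two inputs layer by layer. A rough, self-contained sketch of such a computation (not ART's exact implementation):

```python
import torch

def lpips_like_distance(feats_1, feats_2):
    """Sum of squared distances between unit-normalized activations, per example."""
    dist = torch.zeros(next(iter(feats_1.values())).shape[0])
    for name in feats_1:
        f_1 = torch.nn.functional.normalize(feats_1[name].flatten(1), dim=1)
        f_2 = torch.nn.functional.normalize(feats_2[name].flatten(1), dim=1)
        dist = dist + ((f_1 - f_2) ** 2).sum(dim=1)
    return dist  # shape: (batch,)

feats_a = {"conv1": torch.rand(4, 8, 16, 16)}
feats_b = {"conv1": torch.rand(4, 8, 16, 16)}
print(lpips_like_distance(feats_a, feats_b).shape)  # torch.Size([4])
```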
@@ -874,22 +873,28 @@ def update_learning_rate(
             else:
                 l_r = self._train_params["lr"] * 0.5 * (1 + np.cos(epoch / nb_epochs * np.pi))
 
+            for param_group in optimizer.param_groups:
+                param_group["lr"] = l_r
+
         elif lr_schedule.lower() == "linear":
             l_r = (epoch + 1) * (self._train_params["lr"] / 10)
 
+            for param_group in optimizer.param_groups:
+                param_group["lr"] = l_r
+
         elif lr_schedule.lower() == "step":
             if epoch >= 75 * nb_epochs / 110:
                 l_r = self._train_params["lr"] * 0.1
             if epoch >= 90 * nb_epochs / 110:
                 l_r = self._train_params["lr"] * 0.01
             if epoch >= 100 * nb_epochs / 110:
                 l_r = self._train_params["lr"] * 0.001
+
+            for param_group in optimizer.param_groups:
+                param_group["lr"] = l_r
         else:
             raise ValueError(f"lr_schedule {lr_schedule} not supported")
 
-        for param_group in optimizer.param_groups:
-            param_group["lr"] = l_r
-
     def _attack_lpips(  # type: ignore
         self,
         x: np.ndarray,
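Each schedule branch now applies its own rate through `optimizer.param_groups`, the standard PyTorch idiom for setting the learning rate directly. A self-contained example of the cosine branch (the hyperparameter values are illustrative):

```python
import numpy as np
import torch

model = torch.nn.Linear(4, 2)
optimizer = torch.optim.SGD(model.parameters(), lr=0.1)

base_lr, nb_epochs, epoch = 0.1, 100, 30
# Cosine annealing as in the diff: decays from base_lr toward 0 over nb_epochs.
l_r = base_lr * 0.5 * (1 + np.cos(epoch / nb_epochs * np.pi))

for param_group in optimizer.param_groups:
    param_group["lr"] = l_r

print(round(optimizer.param_groups[0]["lr"], 4))  # ~0.0794 at epoch 30 of 100
```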
@@ -898,7 +903,7 @@ def _attack_lpips(  # type: ignore
         eps_step: Union[int, float, np.ndarray],
         max_iter: int,
         training_mode: bool,
-    ) -> "torch.Tensor":
+    ) -> np.ndarray:
         """
         Compute adversarial examples with cross entropy and lpips distance.
 
@@ -916,12 +921,12 @@ def _attack_lpips(  # type: ignore
         """
         import torch
 
-        x = torch.from_numpy(x.astype(ART_NUMPY_DTYPE)).to(self._classifier.device)
-        y = torch.from_numpy(y.astype(ART_NUMPY_DTYPE)).to(self._classifier.device)
-        adv_x = torch.clone(x)
+        x_t = torch.from_numpy(x.astype(ART_NUMPY_DTYPE)).to(self._classifier.device)
+        y_t = torch.from_numpy(y.astype(ART_NUMPY_DTYPE)).to(self._classifier.device)
+        adv_x = torch.clone(x_t)
 
         for i_max_iter in range(max_iter):
-            adv_x = self._one_step_adv_example(adv_x, x, y, eps, eps_step, i_max_iter == 0, training_mode)
+            adv_x = self._one_step_adv_example(adv_x, x_t, y_t, eps, eps_step, i_max_iter == 0, training_mode)
 
         return adv_x.cpu().detach().numpy()
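The corrected annotation matches what the function actually does: it accepts `np.ndarray`, works on tensors internally, and converts back before returning. A minimal sketch of that round trip (`ART_NUMPY_DTYPE` is assumed here to be `np.float32`):

```python
import numpy as np
import torch

ART_NUMPY_DTYPE = np.float32  # assumption: ART's default numpy dtype

x = np.random.rand(2, 3, 8, 8)
device = "cuda" if torch.cuda.is_available() else "cpu"

# numpy -> torch on the target device, as in _attack_lpips
x_t = torch.from_numpy(x.astype(ART_NUMPY_DTYPE)).to(device)
adv_x = torch.clone(x_t)  # stand-in for the attack iterations

# torch -> numpy for the return value, matching the np.ndarray annotation
result = adv_x.cpu().detach().numpy()
assert isinstance(result, np.ndarray) and result.dtype == np.float32
```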