Skip to content

Commit 0bbf4fb

Browse files
minhitbk authored and Beat Buesser committed
update param names
Signed-off-by: Ngoc Minh Tran <[email protected]>
1 parent 6851acc commit 0bbf4fb

File tree

1 file changed

+18
-19
lines changed

1 file changed

+18
-19
lines changed

art/attacks/evasion/imperceptible_asr/imperceptible_asr_pytorch.py

Lines changed: 18 additions & 19 deletions
Original file line number | Diff line number | Diff line change
@@ -52,7 +52,7 @@ class ImperceptibleASRPyTorch(EvasionAttack):
5252
"""
5353

5454
attack_params = EvasionAttack.attack_params + [
55-
"initial_eps",
55+
"eps",
5656
"max_iter_1st_stage",
5757
"max_iter_2nd_stage",
5858
"learning_rate_1st_stage",
@@ -63,7 +63,7 @@ class ImperceptibleASRPyTorch(EvasionAttack):
6363
"initial_rescale",
6464
"rescale_factor",
6565
"num_iter_adjust_rescale",
66-
"initial_alpha",
66+
"alpha",
6767
"increase_factor_alpha",
6868
"num_iter_increase_alpha",
6969
"decrease_factor_alpha",
@@ -85,7 +85,7 @@ class ImperceptibleASRPyTorch(EvasionAttack):
8585
def __init__(
8686
self,
8787
estimator: PyTorchDeepSpeech,
88-
initial_eps: float = 0.001,
88+
eps: float = 0.001,
8989
max_iter_1st_stage: int = 1000,
9090
max_iter_2nd_stage: int = 4000,
9191
learning_rate_1st_stage: float = 0.1,
@@ -96,7 +96,7 @@ def __init__(
9696
initial_rescale: float = 1.0,
9797
rescale_factor: float = 0.8,
9898
num_iter_adjust_rescale: int = 10,
99-
initial_alpha: float = 0.05,
99+
alpha: float = 0.05,
100100
increase_factor_alpha: float = 1.2,
101101
num_iter_increase_alpha: int = 20,
102102
decrease_factor_alpha: float = 0.8,
@@ -109,7 +109,7 @@ def __init__(
109109
Create a :class:`.ImperceptibleASRPyTorch` instance.
110110
111111
:param estimator: A trained estimator.
112-
:param initial_eps: Initial maximum perturbation that the attacker can introduce.
112+
:param eps: Maximum perturbation that the attacker can introduce.
113113
:param max_iter_1st_stage: The maximum number of iterations applied for the first stage of the optimization of
114114
the attack.
115115
:param max_iter_2nd_stage: The maximum number of iterations applied for the second stage of the optimization of
@@ -128,8 +128,7 @@ def __init__(
128128
:param rescale_factor: The factor to adjust the rescale coefficient during the first stage of the optimization
129129
of the attack.
130130
:param num_iter_adjust_rescale: Number of iterations to adjust the rescale coefficient.
131-
:param initial_alpha: The initial value of the alpha coefficient used in the second stage of the optimization
132-
of the attack.
131+
:param alpha: Value of the alpha coefficient used in the second stage of the optimization of the attack.
133132
:param increase_factor_alpha: The factor to increase the alpha coefficient used in the second stage of the
134133
optimization of the attack.
135134
:param num_iter_increase_alpha: Number of iterations to increase alpha.
@@ -150,7 +149,7 @@ def __init__(
150149

151150
# Set attack attributes
152151
self._targeted = True
153-
self.initial_eps = initial_eps
152+
self.eps = eps
154153
self.max_iter_1st_stage = max_iter_1st_stage
155154
self.max_iter_2nd_stage = max_iter_2nd_stage
156155
self.learning_rate_1st_stage = learning_rate_1st_stage
@@ -159,7 +158,7 @@ def __init__(
159158
self.initial_rescale = initial_rescale
160159
self.rescale_factor = rescale_factor
161160
self.num_iter_adjust_rescale = num_iter_adjust_rescale
162-
self.initial_alpha = initial_alpha
161+
self.alpha = alpha
163162
self.increase_factor_alpha = increase_factor_alpha
164163
self.num_iter_increase_alpha = num_iter_increase_alpha
165164
self.decrease_factor_alpha = decrease_factor_alpha
@@ -407,8 +406,8 @@ class only supports targeted attack.
407406
# Adjust the rescale coefficient
408407
max_local_delta = np.max(np.abs(local_delta[local_batch_size_idx].detach().numpy()))
409408

410-
if rescale[local_batch_size_idx][0] * self.initial_eps > max_local_delta:
411-
rescale[local_batch_size_idx] = max_local_delta / self.initial_eps
409+
if rescale[local_batch_size_idx][0] * self.eps > max_local_delta:
410+
rescale[local_batch_size_idx] = max_local_delta / self.eps
412411
rescale[local_batch_size_idx] *= self.rescale_factor
413412

414413
# Save the best adversarial example
@@ -461,7 +460,7 @@ def _forward_1st_stage(
461460

462461
# Compute perturbed inputs
463462
local_delta = self.global_optimal_delta[:local_batch_size, :local_max_length]
464-
local_delta_rescale = torch.clamp(local_delta, -self.initial_eps, self.initial_eps).to(self.estimator.device)
463+
local_delta_rescale = torch.clamp(local_delta, -self.eps, self.eps).to(self.estimator.device)
465464
local_delta_rescale *= torch.tensor(rescale).to(self.estimator.device)
466465
adv_input = local_delta_rescale + torch.tensor(original_input).to(self.estimator.device)
467466
masked_adv_input = adv_input * torch.tensor(input_mask).to(self.estimator.device)
@@ -523,7 +522,7 @@ class only supports targeted attack.
523522
local_max_length = np.max(real_lengths)
524523

525524
# Initialize alpha and rescale
526-
alpha = np.array([self.initial_alpha] * local_batch_size, dtype=np.float64)
525+
alpha = np.array([self.alpha] * local_batch_size, dtype=np.float64)
527526
rescale = np.ones([local_batch_size, local_max_length], dtype=np.float64) * self.initial_rescale
528527

529528
# Reformat input
@@ -817,8 +816,8 @@ def _check_params(self) -> None:
817816
"""
818817
Apply attack-specific checks.
819818
"""
820-
if self.initial_eps <= 0:
821-
raise ValueError("The perturbation size `initial_eps` has to be positive.")
819+
if self.eps <= 0:
820+
raise ValueError("The perturbation size `eps` has to be positive.")
822821

823822
if not isinstance(self.max_iter_1st_stage, int):
824823
raise ValueError("The maximum number of iterations must be of type int.")
@@ -860,10 +859,10 @@ def _check_params(self) -> None:
860859
if not self.num_iter_adjust_rescale > 0:
861860
raise ValueError("The number of iterations must be greater than 0.")
862861

863-
if not isinstance(self.initial_alpha, float):
864-
raise ValueError("The initial alpha must be of type float.")
865-
if not self.initial_alpha > 0.0:
866-
raise ValueError("The initial alpha must be greater than 0.0.")
862+
if not isinstance(self.alpha, float):
863+
raise ValueError("The value of alpha must be of type float.")
864+
if not self.alpha > 0.0:
865+
raise ValueError("The value of alpha must be greater than 0.0.")
867866

868867
if not isinstance(self.increase_factor_alpha, float):
869868
raise ValueError("The factor to increase alpha must be of type float.")

0 commit comments

Comments (0)