
Commit ddfb9f0

minhitbk authored and Beat Buesser committed
update asr attack
Signed-off-by: Ngoc Minh Tran <[email protected]>
1 parent 0bbf4fb commit ddfb9f0

File tree

1 file changed: +73 additions, -91 deletions


art/attacks/evasion/imperceptible_asr/imperceptible_asr_pytorch.py

Lines changed: 73 additions & 91 deletions
@@ -53,16 +53,16 @@ class ImperceptibleASRPyTorch(EvasionAttack):
 
     attack_params = EvasionAttack.attack_params + [
         "eps",
-        "max_iter_1st_stage",
-        "max_iter_2nd_stage",
-        "learning_rate_1st_stage",
-        "learning_rate_2nd_stage",
-        "optimizer_1st_stage",
-        "optimizer_2nd_stage",
+        "max_iter_1",
+        "max_iter_2",
+        "learning_rate_1",
+        "learning_rate_2",
+        "optimizer_1",
+        "optimizer_2",
         "global_max_length",
         "initial_rescale",
-        "rescale_factor",
-        "num_iter_adjust_rescale",
+        "decrease_factor_eps",
+        "num_iter_decrease_eps",
         "alpha",
         "increase_factor_alpha",
         "num_iter_increase_alpha",
@@ -85,22 +85,22 @@ class ImperceptibleASRPyTorch(EvasionAttack):
     def __init__(
         self,
         estimator: PyTorchDeepSpeech,
-        eps: float = 0.001,
-        max_iter_1st_stage: int = 1000,
-        max_iter_2nd_stage: int = 4000,
-        learning_rate_1st_stage: float = 0.1,
-        learning_rate_2nd_stage: float = 0.001,
-        optimizer_1st_stage: Optional["torch.optim.Optimizer"] = None,
-        optimizer_2nd_stage: Optional["torch.optim.Optimizer"] = None,
-        global_max_length: int = 10000,
+        eps: float = 0.05,
+        max_iter_1: int = 10,
+        max_iter_2: int = 4000,
+        learning_rate_1: float = 0.001,
+        learning_rate_2: float = 5e-4,
+        optimizer_1: torch.optim.Optimizer = torch.optim.Adam,
+        optimizer_2: torch.optim.Optimizer = torch.optim.Adam,
+        global_max_length: int = 200000,
         initial_rescale: float = 1.0,
-        rescale_factor: float = 0.8,
-        num_iter_adjust_rescale: int = 10,
-        alpha: float = 0.05,
+        decrease_factor_eps: float = 0.8,
+        num_iter_decrease_eps: int = 1,
+        alpha: float = 1.2,
         increase_factor_alpha: float = 1.2,
         num_iter_increase_alpha: int = 20,
         decrease_factor_alpha: float = 0.8,
-        num_iter_decrease_alpha: int = 50,
+        num_iter_decrease_alpha: int = 20,
         batch_size: int = 32,
         use_amp: bool = False,
         opt_level: str = "O1",
@@ -110,24 +110,21 @@ def __init__(
 
         :param estimator: A trained estimator.
         :param eps: Maximum perturbation that the attacker can introduce.
-        :param max_iter_1st_stage: The maximum number of iterations applied for the first stage of the optimization of
-                                   the attack.
-        :param max_iter_2nd_stage: The maximum number of iterations applied for the second stage of the optimization of
-                                   the attack.
-        :param learning_rate_1st_stage: The initial learning rate applied for the first stage of the optimization of
-                                        the attack.
-        :param learning_rate_2nd_stage: The initial learning rate applied for the second stage of the optimization of
-                                        the attack.
-        :param optimizer_1st_stage: The optimizer applied for the first stage of the optimization of the attack. If
-                                    `None` attack will use `torch.optim.SGD`.
-        :param optimizer_2nd_stage: The optimizer applied for the second stage of the optimization of the attack. If
-                                    `None` attack will use `torch.optim.SGD`.
+        :param max_iter_1: The maximum number of iterations applied for the first stage of the optimization of the
+                           attack.
+        :param max_iter_2: The maximum number of iterations applied for the second stage of the optimization of the
+                           attack.
+        :param learning_rate_1: The learning rate applied for the first stage of the optimization of the attack.
+        :param learning_rate_2: The learning rate applied for the second stage of the optimization of the attack.
+        :param optimizer_1: The optimizer applied for the first stage of the optimization of the attack.
+        :param optimizer_2: The optimizer applied for the second stage of the optimization of the attack.
         :param global_max_length: The length of the longest audio signal allowed by this attack.
         :param initial_rescale: Initial rescale coefficient to speedup the decrease of the perturbation size during
                                 the first stage of the optimization of the attack.
-        :param rescale_factor: The factor to adjust the rescale coefficient during the first stage of the optimization
-                               of the attack.
-        :param num_iter_adjust_rescale: Number of iterations to adjust the rescale coefficient.
+        :param decrease_factor_eps: The factor to adjust the rescale coefficient during the first stage of the
+                                    optimization of the attack.
+        :param num_iter_decrease_eps: Number of iterations to adjust the rescale coefficient, and therefore adjust the
+                                      perturbation size.
         :param alpha: Value of the alpha coefficient used in the second stage of the optimization of the attack.
         :param increase_factor_alpha: The factor to increase the alpha coefficient used in the second stage of the
                                       optimization of the attack.
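For reference, a minimal sketch of instantiating the attack under the renamed signature; `estimator` is assumed to be a trained PyTorchDeepSpeech wrapper, the import path follows the file above, and the values shown are simply the new defaults:

    import torch

    from art.attacks.evasion.imperceptible_asr.imperceptible_asr_pytorch import ImperceptibleASRPyTorch

    attack = ImperceptibleASRPyTorch(
        estimator=estimator,           # assumed: a trained PyTorchDeepSpeech estimator
        eps=0.05,                      # maximum perturbation
        max_iter_1=10,                 # first-stage iterations
        max_iter_2=4000,               # second-stage iterations
        learning_rate_1=0.001,
        learning_rate_2=5e-4,
        optimizer_1=torch.optim.Adam,  # note: an optimizer class, not an instance
        optimizer_2=torch.optim.Adam,
    )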
@@ -150,14 +147,14 @@ def __init__(
         # Set attack attributes
         self._targeted = True
         self.eps = eps
-        self.max_iter_1st_stage = max_iter_1st_stage
-        self.max_iter_2nd_stage = max_iter_2nd_stage
-        self.learning_rate_1st_stage = learning_rate_1st_stage
-        self.learning_rate_2nd_stage = learning_rate_2nd_stage
+        self.max_iter_1 = max_iter_1
+        self.max_iter_2 = max_iter_2
+        self.learning_rate_1 = learning_rate_1
+        self.learning_rate_2 = learning_rate_2
         self.global_max_length = global_max_length
         self.initial_rescale = initial_rescale
-        self.rescale_factor = rescale_factor
-        self.num_iter_adjust_rescale = num_iter_adjust_rescale
+        self.decrease_factor_eps = decrease_factor_eps
+        self.num_iter_decrease_eps = num_iter_decrease_eps
         self.alpha = alpha
         self.increase_factor_alpha = increase_factor_alpha
         self.num_iter_increase_alpha = num_iter_increase_alpha
@@ -179,17 +176,11 @@ def __init__(
         self.global_optimal_delta.to(self.estimator.device)
 
         # Create the optimizers
-        self._optimizer_1st_stage_arg = optimizer_1st_stage
-        if optimizer_1st_stage is None:
-            self.optimizer_1st_stage = torch.optim.SGD(
-                params=[self.global_optimal_delta], lr=self.learning_rate_1st_stage
-            )
+        self._optimizer_arg_1 = optimizer_1
+        self.optimizer_1 = self._optimizer_arg_1(params=[self.global_optimal_delta], lr=self.learning_rate_1)
 
-        self._optimizer_2nd_stage_arg = optimizer_2nd_stage
-        if optimizer_2nd_stage is None:
-            self.optimizer_2nd_stage = torch.optim.SGD(
-                params=[self.global_optimal_delta], lr=self.learning_rate_2nd_stage
-            )
+        self._optimizer_arg_2 = optimizer_2
+        self.optimizer_2 = self._optimizer_arg_2(params=[self.global_optimal_delta], lr=self.learning_rate_2)
 
         # Setup for AMP use
         if self._use_amp:
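The hunk above also changes the contract of `optimizer_1`/`optimizer_2`: the caller now passes an optimizer class rather than a prebuilt instance (or `None`), and the attack constructs the instance itself, which is why the `torch.optim.SGD` fallback branches could be deleted. A standalone sketch of this pattern, with illustrative shape and learning rate:

    import torch

    # The perturbation variable the attack optimizes (illustrative shape).
    delta = torch.zeros(32, 200000, dtype=torch.float64, requires_grad=True)

    optimizer_class = torch.optim.Adam  # what `optimizer_1` now holds
    optimizer = optimizer_class(params=[delta], lr=0.001)

    # Re-building the optimizer later (as `generate` does before each batch)
    # discards stale internal state such as Adam's running moment estimates.
    optimizer = optimizer_class(params=[delta], lr=0.001)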
@@ -200,9 +191,9 @@ def __init__(
             else:
                 enabled = True
 
-            self.estimator._model, [self.optimizer_1st_stage, self.optimizer_2nd_stage] = amp.initialize(
+            self.estimator._model, [self.optimizer_1, self.optimizer_2] = amp.initialize(
                 models=self.estimator._model,
-                optimizers=[self.optimizer_1st_stage, self.optimizer_2nd_stage],
+                optimizers=[self.optimizer_1, self.optimizer_2],
                 enabled=enabled,
                 opt_level=opt_level,
                 loss_scale=1.0,
@@ -250,23 +241,14 @@ class only supports targeted attack.
 
         for m in range(num_batch):
             # Batch indexes
-            batch_index_1, batch_index_2 = (
-                m * self.batch_size,
-                min((m + 1) * self.batch_size, len(x)),
-            )
+            batch_index_1, batch_index_2 = (m * self.batch_size, min((m + 1) * self.batch_size, len(x)))
 
             # First reset delta
             self.global_optimal_delta.data = torch.zeros(self.batch_size, self.global_max_length).type(torch.float64)
 
             # Next, reset optimizers
-            if self._optimizer_1st_stage_arg is not None:
-                self.optimizer_1st_stage = self._optimizer_1st_stage_arg(
-                    params=[self.global_optimal_delta], lr=self.learning_rate_1st_stage
-                )
-            if self._optimizer_2nd_stage_arg is not None:
-                self.optimizer_2nd_stage = self._optimizer_2nd_stage_arg(
-                    params=[self.global_optimal_delta], lr=self.learning_rate_2nd_stage
-                )
+            self.optimizer_1 = self._optimizer_arg_1(params=[self.global_optimal_delta], lr=self.learning_rate_1)
+            self.optimizer_2 = self._optimizer_arg_2(params=[self.global_optimal_delta], lr=self.learning_rate_2)
 
             # Then compute the batch
             adv_x_batch = self._generate_batch(adv_x[batch_index_1:batch_index_2], y[batch_index_1:batch_index_2])
@@ -368,9 +350,9 @@ class only supports targeted attack.
         successful_adv_input = [None] * local_batch_size
         trans = [None] * local_batch_size
 
-        for iter_1st_stage_idx in range(self.max_iter_1st_stage):
+        for iter_1st_stage_idx in range(self.max_iter_1):
             # Zero the parameter gradients
-            self.optimizer_1st_stage.zero_grad()
+            self.optimizer_1.zero_grad()
 
             # Call to forward pass
             loss, local_delta, decoded_output, masked_adv_input, _ = self._forward_1st_stage(
@@ -387,7 +369,7 @@ class only supports targeted attack.
             if self._use_amp:
                 from apex import amp
 
-                with amp.scale_loss(loss, self.optimizer_1st_stage) as scaled_loss:
+                with amp.scale_loss(loss, self.optimizer_1) as scaled_loss:
                     scaled_loss.backward()
 
             else:
@@ -397,25 +379,25 @@ class only supports targeted attack.
             self.global_optimal_delta.grad = torch.sign(self.global_optimal_delta.grad)
 
             # Do optimization
-            self.optimizer_1st_stage.step()
+            self.optimizer_1.step()
 
             # Save the best adversarial example and adjust the rescale coefficient if successful
-            if iter_1st_stage_idx % self.num_iter_adjust_rescale == 0:
+            if iter_1st_stage_idx % self.num_iter_decrease_eps == 0:
                 for local_batch_size_idx in range(local_batch_size):
                     if decoded_output[local_batch_size_idx] == y[local_batch_size_idx]:
                         # Adjust the rescale coefficient
                         max_local_delta = np.max(np.abs(local_delta[local_batch_size_idx].detach().numpy()))
 
                         if rescale[local_batch_size_idx][0] * self.eps > max_local_delta:
                             rescale[local_batch_size_idx] = max_local_delta / self.eps
-                        rescale[local_batch_size_idx] *= self.rescale_factor
+                        rescale[local_batch_size_idx] *= self.decrease_factor_eps
 
                         # Save the best adversarial example
                         successful_adv_input[local_batch_size_idx] = masked_adv_input[local_batch_size_idx]
                         trans[local_batch_size_idx] = decoded_output[local_batch_size_idx]
 
             # If attack is unsuccessful
-            if iter_1st_stage_idx == self.max_iter_1st_stage - 1:
+            if iter_1st_stage_idx == self.max_iter_1 - 1:
                 for local_batch_size_idx in range(local_batch_size):
                     if successful_adv_input[local_batch_size_idx] is None:
                         successful_adv_input[local_batch_size_idx] = masked_adv_input[local_batch_size_idx]
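The renamed `decrease_factor_eps` and `num_iter_decrease_eps` drive the first-stage schedule visible in this hunk: every `num_iter_decrease_eps` iterations, each successfully attacked example has its perturbation bound tightened to the magnitude it actually needed, then shrunk further. A self-contained numeric sketch of one such update, using assumed values:

    eps = 0.05                 # global perturbation bound
    decrease_factor_eps = 0.8  # new name for the old `rescale_factor`
    rescale = 1.0              # per-example rescale coefficient
    max_local_delta = 0.03     # assumed peak magnitude of the current perturbation

    if rescale * eps > max_local_delta:
        rescale = max_local_delta / eps  # tighten to what was actually used
    rescale *= decrease_factor_eps       # then shrink by the decrease factor

    print(rescale * eps)  # 0.024: the effective bound going forward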
@@ -538,9 +520,9 @@ class only supports targeted attack.
         best_loss_2nd_stage = [np.inf] * local_batch_size
         trans = [None] * local_batch_size
 
-        for iter_2nd_stage_idx in range(self.max_iter_2nd_stage):
+        for iter_2nd_stage_idx in range(self.max_iter_2):
             # Zero the parameter gradients
-            self.optimizer_2nd_stage.zero_grad()
+            self.optimizer_2.zero_grad()
 
             # Call to forward pass of the first stage
             loss_1st_stage, _, decoded_output, masked_adv_input, local_delta_rescale = self._forward_1st_stage(
@@ -568,14 +550,14 @@ class only supports targeted attack.
             if self._use_amp:
                 from apex import amp
 
-                with amp.scale_loss(loss, self.optimizer_2nd_stage) as scaled_loss:
+                with amp.scale_loss(loss, self.optimizer_2) as scaled_loss:
                     scaled_loss.backward()
 
             else:
                 loss.backward()
 
             # Do optimization
-            self.optimizer_2nd_stage.step()
+            self.optimizer_2.step()
 
             # Save the best adversarial example and adjust the alpha coefficient
             for local_batch_size_idx in range(local_batch_size):
@@ -598,7 +580,7 @@ class only supports targeted attack.
                     alpha[local_batch_size_idx] = max(alpha[local_batch_size_idx], 0.0005)
 
             # If attack is unsuccessful
-            if iter_2nd_stage_idx == self.max_iter_2nd_stage - 1:
+            if iter_2nd_stage_idx == self.max_iter_2 - 1:
                 for local_batch_size_idx in range(local_batch_size):
                     if successful_adv_input[local_batch_size_idx] is None:
                         successful_adv_input[local_batch_size_idx] = masked_adv_input[local_batch_size_idx]
@@ -819,24 +801,24 @@ def _check_params(self) -> None:
         if self.eps <= 0:
             raise ValueError("The perturbation size `eps` has to be positive.")
 
-        if not isinstance(self.max_iter_1st_stage, int):
+        if not isinstance(self.max_iter_1, int):
             raise ValueError("The maximum number of iterations must be of type int.")
-        if not self.max_iter_1st_stage > 0:
+        if not self.max_iter_1 > 0:
             raise ValueError("The maximum number of iterations must be greater than 0.")
 
-        if not isinstance(self.max_iter_2nd_stage, int):
+        if not isinstance(self.max_iter_2, int):
             raise ValueError("The maximum number of iterations must be of type int.")
-        if not self.max_iter_2nd_stage > 0:
+        if not self.max_iter_2 > 0:
             raise ValueError("The maximum number of iterations must be greater than 0.")
 
-        if not isinstance(self.learning_rate_1st_stage, float):
+        if not isinstance(self.learning_rate_1, float):
             raise ValueError("The learning rate must be of type float.")
-        if not self.learning_rate_1st_stage > 0.0:
+        if not self.learning_rate_1 > 0.0:
             raise ValueError("The learning rate must be greater than 0.0.")
 
-        if not isinstance(self.learning_rate_2nd_stage, float):
+        if not isinstance(self.learning_rate_2, float):
             raise ValueError("The learning rate must be of type float.")
-        if not self.learning_rate_2nd_stage > 0.0:
+        if not self.learning_rate_2 > 0.0:
             raise ValueError("The learning rate must be greater than 0.0.")
 
         if not isinstance(self.global_max_length, int):
@@ -849,14 +831,14 @@ def _check_params(self) -> None:
         if not self.initial_rescale > 0.0:
             raise ValueError("The initial rescale coefficient must be greater than 0.0.")
 
-        if not isinstance(self.rescale_factor, float):
-            raise ValueError("The rescale factor must be of type float.")
-        if not self.rescale_factor > 0.0:
-            raise ValueError("The rescale factor must be greater than 0.0.")
+        if not isinstance(self.decrease_factor_eps, float):
+            raise ValueError("The rescale factor of `eps` must be of type float.")
+        if not self.decrease_factor_eps > 0.0:
+            raise ValueError("The rescale factor of `eps` must be greater than 0.0.")
 
-        if not isinstance(self.num_iter_adjust_rescale, int):
+        if not isinstance(self.num_iter_decrease_eps, int):
             raise ValueError("The number of iterations must be of type int.")
-        if not self.num_iter_adjust_rescale > 0:
+        if not self.num_iter_decrease_eps > 0:
             raise ValueError("The number of iterations must be greater than 0.")
 
         if not isinstance(self.alpha, float):

0 commit comments
