37 | 37 | from art.attacks.evasion.projected_gradient_descent.projected_gradient_descent_numpy import ( |
38 | 38 | ProjectedGradientDescentCommon, |
39 | 39 | ) |
40 | | -from art.utils import compute_success, random_sphere |
| 40 | +from art.utils import compute_success, random_sphere, compute_success_array |
41 | 41 |
42 | 42 | if TYPE_CHECKING: |
43 | 43 | import torch |
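For reference, `compute_success` returns an aggregate success rate over all samples, whereas the newly imported `compute_success_array` returns one boolean per sample. The rewritten loop below relies on that per-sample mask to keep only the restarts that actually succeed; a minimal sketch of the pattern (every name apart from the ART utility is illustrative):

    from art.utils import compute_success_array

    def keep_successful(estimator, x, y, adv_current, adv_candidate, targeted, batch_size):
        # One boolean per sample: True where the candidate example fools the estimator.
        success = compute_success_array(estimator, x, y, adv_candidate, targeted, batch_size=batch_size)
        adv_best = adv_current.copy()
        adv_best[success] = adv_candidate[success]  # keep improvements sample by sample
        return adv_best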
@@ -154,67 +154,70 @@ def generate(self, x: np.ndarray, y: Optional[np.ndarray] = None, **kwargs) -> n |
154 | 154 |
155 | 155 | else: |
156 | 156 | dataset = torch.utils.data.TensorDataset( |
157 | | - torch.from_numpy(x.astype(ART_NUMPY_DTYPE)), torch.from_numpy(targets.astype(ART_NUMPY_DTYPE)), |
| 157 | + torch.from_numpy(x.astype(ART_NUMPY_DTYPE)), |
| 158 | + torch.from_numpy(targets.astype(ART_NUMPY_DTYPE)), |
158 | 159 | ) |
159 | 160 |
160 | 161 | data_loader = torch.utils.data.DataLoader( |
161 | 162 | dataset=dataset, batch_size=self.batch_size, shuffle=False, drop_last=False |
162 | 163 | ) |
163 | 164 |
164 | 165 | # Start to compute adversarial examples |
165 | | - adv_x_best = None |
166 | | - rate_best = None |
167 | | - |
168 | | - for _ in trange(max(1, self.num_random_init), desc="PGD - Random Initializations", disable=not self.verbose): |
169 | | - adv_x = x.astype(ART_NUMPY_DTYPE) |
170 | | - |
171 | | - # Compute perturbation with batching |
172 | | - for (batch_id, batch_all) in enumerate( |
173 | | - tqdm(data_loader, desc="PGD - Batches", leave=False, disable=not self.verbose) |
174 | | - ): |
175 | | - if mask is not None: |
176 | | - (batch, batch_labels, mask_batch) = batch_all[0], batch_all[1], batch_all[2] |
177 | | - else: |
178 | | - (batch, batch_labels, mask_batch) = batch_all[0], batch_all[1], None |
| 166 | + adv_x = x.astype(ART_NUMPY_DTYPE) |
179 | 167 |
180 | | - batch_index_1, batch_index_2 = batch_id * self.batch_size, (batch_id + 1) * self.batch_size |
| 168 | + # Compute perturbation with batching |
| 169 | + for (batch_id, batch_all) in enumerate( |
| 170 | + tqdm(data_loader, desc="PGD - Batches", leave=False, disable=not self.verbose) |
| 171 | + ): |
| 172 | + if mask is not None: |
| 173 | + (batch, batch_labels, mask_batch) = batch_all[0], batch_all[1], batch_all[2] |
| 174 | + else: |
| 175 | + (batch, batch_labels, mask_batch) = batch_all[0], batch_all[1], None |
181 | 176 |
182 | | - # Compute batch_eps and batch_eps_step |
183 | | - if isinstance(self.eps, np.ndarray): |
184 | | - if len(self.eps.shape) == len(x.shape) and self.eps.shape[0] == x.shape[0]: |
185 | | - batch_eps = self.eps[batch_index_1:batch_index_2] |
186 | | - batch_eps_step = self.eps_step[batch_index_1:batch_index_2] |
| 177 | + batch_index_1, batch_index_2 = batch_id * self.batch_size, (batch_id + 1) * self.batch_size |
187 | 178 |
188 | | - else: |
189 | | - batch_eps = self.eps |
190 | | - batch_eps_step = self.eps_step |
| 179 | + # Compute batch_eps and batch_eps_step |
| 180 | + if isinstance(self.eps, np.ndarray): |
| 181 | + if len(self.eps.shape) == len(x.shape) and self.eps.shape[0] == x.shape[0]: |
| 182 | + batch_eps = self.eps[batch_index_1:batch_index_2] |
| 183 | + batch_eps_step = self.eps_step[batch_index_1:batch_index_2] |
191 | 184 |
192 | 185 | else: |
193 | 186 | batch_eps = self.eps |
194 | 187 | batch_eps_step = self.eps_step |
195 | 188 |
196 | | - adv_x[batch_index_1:batch_index_2] = self._generate_batch( |
197 | | - x=batch, targets=batch_labels, mask=mask_batch, eps=batch_eps, eps_step=batch_eps_step |
198 | | - ) |
199 | | - |
200 | | - if self.num_random_init > 1: |
201 | | - rate = 100 * compute_success( |
202 | | - self.estimator, x, targets, adv_x, self.targeted, batch_size=self.batch_size |
203 | | - ) |
204 | | - if rate_best is None or rate > rate_best or adv_x_best is None: |
205 | | - rate_best = rate |
206 | | - adv_x_best = adv_x |
207 | 189 | else: |
208 | | - adv_x_best = adv_x |
| 190 | + batch_eps = self.eps |
| 191 | + batch_eps_step = self.eps_step |
| 192 | + |
| 193 | + for rand_init_num in range(max(1, self.num_random_init)): |
| 194 | + if rand_init_num == 0: |
| 195 | + # first iteration: use the adversarial examples as they are the only ones we have now |
| 196 | + adv_x[batch_index_1:batch_index_2] = self._generate_batch( |
| 197 | + x=batch, targets=batch_labels, mask=mask_batch, eps=batch_eps, eps_step=batch_eps_step |
| 198 | + ) |
| 199 | + else: |
| 200 | + adversarial_batch = self._generate_batch( |
| 201 | + x=batch, targets=batch_labels, mask=mask_batch, eps=batch_eps, eps_step=batch_eps_step |
| 202 | + ) |
| 203 | + |
| 205 | + # keep only the successful adversarial examples
| 205 | + attack_success = compute_success_array( |
| 206 | + self.estimator, |
| 207 | + batch, |
| 208 | + batch_labels, |
| 209 | + adversarial_batch, |
| 210 | + self.targeted, |
| 211 | + batch_size=self.batch_size, |
| 212 | + ) |
| 213 | + adv_x[batch_index_1:batch_index_2][attack_success] = adversarial_batch[attack_success] |
209 | 214 |
210 | 215 | logger.info( |
211 | 216 | "Success rate of attack: %.2f%%", |
212 | | - rate_best |
213 | | - if rate_best is not None |
214 | | - else 100 * compute_success(self.estimator, x, y, adv_x_best, self.targeted, batch_size=self.batch_size), |
| 217 | + 100 * compute_success(self.estimator, x, y, adv_x, self.targeted, batch_size=self.batch_size), |
215 | 218 | ) |
216 | 219 |
217 | | - return adv_x_best |
| 220 | + return adv_x |
218 | 221 |
219 | 222 | def _generate_batch( |
220 | 223 | self, |
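Net effect of the hunk above: random restarts now run per batch inside the data-loader loop rather than regenerating adversarial examples for the whole dataset once per restart, and instead of keeping the single restart with the best aggregate rate (the old `adv_x_best` / `rate_best`), the best result is kept per sample. Roughly, with `attack` standing in for `self._generate_batch` and the other names as placeholders:

    # Sketch of the per-batch restart logic above (assumed names, not the actual method).
    for restart in range(max(1, num_random_init)):
        candidate = attack(batch_x, batch_y)
        if restart == 0:
            adv_batch = candidate  # nothing to compare against yet
        else:
            ok = compute_success_array(estimator, batch_x, batch_y, candidate, targeted, batch_size=batch_size)
            adv_batch[ok] = candidate[ok]  # a later restart can improve some samples without discarding others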
@@ -245,7 +248,13 @@ def _generate_batch( |
245 | 248 |
246 | 249 | for i_max_iter in range(self.max_iter): |
247 | 250 | adv_x = self._compute_torch( |
248 | | - adv_x, inputs, targets, mask, eps, eps_step, self.num_random_init > 0 and i_max_iter == 0, |
| 251 | + adv_x, |
| 252 | + inputs, |
| 253 | + targets, |
| 254 | + mask, |
| 255 | + eps, |
| 256 | + eps_step, |
| 257 | + self.num_random_init > 0 and i_max_iter == 0, |
249 | 258 | ) |
250 | 259 |
251 | 260 | return adv_x.cpu().detach().numpy() |
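The reformatted call keeps the original semantics: `self.num_random_init > 0 and i_max_iter == 0` means the random start inside `_compute_torch` is applied only on the first PGD iteration of a restart, while later iterations refine the same perturbation. As a sketch, with `pgd_step` standing in for `self._compute_torch`:

    # Random initialization only on iteration 0 (placeholder names).
    x_adv = x
    for i in range(max_iter):
        x_adv = pgd_step(x_adv, x, y, mask, eps, eps_step, random_init=(num_random_init > 0 and i == 0))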
@@ -408,25 +417,31 @@ def _projection( |
408 | 417 | "The parameter `eps` of type `np.ndarray` is not supported to use with norm 2." |
409 | 418 | ) |
410 | 419 |
411 | | - values_tmp = values_tmp * torch.min( |
412 | | - torch.tensor([1.0], dtype=torch.float32).to(self.estimator.device), |
413 | | - eps / (torch.norm(values_tmp, p=2, dim=1) + tol), |
414 | | - ).unsqueeze_(-1) |
| 420 | + values_tmp = ( |
| 421 | + values_tmp |
| 422 | + * torch.min( |
| 423 | + torch.tensor([1.0], dtype=torch.float32).to(self.estimator.device), |
| 424 | + eps / (torch.norm(values_tmp, p=2, dim=1) + tol), |
| 425 | + ).unsqueeze_(-1) |
| 426 | + ) |
415 | 427 |
416 | 428 | elif norm_p == 1: |
417 | 429 | if isinstance(eps, np.ndarray): |
418 | 430 | raise NotImplementedError( |
419 | 431 | "The parameter `eps` of type `np.ndarray` is not supported to use with norm 1." |
420 | 432 | ) |
421 | 433 |
422 | | - values_tmp = values_tmp * torch.min( |
423 | | - torch.tensor([1.0], dtype=torch.float32).to(self.estimator.device), |
424 | | - eps / (torch.norm(values_tmp, p=1, dim=1) + tol), |
425 | | - ).unsqueeze_(-1) |
| 434 | + values_tmp = ( |
| 435 | + values_tmp |
| 436 | + * torch.min( |
| 437 | + torch.tensor([1.0], dtype=torch.float32).to(self.estimator.device), |
| 438 | + eps / (torch.norm(values_tmp, p=1, dim=1) + tol), |
| 439 | + ).unsqueeze_(-1) |
| 440 | + ) |
426 | 441 |
427 | 442 | elif norm_p in [np.inf, "inf"]: |
428 | 443 | if isinstance(eps, np.ndarray): |
429 | | - eps = eps * np.ones_like(values) |
| 444 | + eps = eps * np.ones_like(values.cpu()) |
430 | 445 | eps = eps.reshape([eps.shape[0], -1]) |
431 | 446 |
432 | 447 | values_tmp = values_tmp.sign() * torch.min( |
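Both reformatted branches implement the same projection onto the eps-ball: each sample's perturbation is scaled by min(1, eps / (||delta||_p + tol)), which leaves it untouched if it already lies inside the ball and rescales it onto the surface otherwise. The `values.cpu()` change in the inf-norm branch appears to matter when `eps` is a per-sample `np.ndarray` and the attack runs on a GPU, since `np.ones_like` cannot consume a CUDA tensor directly (an inference from the code, not stated in the diff). A standalone sketch of the L2 case:

    import torch

    def project_l2(delta: torch.Tensor, eps: float, tol: float = 1e-10) -> torch.Tensor:
        # Project each sample's perturbation onto the L2 ball of radius eps.
        flat = delta.reshape(delta.shape[0], -1)
        scale = torch.clamp(eps / (flat.norm(p=2, dim=1) + tol), max=1.0)
        return (flat * scale.unsqueeze(-1)).reshape(delta.shape)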