Commit 696f75a

Merge pull request #1912 from Trusted-AI/dev_1.12.2
Update to ART 1.12.2
2 parents: 3fbe568 + 0a9d6dd

File tree

9 files changed (+120, -30 lines)

art/attacks/evasion/__init__.py

Lines changed: 7 additions & 1 deletion
@@ -1,6 +1,9 @@
 """
 Module providing evasion attacks under a common interface.
 """
+# pylint: disable=C0413
+import importlib
+
 from art.attacks.evasion.adversarial_patch.adversarial_patch import AdversarialPatch
 from art.attacks.evasion.adversarial_patch.adversarial_patch_numpy import AdversarialPatchNumpy
 from art.attacks.evasion.adversarial_patch.adversarial_patch_tensorflow import AdversarialPatchTensorFlowV2
@@ -9,7 +12,10 @@
 from art.attacks.evasion.adversarial_asr import CarliniWagnerASR
 from art.attacks.evasion.auto_attack import AutoAttack
 from art.attacks.evasion.auto_projected_gradient_descent import AutoProjectedGradientDescent
-from art.attacks.evasion.brendel_bethge import BrendelBethgeAttack
+
+if importlib.util.find_spec("numba") is not None:
+    from art.attacks.evasion.brendel_bethge import BrendelBethgeAttack
+
 from art.attacks.evasion.boundary import BoundaryAttack
 from art.attacks.evasion.carlini import CarliniL2Method, CarliniLInfMethod, CarliniL0Method
 from art.attacks.evasion.decision_tree_attack import DecisionTreeAttack

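With this guard, `BrendelBethgeAttack` is exported only when numba is installed, so `from art.attacks.evasion import BrendelBethgeAttack` raises an ImportError when numba is absent, instead of the whole subpackage failing to import. A minimal sketch of how downstream code can cope (the fallback is illustrative, not part of this commit):

    # Hedged sketch: handle the now-optional BrendelBethgeAttack export.
    try:
        from art.attacks.evasion import BrendelBethgeAttack
    except ImportError:
        # numba is missing, so the guarded import above never defined the name
        BrendelBethgeAttack = None

    if BrendelBethgeAttack is None:
        print("BrendelBethgeAttack unavailable; install numba to enable it.")
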
art/attacks/evasion/boundary.py

Lines changed: 0 additions & 5 deletions
@@ -133,11 +133,6 @@ def generate(self, x: np.ndarray, y: Optional[np.ndarray] = None, **kwargs) -> np.ndarray:
 
         y = check_and_transform_label_format(y, nb_classes=self.estimator.nb_classes, return_one_hot=False)
 
-        if y is not None and self.estimator.nb_classes == 2 and y.shape[1] == 1:
-            raise ValueError(  # pragma: no cover
-                "This attack has not yet been tested for binary classification with a single output classifier."
-            )
-
         # Get clip_min and clip_max from the classifier or infer them from data
         if self.estimator.clip_values is not None:
             clip_min, clip_max = self.estimator.clip_values

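(The removed guard appears to have been unreachable as written: `check_and_transform_label_format(..., return_one_hot=False)` two lines above flattens `y` to 1-D index labels, so `y.shape[1]` would raise an IndexError before the ValueError could ever fire.)
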
art/attacks/poisoning/sleeper_agent_attack.py

Lines changed: 5 additions & 0 deletions
@@ -229,6 +229,11 @@ def poison(  # type: ignore
                 best_B = B_  # pylint: disable=C0103
                 best_x_poisoned = x_poisoned
                 best_indices_poison = self.indices_poison
+        if best_B == np.finfo(np.float32).max:
+            logger.warning("Attack unsuccessful: all loss values were non-finite. Defaulting to final trial.")
+            best_B = B_  # pylint: disable=C0103
+            best_x_poisoned = x_poisoned
+            best_indices_poison = self.indices_poison
 
         # Apply De-Normalization
         if isinstance(

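The equality test works because `best_B` is presumably initialized to `np.finfo(np.float32).max` as a sentinel (the initialization sits outside this hunk): non-finite trial losses never compare smaller, so the sentinel surviving to the end means no trial produced a finite loss. A small self-contained sketch of the pattern:

    import numpy as np

    best_B = np.finfo(np.float32).max  # sentinel: "no finite loss seen yet"
    for B_ in [np.nan, np.inf, np.nan]:  # hypothetical per-trial losses, all non-finite
        if B_ < best_B:  # NaN comparisons are False; inf is not smaller than the sentinel
            best_B = B_

    if best_B == np.finfo(np.float32).max:
        print("all trials non-finite; falling back to the final trial")
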
art/estimators/certification/derandomized_smoothing/pytorch.py

Lines changed: 18 additions & 3 deletions
@@ -155,18 +155,21 @@ def fit(  # pylint: disable=W0221
         batch_size: int = 128,
         nb_epochs: int = 10,
         training_mode: bool = True,
+        drop_last: bool = False,
         scheduler: Optional[Any] = None,
         **kwargs,
     ) -> None:
         """
         Fit the classifier on the training set `(x, y)`.
-
         :param x: Training data.
         :param y: Target values (class labels) one-hot-encoded of shape (nb_samples, nb_classes) or index labels of
                   shape (nb_samples,).
         :param batch_size: Size of batches.
         :param nb_epochs: Number of epochs to use for training.
         :param training_mode: `True` for model set to training mode and `'False` for model set to evaluation mode.
+        :param drop_last: Set to ``True`` to drop the last incomplete batch, if the dataset size is not divisible by
+                          the batch size. If ``False`` and the size of dataset is not divisible by the batch size, then
+                          the last batch will be smaller. (default: ``False``)
         :param scheduler: Learning rate scheduler to run at the start of every epoch.
         :param kwargs: Dictionary of framework-specific arguments. This parameter is not currently supported for PyTorch
                        and providing it takes no effect.
@@ -187,7 +190,11 @@ def fit(  # pylint: disable=W0221
         # Check label shape
         y_preprocessed = self.reduce_labels(y_preprocessed)
 
-        num_batch = int(np.ceil(len(x_preprocessed) / float(batch_size)))
+        num_batch = len(x_preprocessed) / float(batch_size)
+        if drop_last:
+            num_batch = int(np.floor(num_batch))
+        else:
+            num_batch = int(np.ceil(num_batch))
         ind = np.arange(len(x_preprocessed))
 
         # Start training
@@ -207,7 +214,15 @@ def fit(  # pylint: disable=W0221
                 self._optimizer.zero_grad()
 
                 # Perform prediction
-                model_outputs = self._model(i_batch)
+                try:
+                    model_outputs = self._model(i_batch)
+                except ValueError as err:
+                    if "Expected more than 1 value per channel when training" in str(err):
+                        logger.exception(
+                            "Try dropping the last incomplete batch by setting drop_last=True in "
+                            "method PyTorchClassifier.fit."
+                        )
+                    raise err
 
                 # Form the loss function
                 loss = self._loss(model_outputs[-1], o_batch)  # lgtm [py/call-to-non-callable]

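The matched error text comes from PyTorch's batch-normalization layers, which cannot compute batch statistics in training mode from a single sample, which is exactly what a trailing batch of size 1 produces. A short, self-contained repro of the failure mode (illustrative, not from the commit):

    import torch

    bn = torch.nn.BatchNorm1d(num_features=4)
    bn.train()  # training mode: per-batch statistics are required
    try:
        bn(torch.randn(1, 4))  # a last incomplete batch of size 1
    except ValueError as err:
        print(err)  # "Expected more than 1 value per channel when training ..."
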
art/estimators/certification/randomized_smoothing/pytorch.py

Lines changed: 30 additions & 5 deletions
@@ -23,7 +23,7 @@
 from __future__ import absolute_import, division, print_function, unicode_literals
 
 import logging
-from typing import List, Optional, Tuple, Union, TYPE_CHECKING
+from typing import List, Optional, Tuple, Union, Any, TYPE_CHECKING
 
 import warnings
 import random
@@ -136,6 +136,8 @@ def fit(  # pylint: disable=W0221
         batch_size: int = 128,
         nb_epochs: int = 10,
         training_mode: bool = True,
+        drop_last: bool = False,
+        scheduler: Optional[Any] = None,
         **kwargs,
     ) -> None:
         """
@@ -147,6 +149,10 @@ def fit(  # pylint: disable=W0221
         :param batch_size: Size of batches.
         :param nb_epochs: Number of epochs to use for training.
         :param training_mode: `True` for model set to training mode and `'False` for model set to evaluation mode.
+        :param drop_last: Set to ``True`` to drop the last incomplete batch, if the dataset size is not divisible by
+                          the batch size. If ``False`` and the size of dataset is not divisible by the batch size, then
+                          the last batch will be smaller. (default: ``False``)
+        :param scheduler: Learning rate scheduler to run at the start of every epoch.
         :param kwargs: Dictionary of framework-specific arguments. This parameter is not currently supported for PyTorch
                        and providing it takes no effect.
         """
@@ -166,18 +172,26 @@ def fit(  # pylint: disable=W0221
         # Check label shape
         y_preprocessed = self.reduce_labels(y_preprocessed)
 
-        num_batch = int(np.ceil(len(x_preprocessed) / float(batch_size)))
+        num_batch = len(x_preprocessed) / float(batch_size)
+        if drop_last:
+            num_batch = int(np.floor(num_batch))
+        else:
+            num_batch = int(np.ceil(num_batch))
         ind = np.arange(len(x_preprocessed))
         std = torch.tensor(self.scale).to(self._device)
+
+        x_preprocessed = torch.from_numpy(x_preprocessed).to(self._device)
+        y_preprocessed = torch.from_numpy(y_preprocessed).to(self._device)
+
         # Start training
         for _ in tqdm(range(nb_epochs)):
             # Shuffle the examples
             random.shuffle(ind)
 
             # Train for one epoch
             for m in range(num_batch):
-                i_batch = torch.from_numpy(x_preprocessed[ind[m * batch_size : (m + 1) * batch_size]]).to(self._device)
-                o_batch = torch.from_numpy(y_preprocessed[ind[m * batch_size : (m + 1) * batch_size]]).to(self._device)
+                i_batch = x_preprocessed[ind[m * batch_size : (m + 1) * batch_size]]
+                o_batch = y_preprocessed[ind[m * batch_size : (m + 1) * batch_size]]
 
                 # Add random noise for randomized smoothing
                 i_batch = i_batch + torch.randn_like(i_batch, device=self._device) * std
@@ -186,7 +200,15 @@ def fit(  # pylint: disable=W0221
                 self._optimizer.zero_grad()
 
                 # Perform prediction
-                model_outputs = self._model(i_batch)
+                try:
+                    model_outputs = self._model(i_batch)
+                except ValueError as err:
+                    if "Expected more than 1 value per channel when training" in str(err):
+                        logger.exception(
+                            "Try dropping the last incomplete batch by setting drop_last=True in "
+                            "method PyTorchClassifier.fit."
+                        )
+                    raise err
 
                 # Form the loss function
                 loss = self._loss(model_outputs[-1], o_batch)  # lgtm [py/call-to-non-callable]
@@ -203,6 +225,9 @@ def fit(  # pylint: disable=W0221
 
                 self._optimizer.step()
 
+            if scheduler is not None:
+                scheduler.step()
+
     def predict(self, x: np.ndarray, batch_size: int = 128, **kwargs) -> np.ndarray:  # type: ignore
         """
         Perform prediction of the given classifier for a batch of inputs, taking an expectation over transformations.

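Besides the shared `drop_last` logic, this file now moves the whole dataset to the device once before the epoch loop instead of per batch, and steps an optional scheduler once per epoch. A hedged usage sketch of the new parameters; the constructor arguments follow ART's documented `PyTorchRandomizedSmoothing` API but should be treated as assumptions, not code from this commit:

    import numpy as np
    import torch
    from art.estimators.certification.randomized_smoothing import PyTorchRandomizedSmoothing

    model = torch.nn.Sequential(torch.nn.Flatten(), torch.nn.Linear(28 * 28, 10))
    optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
    scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=10, gamma=0.1)

    classifier = PyTorchRandomizedSmoothing(
        model=model,
        loss=torch.nn.CrossEntropyLoss(),
        optimizer=optimizer,
        input_shape=(1, 28, 28),
        nb_classes=10,
        scale=0.25,
    )

    x_train = np.random.rand(300, 1, 28, 28).astype(np.float32)
    y_train = np.random.randint(0, 10, size=300)

    # 300 samples with batch_size=128: drop_last=True skips the trailing 44-sample
    # batch; the scheduler is stepped once per epoch inside fit().
    classifier.fit(x_train, y_train, batch_size=128, nb_epochs=30, drop_last=True, scheduler=scheduler)
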
art/estimators/classification/pytorch.py

Lines changed: 29 additions & 5 deletions
@@ -362,6 +362,8 @@ def fit(  # pylint: disable=W0221
         batch_size: int = 128,
         nb_epochs: int = 10,
         training_mode: bool = True,
+        drop_last: bool = False,
+        scheduler: Optional[Any] = None,
         **kwargs,
     ) -> None:
         """
@@ -373,8 +375,12 @@ def fit(  # pylint: disable=W0221
         :param batch_size: Size of batches.
         :param nb_epochs: Number of epochs to use for training.
         :param training_mode: `True` for model set to training mode and `'False` for model set to evaluation mode.
+        :param drop_last: Set to ``True`` to drop the last incomplete batch, if the dataset size is not divisible by
+                          the batch size. If ``False`` and the size of dataset is not divisible by the batch size, then
+                          the last batch will be smaller. (default: ``False``)
+        :param scheduler: Learning rate scheduler to run at the start of every epoch.
         :param kwargs: Dictionary of framework-specific arguments. This parameter is not currently supported for PyTorch
-               and providing it takes no effect.
+                       and providing it takes no effect.
         """
         import torch  # lgtm [py/repeated-import]
 
@@ -392,24 +398,39 @@ def fit(  # pylint: disable=W0221
         # Check label shape
         y_preprocessed = self.reduce_labels(y_preprocessed)
 
-        num_batch = int(np.ceil(len(x_preprocessed) / float(batch_size)))
+        num_batch = len(x_preprocessed) / float(batch_size)
+        if drop_last:
+            num_batch = int(np.floor(num_batch))
+        else:
+            num_batch = int(np.ceil(num_batch))
         ind = np.arange(len(x_preprocessed))
 
+        x_preprocessed = torch.from_numpy(x_preprocessed).to(self._device)
+        y_preprocessed = torch.from_numpy(y_preprocessed).to(self._device)
+
         # Start training
         for _ in range(nb_epochs):
             # Shuffle the examples
             random.shuffle(ind)
 
             # Train for one epoch
             for m in range(num_batch):
-                i_batch = torch.from_numpy(x_preprocessed[ind[m * batch_size : (m + 1) * batch_size]]).to(self._device)
-                o_batch = torch.from_numpy(y_preprocessed[ind[m * batch_size : (m + 1) * batch_size]]).to(self._device)
+                i_batch = x_preprocessed[ind[m * batch_size : (m + 1) * batch_size]]
+                o_batch = y_preprocessed[ind[m * batch_size : (m + 1) * batch_size]]
 
                 # Zero the parameter gradients
                 self._optimizer.zero_grad()
 
                 # Perform prediction
-                model_outputs = self._model(i_batch)
+                try:
+                    model_outputs = self._model(i_batch)
+                except ValueError as err:
+                    if "Expected more than 1 value per channel when training" in str(err):
+                        logger.exception(
+                            "Try dropping the last incomplete batch by setting drop_last=True in "
+                            "method PyTorchClassifier.fit."
+                        )
+                    raise err
 
                 # Form the loss function
                 loss = self._loss(model_outputs[-1], o_batch)  # lgtm [py/call-to-non-callable]
@@ -426,6 +447,9 @@ def fit(  # pylint: disable=W0221
 
                 self._optimizer.step()
 
+            if scheduler is not None:
+                scheduler.step()
+
     def fit_generator(self, generator: "DataGenerator", nb_epochs: int = 20, **kwargs) -> None:
         """
         Fit the classifier using the generator that yields batches as specified.

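A worked example of the new batch-count arithmetic, which mirrors the `drop_last` semantics of `torch.utils.data.DataLoader`:

    import numpy as np

    n, batch_size = 1000, 128
    num_batch = n / float(batch_size)  # 7.8125
    print(int(np.floor(num_batch)))   # 7: drop_last=True skips the trailing 104 samples
    print(int(np.ceil(num_batch)))    # 8: drop_last=False keeps a smaller final batch of 104
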
art/metrics/verification_decisions_trees.py

Lines changed: 13 additions & 2 deletions
@@ -26,6 +26,8 @@
 import numpy as np
 from tqdm.auto import trange
 
+from art.utils import check_and_transform_label_format
+
 if TYPE_CHECKING:
     from art.estimators.classification.classifier import ClassifierDecisionTree
 
@@ -192,16 +194,25 @@ def verify(
         Verify the robustness of the classifier on the dataset `(x, y)`.
 
         :param x: Feature data of shape `(nb_samples, nb_features)`.
-        :param y: Labels, one-vs-rest encoding of shape `(nb_samples, nb_classes)`.
+        :param y: Labels, one-hot-encoded of shape `(nb_samples, nb_classes)` or indices of shape
+                  (nb_samples,)`.
         :param eps_init: Attack budget for the first search step.
         :param norm: The norm to apply epsilon.
        :param nb_search_steps: The number of search steps.
         :param max_clique: The maximum number of nodes in a clique.
         :param max_level: The maximum number of clique search levels.
         :return: A tuple of the average robustness bound and the verification error at `eps`.
         """
+        if np.min(x) < 0 or np.max(x) > 1:
+            raise ValueError(
+                "There are features not in the range [0, 1]. The current implementation only supports normalized input"
+                "values in range [0 1]."
+            )
+
         self.x: np.ndarray = x
-        self.y: np.ndarray = np.argmax(y, axis=1)
+        self.y: np.ndarray = check_and_transform_label_format(
+            y, nb_classes=self._classifier.nb_classes, return_one_hot=False
+        )
         self.max_clique: int = max_clique
         self.max_level: int = max_level

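With `check_and_transform_label_format`, `verify` now accepts either label encoding; both reduce to the same index labels. A small sketch (the three-class setup is arbitrary):

    import numpy as np
    from art.utils import check_and_transform_label_format

    y_one_hot = np.array([[0, 1, 0], [1, 0, 0]])
    y_index = np.array([1, 0])

    # Both calls are expected to yield the index labels [1 0]
    print(check_and_transform_label_format(y_one_hot, nb_classes=3, return_one_hot=False))
    print(check_and_transform_label_format(y_index, nb_classes=3, return_one_hot=False))
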
art/utils.py

Lines changed: 16 additions & 8 deletions
@@ -376,13 +376,13 @@ def projection_l1_1(values: np.ndarray, eps: Union[int, float, np.ndarray]) -> np.ndarray:
     mat = np.zeros((m, 2))
 
     # if a_sorted[i, n-1] >= a_sorted[i, n-2] + eps, then the projection is [0,...,0,eps]
-    done = False
+    done = early_done = False
     active = np.array([1] * m)
     after_vec = np.zeros((m, n))
     proj = a_sorted.copy()
     j = n - 2
     while j >= 0:
-        mat[:, 0] = mat[:, 0] + a_sorted[:, j + 1]  # = sum(a_sorted[: i] : i = j + 1,...,n-1
+        mat[:, 0] += a_sorted[:, j + 1]  # = sum(a_sorted[: i] : i = j + 1,...,n-1
         mat[:, 1] = a_sorted[:, j] * (n - j - 1) + eps
         # Find the max in each problem max{ sum{a_sorted[:, i] : i=j+1,..,n-1} , a_sorted[:, j] * (n-j-1) + eps }
         row_maxes = np.max(mat, axis=1)
@@ -396,21 +396,29 @@ def projection_l1_1(values: np.ndarray, eps: Union[int, float, np.ndarray]) -> np.ndarray:
         # has to be reduced is delta
         delta = (mat[:, 0] - eps) / (n - j - 1)
         # The vector of reductions
-        delta_vec = np.array([delta] * (n - j - 1))
-        delta_vec = np.transpose(delta_vec)
+        delta_vec = np.transpose(np.array([delta] * (n - j - 1)))
         # The sub-vectors: a_sorted[:, (j+1):]
         a_sub = a_sorted[:, (j + 1) :]
         # After reduction by delta_vec
         a_after = a_sub - delta_vec
         after_vec[:, (j + 1) :] = a_after
-        proj = (act_multiplier * after_vec) + ((1 - act_multiplier) * proj)
+        proj += act_multiplier * (after_vec - proj)
         active = active * ind_set
         if sum(active) == 0:
-            done = True
+            done = early_done = True
             break
         j -= 1
+    if not early_done:
+        delta = (mat[:, 0] + a_sorted[:, 0] - eps) / n
+        ind_set = np.sign(np.maximum(delta, 0))
+        act_multiplier = ind_set * active
+        act_multiplier = np.transpose([np.transpose(act_multiplier)] * n)
+        delta_vec = np.transpose(np.array([delta] * n))
+        a_after = a_sorted - delta_vec
+        proj += act_multiplier * (a_after - proj)
+        done = True
     if not done:
-        proj = active * a_sorted + (1 - active) * proj
+        proj = active * (a_sorted - proj)
 
     for i in range(m):
         proj[i, :] = proj[i, a_argsort_inv[i, :]]
@@ -461,7 +469,7 @@ def projection_l1_2(values: np.ndarray, eps: Union[int, float, np.ndarray]) -> np.ndarray:
         mat0[:, 1] = np.min(mat, axis=1)
         min_t = np.max(mat0, axis=1)
         if np.max(min_t) < 1e-8:
-            break
+            continue
         row_sums = row_sums - a_var[:, j] * (n - j)
         a_var[:, (j + 1) :] = a_var[:, (j + 1) :] - np.matmul(min_t.reshape((m, 1)), np.ones((1, n - j - 1)))
         a_var[:, j] = a_var[:, j] - min_t

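A hedged sanity check for the revised projection; that `projection_l1_1` maps each row onto the L1 ball of radius `eps` is inferred from the function name and surrounding code, not stated in this diff:

    import numpy as np
    from art.utils import projection_l1_1

    rng = np.random.default_rng(0)
    values = rng.uniform(0.0, 1.0, size=(4, 10))  # row L1 norms well above eps
    eps = 1.0

    proj = projection_l1_1(values, eps)
    # Each projected row is expected to satisfy ||row||_1 <= eps (up to tolerance)
    print(np.abs(proj).sum(axis=1))
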
setup.py

Lines changed: 2 additions & 1 deletion
@@ -13,7 +13,6 @@
     "six",
     "setuptools",
     "tqdm",
-    "numba>=0.53.1",
 ]
 
 docs_require = [
@@ -93,6 +92,7 @@ def get_version(rel_path):
         "cma",
         "librosa",
         "opencv-python",
+        "numba",
     ],
     "non_framework": [
         "matplotlib",
@@ -112,6 +112,7 @@ def get_version(rel_path):
         "codecov",
         "requests",
         "sortedcontainers",
+        "numba",
     ],
 },
 classifiers=[

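Taken together with the guarded import in art/attacks/evasion/__init__.py above, this makes numba an optional dependency: the base install no longer pulls it in, and `BrendelBethgeAttack` is simply not exported when numba is missing.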