
Commit 5b1bfdf

Merge branch 'dev_1.9.0' into development_maintenance_190
2 parents 36bca55 + 16e84e4

21 files changed: +780 additions, -65 deletions

.github/workflows/ci-lingvo.yml
Lines changed: 2 additions & 1 deletion

@@ -47,7 +47,7 @@ jobs:
           sudo apt-get update
           sudo apt-get -y -q install ffmpeg libavcodec-extra
           python -m pip install --upgrade pip setuptools wheel
-          pip install -q -r <(sed '/^scipy/d;/^matplotlib/d;/^pandas/d;/^statsmodels/d;/^numba/d' requirements_test.txt)
+          pip install -q -r <(sed '/^scipy/d;/^matplotlib/d;/^pandas/d;/^statsmodels/d;/^numba/d;/^jax/d' requirements_test.txt)
           pip install scipy==1.5.4
           pip install matplotlib==3.3.4
           pip install pandas==1.1.5
@@ -58,6 +58,7 @@ jobs:
           pip install lingvo==${{ matrix.lingvo }}
           pip install tensorflow-addons==0.9.1
           pip install model-pruning-google-research==0.0.3
+          pip install jax[cpu]==0.2.17
           pip list
      - name: Run ${{ matrix.name }} Tests
        run: ./run_tests.sh ${{ matrix.framework }}

art/attacks/evasion/pixel_threshold.py
Lines changed: 3 additions & 1 deletion

@@ -156,9 +156,11 @@ def generate(self, x: np.ndarray, y: Optional[np.ndarray] = None, **kwargs) -> n
            raise ValueError(
                "This attack has not yet been tested for binary classification with a single output classifier."
            )
-        if len(y.shape) > 1:
+        if y.ndim > 1 and y.shape[1] > 1:
            y = np.argmax(y, axis=1)

+        y = np.squeeze(y)
+
        if self.th is None:
            logger.info(
                "Performing minimal perturbation Attack. \

art/defences/preprocessor/mp3_compression_pytorch.py
Lines changed: 6 additions & 9 deletions

@@ -62,22 +62,19 @@ def __init__(
        :param device_type: Type of device on which the classifier is run, either `gpu` or `cpu`.
        :param verbose: Show progress bars.
        """
-        import torch  # lgtm [py/repeated-import]
        from torch.autograd import Function

-        super().__init__(is_fitted=True, apply_fit=apply_fit, apply_predict=apply_predict)
+        super().__init__(
+            device_type=device_type,
+            is_fitted=True,
+            apply_fit=apply_fit,
+            apply_predict=apply_predict,
+        )
        self.channels_first = channels_first
        self.sample_rate = sample_rate
        self.verbose = verbose
        self._check_params()

-        # Set device
-        if device_type == "cpu" or not torch.cuda.is_available():
-            self._device = torch.device("cpu")
-        else:  # pragma: no cover
-            cuda_idx = torch.cuda.current_device()
-            self._device = torch.device("cuda:{}".format(cuda_idx))
-
        self.compression_numpy = Mp3Compression(
            sample_rate=sample_rate,
            channels_first=channels_first,

art/defences/preprocessor/preprocessor.py
Lines changed: 12 additions & 0 deletions

@@ -140,6 +140,18 @@ class PreprocessorPyTorch(Preprocessor):
    Abstract base class for preprocessing defences implemented in PyTorch that support efficient preprocessor-chaining.
    """

+    def __init__(self, device_type: str = "gpu", **kwargs):
+        import torch  # lgtm [py/repeated-import]
+
+        super().__init__(**kwargs)
+
+        # Set device
+        if device_type == "cpu" or not torch.cuda.is_available():
+            self._device = torch.device("cpu")
+        else:  # pragma: no cover
+            cuda_idx = torch.cuda.current_device()
+            self._device = torch.device("cuda:{}".format(cuda_idx))
+
    @abc.abstractmethod
    def forward(
        self, x: "torch.Tensor", y: Optional["torch.Tensor"] = None
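With device selection moved into PreprocessorPyTorch.__init__, a subclass only forwards device_type and inherits self._device. A minimal sketch of the pattern the refactored defences in this commit now follow; the DoublePreprocessorPyTorch class and its forward body are illustrative only, and the sketch assumes forward is the only abstract method that must be implemented:

from typing import Optional, Tuple

import torch

from art.defences.preprocessor.preprocessor import PreprocessorPyTorch


class DoublePreprocessorPyTorch(PreprocessorPyTorch):
    """Toy defence that doubles the input, shown only to illustrate the new base-class constructor."""

    def __init__(self, device_type: str = "gpu", apply_fit: bool = False, apply_predict: bool = True):
        # Device selection now happens in the base class.
        super().__init__(device_type=device_type, is_fitted=True, apply_fit=apply_fit, apply_predict=apply_predict)

    def forward(
        self, x: "torch.Tensor", y: Optional["torch.Tensor"] = None
    ) -> Tuple["torch.Tensor", Optional["torch.Tensor"]]:
        # self._device was set by PreprocessorPyTorch.__init__.
        return x.to(self._device) * 2, y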

art/defences/preprocessor/spatial_smoothing_pytorch.py
Lines changed: 5 additions & 9 deletions

@@ -72,22 +72,18 @@ def __init__(
        :param apply_predict: True if applied during predicting.
        :param device_type: Type of device on which the classifier is run, either `gpu` or `cpu`.
        """
-        import torch  # lgtm [py/repeated-import]

-        super().__init__(apply_fit=apply_fit, apply_predict=apply_predict)
+        super().__init__(
+            device_type=device_type,
+            apply_fit=apply_fit,
+            apply_predict=apply_predict,
+        )

        self.channels_first = channels_first
        self.window_size = window_size
        self.clip_values = clip_values
        self._check_params()

-        # Set device
-        if device_type == "cpu" or not torch.cuda.is_available():
-            self._device = torch.device("cpu")
-        else:  # pragma: no cover
-            cuda_idx = torch.cuda.current_device()
-            self._device = torch.device("cuda:{}".format(cuda_idx))
-
        from kornia.filters import MedianBlur

        class MedianBlurCustom(MedianBlur):

art/defences/preprocessor/video_compression_pytorch.py
Lines changed: 6 additions & 9 deletions

@@ -68,23 +68,20 @@ def __init__(
        :param device_type: Type of device on which the classifier is run, either `gpu` or `cpu`.
        :param verbose: Show progress bars.
        """
-        import torch  # lgtm [py/repeated-import]
        from torch.autograd import Function

-        super().__init__(is_fitted=True, apply_fit=apply_fit, apply_predict=apply_predict)
+        super().__init__(
+            device_type=device_type,
+            is_fitted=True,
+            apply_fit=apply_fit,
+            apply_predict=apply_predict,
+        )
        self.video_format = video_format
        self.constant_rate_factor = constant_rate_factor
        self.channels_first = channels_first
        self.verbose = verbose
        self._check_params()

-        # Set device
-        if device_type == "cpu" or not torch.cuda.is_available():
-            self._device = torch.device("cpu")
-        else:  # pragma: no cover
-            cuda_idx = torch.cuda.current_device()
-            self._device = torch.device("cuda:{}".format(cuda_idx))
-
        self.compression_numpy = VideoCompression(
            video_format=video_format,
            constant_rate_factor=constant_rate_factor,

art/estimators/classification/keras.py
Lines changed: 2 additions & 1 deletion

@@ -562,13 +562,14 @@ def fit(self, x: np.ndarray, y: np.ndarray, batch_size: int = 128, nb_epochs: in
               `fit_generator` function in Keras and will be passed to this function as such. Including the number of
               epochs or the number of steps per epoch as part of this argument will result in as error.
        """
+        y_ndim = y.ndim
        y = check_and_transform_label_format(y, self.nb_classes)

        # Apply preprocessing
        x_preprocessed, y_preprocessed = self._apply_preprocessing(x, y, fit=True)

        # Adjust the shape of y for loss functions that do not take labels in one-hot encoding
-        if self._reduce_labels:
+        if self._reduce_labels or y_ndim == 1:
            y_preprocessed = np.argmax(y_preprocessed, axis=1)

        self._model.fit(x=x_preprocessed, y=y_preprocessed, batch_size=batch_size, epochs=nb_epochs, **kwargs)
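Recording y.ndim before check_and_transform_label_format expands index labels to one-hot lets fit reduce them back, so a Keras model compiled with a sparse loss still receives the label shape it expects even when _reduce_labels is False. A rough NumPy-only illustration of the effect (label values are made up):

import numpy as np

y = np.array([2, 0, 1])        # caller passes 1-D index labels
y_ndim = y.ndim                # remembered before any transformation

# check_and_transform_label_format would expand these to one-hot, e.g.:
y_one_hot = np.eye(3)[y]       # shape (3, 3)

reduce_labels = False          # e.g. the model was compiled with a sparse loss
if reduce_labels or y_ndim == 1:
    y_fit = np.argmax(y_one_hot, axis=1)   # back to [2 0 1] before calling model.fit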

art/experimental/__init__.py
Lines changed: 4 additions & 0 deletions

@@ -0,0 +1,4 @@
+"""
+This module contains the experimental Estimator API.
+"""
+from art.experimental.estimators.jax import JaxEstimator

art/experimental/estimators/__init__.py

Whitespace-only changes.

art/experimental/estimators/classification/__init__.py
Lines changed: 4 additions & 0 deletions

@@ -0,0 +1,4 @@
+"""
+Experimental classifiers.
+"""
+from art.experimental.estimators.classification.jax import JaxClassifier
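These new packages re-export JaxEstimator and JaxClassifier as experimental APIs, matching the jax[cpu]==0.2.17 pin added to CI above. Their constructors are not part of this diff, so the snippet below only sketches the kind of JAX pieces such a wrapper is typically built around -- parameters, a predict function, and a loss with its gradient; every name in it is an illustrative assumption, not the ART API:

import jax
import jax.numpy as jnp

# Illustrative model pieces only -- not the JaxClassifier signature.
params = {"w": jnp.zeros((4, 3)), "b": jnp.zeros(3)}

def predict(params, x):
    # Linear logits followed by softmax probabilities.
    return jax.nn.softmax(x @ params["w"] + params["b"])

def loss(params, x, y_one_hot):
    # Cross-entropy between predicted probabilities and one-hot labels.
    preds = predict(params, x)
    return -jnp.mean(jnp.sum(y_one_hot * jnp.log(preds + 1e-9), axis=1))

# Gradients w.r.t. the parameters, as an estimator wrapper would need for fitting.
grads = jax.grad(loss)(params, jnp.ones((2, 4)), jnp.eye(3)[jnp.array([0, 2])])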
