Commit 82c70ed

Merge pull request #1476 from Trusted-AI/dev_1.9.1
Update to ART 1.9.1
2 parents 9b9a962 + 76fef71

File tree

10 files changed: 71 additions, 23 deletions


art/__init__.py

Lines changed: 1 addition & 1 deletion
@@ -12,7 +12,7 @@
 from art import preprocessing
 
 # Semantic Version
-__version__ = "1.9.0"
+__version__ = "1.9.1.dev0"
 
 # pylint: disable=C0103
 

art/attacks/evasion/laser_attack/utils.py

Lines changed: 2 additions & 1 deletion
@@ -27,7 +27,6 @@
 from typing import Any, Callable, List, Tuple, Union
 
 import numpy as np
-import matplotlib.pyplot as plt
 
 
 class Line:
@@ -256,6 +255,8 @@ def save_nrgb_image(image: np.ndarray, number=0, name_length=5, directory="attac
     :param name_length: Length of the random string in the name.
     :param directory: Directory where images will be saved.
     """
+    import matplotlib.pyplot as plt
+
     alphabet = np.array(list(string.ascii_letters))
     Path(directory).mkdir(exist_ok=True)
     im_name = f"{directory}/{number}_{''.join(np.random.choice(alphabet, size=name_length))}.jpg"

art/defences/preprocessor/video_compression.py

Lines changed: 14 additions & 1 deletion
@@ -27,6 +27,7 @@
 import os
 from tempfile import TemporaryDirectory
 from typing import Optional, Tuple
+import warnings
 
 import numpy as np
 from tqdm.auto import tqdm
@@ -78,7 +79,8 @@ def __call__(self, x: np.ndarray, y: Optional[np.ndarray] = None) -> Tuple[np.nd
         """
         Apply video compression to sample `x`.
 
-        :param x: Sample to compress of shape NCFHW or NFHWC. `x` values are expected to be in the data range [0, 255].
+        :param x: Sample to compress of shape NCFHW or NFHWC. `x` values are expected to be either in range [0, 1] or
+                  [0, 255].
         :param y: Labels of the sample `x`. This function does not affect them in any way.
         :return: Compressed sample.
         """
@@ -92,6 +94,9 @@ def compress_video(x: np.ndarray, video_format: str, constant_rate_factor: int,
             video_path = os.path.join(dir_, f"tmp_video.{video_format}")
             _, height, width, _ = x.shape
 
+            if (height % 2) != 0 or (width % 2) != 0:
+                warnings.warn("Codec might require even number of pixels in height and width.")
+
             # numpy to local video file
             process = (
                 ffmpeg.input("pipe:", format="rawvideo", pix_fmt="rgb24", s=f"{width}x{height}")
@@ -118,11 +123,19 @@ def compress_video(x: np.ndarray, video_format: str, constant_rate_factor: int,
             x = np.transpose(x, (0, 2, 3, 4, 1))
 
         # apply video compression per video item
+        scale = 1
+        if x.min() >= 0 and x.max() <= 1.0:
+            scale = 255
+
         x_compressed = x.copy()
         with TemporaryDirectory(dir=config.ART_DATA_PATH) as tmp_dir:
             for i, x_i in enumerate(tqdm(x, desc="Video compression", disable=not self.verbose)):
+                x_i *= scale
                 x_compressed[i] = compress_video(x_i, self.video_format, self.constant_rate_factor, dir_=tmp_dir)
 
+        x_compressed = x_compressed / scale
+        x_compressed = x_compressed.astype(x.dtype)
+
         if self.channels_first:
             x_compressed = np.transpose(x_compressed, (0, 4, 1, 2, 3))
 
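
The NumPy preprocessor now accepts videos in either the [0, 1] or the [0, 255] range: inputs detected in [0, 1] are scaled up to [0, 255] before encoding and scaled back (and cast back to the input dtype) afterwards; the PyTorch wrapper below applies the same rescaling around this NumPy backend. A minimal usage sketch, assuming the `VideoCompression` preprocessor exported from `art.defences.preprocessor` and an installed ffmpeg (frame height and width kept even to avoid the new codec warning):

import numpy as np
from art.defences.preprocessor import VideoCompression

# one video sample: 16 frames of 32x32 RGB pixels (NFHWC layout), values in [0, 1]
x = np.random.rand(1, 16, 32, 32, 3).astype(np.float32)

# requires the ffmpeg binary and the ffmpeg-python package
compression = VideoCompression(video_format="mp4", constant_rate_factor=30, channels_first=False)
x_compressed, _ = compression(x)

# the compressed sample keeps the original shape, dtype and [0, 1] range
assert x_compressed.shape == x.shape and x_compressed.dtype == x.dtype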

art/defences/preprocessor/video_compression_pytorch.py

Lines changed: 7 additions & 1 deletion
@@ -116,11 +116,17 @@ def forward(
         """
         Apply video compression to sample `x`.
 
-        :param x: Sample to compress of shape NCFHW or NFHWC. `x` values are expected to be in the data range [0, 255].
+        :param x: Sample to compress of shape NCFHW or NFHWC. `x` values are expected to be either in range [0, 1] or
+                  [0, 255].
         :param y: Labels of the sample `x`. This function does not affect them in any way.
         :return: Compressed sample.
         """
+        scale = 1
+        if x.min() >= 0 and x.max() <= 1.0:
+            scale = 255
+        x = x * scale
         x_compressed = self._compression_pytorch_numpy.apply(x)
+        x_compressed = x_compressed / scale
         return x_compressed, y
 
     def _check_params(self) -> None:

art/estimators/classification/keras.py

Lines changed: 4 additions & 2 deletions
@@ -392,7 +392,8 @@ def compute_loss(  # pylint: disable=W0221
 
         if self._orig_loss and hasattr(self._orig_loss, "reduction"):
             prev_reduction = self._orig_loss.reduction
-            self._orig_loss.reduction = self._losses.Reduction.NONE
+            if hasattr(self._losses, "Reduction"):
+                self._orig_loss.reduction = self._losses.Reduction.NONE
             loss = self._orig_loss(y_preprocessed, predictions)
             self._orig_loss.reduction = prev_reduction
         else:
@@ -401,7 +402,8 @@ def compute_loss(  # pylint: disable=W0221
             y_preprocessed = k.constant(y_preprocessed)
             for loss_function in self._model.loss_functions:
                 prev_reduction.append(loss_function.reduction)
-                loss_function.reduction = self._losses.Reduction.NONE
+                if hasattr(self._losses, "Reduction"):
+                    loss_function.reduction = self._losses.Reduction.NONE
             loss = self._loss_function(y_preprocessed, predictions)
             for i, loss_function in enumerate(self._model.loss_functions):
                 loss_function.reduction = prev_reduction[i]
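
The added `hasattr` checks make the temporary reduction override a no-op on Keras versions whose losses module does not expose a `Reduction` enum, instead of raising an `AttributeError`. A standalone sketch of the same guard pattern, assuming a TensorFlow-backed Keras loss (the names below are illustrative, not taken from this diff):

import tensorflow as tf

losses_module = tf.keras.losses
loss_object = tf.keras.losses.CategoricalCrossentropy()

prev_reduction = loss_object.reduction
if hasattr(losses_module, "Reduction"):
    # only override the reduction when the backend actually provides the enum
    loss_object.reduction = losses_module.Reduction.NONE
# ... compute per-sample loss values with loss_object here ...
loss_object.reduction = prev_reduction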

art/estimators/classification/pytorch.py

Lines changed: 8 additions & 4 deletions
@@ -834,7 +834,11 @@ def get_activations(
         self._model.eval()
 
         # Apply defences
-        x_preprocessed, _ = self._apply_preprocessing(x=x, y=None, fit=False)
+        if framework:
+            no_grad = False
+        else:
+            no_grad = True
+        x_preprocessed, _ = self._apply_preprocessing(x=x, y=None, fit=False, no_grad=no_grad)
 
         # Get index of the extracted layer
         if isinstance(layer, six.string_types):
@@ -849,9 +853,9 @@
             raise TypeError("Layer must be of type str or int")
 
         if framework:
-            if isinstance(x, torch.Tensor):
-                return self._model(x)[layer_index]
-            return self._model(torch.from_numpy(x).to(self._device))[layer_index]
+            if isinstance(x_preprocessed, torch.Tensor):
+                return self._model(x_preprocessed)[layer_index]
+            return self._model(torch.from_numpy(x_preprocessed).to(self._device))[layer_index]
 
         # Run prediction with batch processing
         results = []
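
With this change, `framework=True` runs the preprocessing defences with gradients enabled and feeds the preprocessed input (rather than the raw `x`) to the model, keeping the framework path consistent with the batched NumPy path. A hedged usage sketch (the toy classifier below is illustrative, not part of this diff):

import numpy as np
import torch
from art.estimators.classification import PyTorchClassifier

model = torch.nn.Sequential(torch.nn.Flatten(), torch.nn.Linear(3 * 32 * 32, 10))
classifier = PyTorchClassifier(
    model=model,
    loss=torch.nn.CrossEntropyLoss(),
    input_shape=(3, 32, 32),
    nb_classes=10,
)

x = np.random.rand(4, 3, 32, 32).astype(np.float32)

# default path: preprocessing runs under no_grad and a NumPy array is returned
activations = classifier.get_activations(x, layer=0, batch_size=4)

# framework path: preprocessing keeps gradients and the preprocessed input is fed to the model,
# so the result is a torch tensor that can sit inside a larger autograd graph
activations_tensor = classifier.get_activations(x, layer=0, framework=True)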

art/estimators/object_tracking/pytorch_goturn.py

Lines changed: 2 additions & 1 deletion
@@ -670,10 +670,11 @@ def predict(self, x: np.ndarray, batch_size: int = 128, **kwargs) -> List[Dict[s
             x_i = x[i].to(self.device)
 
             # Apply preprocessing
+            x_i = torch.unsqueeze(x_i, dim=0)
             x_i, _ = self._apply_preprocessing(x_i, y=None, fit=False, no_grad=False)
+            x_i = torch.squeeze(x_i)
 
             y_pred = self._track(x=x_i, y_init=y_init[i])
-
             prediction_dict = dict()
             if isinstance(x, np.ndarray):
                 prediction_dict["boxes"] = y_pred.detach().cpu().numpy()
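
The tracker now adds a leading batch axis before running the preprocessing defences and removes it again afterwards, so the preprocessing step receives a batched tensor while `_track` still gets a single sample. A small illustrative sketch of that shape handling (the shapes are made up for illustration):

import torch

x_i = torch.rand(8, 3, 128, 128)    # one tracking sample, e.g. frames x channels x height x width
x_i = torch.unsqueeze(x_i, dim=0)   # add a batch axis: (1, 8, 3, 128, 128)
# ... preprocessing defences would run on the batched tensor here ...
x_i = torch.squeeze(x_i)            # drop the size-1 batch axis again: (8, 3, 128, 128)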

art/utils.py

Lines changed: 22 additions & 11 deletions
@@ -975,17 +975,20 @@ def load_diabetes(raw: bool = False, test_set: float = 0.3) -> DATASET_TYPE:
     return (x_train, y_train), (x_test, y_test), min_, max_
 
 
-def load_nursery(raw: bool = False, test_set: float = 0.2, transform_social: bool = False) -> DATASET_TYPE:
+def load_nursery(
+    raw: bool = False, scaled: bool = True, test_set: float = 0.2, transform_social: bool = False
+) -> DATASET_TYPE:
     """
     Loads the UCI Nursery dataset from `config.ART_DATA_PATH` or downloads it if necessary.
 
     :param raw: `True` if no preprocessing should be applied to the data. Otherwise, categorical data is one-hot
-                encoded and data is scaled using sklearn's StandardScaler.
+                encoded and data is scaled using sklearn's StandardScaler according to the value of `scaled`.
+    :param scaled: `True` if data should be scaled.
     :param test_set: Proportion of the data to use as validation split. The value should be between 0 and 1.
     :param transform_social: If `True`, transforms the social feature to be binary for the purpose of attribute
                              inference. This is done by assigning the original value 'problematic' the new value 1, and
                              the other original values are assigned the new value 0.
-    :return: Entire dataset and labels.
+    :return: Entire dataset and labels as numpy array.
     """
     import pandas as pd
     import sklearn.preprocessing
@@ -1050,16 +1053,20 @@ def modify_social(value):
     data = data.drop(features_to_remove, axis=1)
 
     # normalize data
-    label = data.loc[:, "label"]
-    features = data.drop(["label"], axis=1)
-    scaler = sklearn.preprocessing.StandardScaler()
-    scaler.fit(features)
-    scaled_features = pd.DataFrame(scaler.transform(features), columns=features.columns)
-
-    data = pd.concat([label, scaled_features], axis=1, join="inner")
+    if scaled:
+        label = data.loc[:, "label"]
+        features = data.drop(["label"], axis=1)
+        scaler = sklearn.preprocessing.StandardScaler()
+        scaler.fit(features)
+        scaled_features = pd.DataFrame(scaler.transform(features), columns=features.columns)
+        data = pd.concat([label, scaled_features], axis=1, join="inner")
 
     features = data.drop(["label"], axis=1)
-    min_, max_ = np.amin(features.to_numpy()), np.amax(features.to_numpy())
+    if raw:
+        numeric_features = features.drop(categorical_features, axis=1).to_numpy().astype(np.int32)
+        min_, max_ = np.amin(numeric_features), np.amax(numeric_features)
+    else:
+        min_, max_ = np.amin(features.to_numpy().astype(np.float64)), np.amax(features.to_numpy().astype(np.float64))
 
     # Split training and test sets
     stratified = sklearn.model_selection.StratifiedShuffleSplit(n_splits=1, test_size=test_set, random_state=18)
@@ -1071,6 +1078,10 @@ def modify_social(value):
     x_test = test.drop(["label"], axis=1).to_numpy()
     y_test = test.loc[:, "label"].to_numpy()
 
+    if not raw and not scaled:
+        x_train = x_train.astype(np.float64)
+        x_test = x_test.astype(np.float64)
+
     return (x_train, y_train), (x_test, y_test), min_, max_
 
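
`load_nursery` now accepts a `scaled` flag: with `scaled=False` the one-hot encoded features are returned without StandardScaler normalization (cast to float64), and with `raw=True` the reported min/max now cover only the numeric columns. A short usage sketch based on the calls exercised in the updated test below:

from art.utils import load_nursery

# unscaled but one-hot encoded features; per the test below, values span [0.0, 4.0]
(x_train, y_train), (x_test, y_test), min_, max_ = load_nursery(scaled=False)

# default behaviour (scaled=True) is unchanged
(x_train, y_train), (x_test, y_test), min_, max_ = load_nursery()

# raw=True returns the data without one-hot encoding or scaling
(x_train_raw, y_train_raw), (x_test_raw, y_test_raw), min_raw, max_raw = load_nursery(raw=True)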

docs/conf.py

Lines changed: 1 addition & 1 deletion
@@ -27,7 +27,7 @@
 # The short X.Y version
 version = "1.9"
 # The full version, including alpha/beta/rc tags
-release = "1.9.0"
+release = "1.9.1.dev0"
 
 
 # -- General configuration ---------------------------------------------------

tests/test_utils.py

Lines changed: 10 additions & 0 deletions
@@ -387,6 +387,16 @@ def test_stl(self):
         self.assertEqual(x_test.shape[0], y_test.shape[0])
 
     def test_nursery(self):
+        (x_train, y_train), (x_test, y_test), min_, max_ = load_nursery(raw=True)
+        self.assertEqual(x_train.shape[0], y_train.shape[0])
+        self.assertEqual(x_test.shape[0], y_test.shape[0])
+
+        (x_train, y_train), (x_test, y_test), min_, max_ = load_nursery(scaled=False)
+        self.assertEqual(min_, 0.0)
+        self.assertEqual(max_, 4.0)
+        self.assertEqual(x_train.shape[0], y_train.shape[0])
+        self.assertEqual(x_test.shape[0], y_test.shape[0])
+
         (x_train, y_train), (x_test, y_test), min_, max_ = load_nursery()
         self.assertAlmostEqual(min_, -1.3419307411337875, places=6)
         self.assertEqual(max_, 2.0007720517562224)
