
Commit 56f7f4a

GiulioZizzo committed: unifying art tools in verbose interface
Signed-off-by: GiulioZizzo <[email protected]>
1 parent aa6a0ee commit 56f7f4a

File tree

14 files changed: +28, −57 lines

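Context for readers of this diff: the commit replaces Keras-style integer verbosity levels (0/1) with a single boolean `verbose` flag across ART fit/predict calls. Below is a minimal usage sketch, not part of the diff itself; the toy model and random data are placeholders, and the only thing assumed from this change is that estimator `fit()` now takes a boolean `verbose` keyword.

# Illustrative sketch only: boolean verbose flag on fit(), assuming an ART build
# that includes this commit. Model and data are placeholders.
import numpy as np
import torch

from art.estimators.classification import PyTorchClassifier

model = torch.nn.Sequential(torch.nn.Flatten(), torch.nn.Linear(28 * 28, 10))
classifier = PyTorchClassifier(
    model=model,
    loss=torch.nn.CrossEntropyLoss(),
    optimizer=torch.optim.Adam(model.parameters(), lr=1e-3),
    input_shape=(1, 28, 28),
    nb_classes=10,
)

x = np.random.rand(64, 1, 28, 28).astype(np.float32)
y = np.eye(10)[np.random.randint(0, 10, size=64)].astype(np.float32)

# verbose is now a bool everywhere: True shows a progress bar, False silences it.
classifier.fit(x, y, batch_size=32, nb_epochs=1, verbose=False)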

art/attacks/extraction/knockoff_nets.py

Lines changed: 2 additions & 2 deletions
@@ -155,7 +155,7 @@ def _random_extraction(self, x: np.ndarray, thieved_classifier: "CLASSIFIER_TYPE
             y=fake_labels,
             batch_size=self.batch_size_fit,
             nb_epochs=self.nb_epochs,
-            verbose=0,
+            verbose=False,
         )

         return thieved_classifier
@@ -243,7 +243,7 @@ def _adaptive_extraction(
                 y=fake_label,
                 batch_size=self.batch_size_fit,
                 nb_epochs=1,
-                verbose=0,
+                verbose=False,
             )

             # Test new labels

art/attacks/poisoning/sleeper_agent_attack.py

Lines changed: 2 additions & 2 deletions
@@ -360,7 +360,7 @@ def _create_model(
         for layer in model_pt.model.children():
             if hasattr(layer, "reset_parameters"):
                 layer.reset_parameters()  # type: ignore
-        model_pt.fit(x_train, y_train, batch_size=batch_size, nb_epochs=epochs, verbose=1)
+        model_pt.fit(x_train, y_train, batch_size=batch_size, nb_epochs=epochs, verbose=True)
         predictions = model_pt.predict(x_test)
         accuracy = np.sum(np.argmax(predictions, axis=1) == np.argmax(y_test, axis=1)) / len(y_test)
         logger.info("Accuracy of retrained model : %s", accuracy * 100.0)
@@ -370,7 +370,7 @@ def _create_model(

         self.substitute_classifier.model.trainable = True
         model_tf = self.substitute_classifier.clone_for_refitting()
-        model_tf.fit(x_train, y_train, batch_size=batch_size, nb_epochs=epochs, verbose=0)
+        model_tf.fit(x_train, y_train, batch_size=batch_size, nb_epochs=epochs, verbose=False)
         predictions = model_tf.predict(x_test)
         accuracy = np.sum(np.argmax(predictions, axis=1) == np.argmax(y_test, axis=1)) / len(y_test)
         logger.info("Accuracy of retrained model : %s", accuracy * 100.0)

art/defences/trainer/adversarial_trainer.py

Lines changed: 6 additions & 2 deletions
@@ -188,7 +188,9 @@ def fit_generator(self, generator: "DataGenerator", nb_epochs: int = 20, **kwarg
                 x_batch[adv_ids] = x_adv

                 # Fit batch
-                self._classifier.fit(x_batch, y_batch, nb_epochs=1, batch_size=x_batch.shape[0], verbose=0, **kwargs)
+                self._classifier.fit(
+                    x_batch, y_batch, nb_epochs=1, batch_size=x_batch.shape[0], verbose=False, **kwargs
+                )
                 attack_id = (attack_id + 1) % len(self.attacks)

     def fit( # pylint: disable=W0221
@@ -260,7 +262,9 @@ def fit( # pylint: disable=W0221
                 x_batch[adv_ids] = x_adv

                 # Fit batch
-                self._classifier.fit(x_batch, y_batch, nb_epochs=1, batch_size=x_batch.shape[0], verbose=0, **kwargs)
+                self._classifier.fit(
+                    x_batch, y_batch, nb_epochs=1, batch_size=x_batch.shape[0], verbose=False, **kwargs
+                )
                 attack_id = (attack_id + 1) % len(self.attacks)

     def predict(self, x: np.ndarray, **kwargs) -> np.ndarray:

art/defences/trainer/dp_instahide_trainer.py

Lines changed: 2 additions & 2 deletions
@@ -155,7 +155,7 @@ def fit( # pylint: disable=W0221
                 x_aug = self._generate_noise(x_aug)

                 # fit batch
-                self._classifier.fit(x_aug, y_aug, nb_epochs=1, batch_size=x_aug.shape[0], verbose=0, **kwargs)
+                self._classifier.fit(x_aug, y_aug, nb_epochs=1, batch_size=x_aug.shape[0], verbose=False, **kwargs)

                 # get metrics
                 loss = self._classifier.compute_loss(x_aug, y_aug, reduction="mean")
@@ -234,7 +234,7 @@ def fit_generator(self, generator: "DataGenerator", nb_epochs: int = 20, **kwarg
                 x_aug = self._generate_noise(x_aug)

                 # fit batch
-                self._classifier.fit(x_aug, y_aug, nb_epochs=1, batch_size=x_aug.shape[0], verbose=0, **kwargs)
+                self._classifier.fit(x_aug, y_aug, nb_epochs=1, batch_size=x_aug.shape[0], verbose=False, **kwargs)

                 # get metrics
                 loss = self._classifier.compute_loss(x_aug, y_aug, reduction="mean")

art/estimators/certification/randomized_smoothing/macer/pytorch.py

Lines changed: 0 additions & 3 deletions
@@ -75,7 +75,6 @@ def __init__(
         gamma: float = 8.0,
         lmbda: float = 12.0,
         gaussian_samples: int = 16,
-        verbose: bool = False,
     ) -> None:
         """
         Create a MACER classifier.
@@ -105,7 +104,6 @@ def __init__(
         :param gamma: The hinge factor.
         :param lmbda: The trade-off factor.
         :param gaussian_samples: The number of gaussian samples per input.
-        :param verbose: Show progress bars.
         """
         super().__init__(
             model=model,
@@ -122,7 +120,6 @@ def __init__(
             sample_size=sample_size,
             scale=scale,
             alpha=alpha,
-            verbose=verbose,
         )
         self.beta = beta
         self.gamma = gamma

art/estimators/certification/randomized_smoothing/macer/tensorflow.py

Lines changed: 1 addition & 10 deletions
@@ -75,7 +75,6 @@ def __init__(
         gamma: float = 8.0,
         lmbda: float = 12.0,
         gaussian_samples: int = 16,
-        verbose: bool = False,
     ) -> None:
         """
         Create a MACER classifier.
@@ -108,7 +107,6 @@ def __init__(
         :param gamma: The hinge factor.
         :param lmbda: The trade-off factor.
         :param gaussian_samples: The number of gaussian samples per input.
-        :param verbose: Show progress bars.
         """
         super().__init__(
             model=model,
@@ -125,21 +123,14 @@ def __init__(
             sample_size=sample_size,
             scale=scale,
             alpha=alpha,
-            verbose=verbose,
         )
         self.beta = beta
         self.gamma = gamma
         self.lmbda = lmbda
         self.gaussian_samples = gaussian_samples

     def fit(
-        self,
-        x: np.ndarray,
-        y: np.ndarray,
-        batch_size: int = 128,
-        nb_epochs: int = 10,
-        verbose: bool = False,
-        **kwargs
+        self, x: np.ndarray, y: np.ndarray, batch_size: int = 128, nb_epochs: int = 10, verbose: bool = False, **kwargs
     ) -> None:
         """
         Fit the classifier on the training set `(x, y)`.

art/estimators/certification/randomized_smoothing/pytorch.py

Lines changed: 0 additions & 3 deletions
@@ -71,7 +71,6 @@ def __init__(
         sample_size: int = 32,
         scale: float = 0.1,
         alpha: float = 0.001,
-        verbose: bool = False,
     ):
         """
         Create a randomized smoothing classifier.
@@ -97,7 +96,6 @@ def __init__(
         :param sample_size: Number of samples for smoothing.
         :param scale: Standard deviation of Gaussian noise added.
         :param alpha: The failure probability of smoothing.
-        :param verbose: Show progress bars.
         """
         if preprocessing_defences is not None:
             warnings.warn(
@@ -120,7 +118,6 @@ def __init__(
             sample_size=sample_size,
             scale=scale,
             alpha=alpha,
-            verbose=verbose,
         )

     def _predict_classifier(self, x: np.ndarray, batch_size: int, training_mode: bool, **kwargs) -> np.ndarray:

art/estimators/certification/randomized_smoothing/randomized_smoothing.py

Lines changed: 3 additions & 5 deletions
@@ -49,7 +49,6 @@ def __init__(
         *args,
         scale: float = 0.1,
         alpha: float = 0.001,
-        verbose: bool = False,
         **kwargs,
     ) -> None:
         """
@@ -58,13 +57,11 @@ def __init__(
         :param sample_size: Number of samples for smoothing.
         :param scale: Standard deviation of Gaussian noise added.
         :param alpha: The failure probability of smoothing.
-        :param verbose: Show progress bars.
         """
         super().__init__(*args, **kwargs)  # type: ignore
         self.sample_size = sample_size
         self.scale = scale
         self.alpha = alpha
-        self.verbose = verbose

     def _predict_classifier(self, x: np.ndarray, batch_size: int, training_mode: bool, **kwargs) -> np.ndarray:
         """
@@ -77,12 +74,13 @@ def _predict_classifier(self, x: np.ndarray, batch_size: int, training_mode: boo
         """
         raise NotImplementedError

-    def predict(self, x: np.ndarray, batch_size: int = 128, **kwargs) -> np.ndarray:
+    def predict(self, x: np.ndarray, batch_size: int = 128, verbose: bool = False, **kwargs) -> np.ndarray:
         """
         Perform prediction of the given classifier for a batch of inputs, taking an expectation over transformations.

         :param x: Input samples.
         :param batch_size: Batch size.
+        :param verbose: Display training progress bar.
         :param is_abstain: True if function will abstain from prediction and return 0s. Default: True
         :type is_abstain: `boolean`
         :return: Array of predictions of shape `(nb_inputs, nb_classes)`.
@@ -98,7 +96,7 @@ def predict(self, x: np.ndarray, batch_size: int = 128, **kwargs) -> np.ndarray:
         logger.info("Applying randomized smoothing.")
         n_abstained = 0
         prediction = []
-        for x_i in tqdm(x, desc="Randomized smoothing", disable=not self.verbose):
+        for x_i in tqdm(x, desc="Randomized smoothing", disable=not verbose):
             # get class counts
             counts_pred = self._prediction_counts(x_i, batch_size=batch_size)
             top = counts_pred.argsort()[::-1]
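As the mixin diff above shows, `verbose` is no longer stored on the estimator; it is passed per call to `predict()`. A hedged sketch of the resulting call pattern follows, assuming an ART build containing this commit and that the mixin's `predict()` is the one resolved for PyTorchRandomizedSmoothing; the wrapped model is a toy placeholder.

# Illustrative only: per-call verbose flag on predict(); constructor no longer
# accepts verbose after this change.
import numpy as np
import torch

from art.estimators.certification.randomized_smoothing import PyTorchRandomizedSmoothing

model = torch.nn.Sequential(torch.nn.Flatten(), torch.nn.Linear(28 * 28, 10))
smoothed = PyTorchRandomizedSmoothing(
    model=model,
    loss=torch.nn.CrossEntropyLoss(),
    input_shape=(1, 28, 28),
    nb_classes=10,
    sample_size=32,
    scale=0.1,
    alpha=0.001,
    # note: no verbose argument here any more
)

x = np.random.rand(4, 1, 28, 28).astype(np.float32)

# The tqdm "Randomized smoothing" bar is requested per call rather than per instance.
preds = smoothed.predict(x, batch_size=128, verbose=True)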

art/estimators/certification/randomized_smoothing/smooth_adv/pytorch.py

Lines changed: 0 additions & 3 deletions
@@ -77,7 +77,6 @@ def __init__(
         num_noise_vec: int = 1,
         num_steps: int = 10,
         warmup: int = 1,
-        verbose: bool = False,
     ) -> None:
         """
         Create a SmoothAdv classifier.
@@ -107,7 +106,6 @@ def __init__(
         :param num_noise_vec: The number of noise vectors.
         :param num_steps: The number of attack updates.
         :param warmup: The warm-up strategy that is gradually increased up to the original value.
-        :param verbose: Show progress bars.
         """
         super().__init__(
             model=model,
@@ -124,7 +122,6 @@ def __init__(
             sample_size=sample_size,
             scale=scale,
             alpha=alpha,
-            verbose=verbose,
         )
         self.epsilon = epsilon
         self.num_noise_vec = num_noise_vec

art/estimators/certification/randomized_smoothing/smooth_adv/tensorflow.py

Lines changed: 1 addition & 9 deletions
@@ -77,7 +77,6 @@ def __init__(
         num_noise_vec: int = 1,
         num_steps: int = 10,
         warmup: int = 1,
-        verbose: bool = False,
     ) -> None:
         """
         Create a MACER classifier.
@@ -110,7 +109,6 @@ def __init__(
         :param num_noise_vec: The number of noise vectors.
         :param num_steps: The number of attack updates.
         :param warmup: The warm-up strategy that is gradually increased up to the original value.
-        :param verbose: Show progress bars.
         """
         super().__init__(
             model=model,
@@ -150,13 +148,7 @@ def __init__(
         self.attack = ProjectedGradientDescent(classifier, eps=self.epsilon, max_iter=1, verbose=False)

     def fit(
-        self,
-        x: np.ndarray,
-        y: np.ndarray,
-        batch_size: int = 128,
-        nb_epochs: int = 10,
-        verbose: bool = False,
-        **kwargs
+        self, x: np.ndarray, y: np.ndarray, batch_size: int = 128, nb_epochs: int = 10, verbose: bool = False, **kwargs
     ) -> None:
         """
         Fit the classifier on the training set `(x, y)`.
