
Commit 5fb24d5

standardise verbose use across tools
Signed-off-by: GiulioZizzo <[email protected]>
1 parent 39ab9cd commit 5fb24d5

11 files changed: +260 -35 lines

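Every `fit` method touched by this commit repeats the same block to turn the new `verbose: Optional[Union[bool, int]]` parameter into a boolean `display_pb` flag; only the fallback used when `verbose` is None differs between estimators. A minimal standalone sketch of that shared pattern (the `resolve_verbose` helper name is illustrative only; the commit inlines this logic rather than defining a helper):

from typing import Optional, Union


def resolve_verbose(verbose: Optional[Union[bool, int]], fallback: bool) -> bool:
    """Condensed mirror of the inline logic added in each fit method below."""
    if verbose is not None:
        if isinstance(verbose, int):
            # bools are ints in Python, so True/False also take this branch:
            # any non-zero value shows the progress bar, 0 hides it
            return verbose != 0
        return verbose
    # verbose=None falls back to a per-estimator default: False in the
    # derandomized smoothing estimators, self.verbose in the randomized
    # smoothing ones
    return fallback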

art/estimators/certification/derandomized_smoothing/pytorch.py

Lines changed: 14 additions & 3 deletions
@@ -441,7 +441,7 @@ def fit( # pylint: disable=W0221
         update_batchnorm: bool = True,
         batchnorm_update_epochs: int = 1,
         transform: Optional["torchvision.transforms.transforms.Compose"] = None,
-        verbose: bool = True,
+        verbose: Optional[Union[bool, int]] = None,
         **kwargs,
     ) -> None:
         """
@@ -469,6 +469,17 @@ def fit( # pylint: disable=W0221
         """
         import torch
 
+        if verbose is not None:
+            if isinstance(verbose, int):
+                if verbose == 0:
+                    display_pb = False
+                else:
+                    display_pb = True
+            else:
+                display_pb = verbose
+        else:
+            display_pb = False
+
         # Set model mode
         self._model.train(mode=training_mode)
 
@@ -499,7 +510,7 @@ def fit( # pylint: disable=W0221
             epoch_loss = []
             epoch_batch_sizes = []
 
-            pbar = tqdm(range(num_batch), disable=not verbose)
+            pbar = tqdm(range(num_batch), disable=not display_pb)
 
             # Train for one epoch
             for m in pbar:
@@ -545,7 +556,7 @@ def fit( # pylint: disable=W0221
                 epoch_loss.append(loss.cpu().detach().numpy())
                 epoch_batch_sizes.append(len(i_batch))
 
-                if verbose:
+                if display_pb:
                     pbar.set_description(
                         f"Loss {np.average(epoch_loss, weights=epoch_batch_sizes):.3f} "
                         f"Acc {np.average(epoch_acc, weights=epoch_batch_sizes):.3f} "

art/estimators/certification/derandomized_smoothing/tensorflow.py

Lines changed: 21 additions & 4 deletions
@@ -155,7 +155,13 @@ def _predict_classifier(self, x: np.ndarray, batch_size: int, training_mode: boo
         return np.asarray(outputs >= self.threshold).astype(int)
 
     def fit(  # pylint: disable=W0221
-        self, x: np.ndarray, y: np.ndarray, batch_size: int = 128, nb_epochs: int = 10, verbose: bool = True, **kwargs
+        self,
+        x: np.ndarray,
+        y: np.ndarray,
+        batch_size: int = 128,
+        nb_epochs: int = 10,
+        verbose: Optional[Union[bool, int]] = None,
+        **kwargs,
     ) -> None:
         """
         Fit the classifier on the training set `(x, y)`.
@@ -165,13 +171,24 @@ def fit( # pylint: disable=W0221
                   shape (nb_samples,).
         :param batch_size: Size of batches.
         :param nb_epochs: Number of epochs to use for training.
-        :param verbose: if to display training progress bars
+        :param verbose: If to display training progress bars
         :param kwargs: Dictionary of framework-specific arguments. This parameter currently only supports
                        "scheduler" which is an optional function that will be called at the end of every
                        epoch to adjust the learning rate.
         """
         import tensorflow as tf
 
+        if verbose is not None:
+            if isinstance(verbose, int):
+                if verbose == 0:
+                    display_pb = False
+                else:
+                    display_pb = True
+            else:
+                display_pb = verbose
+        else:
+            display_pb = False
+
         if self._train_step is None:  # pragma: no cover
             if self._loss_object is None:  # pragma: no cover
                 raise TypeError(
@@ -214,7 +231,7 @@ def train_step(model, images, labels):
             epoch_loss = []
             epoch_batch_sizes = []
 
-            pbar = tqdm(range(num_batch), disable=not verbose)
+            pbar = tqdm(range(num_batch), disable=not display_pb)
 
             ind = np.arange(len(x_preprocessed))
             for m in pbar:
@@ -231,7 +248,7 @@ def train_step(model, images, labels):
                 else:
                     train_step(self.model, images, labels)
 
-                if verbose:
+                if display_pb:
                     if self._train_step is None:
                         pbar.set_description(
                             f"Loss {np.average(epoch_loss, weights=epoch_batch_sizes):.3f} "

art/estimators/certification/randomized_smoothing/macer/pytorch.py

Lines changed: 15 additions & 1 deletion
@@ -138,6 +138,7 @@ def fit( # pylint: disable=W0221
         training_mode: bool = True,
         drop_last: bool = False,
         scheduler: Optional["torch.optim.lr_scheduler._LRScheduler"] = None,
+        verbose: Optional[Union[bool, int]] = None,
         **kwargs,
     ) -> None:
         """
@@ -153,13 +154,26 @@ def fit( # pylint: disable=W0221
                           the batch size. If ``False`` and the size of dataset is not divisible by the batch size, then
                           the last batch will be smaller. (default: ``False``)
         :param scheduler: Learning rate scheduler to run at the start of every epoch.
+        :param verbose: (Optional) Display the progress bar, if not supplied will revert to the verbose level when
+                        class was initialised.
         :param kwargs: Dictionary of framework-specific arguments. This parameter is not currently supported for PyTorch
                        and providing it takes no effect.
         """
         import torch
         import torch.nn.functional as F
         from torch.utils.data import TensorDataset, DataLoader
 
+        if verbose is not None:
+            if isinstance(verbose, int):
+                if verbose == 0:
+                    display_pb = False
+                else:
+                    display_pb = True
+            else:
+                display_pb = verbose
+        else:
+            display_pb = self.verbose
+
         # Set model mode
         self._model.train(mode=training_mode)
 
@@ -185,7 +199,7 @@ def fit( # pylint: disable=W0221
             )
 
         # Start training
-        for _ in trange(nb_epochs, disable=not self.verbose):
+        for _ in trange(nb_epochs, disable=not display_pb):
             for x_batch, y_batch in dataloader:
                 # Move inputs to GPU
                 x_batch = x_batch.to(self.device)
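Note the different fallback: where the derandomized smoothing estimators above default to a hidden bar when `verbose` is not supplied, the randomized smoothing `fit` methods revert to the `verbose` level fixed at construction. Reusing the `resolve_verbose` sketch from above (`init_time_verbose` is an illustrative stand-in for `self.verbose`):

init_time_verbose = True                          # stand-in for self.verbose set in __init__
print(resolve_verbose(None, init_time_verbose))   # True: inherits the init-time setting
print(resolve_verbose(0, init_time_verbose))      # False: an explicit 0 always wins
print(resolve_verbose(False, init_time_verbose))  # False: an explicit bool also wins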

art/estimators/certification/randomized_smoothing/macer/tensorflow.py

Lines changed: 23 additions & 2 deletions
@@ -132,7 +132,15 @@ def __init__(
         self.lmbda = lmbda
         self.gaussian_samples = gaussian_samples
 
-    def fit(self, x: np.ndarray, y: np.ndarray, batch_size: int = 128, nb_epochs: int = 10, **kwargs) -> None:
+    def fit(
+        self,
+        x: np.ndarray,
+        y: np.ndarray,
+        batch_size: int = 128,
+        nb_epochs: int = 10,
+        verbose: Optional[Union[bool, int]] = None,
+        **kwargs
+    ) -> None:
         """
         Fit the classifier on the training set `(x, y)`.
 
@@ -141,12 +149,25 @@ def fit(self, x: np.ndarray, y: np.ndarray, batch_size: int = 128, nb_epochs: in
                   shape (nb_samples,).
         :param batch_size: Size of batches.
         :param nb_epochs: Number of epochs to use for training.
+        :param verbose: (Optional) Display the progress bar, if not supplied will revert to the verbose level when
+                        class was initialised.
         :param kwargs: Dictionary of framework-specific arguments. This parameter currently only supports
                        "scheduler" which is an optional function that will be called at the end of every
                        epoch to adjust the learning rate.
         """
         import tensorflow as tf
 
+        if verbose is not None:
+            if isinstance(verbose, int):
+                if verbose == 0:
+                    display_pb = False
+                else:
+                    display_pb = True
+            else:
+                display_pb = verbose
+        else:
+            display_pb = self.verbose
+
         if self._train_step is None:  # pragma: no cover
             if self._optimizer is None:  # pragma: no cover
                 raise ValueError(
@@ -213,7 +234,7 @@ def train_step(model, images, labels):
 
         train_ds = tf.data.Dataset.from_tensor_slices((x_preprocessed, y_preprocessed)).shuffle(10000).batch(batch_size)
 
-        for epoch in trange(nb_epochs, disable=not self.verbose):
+        for epoch in trange(nb_epochs, disable=not display_pb):
             for images, labels in train_ds:
                 # Tile samples for Gaussian augmentation
                 input_size = len(images)

art/estimators/certification/randomized_smoothing/pytorch.py

Lines changed: 15 additions & 1 deletion
@@ -140,6 +140,7 @@ def fit( # pylint: disable=W0221
         training_mode: bool = True,
         drop_last: bool = False,
         scheduler: Optional["torch.optim.lr_scheduler._LRScheduler"] = None,
+        verbose: Optional[Union[bool, int]] = None,
         **kwargs,
     ) -> None:
         """
@@ -155,12 +156,25 @@ def fit( # pylint: disable=W0221
                           the batch size. If ``False`` and the size of dataset is not divisible by the batch size, then
                           the last batch will be smaller. (default: ``False``)
         :param scheduler: Learning rate scheduler to run at the start of every epoch.
+        :param verbose: (Optional) Display the progress bar, if not supplied will revert to the verbose level when
+                        class was initialised.
         :param kwargs: Dictionary of framework-specific arguments. This parameter is not currently supported for PyTorch
                        and providing it takes no effect.
         """
         import torch
         from torch.utils.data import TensorDataset, DataLoader
 
+        if verbose is not None:
+            if isinstance(verbose, int):
+                if verbose == 0:
+                    display_pb = False
+                else:
+                    display_pb = True
+            else:
+                display_pb = verbose
+        else:
+            display_pb = self.verbose
+
         # Set model mode
         self._model.train(mode=training_mode)
 
@@ -182,7 +196,7 @@ def fit( # pylint: disable=W0221
         dataloader = DataLoader(dataset=dataset, batch_size=batch_size, shuffle=True, drop_last=drop_last)
 
         # Start training
-        for _ in trange(nb_epochs, disable=not self.verbose):
+        for _ in trange(nb_epochs, disable=not display_pb):
             for x_batch, y_batch in dataloader:
                 # Move inputs to device
                 x_batch = x_batch.to(self._device)

art/estimators/certification/randomized_smoothing/smooth_adv/pytorch.py

Lines changed: 15 additions & 1 deletion
@@ -155,6 +155,7 @@ def fit( # pylint: disable=W0221
         training_mode: bool = True,
         drop_last: bool = False,
         scheduler: Optional["torch.optim.lr_scheduler._LRScheduler"] = None,
+        verbose: Optional[Union[bool, int]] = None,
         **kwargs,
     ) -> None:
         """
@@ -170,12 +171,25 @@ def fit( # pylint: disable=W0221
                           the batch size. If ``False`` and the size of dataset is not divisible by the batch size, then
                           the last batch will be smaller. (default: ``False``)
         :param scheduler: Learning rate scheduler to run at the start of every epoch.
+        :param verbose: (Optional) Display the progress bar, if not supplied will revert to the verbose level when
+                        class was initialised.
         :param kwargs: Dictionary of framework-specific arguments. This parameter is not currently supported for PyTorch
                        and providing it takes no effect.
         """
         import torch
         from torch.utils.data import TensorDataset, DataLoader
 
+        if verbose is not None:
+            if isinstance(verbose, int):
+                if verbose == 0:
+                    display_pb = False
+                else:
+                    display_pb = True
+            else:
+                display_pb = verbose
+        else:
+            display_pb = self.verbose
+
         # Set model mode
         self._model.train(mode=training_mode)
 
@@ -197,7 +211,7 @@ def fit( # pylint: disable=W0221
         dataloader = DataLoader(dataset=dataset, batch_size=batch_size, shuffle=True, drop_last=drop_last)
 
         # Start training
-        for epoch in trange(nb_epochs, disable=not self.verbose):
+        for epoch in trange(nb_epochs, disable=not display_pb):
             self.attack.norm = min(self.epsilon, (epoch + 1) * self.epsilon / self.warmup)
 
             for x_batch, y_batch in dataloader:

art/estimators/certification/randomized_smoothing/smooth_adv/tensorflow.py

Lines changed: 23 additions & 2 deletions
@@ -149,7 +149,15 @@ def __init__(
         )
         self.attack = ProjectedGradientDescent(classifier, eps=self.epsilon, max_iter=1, verbose=False)
 
-    def fit(self, x: np.ndarray, y: np.ndarray, batch_size: int = 128, nb_epochs: int = 10, **kwargs) -> None:
+    def fit(
+        self,
+        x: np.ndarray,
+        y: np.ndarray,
+        batch_size: int = 128,
+        nb_epochs: int = 10,
+        verbose: Optional[Union[bool, int]] = None,
+        **kwargs
+    ) -> None:
         """
         Fit the classifier on the training set `(x, y)`.
 
@@ -158,12 +166,25 @@ def fit(self, x: np.ndarray, y: np.ndarray, batch_size: int = 128, nb_epochs: in
                   shape (nb_samples,).
         :param batch_size: Size of batches.
        :param nb_epochs: Number of epochs to use for training.
+        :param verbose: (Optional) Display the progress bar, if not supplied will revert to the verbose level when
+                        class was initialised.
         :param kwargs: Dictionary of framework-specific arguments. This parameter currently only supports
                        "scheduler" which is an optional function that will be called at the end of every
                        epoch to adjust the learning rate.
         """
         import tensorflow as tf
 
+        if verbose is not None:
+            if isinstance(verbose, int):
+                if verbose == 0:
+                    display_pb = False
+                else:
+                    display_pb = True
+            else:
+                display_pb = verbose
+        else:
+            display_pb = self.verbose
+
         if self._train_step is None:  # pragma: no cover
             if self._loss_object is None:  # pragma: no cover
                 raise TypeError(
@@ -200,7 +221,7 @@ def train_step(model, images, labels):
 
         train_ds = tf.data.Dataset.from_tensor_slices((x_preprocessed, y_preprocessed)).shuffle(10000).batch(batch_size)
 
-        for epoch in trange(nb_epochs, disable=not self.verbose):
+        for epoch in trange(nb_epochs, disable=not display_pb):
             self.attack.norm = min(self.epsilon, (epoch + 1) * self.epsilon / self.warmup)
 
             for x_batch, y_batch in train_ds:

art/estimators/certification/randomized_smoothing/smooth_mix/pytorch.py

Lines changed: 15 additions & 1 deletion
@@ -172,6 +172,7 @@ def fit( # pylint: disable=W0221
         training_mode: bool = True,
         drop_last: bool = False,
         scheduler: Optional["torch.optim.lr_scheduler._LRScheduler"] = None,
+        verbose: Optional[Union[bool, int]] = None,
         **kwargs,
     ) -> None:
         """
@@ -187,13 +188,26 @@ def fit( # pylint: disable=W0221
                           the batch size. If ``False`` and the size of dataset is not divisible by the batch size, then
                           the last batch will be smaller. (default: ``False``)
         :param scheduler: Learning rate scheduler to run at the start of every epoch.
+        :param verbose: (Optional) Display the progress bar, if not supplied will revert to the verbose level when
+                        class was initialised.
         :param kwargs: Dictionary of framework-specific arguments. This parameter is not currently supported for PyTorch
                        and providing it takes no effect.
         """
         import torch
         import torch.nn.functional as F
         from torch.utils.data import TensorDataset, DataLoader
 
+        if verbose is not None:
+            if isinstance(verbose, int):
+                if verbose == 0:
+                    display_pb = False
+                else:
+                    display_pb = True
+            else:
+                display_pb = verbose
+        else:
+            display_pb = self.verbose
+
         # Set model mode
         self._model.train(mode=training_mode)
 
@@ -215,7 +229,7 @@ def fit( # pylint: disable=W0221
         dataloader = DataLoader(dataset=dataset, batch_size=batch_size, shuffle=True, drop_last=drop_last)
 
         # Start training
-        for epoch in trange(nb_epochs, disable=not self.verbose):
+        for epoch in trange(nb_epochs, disable=not display_pb):
             warmup_v = min(1.0, (epoch + 1) / self.warmup)
 
             for x_batch, y_batch in dataloader:
