Commit 1f3026b

Apply suggestions from code review

1 parent 94cf59f

12 files changed (+54 −194 lines)

art/estimators/certification/derandomized_smoothing/pytorch.py
Lines changed: 3 additions & 5 deletions

@@ -438,7 +438,7 @@ def fit(  # pylint: disable=W0221
         training_mode: bool = True,
         drop_last: bool = False,
         scheduler: Optional[Any] = None,
-        verbose: Optional[Union[bool, int]] = None,
+        verbose: bool = False,
         update_batchnorm: bool = True,
         batchnorm_update_epochs: int = 1,
         transform: Optional["torchvision.transforms.transforms.Compose"] = None,
@@ -469,8 +469,6 @@ def fit(  # pylint: disable=W0221
         """
         import torch
 
-        display_pb = self.process_verbose(verbose)
-
         # Set model mode
         self._model.train(mode=training_mode)
 
@@ -501,7 +499,7 @@ def fit(  # pylint: disable=W0221
             epoch_loss = []
             epoch_batch_sizes = []
 
-            pbar = tqdm(range(num_batch), disable=not display_pb)
+            pbar = tqdm(range(num_batch), disable=not verbose)
 
             # Train for one epoch
             for m in pbar:
@@ -547,7 +545,7 @@ def fit(  # pylint: disable=W0221
                 epoch_loss.append(loss.cpu().detach().numpy())
                 epoch_batch_sizes.append(len(i_batch))
 
-                if display_pb:
+                if verbose:
                     pbar.set_description(
                         f"Loss {np.average(epoch_loss, weights=epoch_batch_sizes):.3f} "
                         f"Acc {np.average(epoch_acc, weights=epoch_batch_sizes):.3f} "

art/estimators/certification/derandomized_smoothing/tensorflow.py
Lines changed: 4 additions & 6 deletions

@@ -160,7 +160,7 @@ def fit(  # pylint: disable=W0221
         y: np.ndarray,
         batch_size: int = 128,
         nb_epochs: int = 10,
-        verbose: Optional[Union[bool, int]] = None,
+        verbose: bool = False,
         **kwargs,
     ) -> None:
         """
@@ -171,15 +171,13 @@ def fit(  # pylint: disable=W0221
                   shape (nb_samples,).
         :param batch_size: Size of batches.
         :param nb_epochs: Number of epochs to use for training.
-        :param verbose: If to display training progress bars
+        :param verbose: Display training progress bar.
         :param kwargs: Dictionary of framework-specific arguments. This parameter currently only supports
                        "scheduler" which is an optional function that will be called at the end of every
                        epoch to adjust the learning rate.
         """
         import tensorflow as tf
 
-        display_pb = self.process_verbose(verbose)
-
         if self._train_step is None:  # pragma: no cover
             if self._loss_object is None:  # pragma: no cover
                 raise TypeError(
@@ -222,7 +220,7 @@ def train_step(model, images, labels):
             epoch_loss = []
             epoch_batch_sizes = []
 
-            pbar = tqdm(range(num_batch), disable=not display_pb)
+            pbar = tqdm(range(num_batch), disable=not verbose)
 
             ind = np.arange(len(x_preprocessed))
             for m in pbar:
@@ -239,7 +237,7 @@ def train_step(model, images, labels):
                 else:
                     train_step(self.model, images, labels)
 
-                if display_pb:
+                if verbose:
                     if self._train_step is None:
                         pbar.set_description(
                             f"Loss {np.average(epoch_loss, weights=epoch_batch_sizes):.3f} "

art/estimators/certification/randomized_smoothing/macer/pytorch.py
Lines changed: 3 additions & 6 deletions

@@ -138,7 +138,7 @@ def fit(  # pylint: disable=W0221
         training_mode: bool = True,
         drop_last: bool = False,
         scheduler: Optional["torch.optim.lr_scheduler._LRScheduler"] = None,
-        verbose: Optional[Union[bool, int]] = None,
+        verbose: bool = False,
         **kwargs,
     ) -> None:
         """
@@ -154,17 +154,14 @@ def fit(  # pylint: disable=W0221
                           the batch size. If ``False`` and the size of dataset is not divisible by the batch size, then
                           the last batch will be smaller. (default: ``False``)
         :param scheduler: Learning rate scheduler to run at the start of every epoch.
-        :param verbose: (Optional) Display the progress bar, if not supplied will revert to the verbose level when
-                        class was initialised.
+        :param verbose: Display the training progress bar.
         :param kwargs: Dictionary of framework-specific arguments. This parameter is not currently supported for PyTorch
                        and providing it takes no effect.
         """
         import torch
         import torch.nn.functional as F
         from torch.utils.data import TensorDataset, DataLoader
 
-        display_pb = self.process_verbose(verbose)
-
         # Set model mode
         self._model.train(mode=training_mode)
 
@@ -190,7 +187,7 @@ class was initialised.
         )
 
         # Start training
-        for _ in trange(nb_epochs, disable=not display_pb):
+        for _ in trange(nb_epochs, disable=not verbose):
             for x_batch, y_batch in dataloader:
                 # Move inputs to GPU
                 x_batch = x_batch.to(self.device)

art/estimators/certification/randomized_smoothing/macer/tensorflow.py
Lines changed: 3 additions & 6 deletions

@@ -138,7 +138,7 @@ def fit(
         y: np.ndarray,
         batch_size: int = 128,
         nb_epochs: int = 10,
-        verbose: Optional[Union[bool, int]] = None,
+        verbose: bool = False,
         **kwargs
     ) -> None:
         """
@@ -149,16 +149,13 @@ def fit(
                   shape (nb_samples,).
         :param batch_size: Size of batches.
         :param nb_epochs: Number of epochs to use for training.
-        :param verbose: (Optional) Display the progress bar, if not supplied will revert to the verbose level when
-                        class was initialised.
+        :param verbose: Display the training progress bar.
         :param kwargs: Dictionary of framework-specific arguments. This parameter currently only supports
                        "scheduler" which is an optional function that will be called at the end of every
                        epoch to adjust the learning rate.
         """
         import tensorflow as tf
 
-        display_pb = self.process_verbose(verbose)
-
         if self._train_step is None:  # pragma: no cover
             if self._optimizer is None:  # pragma: no cover
                 raise ValueError(
@@ -225,7 +222,7 @@ def train_step(model, images, labels):
 
         train_ds = tf.data.Dataset.from_tensor_slices((x_preprocessed, y_preprocessed)).shuffle(10000).batch(batch_size)
 
-        for epoch in trange(nb_epochs, disable=not display_pb):
+        for epoch in trange(nb_epochs, disable=not verbose):
             for images, labels in train_ds:
                 # Tile samples for Gaussian augmentation
                 input_size = len(images)

art/estimators/certification/randomized_smoothing/pytorch.py
Lines changed: 3 additions & 6 deletions

@@ -140,7 +140,7 @@ def fit(  # pylint: disable=W0221
         training_mode: bool = True,
         drop_last: bool = False,
         scheduler: Optional["torch.optim.lr_scheduler._LRScheduler"] = None,
-        verbose: Optional[Union[bool, int]] = None,
+        verbose: bool = False,
         **kwargs,
     ) -> None:
         """
@@ -156,16 +156,13 @@ def fit(  # pylint: disable=W0221
                           the batch size. If ``False`` and the size of dataset is not divisible by the batch size, then
                           the last batch will be smaller. (default: ``False``)
         :param scheduler: Learning rate scheduler to run at the start of every epoch.
-        :param verbose: (Optional) Display the progress bar, if not supplied will revert to the verbose level when
-                        class was initialised.
+        :param verbose: Display the training progress bar.
         :param kwargs: Dictionary of framework-specific arguments. This parameter is not currently supported for PyTorch
                        and providing it takes no effect.
         """
         import torch
         from torch.utils.data import TensorDataset, DataLoader
 
-        display_pb = self.process_verbose(verbose)
-
         # Set model mode
         self._model.train(mode=training_mode)
 
@@ -187,7 +184,7 @@ class was initialised.
         dataloader = DataLoader(dataset=dataset, batch_size=batch_size, shuffle=True, drop_last=drop_last)
 
         # Start training
-        for _ in trange(nb_epochs, disable=not display_pb):
+        for _ in trange(nb_epochs, disable=not verbose):
             for x_batch, y_batch in dataloader:
                 # Move inputs to device
                 x_batch = x_batch.to(self._device)

art/estimators/certification/randomized_smoothing/smooth_adv/pytorch.py
Lines changed: 3 additions & 6 deletions

@@ -155,7 +155,7 @@ def fit(  # pylint: disable=W0221
         training_mode: bool = True,
         drop_last: bool = False,
         scheduler: Optional["torch.optim.lr_scheduler._LRScheduler"] = None,
-        verbose: Optional[Union[bool, int]] = None,
+        verbose: bool = False,
         **kwargs,
     ) -> None:
         """
@@ -171,16 +171,13 @@ def fit(  # pylint: disable=W0221
                           the batch size. If ``False`` and the size of dataset is not divisible by the batch size, then
                           the last batch will be smaller. (default: ``False``)
         :param scheduler: Learning rate scheduler to run at the start of every epoch.
-        :param verbose: (Optional) Display the progress bar, if not supplied will revert to the verbose level when
-                        class was initialised.
+        :param verbose: Display the training progress bar.
         :param kwargs: Dictionary of framework-specific arguments. This parameter is not currently supported for PyTorch
                        and providing it takes no effect.
         """
         import torch
         from torch.utils.data import TensorDataset, DataLoader
 
-        display_pb = self.process_verbose(verbose)
-
         # Set model mode
         self._model.train(mode=training_mode)
 
@@ -202,7 +199,7 @@ class was initialised.
         dataloader = DataLoader(dataset=dataset, batch_size=batch_size, shuffle=True, drop_last=drop_last)
 
         # Start training
-        for epoch in trange(nb_epochs, disable=not display_pb):
+        for epoch in trange(nb_epochs, disable=not verbose):
             self.attack.norm = min(self.epsilon, (epoch + 1) * self.epsilon / self.warmup)
 
             for x_batch, y_batch in dataloader:

art/estimators/certification/randomized_smoothing/smooth_adv/tensorflow.py
Lines changed: 3 additions & 6 deletions

@@ -155,7 +155,7 @@ def fit(
         y: np.ndarray,
         batch_size: int = 128,
         nb_epochs: int = 10,
-        verbose: Optional[Union[bool, int]] = None,
+        verbose: bool = False,
         **kwargs
     ) -> None:
         """
@@ -166,16 +166,13 @@ def fit(
                   shape (nb_samples,).
         :param batch_size: Size of batches.
         :param nb_epochs: Number of epochs to use for training.
-        :param verbose: (Optional) Display the progress bar, if not supplied will revert to the verbose level when
-                        class was initialised.
+        :param verbose: Display the training progress bar.
         :param kwargs: Dictionary of framework-specific arguments. This parameter currently only supports
                        "scheduler" which is an optional function that will be called at the end of every
                        epoch to adjust the learning rate.
         """
         import tensorflow as tf
 
-        display_pb = self.process_verbose(verbose)
-
         if self._train_step is None:  # pragma: no cover
             if self._loss_object is None:  # pragma: no cover
                 raise TypeError(
@@ -212,7 +209,7 @@ def train_step(model, images, labels):
 
         train_ds = tf.data.Dataset.from_tensor_slices((x_preprocessed, y_preprocessed)).shuffle(10000).batch(batch_size)
 
-        for epoch in trange(nb_epochs, disable=not display_pb):
+        for epoch in trange(nb_epochs, disable=not verbose):
             self.attack.norm = min(self.epsilon, (epoch + 1) * self.epsilon / self.warmup)
 
             for x_batch, y_batch in train_ds:

art/estimators/certification/randomized_smoothing/smooth_mix/pytorch.py
Lines changed: 3 additions & 6 deletions

@@ -172,7 +172,7 @@ def fit(  # pylint: disable=W0221
         training_mode: bool = True,
         drop_last: bool = False,
         scheduler: Optional["torch.optim.lr_scheduler._LRScheduler"] = None,
-        verbose: Optional[Union[bool, int]] = None,
+        verbose: bool = False,
         **kwargs,
     ) -> None:
         """
@@ -188,17 +188,14 @@ def fit(  # pylint: disable=W0221
                           the batch size. If ``False`` and the size of dataset is not divisible by the batch size, then
                           the last batch will be smaller. (default: ``False``)
         :param scheduler: Learning rate scheduler to run at the start of every epoch.
-        :param verbose: (Optional) Display the progress bar, if not supplied will revert to the verbose level when
-                        class was initialised.
+        :param verbose: Display the training progress bar.
         :param kwargs: Dictionary of framework-specific arguments. This parameter is not currently supported for PyTorch
                        and providing it takes no effect.
         """
         import torch
         import torch.nn.functional as F
         from torch.utils.data import TensorDataset, DataLoader
 
-        display_pb = self.process_verbose(verbose)
-
         # Set model mode
         self._model.train(mode=training_mode)
 
@@ -220,7 +217,7 @@ class was initialised.
         dataloader = DataLoader(dataset=dataset, batch_size=batch_size, shuffle=True, drop_last=drop_last)
 
         # Start training
-        for epoch in trange(nb_epochs, disable=not display_pb):
+        for epoch in trange(nb_epochs, disable=not verbose):
             warmup_v = min(1.0, (epoch + 1) / self.warmup)
 
             for x_batch, y_batch in dataloader:

art/estimators/certification/randomized_smoothing/tensorflow.py
Lines changed: 3 additions & 6 deletions

@@ -137,7 +137,7 @@ def fit(  # pylint: disable=W0221
         y: np.ndarray,
         batch_size: int = 128,
         nb_epochs: int = 10,
-        verbose: Optional[Union[bool, int]] = None,
+        verbose: bool = False,
         **kwargs
     ) -> None:
         """
@@ -148,16 +148,13 @@ def fit(  # pylint: disable=W0221
                   shape (nb_samples,).
         :param batch_size: Size of batches.
         :param nb_epochs: Number of epochs to use for training.
-        :param verbose: (Optional) Display the progress bar, if not supplied will revert to the verbose level when
-                        class was initialised.
+        :param verbose: Display the training progress bar.
         :param kwargs: Dictionary of framework-specific arguments. This parameter currently only supports
                        "scheduler" which is an optional function that will be called at the end of every
                        epoch to adjust the learning rate.
         """
         import tensorflow as tf
 
-        display_pb = self.process_verbose(verbose)
-
         if self._train_step is None:  # pragma: no cover
             if self._loss_object is None:  # pragma: no cover
                 raise TypeError(
@@ -194,7 +191,7 @@ def train_step(model, images, labels):
 
         train_ds = tf.data.Dataset.from_tensor_slices((x_preprocessed, y_preprocessed)).shuffle(10000).batch(batch_size)
 
-        for epoch in trange(nb_epochs, disable=not display_pb):
+        for epoch in trange(nb_epochs, disable=not verbose):
             for images, labels in train_ds:
                 # Add random noise for randomized smoothing
                 images += tf.random.normal(shape=images.shape, mean=0.0, stddev=self.scale)
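The epoch-level trainers above all share the same trange gate. A self-contained sketch of that pattern, assuming nothing beyond tqdm — the loop body is a placeholder for the per-batch work shown in the diffs:

    from tqdm import trange

    def train(nb_epochs: int, verbose: bool = False) -> None:
        # trange is tqdm's range wrapper; disable=not verbose means the
        # epoch bar is drawn only when the caller passes verbose=True
        for epoch in trange(nb_epochs, disable=not verbose):
            pass  # placeholder for the per-batch training loop

    train(nb_epochs=3)                # no output
    train(nb_epochs=3, verbose=True)  # renders the epoch progress bar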
