
Commit 658a22d

Merge branch 'dev_1.15.0' into object-detector-resize
2 parents da64aa6 + 76ae22a commit 658a22d

19 files changed: +195 −188 lines changed

art/attacks/evasion/auto_conjugate_gradient.py

Lines changed: 2 additions & 1 deletion
@@ -224,7 +224,8 @@ def __call__(self, y_true: tf.Tensor, y_pred: tf.Tensor, *args, **kwargs) -> tf.
     nb_classes=estimator.nb_classes,
     input_shape=estimator.input_shape,
     loss_object=_loss_object_tf,
-    train_step=estimator._train_step,
+    optimizer=estimator.optimizer,
+    train_step=estimator.train_step,
     channels_first=estimator.channels_first,
     clip_values=estimator.clip_values,
     preprocessing_defences=estimator.preprocessing_defences,

art/attacks/evasion/auto_projected_gradient_descent.py

Lines changed: 2 additions & 1 deletion
@@ -203,7 +203,8 @@ def __call__(self, y_true: tf.Tensor, y_pred: tf.Tensor, *args, **kwargs) -> tf.
     nb_classes=estimator.nb_classes,
     input_shape=estimator.input_shape,
     loss_object=_loss_object_tf,
-    train_step=estimator._train_step,
+    optimizer=estimator.optimizer,
+    train_step=estimator.train_step,
     channels_first=estimator.channels_first,
     clip_values=estimator.clip_values,
     preprocessing_defences=estimator.preprocessing_defences,

art/attacks/evasion/brendel_bethge.py

Lines changed: 2 additions & 1 deletion
@@ -2055,7 +2055,8 @@ def logits_difference(y_true, y_pred):
     nb_classes=estimator.nb_classes,
     input_shape=estimator.input_shape,
     loss_object=self._loss_object,
-    train_step=estimator._train_step,
+    optimizer=estimator.optimizer,
+    train_step=estimator.train_step,
     channels_first=estimator.channels_first,
     clip_values=estimator.clip_values,
     preprocessing_defences=estimator.preprocessing_defences,
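
The change is identical in all three attacks above: the surrogate classifier that carries the attack-specific loss is now built from the estimator's public `train_step` property rather than the private `_train_step` attribute, and the estimator's `optimizer` is forwarded so the surrogate remains trainable. A minimal sketch of the pattern, assuming `estimator` is an existing `TensorFlowV2Classifier` and `_loss_object_tf` is the attack's loss as in the hunks above:

# Sketch only: `estimator` and `_loss_object_tf` are assumed to exist,
# mirroring the constructor calls shown in the diffs above.
from art.estimators.classification import TensorFlowV2Classifier

surrogate = TensorFlowV2Classifier(
    model=estimator.model,
    nb_classes=estimator.nb_classes,
    input_shape=estimator.input_shape,
    loss_object=_loss_object_tf,      # attack-specific loss
    optimizer=estimator.optimizer,    # newly forwarded optimizer
    train_step=estimator.train_step,  # public property replaces `_train_step`
    channels_first=estimator.channels_first,
    clip_values=estimator.clip_values,
    preprocessing_defences=estimator.preprocessing_defences,
)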

art/estimators/certification/derandomized_smoothing/tensorflow.py

Lines changed: 32 additions & 6 deletions
@@ -68,6 +68,7 @@ def __init__(
         logits: bool,
         input_shape: Tuple[int, ...],
         loss_object: Optional["tf.Tensor"] = None,
+        optimizer: Optional["tf.keras.optimizers.Optimizer"] = None,
         train_step: Optional[Callable] = None,
         channels_first: bool = False,
         clip_values: Optional["CLIP_VALUES_TYPE"] = None,
@@ -88,8 +89,12 @@ def __init__(
         :param logits: if the model returns logits or normalized probabilities
         :param input_shape: Shape of one input for the classifier, e.g. for MNIST input_shape=(28, 28, 1).
         :param loss_object: The loss function for which to compute gradients. This parameter is applied for training
-                            the model and computing gradients of the loss w.r.t. the input.
-        :param train_step: A function that applies a gradient update to the trainable variables.
+               the model and computing gradients of the loss w.r.t. the input.
+        :param optimizer: The optimizer used to train the classifier.
+        :param train_step: A function that applies a gradient update to the trainable variables with signature
+               `train_step(model, images, labels)`. This will override the default training loop that uses the
+               provided `loss_object` and `optimizer` parameters. It is recommended to use the `@tf.function`
+               decorator, if possible, for efficient training.
         :param channels_first: Set channels first or last.
         :param clip_values: Tuple of the form `(min, max)` of floats or `np.ndarray` representing the minimum and
                maximum values allowed for features. If floats are provided, these will be used as the range of all
@@ -106,6 +111,7 @@ def __init__(
             nb_classes=nb_classes,
             input_shape=input_shape,
             loss_object=loss_object,
+            optimizer=optimizer,
             train_step=train_step,
             channels_first=channels_first,
             clip_values=clip_values,
@@ -144,10 +150,30 @@ def fit(self, x: np.ndarray, y: np.ndarray, batch_size: int = 128, nb_epochs: in
                        "scheduler" which is an optional function that will be called at the end of every
                        epoch to adjust the learning rate.
         """
+        import tensorflow as tf
+
         if self._train_step is None:  # pragma: no cover
-            raise TypeError(
-                "The training function `train_step` is required for fitting a model but it has not been " "defined."
-            )
+            if self._loss_object is None:  # pragma: no cover
+                raise TypeError(
+                    "A loss function `loss_object` or training function `train_step` is required for fitting the "
+                    "model, but it has not been defined."
+                )
+            if self._optimizer is None:  # pragma: no cover
+                raise ValueError(
+                    "An optimizer `optimizer` or training function `train_step` is required for fitting the "
+                    "model, but it has not been defined."
+                )
+
+            @tf.function
+            def train_step(model, images, labels):
+                with tf.GradientTape() as tape:
+                    predictions = model(images, training=True)
+                    loss = self.loss_object(labels, predictions)
+                gradients = tape.gradient(loss, model.trainable_variables)
+                self.optimizer.apply_gradients(zip(gradients, model.trainable_variables))
+
+        else:
+            train_step = self._train_step

         scheduler = kwargs.get("scheduler")

@@ -167,7 +193,7 @@ def fit(self, x: np.ndarray, y: np.ndarray, batch_size: int = 128, nb_epochs: in
                 i_batch = np.copy(x_preprocessed[ind[m * batch_size : (m + 1) * batch_size]])
                 labels = y_preprocessed[ind[m * batch_size : (m + 1) * batch_size]]
                 images = self.ablator.forward(i_batch)
-                self._train_step(self.model, images, labels)
+                train_step(self.model, images, labels)

             if scheduler is not None:
                 scheduler(epoch)
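
With the optimizer stored on the estimator, `fit()` can now build its own `@tf.function` training step from `loss_object` and `optimizer`, so a user-supplied `train_step` becomes optional. A hedged usage sketch; the class name `TensorFlowV2DeRandomizedSmoothing` and the elided ablation arguments are assumptions about this module, and `model`, `x_train`, `y_train` are placeholders:

import tensorflow as tf
# Assumed import path and class name for this module.
from art.estimators.certification.derandomized_smoothing import TensorFlowV2DeRandomizedSmoothing

classifier = TensorFlowV2DeRandomizedSmoothing(
    model=model,  # an existing tf.keras.Model (placeholder)
    nb_classes=10,
    input_shape=(28, 28, 1),
    loss_object=tf.keras.losses.CategoricalCrossentropy(from_logits=True),
    optimizer=tf.keras.optimizers.Adam(learning_rate=0.01),
    # ... ablation-specific constructor arguments omitted ...
)
# No train_step given: fit() falls back to the default loop built from
# loss_object and optimizer, ablating each batch before the update.
classifier.fit(x_train, y_train, batch_size=128, nb_epochs=10)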

art/estimators/certification/randomized_smoothing/tensorflow.py

Lines changed: 30 additions & 6 deletions
@@ -60,6 +60,7 @@ def __init__(
         nb_classes: int,
         input_shape: Tuple[int, ...],
         loss_object: Optional["tf.Tensor"] = None,
+        optimizer: Optional["tf.keras.optimizers.Optimizer"] = None,
         train_step: Optional[Callable] = None,
         channels_first: bool = False,
         clip_values: Optional["CLIP_VALUES_TYPE"] = None,
@@ -78,8 +79,12 @@ def __init__(
         :param nb_classes: the number of classes in the classification task.
         :param input_shape: Shape of one input for the classifier, e.g. for MNIST input_shape=(28, 28, 1).
         :param loss_object: The loss function for which to compute gradients. This parameter is applied for training
-                            the model and computing gradients of the loss w.r.t. the input.
-        :param train_step: A function that applies a gradient update to the trainable variables.
+               the model and computing gradients of the loss w.r.t. the input.
+        :param optimizer: The optimizer used to train the classifier.
+        :param train_step: A function that applies a gradient update to the trainable variables with signature
+               `train_step(model, images, labels)`. This will override the default training loop that uses the
+               provided `loss_object` and `optimizer` parameters. It is recommended to use the `@tf.function`
+               decorator, if possible, for efficient training.
         :param channels_first: Set channels first or last.
         :param clip_values: Tuple of the form `(min, max)` of floats or `np.ndarray` representing the minimum and
                maximum values allowed for features. If floats are provided, these will be used as the range of all
@@ -105,6 +110,7 @@ def __init__(
             nb_classes=nb_classes,
             input_shape=input_shape,
             loss_object=loss_object,
+            optimizer=optimizer,
             train_step=train_step,
             channels_first=channels_first,
             clip_values=clip_values,
@@ -137,9 +143,27 @@ def fit(self, x: np.ndarray, y: np.ndarray, batch_size: int = 128, nb_epochs: in
         import tensorflow as tf

         if self._train_step is None:  # pragma: no cover
-            raise TypeError(
-                "The training function `train_step` is required for fitting a model but it has not been " "defined."
-            )
+            if self._loss_object is None:  # pragma: no cover
+                raise TypeError(
+                    "A loss function `loss_object` or training function `train_step` is required for fitting the "
+                    "model, but it has not been defined."
+                )
+            if self._optimizer is None:  # pragma: no cover
+                raise ValueError(
+                    "An optimizer `optimizer` or training function `train_step` is required for fitting the "
+                    "model, but it has not been defined."
+                )
+
+            @tf.function
+            def train_step(model, images, labels):
+                with tf.GradientTape() as tape:
+                    predictions = model(images, training=True)
+                    loss = self.loss_object(labels, predictions)
+                gradients = tape.gradient(loss, model.trainable_variables)
+                self.optimizer.apply_gradients(zip(gradients, model.trainable_variables))
+
+        else:
+            train_step = self._train_step

         y = check_and_transform_label_format(y, nb_classes=self.nb_classes)

@@ -156,7 +180,7 @@ def fit(self, x: np.ndarray, y: np.ndarray, batch_size: int = 128, nb_epochs: in
             for images, labels in train_ds:
                 # Add random noise for randomized smoothing
                 images += tf.random.normal(shape=images.shape, mean=0.0, stddev=self.scale)
-                self._train_step(self.model, images, labels)
+                train_step(self.model, images, labels)

     def predict(self, x: np.ndarray, batch_size: int = 128, **kwargs) -> np.ndarray:  # type: ignore
         """

art/estimators/classification/tensorflow.py

Lines changed: 64 additions & 13 deletions
@@ -800,6 +800,7 @@ class TensorFlowV2Classifier(ClassGradientsMixin, ClassifierMixin, TensorFlowV2E
     + [
         "input_shape",
         "loss_object",
+        "optimizer",
         "train_step",
     ]
 )
@@ -810,6 +811,7 @@ def __init__(
         nb_classes: int,
         input_shape: Tuple[int, ...],
         loss_object: Optional["tf.keras.losses.Loss"] = None,
+        optimizer: Optional["tf.keras.optimizers.Optimizer"] = None,
         train_step: Optional[Callable] = None,
         channels_first: bool = False,
         clip_values: Optional["CLIP_VALUES_TYPE"] = None,
@@ -824,10 +826,12 @@ def __init__(
         :param nb_classes: the number of classes in the classification task.
         :param input_shape: shape of one input for the classifier, e.g. for MNIST input_shape=(28, 28, 1).
         :param loss_object: The loss function for which to compute gradients. This parameter is applied for training
-               the model and computing gradients of the loss w.r.t. the input.
-        :type loss_object: `tf.keras.losses`
+               the model and computing gradients of the loss w.r.t. the input.
+        :param optimizer: The optimizer used to train the classifier.
         :param train_step: A function that applies a gradient update to the trainable variables with signature
-               train_step(model, images, labels).
+               `train_step(model, images, labels)`. This will override the default training loop that uses the
+               provided `loss_object` and `optimizer` parameters. It is recommended to use the `@tf.function`
+               decorator, if possible, for efficient training.
         :param channels_first: Set channels first or last.
         :param clip_values: Tuple of the form `(min, max)` of floats or `np.ndarray` representing the minimum and
                maximum values allowed for features. If floats are provided, these will be used as the range of all
@@ -853,6 +857,7 @@ def __init__(
         self.nb_classes = nb_classes
         self._input_shape = input_shape
         self._loss_object = loss_object
+        self._optimizer = optimizer
         self._train_step = train_step

         # Check if the loss function requires as input index labels instead of one-hot-encoded labels
@@ -879,6 +884,15 @@ def loss_object(self) -> "tf.keras.losses.Loss":
         """
         return self._loss_object  # type: ignore

+    @property
+    def optimizer(self) -> "tf.keras.optimizers.Optimizer":
+        """
+        Return the optimizer.
+
+        :return: The optimizer.
+        """
+        return self._optimizer  # type: ignore
+
     @property
     def train_step(self) -> Callable:
         """
@@ -949,9 +963,27 @@ def fit(self, x: np.ndarray, y: np.ndarray, batch_size: int = 128, nb_epochs: in
         import tensorflow as tf

         if self._train_step is None:  # pragma: no cover
-            raise TypeError(
-                "The training function `train_step` is required for fitting a model but it has not been " "defined."
-            )
+            if self._loss_object is None:  # pragma: no cover
+                raise TypeError(
+                    "A loss function `loss_object` or training function `train_step` is required for fitting the "
+                    "model, but it has not been defined."
+                )
+            if self._optimizer is None:  # pragma: no cover
+                raise ValueError(
+                    "An optimizer `optimizer` or training function `train_step` is required for fitting the "
+                    "model, but it has not been defined."
+                )
+
+            @tf.function
+            def train_step(model, images, labels):
+                with tf.GradientTape() as tape:
+                    predictions = model(images, training=True)
+                    loss = self.loss_object(labels, predictions)
+                gradients = tape.gradient(loss, model.trainable_variables)
+                self.optimizer.apply_gradients(zip(gradients, model.trainable_variables))
+
+        else:
+            train_step = self._train_step

         y = check_and_transform_label_format(y, nb_classes=self.nb_classes)

@@ -966,7 +998,7 @@ def fit(self, x: np.ndarray, y: np.ndarray, batch_size: int = 128, nb_epochs: in

         for _ in range(nb_epochs):
             for images, labels in train_ds:
-                self._train_step(self.model, images, labels)
+                train_step(self.model, images, labels)

     def fit_generator(self, generator: "DataGenerator", nb_epochs: int = 20, **kwargs) -> None:
         """
@@ -982,9 +1014,27 @@ def fit_generator(self, generator: "DataGenerator", nb_epochs: int = 20, **kwarg
         from art.data_generators import TensorFlowV2DataGenerator

         if self._train_step is None:  # pragma: no cover
-            raise TypeError(
-                "The training function `train_step` is required for fitting a model but it has not been " "defined."
-            )
+            if self._loss_object is None:  # pragma: no cover
+                raise TypeError(
+                    "A loss function `loss_object` or training function `train_step` is required for fitting the "
+                    "model, but it has not been defined."
+                )
+            if self._optimizer is None:  # pragma: no cover
+                raise ValueError(
+                    "An optimizer `optimizer` or training function `train_step` is required for fitting the "
+                    "model, but it has not been defined."
+                )
+
+            @tf.function
+            def train_step(model, images, labels):
+                with tf.GradientTape() as tape:
+                    predictions = model(images, training=True)
+                    loss = self.loss_object(labels, predictions)
+                gradients = tape.gradient(loss, model.trainable_variables)
+                self.optimizer.apply_gradients(zip(gradients, model.trainable_variables))
+
+        else:
+            train_step = self._train_step

         # Train directly in TensorFlow
         from art.preprocessing.standardisation_mean_std.tensorflow import StandardisationMeanStdTensorFlow
@@ -1004,7 +1054,7 @@ def fit_generator(self, generator: "DataGenerator", nb_epochs: int = 20, **kwarg
             for i_batch, o_batch in generator.iterator:
                 if self._reduce_labels:
                     o_batch = tf.math.argmax(o_batch, axis=1)
-                self._train_step(self._model, i_batch, o_batch)
+                train_step(self._model, i_batch, o_batch)
         else:
             # Fit a generic data generator through the API
             super().fit_generator(generator, nb_epochs=nb_epochs)
@@ -1263,6 +1313,7 @@ def clone_for_refitting(
         clone._train_step = self._train_step  # pylint: disable=W0212
         clone._reduce_labels = self._reduce_labels  # pylint: disable=W0212
         clone._loss_object = self._loss_object  # pylint: disable=W0212
+        clone._optimizer = self._optimizer  # pylint: disable=W0212
         return clone

     def reset(self) -> None:
@@ -1401,8 +1452,8 @@ def save(self, filename: str, path: Optional[str] = None) -> None:
     def __repr__(self):
         repr_ = (
             f"{self.__module__ + '.' + self.__class__.__name__}(model={self._model}, nb_classes={self.nb_classes}, "
-            f"input_shape={self._input_shape}, loss_object={self._loss_object}, train_step={self._train_step}, "
-            f"channels_first={self.channels_first}, clip_values={self.clip_values!r}, "
+            f"input_shape={self._input_shape}, loss_object={self._loss_object}, optimizer={self.optimizer}, "
+            f"train_step={self._train_step}, channels_first={self.channels_first}, clip_values={self.clip_values!r}, "
             f"preprocessing_defences={self.preprocessing_defences}, "
             f"postprocessing_defences={self.postprocessing_defences}, preprocessing={self.preprocessing})"
         )
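
The updated docstring fixes the precedence: a provided `train_step` overrides the default loop, while `optimizer` is only consulted when no `train_step` is given. A sketch of an override using the documented `train_step(model, images, labels)` signature, here adding gradient clipping on top of the default behaviour; `model`, `x_train`, and `y_train` are placeholders:

import tensorflow as tf
from art.estimators.classification import TensorFlowV2Classifier

loss_object = tf.keras.losses.CategoricalCrossentropy(from_logits=True)
optimizer = tf.keras.optimizers.SGD(learning_rate=0.01)

@tf.function  # recommended by the docstring for efficient training
def clipped_train_step(model, images, labels):
    with tf.GradientTape() as tape:
        predictions = model(images, training=True)
        loss = loss_object(labels, predictions)
    gradients = tape.gradient(loss, model.trainable_variables)
    gradients = [tf.clip_by_norm(g, 1.0) for g in gradients]  # extra step vs. the default
    optimizer.apply_gradients(zip(gradients, model.trainable_variables))

classifier = TensorFlowV2Classifier(
    model=model,  # an existing tf.keras.Model (placeholder)
    nb_classes=10,
    input_shape=(28, 28, 1),
    loss_object=loss_object,
    optimizer=optimizer,            # used only by the default loop; train_step takes precedence
    train_step=clipped_train_step,  # overrides the built-in loop
)
classifier.fit(x_train, y_train, batch_size=128, nb_epochs=10)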

examples/get_started_tensorflow_v2.py

Lines changed: 2 additions & 12 deletions
@@ -52,26 +52,16 @@ def call(self, x):
         return x


-optimizer = tf.keras.optimizers.Adam(learning_rate=0.01)
-
-
-def train_step(model, images, labels):
-    with tf.GradientTape() as tape:
-        predictions = model(images, training=True)
-        loss = loss_object(labels, predictions)
-    gradients = tape.gradient(loss, model.trainable_variables)
-    optimizer.apply_gradients(zip(gradients, model.trainable_variables))
-
-
 model = TensorFlowModel()
 loss_object = tf.keras.losses.CategoricalCrossentropy(from_logits=True)
+optimizer = tf.keras.optimizers.Adam(learning_rate=0.01)

 # Step 3: Create the ART classifier

 classifier = TensorFlowV2Classifier(
     model=model,
     loss_object=loss_object,
-    train_step=train_step,
+    optimizer=optimizer,
     nb_classes=10,
     input_shape=(28, 28, 1),
     clip_values=(0, 1),
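
The manual training loop deleted from this example is essentially what `TensorFlowV2Classifier.fit()` now constructs internally from `loss_object` and `optimizer` (with `@tf.function` added). A hypothetical continuation; the `x_train`/`y_train` arrays come from earlier steps of the script, which are not shown in this diff:

# Training now uses the classifier's built-in @tf.function train step.
classifier.fit(x_train, y_train, batch_size=64, nb_epochs=3)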
