Commit c5c6012

Merge pull request #2208 from Foxglove144/new_branch
Update example scripts
2 parents bc39091 + 92ffe0a commit c5c6012

File tree

7 files changed (+28, -12 lines)


examples/adversarial_training_FBF.py

Lines changed: 1 addition & 1 deletion
@@ -11,7 +11,7 @@
import torchvision.transforms as transforms
from torch.utils.data import Dataset, DataLoader

-from art.classifiers import PyTorchClassifier
+from art.estimators.classification import PyTorchClassifier
from art.data_generators import PyTorchDataGenerator
from art.defences.trainer import AdversarialTrainerFBFPyTorch
from art.utils import load_cifar10
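
The only change here is the import path: PyTorchClassifier now comes from art.estimators.classification rather than the older art.classifiers location. A minimal sketch of how the relocated class is typically constructed around a torch model (the toy network, optimizer, and shapes below are illustrative assumptions, not part of this commit):

import torch.nn as nn
import torch.optim as optim

from art.estimators.classification import PyTorchClassifier

# Toy CIFAR-10-shaped model; the real example defines its own network.
model = nn.Sequential(nn.Flatten(), nn.Linear(3 * 32 * 32, 10))
criterion = nn.CrossEntropyLoss()
optimizer = optim.Adam(model.parameters(), lr=0.001)

classifier = PyTorchClassifier(
    model=model,
    loss=criterion,
    optimizer=optimizer,
    input_shape=(3, 32, 32),
    nb_classes=10,
    clip_values=(0.0, 1.0),
)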

examples/adversarial_training_data_augmentation.py

Lines changed: 4 additions & 0 deletions
@@ -1,6 +1,10 @@
"""
This is an example of how to use ART and Keras to perform adversarial training using data generators for CIFAR10
"""
+import tensorflow as tf
+
+tf.compat.v1.disable_eager_execution()
+
import keras
import numpy as np
from keras.layers import Conv2D, Dense, Flatten, MaxPooling2D, Input, BatchNormalization
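
The inserted tf.compat.v1.disable_eager_execution() call is the recurring theme of this commit: the Keras-based examples wrap their models in ART's KerasClassifier, which relies on the graph-mode (TF1-compatible) Keras backend, so eager execution has to be switched off before any TF/Keras objects are created. A minimal sketch of the pattern, assuming a small compiled Keras model (the model below is illustrative, not taken from the example):

import tensorflow as tf

# Must run before any Keras/ART objects are created.
tf.compat.v1.disable_eager_execution()

import keras
from art.estimators.classification import KerasClassifier

model = keras.Sequential(
    [keras.layers.Dense(10, activation="softmax", input_shape=(32 * 32 * 3,))]
)
model.compile(loss="categorical_crossentropy", optimizer="adam", metrics=["accuracy"])

# The wrapper exposes fit/predict/loss gradients to ART attacks and defences.
classifier = KerasClassifier(model=model, clip_values=(0.0, 1.0), use_logits=False)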

examples/get_started_lightgbm.py

Lines changed: 1 addition & 1 deletion
@@ -27,7 +27,7 @@

# Step 2: Create the model

-params = {"objective": "multiclass", "metric": "multi_logloss", "num_class": 10}
+params = {"objective": "multiclass", "metric": "multi_logloss", "num_class": 10, "force_col_wise": True}
train_set = lgb.Dataset(x_train, label=np.argmax(y_train, axis=1))
test_set = lgb.Dataset(x_test, label=np.argmax(y_test, axis=1))
model = lgb.train(params=params, train_set=train_set, num_boost_round=100, valid_sets=[test_set])
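
The added "force_col_wise": True pins LightGBM to column-wise histogram building, which removes the row-wise/col-wise auto-detection warning LightGBM otherwise prints at the start of training. After training, the get-started flow hands the booster to ART; a minimal sketch of that step, assuming model and x_test come from the lines above and that clip values of [0, 1] match the script's feature scaling:

from art.estimators.classification import LightGBMClassifier

# `model` is the booster returned by lgb.train(...) above; the clip values are an
# assumption about how the script scales its features.
classifier = LightGBMClassifier(model=model, clip_values=(0.0, 1.0))
predictions = classifier.predict(x_test)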

examples/get_started_xgboost.py

Lines changed: 1 addition & 1 deletion
@@ -27,7 +27,7 @@

# Step 2: Create the model

-params = {"objective": "multi:softprob", "metric": "accuracy", "num_class": 10}
+params = {"objective": "multi:softprob", "eval_metric": ["mlogloss", "merror"], "num_class": 10}
dtrain = xgb.DMatrix(x_train, label=np.argmax(y_train, axis=1))
dtest = xgb.DMatrix(x_test, label=np.argmax(y_test, axis=1))
evals = [(dtest, "test"), (dtrain, "train")]
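
Two things change here: "metric" is not an XGBoost training parameter (XGBoost expects eval_metric and warns about unused keys), and the new value asks for both multiclass log-loss and multiclass error on the evaluation sets. A minimal sketch of how these parameters and the evals list feed into training and into ART's wrapper (the nb_features/nb_classes values are assumptions about the script's data, not part of the diff):

import xgboost as xgb
from art.estimators.classification import XGBoostClassifier

# Continues the hunk above: params, dtrain, dtest and evals are already defined.
model = xgb.train(params=params, dtrain=dtrain, num_boost_round=100, evals=evals)

# A Booster does not expose its input/output sizes, so ART needs them explicitly.
classifier = XGBoostClassifier(model=model, nb_features=x_train.shape[1], nb_classes=10)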

examples/mnist_cnn_fgsm.py

Lines changed: 16 additions & 8 deletions
@@ -2,6 +2,10 @@
"""Trains a convolutional neural network on the MNIST dataset, then attacks it with the FGSM attack."""
from __future__ import absolute_import, division, print_function, unicode_literals

+import tensorflow as tf
+
+tf.compat.v1.disable_eager_execution()
+
from keras.models import Sequential
from keras.layers import Dense, Flatten, Conv2D, MaxPooling2D, Dropout
import numpy as np

@@ -35,12 +39,16 @@
acc = np.sum(preds == np.argmax(y_test, axis=1)) / y_test.shape[0]
print("\nTest accuracy: %.2f%%" % (acc * 100))

-# Craft adversarial samples with FGSM
-epsilon = 0.1 # Maximum perturbation
-adv_crafter = FastGradientMethod(classifier, eps=epsilon)
-x_test_adv = adv_crafter.generate(x=x_test)
+# Define epsilon values
+epsilon_values = [0.01, 0.1, 0.15, 0.2, 0.25, 0.3]

-# Evaluate the classifier on the adversarial examples
-preds = np.argmax(classifier.predict(x_test_adv), axis=1)
-acc = np.sum(preds == np.argmax(y_test, axis=1)) / y_test.shape[0]
-print("\nTest accuracy on adversarial sample: %.2f%%" % (acc * 100))
+# Iterate over epsilon values
+for epsilon in epsilon_values:
+    # Craft adversarial samples with FGSM
+    adv_crafter = FastGradientMethod(classifier, eps=epsilon)
+    x_test_adv = adv_crafter.generate(x=x_test, y=y_test)
+
+    # Evaluate the classifier on the adversarial examples
+    preds = np.argmax(classifier.predict(x_test_adv), axis=1)
+    acc = np.sum(preds == np.argmax(y_test, axis=1)) / y_test.shape[0]
+    print("Test accuracy on adversarial sample (epsilon = %.2f): %.2f%%" % (epsilon, acc * 100))

examples/mnist_poison_detection.py

Lines changed: 2 additions & 0 deletions
@@ -5,7 +5,9 @@

import pprint
import json
+import tensorflow as tf

+tf.compat.v1.disable_eager_execution()
from keras.models import Sequential
from keras.layers import Dense, Flatten, Conv2D, MaxPooling2D, Dropout
import numpy as np

examples/mnist_transferability.py

Lines changed: 3 additions & 1 deletion
@@ -12,6 +12,8 @@
import numpy as np
import tensorflow as tf

+tf.compat.v1.disable_eager_execution()
+
from art.attacks.evasion import DeepFool
from art.estimators.classification import KerasClassifier, TensorFlowClassifier
from art.utils import load_mnist

@@ -60,7 +62,7 @@ def cnn_mnist_k(input_shape):


# Get session
-session = tf.Session()
+session = tf.compat.v1.Session()
k.set_session(session)

# Read MNIST dataset
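
Besides the eager-execution guard, this script needs a TF1-style session because its native TensorFlow model is wrapped with the session-based TensorFlowClassifier; tf.compat.v1.Session() is the TF2-compatible spelling of the removed tf.Session(). A minimal sketch of the transferability check the script performs, under the assumed names classifier_k and classifier_tf for the two wrapped models:

from art.attacks.evasion import DeepFool

# Craft adversarial examples against the Keras-wrapped model...
attack = DeepFool(classifier_k)
x_test_adv = attack.generate(x=x_test)

# ...then measure how well they transfer to the TensorFlow-wrapped model.
preds_tf = np.argmax(classifier_tf.predict(x_test_adv), axis=1)
acc_tf = np.sum(preds_tf == np.argmax(y_test, axis=1)) / y_test.shape[0]
print("Accuracy of the TensorFlow model on adversarial samples crafted on Keras: %.2f%%" % (acc_tf * 100))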
