
Commit c4ab63f

committed: working reduction with minimal error
1 parent aaf1401 commit c4ab63f

File tree

2 files changed: +26 -21 lines changed


manuscripts/Posion25/main.py

Lines changed: 2 additions & 1 deletion
@@ -15,6 +15,7 @@ def main():
     # get the experiment model 1 is MNIST, 2 is MNIST with data augmentation, 3 is AudioMNIST
     args.model_experiment = args.experiment
     args.save_dir = os.path.join(args.save_dir, f'experiment_{args.model_experiment}')
+    args.model_path = os.path.join(args.save_dir, f'model_experiment_{args.model_experiment}.keras')
     if args.model_experiment == 1:
         args.model_path = args.model_path or 'mnist_model_1.keras'
         model = load_MNIST_model(args.model_path)
@@ -37,7 +38,7 @@ def main():
         model.load_weights(args.model_path)
     else:
         print("Training model...")
-        model = train_model(model, train_ds, epochs=10)
+        model = train_model(model, train_ds, epochs=40)
         model.save(args.model_path)

     # Evaluate the model
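
For context, main() follows a cache-or-train pattern: the .keras path is now derived from the per-experiment save directory, existing weights are reloaded when a saved model is found (the guarding condition sits outside this hunk), and otherwise the model is trained (now for 40 epochs instead of 10) and saved to the same path. A minimal sketch of that pattern, assuming a Keras model; get_or_train_model and build_model are hypothetical names, the script itself uses load_MNIST_model and train_model, and the cache hit here uses load_model rather than the script's load_weights:

import os
import tensorflow as tf

def get_or_train_model(build_model, train_ds, model_path, epochs=40):
    # Reuse the cached .keras model if it exists; otherwise build, train, and save it.
    if os.path.exists(model_path):
        return tf.keras.models.load_model(model_path)
    model = build_model()
    print("Training model...")
    model.fit(train_ds, epochs=epochs)
    model.save(model_path)
    return model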

manuscripts/Posion25/taint.py

Lines changed: 24 additions & 20 deletions
@@ -2,36 +2,37 @@
 import numpy as np
 import tensorflow as tf
 import pickle
-from manuscripts.Posion25.analysis import best_analysis, denoise_analysis, reduce_excess_perturbations, full_analysis
+from manuscripts.Posion25.analysis import *
 from Adversarial_Observation.Swarm import ParticleSwarm
 from analysis import *

 def adversarial_attack_blackbox(model, dataset, image_index, output_dir='results', num_iterations=30, num_particles=100):

     pickle_path = os.path.join(output_dir, 'attacker.pkl')
+
+    dataset_list = list(dataset.as_numpy_iterator())
+    all_images, all_labels = zip(*dataset_list)
+    all_images = np.concatenate(all_images, axis=0)
+    all_labels = np.concatenate(all_labels, axis=0)
+
+    if image_index < 0 or image_index >= len(all_images):
+        raise ValueError(f"Image index {image_index} out of range")
+
+    single_input = all_images[image_index]
+    single_target = np.argmax(all_labels[image_index])
+    target_class = (single_target + 1) % 10
+
+    input_set = np.stack([
+        single_input + (np.random.uniform(0, 1, single_input.shape) * (np.random.rand(*single_input.shape) < 0.9))
+        for _ in range(num_particles)
+    ])
+
     if os.path.exists(pickle_path):
         with open(pickle_path, 'rb') as f:
             attacker = pickle.load(f)
         print(f"Loaded attacker from {pickle_path}")
     else:

-        dataset_list = list(dataset.as_numpy_iterator())
-        all_images, all_labels = zip(*dataset_list)
-        all_images = np.concatenate(all_images, axis=0)
-        all_labels = np.concatenate(all_labels, axis=0)
-
-        if image_index < 0 or image_index >= len(all_images):
-            raise ValueError(f"Image index {image_index} out of range")
-
-        single_input = all_images[image_index]
-        single_target = np.argmax(all_labels[image_index])
-        target_class = (single_target + 1) % 10
-
-        input_set = np.stack([
-            single_input + (np.random.uniform(0, 1, single_input.shape) * (np.random.rand(*single_input.shape) < 0.9))
-            for _ in range(num_particles)
-        ])
-
         attacker = ParticleSwarm(
             model=model, input_set=input_set, starting_class=single_target,
             target_class=target_class, num_iterations=num_iterations,
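
Hoisting this setup above the pickle check matters for correctness: single_input and target_class are passed to analyze_attack at the end of the function, so in the previous version a cache hit would have reached that call with both names undefined. The hoisted block also builds the swarm's starting population: each particle is the chosen image plus uniform [0, 1) noise applied to a random subset of pixels (the Boolean mask np.random.rand(...) < 0.9 broadcasts as 0/1), and the target class is simply (true label + 1) mod 10. A standalone sketch of that initialisation; init_particles is a hypothetical helper, not part of the repository:

import numpy as np

def init_particles(single_input, num_particles=100, perturb_prob=0.9, seed=None):
    # Each particle = original image + uniform noise on ~90% of randomly chosen pixels.
    rng = np.random.default_rng(seed)
    particles = []
    for _ in range(num_particles):
        noise = rng.uniform(0.0, 1.0, single_input.shape)
        mask = rng.random(single_input.shape) < perturb_prob
        particles.append(single_input + noise * mask)
    return np.stack(particles)
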
@@ -42,7 +43,7 @@ def adversarial_attack_blackbox(model, dataset, image_index, output_dir='results
         with open(pickle_path, 'wb') as f:
             pickle.dump(attacker, f)
         print(f"Saved attacker to {pickle_path}")
-
+    print("Adversarial attack completed. Analyzing results...")
     analyze_attack(attacker, single_input, target_class)

 def best_analysis(attacker, original_data, target):
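
The attacker itself is cached the same way the trained model is: when results/attacker.pkl exists the ParticleSwarm is unpickled rather than re-run, and otherwise the finished attacker is pickled for the next run. A generic load-or-compute sketch of that pattern; cached and compute are hypothetical names used only for illustration:

import os
import pickle

def cached(path, compute):
    # Return the pickled object at `path` if present; otherwise compute it, pickle it, and return it.
    if os.path.exists(path):
        with open(path, 'rb') as f:
            obj = pickle.load(f)
        print(f"Loaded cached object from {path}")
        return obj
    obj = compute()
    os.makedirs(os.path.dirname(path) or '.', exist_ok=True)
    with open(path, 'wb') as f:
        pickle.dump(obj, f)
    print(f"Saved object to {path}")
    return obj
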
@@ -200,7 +201,10 @@ def full_analysis(attacker, input_data, target):
     print(f"Full analysis saved to {path}")

 def analyze_attack(attacker, original_img, target):
+    print("Starting analysis of the adversarial attack...")
     best_analysis(attacker, original_img, target)
-    reduced_img = reduce_excess_perturbations_scale(attacker, original_img, attacker.global_best_position.numpy(), target)
+    print("Reducing excess perturbations...")
+    reduced_img = reduce_excess_perturbations(attacker, original_img, attacker.global_best_position.numpy(), target)
     denoise_analysis(attacker, original_img, reduced_img, target)
+    print("Performing full analysis of the attack...")
     full_analysis(attacker, original_img, target)
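
analyze_attack now runs the pipeline best_analysis, then reduce_excess_perturbations, then denoise_analysis, then full_analysis on the swarm's global best position, with progress prints between stages. The implementation of reduce_excess_perturbations is not part of this commit; purely as an illustration of the idea behind "reduction with minimal error", one common approach (an assumption here, not the repository's code) is to greedily revert perturbed pixels to their original values whenever the model still predicts the target class:

import numpy as np

def greedy_reduce_perturbations(model, original, adversarial, target_class):
    # Hypothetical sketch: undo one perturbed pixel at a time and keep the
    # reversion only if the reduced image is still classified as target_class.
    reduced = adversarial.copy()
    for idx in map(tuple, np.argwhere(reduced != original)):
        candidate = reduced.copy()
        candidate[idx] = original[idx]
        pred = np.argmax(model.predict(candidate[np.newaxis, ...], verbose=0))
        if pred == target_class:
            reduced = candidate
    return reduced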
