-
Notifications
You must be signed in to change notification settings - Fork 17
/
attack.py
137 lines (127 loc) · 8.15 KB
/
attack.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
import json
from commons import get_model, transform_image, rev_transform
import GAE
import torch
# Mapping from ImageNet class index (stringified int, e.g. "207") to
# [wordnet_id, human_readable_label]; loaded once at import time.
# Fixed: json.load(open(...)) leaked the file handle — use a context
# manager so the file is closed deterministically.
with open('imagenet_class_index.json') as _f:
    imagenet_class_index = json.load(_f)
def fgsm_untargeted(model_inp, image_bytes, epsilon, device=torch.device('cpu')):
    """Run an untargeted FGSM attack on one image.

    Returns (top5_original, top5_perturbed, perturbed_image,
    original_image, perturbation): the first two are {label: confidence%}
    dicts over the top-5 classes, the last three are de-normalised images.
    """
    model = get_model(model_inp)
    clean = transform_image(image_bytes=image_bytes)
    clean_logits, adv_logits, adv_img = GAE.fgsm(model, clean, epsilon, device)
    # Bring every tensor back onto the CPU before summarising.
    cpu = torch.device('cpu')
    clean_logits = clean_logits.to(cpu)
    adv_logits = adv_logits.to(cpu)
    adv_img = adv_img.to(cpu)
    clean = clean.to(cpu)
    noise = adv_img - clean
    # Build {label: confidence%} for the five highest-scoring classes.
    summaries = []
    for logits in (clean_logits, adv_logits):
        conf = torch.nn.functional.softmax(logits, dim=1)[0] * 100
        _, ranked = torch.sort(logits, descending=True)
        summaries.append({imagenet_class_index[str(i.item())][1]: conf[i.item()].item()
                          for i in ranked[0][:5]})
    top5_original, top5_perturbed = summaries
    # Undo the input normalisation so the tensors render as images.
    adv_view = rev_transform(adv_img[0])
    clean_view = rev_transform(clean[0])
    noise_view = rev_transform(noise[0])
    del model
    return top5_original, top5_perturbed, adv_view, clean_view, noise_view
def fgsm_targeted(model_inp, image_bytes, epsilon, target, device=torch.device('cpu')):
    """Run a targeted FGSM attack, steering the image toward class `target`.

    Returns (top5_original, top5_perturbed, perturbed_image,
    original_image, perturbation): {label: confidence%} dicts plus the
    de-normalised adversarial image, original image, and their difference.
    """
    model = get_model(model_inp)
    clean = transform_image(image_bytes=image_bytes)
    # GAE expects the target class as a 1-element LongTensor.
    target_t = torch.tensor([target], dtype=torch.long)
    clean_logits, adv_logits, adv_img = GAE.fgsm_targeted(model, clean, epsilon, target_t, device)
    cpu = torch.device('cpu')  # post-processing happens on the CPU
    clean_logits, adv_logits = clean_logits.to(cpu), adv_logits.to(cpu)
    adv_img, clean = adv_img.to(cpu), clean.to(cpu)
    noise = adv_img - clean
    # Top-5 {label: confidence%} for the clean and adversarial predictions.
    def _top5(logits):
        conf = torch.nn.functional.softmax(logits, dim=1)[0] * 100
        _, ranked = torch.sort(logits, descending=True)
        return {imagenet_class_index[str(i.item())][1]: conf[i.item()].item()
                for i in ranked[0][:5]}
    top5_original = _top5(clean_logits)
    top5_perturbed = _top5(adv_logits)
    result = (top5_original, top5_perturbed,
              rev_transform(adv_img[0]), rev_transform(clean[0]), rev_transform(noise[0]))
    del model
    return result
def basic_iterative(model_inp, image_bytes, alpha, epsilon, num_iter, device=torch.device('cpu')):
    """Run the basic iterative (I-FGSM) attack on one image.

    `alpha` is the per-step size, `epsilon` the total perturbation budget,
    `num_iter` the iteration count. Returns (top5_original, top5_perturbed,
    perturbed_image, original_image, perturbation) — {label: confidence%}
    dicts plus three de-normalised image tensors.
    """
    model = get_model(model_inp)
    clean = transform_image(image_bytes=image_bytes)
    clean_logits, adv_logits, adv_img = GAE.basic_iterative(
        model, clean, alpha, epsilon, num_iter, device)
    # Summarisation happens entirely on the CPU.
    cpu = torch.device('cpu')
    clean_logits, adv_logits = clean_logits.to(cpu), adv_logits.to(cpu)
    adv_img, clean = adv_img.to(cpu), clean.to(cpu)
    noise = adv_img - clean
    # {label: confidence%} over the five highest-scoring classes.
    def _top5(logits):
        conf = torch.nn.functional.softmax(logits, dim=1)[0] * 100
        _, ranked = torch.sort(logits, descending=True)
        return {imagenet_class_index[str(i.item())][1]: conf[i.item()].item()
                for i in ranked[0][:5]}
    top5_original = _top5(clean_logits)
    top5_perturbed = _top5(adv_logits)
    views = tuple(rev_transform(t[0]) for t in (adv_img, clean, noise))
    del model
    return (top5_original, top5_perturbed) + views
def iterative_ll_class(model_inp, image_bytes, alpha, epsilon, num_iter, device=torch.device('cpu')):
    """Run the iterative least-likely-class attack on one image.

    `alpha` is the per-step size, `epsilon` the perturbation budget,
    `num_iter` the iteration count. Returns (top5_original, top5_perturbed,
    perturbed_image, original_image, perturbation) — {label: confidence%}
    dicts plus three de-normalised image tensors.
    """
    model = get_model(model_inp)
    tensor = transform_image(image_bytes=image_bytes)
    # BUG FIX: this previously called GAE.basic_iterative (copy-paste from
    # basic_iterative above), so the least-likely-class variant never ran.
    # NOTE(review): assumes GAE exposes iterative_ll_class with the same
    # signature — verify against the GAE module.
    original_output, perturbed_output, perturbed_image = GAE.iterative_ll_class(
        model, tensor, alpha, epsilon, num_iter, device)
    # All post-processing happens on the CPU.
    cpudev = torch.device('cpu')
    original_output, perturbed_output = original_output.to(cpudev), perturbed_output.to(cpudev)
    perturbed_image, tensor = perturbed_image.to(cpudev), tensor.to(cpudev)
    per_tensor = perturbed_image - tensor
    # Softmax confidences (as percentages) and descending class ranking.
    or_percentage = torch.nn.functional.softmax(original_output, dim=1)[0] * 100
    _, or_indices = torch.sort(original_output, descending=True)
    per_percentage = torch.nn.functional.softmax(perturbed_output, dim=1)[0] * 100
    _, per_indices = torch.sort(perturbed_output, descending=True)
    # Top-5 (label, confidence%) pairs for clean and adversarial predictions.
    top5_original = [(imagenet_class_index[str(idx.item())][1], or_percentage[idx.item()].item()) for idx in or_indices[0][:5]]
    top5_perturbed = [(imagenet_class_index[str(idx.item())][1], per_percentage[idx.item()].item()) for idx in per_indices[0][:5]]
    # De-normalise so the tensors render as images.
    perturbed_image = rev_transform(perturbed_image[0])
    original_image = rev_transform(tensor[0])
    perturbation = rev_transform(per_tensor[0])
    del model
    return dict(top5_original), dict(top5_perturbed), perturbed_image, original_image, perturbation
def deep_fool(model_inp, image_bytes, max_iter, device=torch.device('cpu')):
    """Run the DeepFool attack on one image (at most `max_iter` iterations).

    Returns (top5_original, top5_perturbed, perturbed_image,
    original_image, perturbation): {label: confidence%} dicts over the
    top-5 classes plus three de-normalised image tensors.
    """
    model = get_model(model_inp)
    clean = transform_image(image_bytes=image_bytes)
    clean_logits, adv_logits, adv_img = GAE.deep_fool(model, clean, max_iter, device)
    # Move results to the CPU for summarisation.
    cpu = torch.device('cpu')
    clean_logits, adv_logits = clean_logits.to(cpu), adv_logits.to(cpu)
    adv_img, clean = adv_img.to(cpu), clean.to(cpu)
    noise = adv_img - clean
    # {label: confidence%} for the five highest-scoring classes.
    def _top5(logits):
        conf = torch.nn.functional.softmax(logits, dim=1)[0] * 100
        _, ranked = torch.sort(logits, descending=True)
        return {imagenet_class_index[str(i.item())][1]: conf[i.item()].item()
                for i in ranked[0][:5]}
    top5_original = _top5(clean_logits)
    top5_perturbed = _top5(adv_logits)
    result = (top5_original, top5_perturbed,
              rev_transform(adv_img[0]), rev_transform(clean[0]), rev_transform(noise[0]))
    del model
    return result
def lbfgs(model_inp, image_bytes, target, c, bin_search_steps, max_iter, const_upper, device=torch.device('cpu')):
    """Run the L-BFGS attack, steering the image toward class `target`.

    `c` is the initial trade-off constant, refined by `bin_search_steps`
    rounds of binary search capped at `const_upper`, with `max_iter`
    optimiser steps per round. Returns (top5_original, top5_perturbed,
    perturbed_image, original_image, perturbation).
    """
    model = get_model(model_inp)
    clean = transform_image(image_bytes=image_bytes)
    # GAE expects the target class as a 1-element LongTensor.
    target_t = torch.tensor([target], dtype=torch.long)
    clean_logits, adv_logits, adv_img = GAE.lbfgs(
        model, clean, target_t, c, bin_search_steps, max_iter, const_upper, device)
    # Summarise on the CPU.
    cpu = torch.device('cpu')
    clean_logits, adv_logits = clean_logits.to(cpu), adv_logits.to(cpu)
    adv_img, clean = adv_img.to(cpu), clean.to(cpu)
    noise = adv_img - clean
    # {label: confidence%} over the five highest-scoring classes.
    def _top5(logits):
        conf = torch.nn.functional.softmax(logits, dim=1)[0] * 100
        _, ranked = torch.sort(logits, descending=True)
        return {imagenet_class_index[str(i.item())][1]: conf[i.item()].item()
                for i in ranked[0][:5]}
    top5_original = _top5(clean_logits)
    top5_perturbed = _top5(adv_logits)
    result = (top5_original, top5_perturbed,
              rev_transform(adv_img[0]), rev_transform(clean[0]), rev_transform(noise[0]))
    del model
    return result