-
Notifications
You must be signed in to change notification settings - Fork 1
/
AGI_quantitive.py
181 lines (147 loc) · 5.6 KB
/
AGI_quantitive.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
# %%
# from __future__ import print_function
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torchvision import datasets, transforms
from torchvision import models
import numpy as np
import matplotlib.pyplot as plt
import argparse
import os
import cv2
from utils import Normalize, pre_processing, pgd_step
import pickle
import random
from evaluation import CausalMetric, auc, gkern
import json # for loading label names
# torch.manual_seed(0)
# np.random.seed(0)
# Collect every JPEG example image, sorted so the processing order is
# stable and easy to track while inferring.
img_list = sorted(
    fname for fname in os.listdir("examples/") if fname.endswith(".JPEG")
)
#%%
# Command-line parameter definitions.
parser = argparse.ArgumentParser(description='AGI')
parser.add_argument('--cuda', action='store_true', default=True, help='if use the cuda to do the acceleration')
parser.add_argument('--model-type', type=str, default='resnet152', help='the type of network')
parser.add_argument('--eps', type=float, default=0.05, help='epsilon value, aka step size')
parser.add_argument('--iter', type=int, default=20, help="Set the maximum number of adversarial searching iterations")
parser.add_argument('--topk', type=int, default=1, help="Set the k adversarial classes to look for")
# Fix: parse an explicit empty *list* of arguments. The original
# parse_args("") only worked because iterating an empty string yields no
# items; [] is the documented way to force every default (notebook/test use).
args = parser.parse_args([])  # this is only for test purpose
# args = parser.parse_args()  # use this if running as a script!
# %%
# Resolve runtime configuration from the parsed arguments.
epsilon = args.eps
use_cuda = args.cuda
# NOTE(review): device index 1 is hard-coded — confirm this matches the
# intended GPU on the target machine.
device = torch.device("cuda:1" if (use_cuda and torch.cuda.is_available()) else "cpu")
max_iter = args.iter
topk = args.topk
# selected_ids = range(0,999,int(1000/topk)) # define the ids of the selected adversarial class
# Fix: ImageNet has classes 0..999 inclusive; range(0, 999) stops at 998,
# so class 999 could never be sampled. range(1000) covers all 1000 classes.
selected_ids = random.sample(range(1000), topk)
# Fix: close the label file instead of leaking the handle from
# json.load(open(...)).
with open("imagenet_class_index.json") as label_file:
    class_idx = json.load(label_file)
class_names = [class_idx[str(k)][1] for k in range(len(class_idx))]
#%%
# Ensure the output directory for this model exists.
# Fix: the original exists()/mkdir() pairs were racy (TOCTOU) and needed
# two separate checks; makedirs(exist_ok=True) creates the whole
# 'results/<model_type>' path in one idempotent call.
os.makedirs('results/' + args.model_type, exist_ok=True)
#%%
# start to create models...
# Build the backbone network selected on the command line; every entry
# loads ImageNet-pretrained weights.
_model_builders = {
    'inception': models.inception_v3,
    'resnet152': models.resnet152,
    'resnet18': models.resnet18,
    'vgg19': models.vgg19_bn,
}
if args.model_type not in _model_builders:
    raise Exception("Model is not defined.")
model = _model_builders[args.model_type](pretrained=True)
model.eval()
print()
# %%
# set normalization
# Standard ImageNet channel statistics; `Normalize` (from utils) applies
# them inside the model so raw-pixel tensors can be fed directly.
mean = [0.485, 0.456, 0.406]
std = [0.229, 0.224, 0.225]
norm_layer = Normalize(mean, std)
# Softmax over the class dimension so the wrapped model emits probabilities.
sm = nn.Softmax(dim=-1)
# Rebind `model` to the full pipeline: normalize -> backbone -> softmax.
model = nn.Sequential(norm_layer, model, sm).to(device)
# %%
def test(model, device, data, epsilon, topk):
# Send the data and label to the device
data = pre_processing(data, device)
data = data.to(device)
# Forward pass the data through the model
output = model(data)
init_pred = output.max(1, keepdim=True)[1] # get the index of the max log-probability
top_ids = selected_ids # only for predefined ids
# initialize the step_grad towards all target false classes
step_grad = 0
# num_class = 1000 # number of total classes
for l in top_ids:
targeted = torch.tensor([l]).to(device)
if targeted.item() == init_pred.item():
if l < 999:
targeted = torch.tensor([l+1]).to(device) # replace it with l + 1
else:
targeted = torch.tensor([l-1]).to(device) # replace it with l + 1
# continue # we don't want to attack to the predicted class.
delta, perturbed_image = pgd_step(data, epsilon, model, init_pred, targeted, max_iter)
step_grad += delta
adv_ex = step_grad.squeeze().detach().cpu().numpy() # / topk
img = data#.squeeze().detach().cpu().numpy()
# perturbed_image = perturbed_image.squeeze().detach().cpu().numpy()
example = (init_pred.item(), img, adv_ex)
# Return prediction, original image, and heatmap
return example
# %%
# Run the attribution over every example image, checkpointing the
# accumulated results to disk every `factor` images.
examples = []
factor = 100
# Fix: this output directory was never created anywhere in the script, so
# the first checkpoint's open(f_name, 'wb') raised FileNotFoundError.
out_dir = 'results/resnet_1step/'
os.makedirs(out_dir, exist_ok=True)
for idx, img_name in enumerate(img_list):
    img = cv2.imread('examples/' + img_name)
    img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
    # All backbones handled here take 224x224 input.
    img = cv2.resize(img, (224, 224))
    img = img.astype(np.float32)
    example = test(model, device, img, epsilon, topk)
    examples.append(example)
    if (idx + 1) % factor == 0:
        f_name = out_dir + 'resnet_1subclass_' + str((idx + 1) // factor) + "_.txt"
        with open(f_name, 'wb') as file:
            pickle.dump(examples, file)
        examples = []
    print("{} has been processed.".format(img_name))
# Fix: images after the last full batch of `factor` were silently dropped;
# flush any remainder into one final checkpoint file.
if examples:
    f_name = out_dir + 'resnet_1subclass_' + str(len(img_list) // factor + 1) + "_.txt"
    with open(f_name, 'wb') as file:
        pickle.dump(examples, file)
# with open('results/res1000.txt', 'wb') as file:
# pickle.dump(examples, file)
# #%%
# with open('results/res.txt', 'wb') as file:
# pickle.dump(examples, file)
# #%%
# with open('results/res.txt', 'rb') as file:
# examples = pickle.load(file)
# example = examples[0]
# klen = 11
# ksig = 5
# kern = gkern(klen, ksig)
# # Function that blurs input image
# blur = lambda x: nn.functional.conv2d(x, kern, padding=klen//2)
# deletion = CausalMetric(model, 'del', 224, substrate_fn=torch.zeros_like)
# label, img, sal = example
# img = img.cpu()
# h1 = deletion.single_run(img, sal, verbose=1)
# # %%
# insertion = CausalMetric(model, 'ins', 224, substrate_fn=torch.zeros_like)
# h2 = insertion.single_run(img, sal, verbose=1)
# # %%