Kiryu Sento
毕设-离散差分进化算法

毕设-离散差分进化算法

1
2
3
4
5
6
7
8
9
10
11
12
import math
import numpy as np
import logging
import torch
import gc
from multiprocessing import Pool
from ml_liw_model.train import criterion
from PIL import Image
import matplotlib.pyplot as plt
import torchvision.transforms as transforms
# 多标签离散进化算法攻击 SparseEvo
# 实现借助了chatgpt

原文地址:[2202.00091] Query Efficient Decision Based Sparse Attacks Against Black-Box Deep Learning Models (arxiv.org)

多标签离散进化攻击调用类定义

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43

class MLSparseEvo(object):
    """Multi-label discrete-evolution (SparseEvo) attack driver.

    Wraps a multi-label classifier and runs the SparseEvo search on each
    input image to produce sparse adversarial examples.
    """

    def __init__(self, model):
        # Model under attack; moved to GPU lazily in generate_np.
        self.model = model

    def generate_np(self, x_list, **kwargs):
        """Run the SparseEvo attack on every image in ``x_list``.

        Keyword args (all read from **kwargs):
            clip_max / clip_min: pixel clipping bounds (stored on self).
            y_target: per-image target label vectors in {-1, +1}.
            eps: perturbation magnitude applied to the evolved mask.
            pop_size / generation: population size and iteration budget
                forwarded to SparseEvo.
            batch_size: instance count (used only in the final log line).
            starting_points: initial attack targets
                # presumably the target-class images SparseEvo blends
                # toward — TODO confirm (read but unused below)
            alpha: points perturbed per step  # NOTE(review): read but unused

        Returns:
            (x_adv, count): list of successful adversarial examples plus a
            query counter.  # NOTE(review): count is never incremented here.
        """
        if torch.cuda.is_available():
            self.model = self.model.cuda()
        logging.info('prepare attack')
        self.clip_max = kwargs['clip_max']
        self.clip_min = kwargs['clip_min']
        y_target = kwargs['y_target']
        eps = kwargs['eps']
        pop_size = kwargs['pop_size']
        generation = kwargs['generation']
        batch_size = kwargs['batch_size']
        starting_points = kwargs['starting_points']  # initial attack targets
        alpha = kwargs['alpha']  # number of points perturbed each step
        x_adv = []
        success = 0
        nchannels, img_rows, img_cols, = x_list.shape[1:4]
        count = 0
        for i in range(len(x_list)):
            # Indices of labels that must be positive after the attack.
            target_label = np.argwhere(y_target[i] > 0)
            # NOTE(review): SparseEvo (defined below) declares a
            # `target_image` parameter between `image` and `target_label`
            # that is not supplied here — this call does not match its
            # signature; confirm which argument is missing.
            r = SparseEvo(pop_size, generation, img_rows * img_cols * nchannels, self.model, x_list[i],
                          target_label, eps, batch_size, gradient=None)
            # Apply the evolved sparse mask `r`, scaled by eps, clipped to [0, 1].
            x_adv_tem = np.clip(x_list[i] + np.reshape(r, x_list.shape[1:]) * eps, 0, 1)
            # count += count_tem
            with torch.no_grad():
                if torch.cuda.is_available(): adv_pred = self.model(
                    torch.tensor(np.expand_dims(x_adv_tem, axis=0), dtype=torch.float32).cuda()).cpu()
                else:
                    adv_pred = self.model(torch.tensor(np.expand_dims(x_adv_tem, axis=0), dtype=torch.float32))
            adv_pred = np.asarray(adv_pred)
            pred = adv_pred.copy()
            # Threshold sigmoid-style scores at 0.5 into {+1, -1} labels.
            pred[pred >= (0.5 + 0)] = 1
            pred[pred < (0.5 + 0)] = -1
            # Success only if every label matches the target vector.
            adv_pred_match_target = np.all((pred == y_target[i]), axis=1)
            if adv_pred_match_target:
                success = success + 1
                x_adv.append(x_adv_tem)
        logging.info('Successfully generated adversarial examples on ' + str(success) + ' of ' + str(batch_size) + ' instances')
        return x_adv, count

评估打分类

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
class Problem:
    """Fitness evaluator for the evolutionary attack.

    Scores candidate perturbation masks by how far the model's outputs are
    from crossing the 0.5 decision threshold in the desired direction.
    """

    def __init__(self, model, image, target_label, eps, batch_size):
        self.model = model              # multi-label classifier
        self.image = image              # clean source image (numpy)
        self.target_label = target_label  # indices that should flip positive
        self.eps = eps                  # perturbation magnitude
        self.batch_size = batch_size

    def evaluate(self, x):
        """Score a batch of candidate masks `x`.

        Returns:
            (fitness, fit): per-candidate scalar fitness of shape
            (len(x), 1) — lower is better — and the per-label margin
            matrix it was summed from.
        """
        n = len(x)
        # Broadcast the clean image across the batch and add scaled masks.
        batch = np.tile(self.image, (n, 1, 1, 1))
        perturbed = np.clip(batch + np.reshape(x, (n,) + self.image.shape) * self.eps, 0., 1.)
        inputs = torch.tensor(perturbed, dtype=torch.float32)
        with torch.no_grad():
            if torch.cuda.is_available():
                predict = self.model(inputs.cuda()).cpu()
            else:
                predict = self.model(inputs)
        # Margin of each score from the 0.5 decision boundary.
        fit = np.copy(predict) - 0.5
        # Target labels must move the other way, so negate their margin.
        fit[:, self.target_label] = -fit[:, self.target_label]
        # Satisfied labels (negative margin) contribute nothing.
        fit[fit < 0] = 0
        fitness = np.sum(fit, axis=1)[:, np.newaxis]
        return fitness, fit

对论文方法的实现

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
def is_adversarial(model, input, target_classes):
    """Return a 0-dim bool tensor: True iff the model's thresholded
    predictions on `input` exactly match `target_classes`.
    """
    raw_scores = model(input)
    # Hard labels: 1 where score >= 0.5, else 0 (`+ 0` casts bool -> int).
    hard_labels = (raw_scores >= 0.5) + 0
    assert hard_labels.shape == target_classes.shape
    return torch.all(hard_labels == target_classes)

def random_alter(tensor, n):
    """Invert `n` randomly chosen entries of a boolean tensor in place.

    Returns the same tensor object for chaining.
    """
    flat = tensor.view(-1)
    # Pick n distinct flat indices uniformly at random.
    chosen = torch.randperm(flat.numel())[:n]
    flat[chosen] = ~flat[chosen]
    return tensor


def init_pop(image, target_image, target_classes, classfier, pop_size, init_rate):
    """Initialise the SparseEvo population.

    Builds `pop_size` binary masks, each a randomly altered copy of the
    pixel-difference mask between `image` and `target_image`, retrying
    until each candidate's blended image is adversarial.

    Args:
        image: source image tensor.
            # assumes 4-D (N, C, H, W): image.shape[2]*image.shape[3] is
            # used as W*H below — TODO confirm
        target_image: image of the target class blended in where mask is set.
        target_classes: desired label tensor checked by is_adversarial.
        classfier: model used to test adversarialness.
        pop_size: number of candidate masks to generate.
        init_rate: fraction of pixels flipped in each candidate mask.

    Returns:
        (V, distances): list of boolean masks and their L2 distances.
    """
    distances = torch.zeros((pop_size, ))  # per-candidate distances
    distance = torch.tensor(torch.inf)  # acceptance threshold (starts at +inf)
    V = []  # population starts empty
    n = math.floor(init_rate * image.shape[2] * image.shape[3])  # n = floor(alpha*W*H)
    print(n)
    v = (image != target_image)  # seed mask: pixels that already differ

    # print(v)

    for i in range(pop_size):
        print(f"generate iter{i}")
        while True:  # retry until an acceptable candidate is found
            v_o = v.detach().clone()
            v_o = random_alter(v_o, n)
            x_pert = image
            x_pert = np.where(v_o, target_image, x_pert)  # x_pert = (1-v)x + vx'
            x_pert = torch.from_numpy(x_pert)
            # +inf when not adversarial, else L2 distance to the original.
            distance_pert = update_distance(image, x_pert, classfier, target_classes)
            # print(distance_pert)
            if distance_pert.item() < distance.item():
                # NOTE(review): `distance` is never tightened, so any
                # adversarial candidate (finite distance) is accepted and the
                # torch.where below always selects distance_pert — confirm
                # whether updating `distance` across candidates was intended.
                distances[i] = torch.where(distance_pert < distance, distance_pert, distance)
                V.append(v_o)
                break
    return V, distances

def update_distance(original, x_pert, model, target_classes):
    """L2 distance from `original` to `x_pert` if `x_pert` is adversarial
    for `target_classes`, otherwise +inf (non-adversarial candidates are
    treated as infinitely far away).
    """
    l2 = torch.norm(original - x_pert)
    adv = is_adversarial(model, x_pert, target_classes)
    return torch.where(adv, l2, torch.inf)

def rand_select_jq(V, kb):
    """Randomly pick two distinct members v(j), v(q) of V, excluding any
    entries matching `kb`.
    """
    # Indices of V whose values are not present in kb.
    candidates = np.where(~np.isin(V, kb))[0]
    # Draw j first, then q from the remaining candidates (guarantees j != q).
    j = np.random.choice(candidates)
    q = np.random.choice(candidates[candidates != j])
    return V[j], V[q]

def binary_differential_recombination(v_j, v_q, v_kb):
    """Uniform binary crossover of v_j / v_q, recombined with the best mask.

    Each position takes its bit from v_j or v_q with probability 0.5
    (uniform crossover); positions where the crossover result is 0 fall
    back to the corresponding bit of the best individual v_kb.

    Args:
        v_j, v_q: two parent binary arrays, same shape as v_kb.
        v_kb: current best binary array.

    Returns:
        New binary array of v_kb's shape.
    """
    new_candidate = np.zeros_like(v_kb)
    # BUG FIX: np.random.rand takes dimensions as separate positional
    # arguments, not a shape tuple — np.random.rand(v_kb.shape) raised
    # TypeError. Unpack the shape instead.
    random_indices = np.random.rand(*v_kb.shape)
    # Uniform crossover: take each bit from v_j or v_q at random.
    new_candidate[random_indices > 0.5] = v_j[random_indices > 0.5]
    new_candidate[random_indices <= 0.5] = v_q[random_indices <= 0.5]
    # Keep the best individual's bit wherever the crossover produced 0.
    recombined = np.where(new_candidate == 0, v_kb, new_candidate)
    return recombined

def mutation(v_o, mu):
    """Clear a fraction `mu` of the 1-bits of `v_o` in place.

    Drives the mask toward sparsity: int(mu * #ones) randomly chosen
    set bits are zeroed. Returns the same array for chaining.
    """
    one_positions = np.where(v_o == 1)[0]
    n_flip = int(mu * np.sum(v_o == 1))
    # Sample distinct set-bit positions to clear.
    selected = np.random.choice(one_positions, n_flip, replace=False)
    v_o[selected] = 0
    return v_o

def SparseEvo(pop_size, generation, length, model, image, target_image, target_label, eps, batch_size, gradient):
    """Main SparseEvo evolutionary loop (arXiv:2202.00091).

    Evolves a population of binary masks that blend `image` toward
    `target_image`, minimising L2 distance while keeping the blend
    adversarial for `target_label`.

    # NOTE(review): no value is returned, yet the caller in
    # MLSparseEvo.generate_np assigns the result — confirm whether a final
    # `return` (of the best mask or x_pert) is missing.
    """
    generation_save = np.zeros((10000,))  # NOTE(review): never written; dead?
    Vector, distances = init_pop(image, target_image, target_label, model, pop_size, init_rate=0.5)
    k_worst = np.argmax(distances)  # index of the worst (largest-distance) mask
    k_best = np.argmin(distances)   # index of the best (smallest-distance) mask
    problem = Problem(model, image, target_label, eps, batch_size)
    # max_eval = pop_size * generation
    # NOTE(review): the caller passes `generation` as an int, but
    # `.shape[0]` is used here — confirm whether range(generation) was meant.
    for i in range(generation.shape[0]):
        # Recombine two random members with the current best, then mutate.
        v_j, v_q = rand_select_jq(Vector, k_best)
        v_r = binary_differential_recombination(v_j, v_q, Vector[k_best])
        v_mut = mutation(v_r, mu=0.5)
        x_pert = image
        # NOTE(review): `x_pert` aliases `image`, so this writes into the
        # original image in place — a copy is probably intended.
        x_pert[v_mut] = target_image[v_mut]  # x_pert = (1-v_m)x + vx'
        distance_pert = np.linalg.norm(image - x_pert, ord=2)
        outputs_pert = model(x_pert)
        labels_pert = (outputs_pert > 0.5) + 0  # f(x_pert)  # NOTE(review): unused
        # Replace the worst member when the candidate improves on it.
        if distance_pert < distances[k_worst]:
            distances[k_worst] = distance_pert
            Vector[k_worst] = v_mut
            k_worst = np.argmax(distances)
            k_best = np.argmin(distances)
    # Final blend built from the best surviving mask.
    x_pert = image
    x_pert[Vector[k_best]] = target_image[Vector[k_best]]  # x_pert = (1-v_m)x + vx'


用pytorch对一些方法的实现进行了改进

基本上是复现了论文的效果(当然用的数据集和模型不是一个),成功率有90%以上

不知道作者是怎么得出最佳的参数的,尤其是种群突变率这个参数。

本文作者:Kiryu Sento
本文链接:https://wandernforte.github.io/kirameki/毕设-离散差分进化算法/
版权声明:本文采用 CC BY-NC-SA 3.0 CN 协议进行许可