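"""Generate IBSF sensitive-sample fingerprints for a protected model.

Each fingerprint is produced by perturbing an original input, within an L2
ball of radius ``eps`` around it, so that the Shannon entropy of the model's
top-k softmax outputs is maximized, i.e. the model becomes maximally
uncertain on the perturbed sample.
"""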
import argparse
import os

import matplotlib.pyplot as plt
import numpy as np
import torch
import torch.multiprocessing
import torch.nn.functional as F
import torchvision.utils as vutils

from utils import DatasetInfo, load_protect_model, similarity_projection, reserve_gpu, load_original_x

torch.multiprocessing.set_sharing_strategy('file_system')
def parse_args():
    parser = argparse.ArgumentParser()
    # argparse's type=bool treats any non-empty string (including "False") as
    # True, so boolean flags use BooleanOptionalAction (Python 3.9+) instead.
    parser.add_argument('--save', action=argparse.BooleanOptionalAction, default=True,
                        help="whether to save the generated fingerprints")
    parser.add_argument("--gpu", type=str, default='2', help="which GPU to use")
    parser.add_argument("--num", type=int, default=10, help="number of fingerprints to generate")
    parser.add_argument("--eps", type=float, default=1.0, help="L2 bound of the perturbation")
    parser.add_argument("--dataset", type=str, default='cifar10', help="dataset name")
    parser.add_argument("--debug", action=argparse.BooleanOptionalAction, default=True,
                        help="whether to debug")
    parser.add_argument("--k", type=int, default=10,
                        help="top-k classes with highest confidence whose self-entropy is maximized")
    return parser.parse_args()
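
# Example invocation with the defaults above (a sketch; adjust the GPU index
# and dataset to your setup):
#   python main.py --dataset cifar10 --num 10 --eps 1.0 --k 10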
def shannon_entropy(x):
    # Clamp before the log to avoid 0 * log(0) = nan when a class
    # probability underflows to zero.
    return (-x * x.clamp_min(1e-12).log()).sum()
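
# The entropy of a distribution over k classes is at most log(k), attained by
# the uniform distribution, so maximizing it drives the top-k softmax outputs
# toward equal confidence.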
def sensitive_sample_gen(data_info, x, model, eps, similarity_constraint=True, similarity_mode=2,
                         n_iter=500, lr=1.0, gpu=True, verbose=True, k=None):
    '''
    Perturb x by gradient ascent so that the Shannon entropy of the model's
    top-k softmax outputs is maximized.

    :param data_info: DatasetInfo for the target dataset
    :param x: input batch of shape (1, C, H, W); a CUDA tensor if gpu=True
    :param model: the protected model to fingerprint
    :param eps: L2 bound of the perturbation
    :param similarity_constraint: whether to project each iterate back into the eps-ball around x
    :param similarity_mode: projection mode passed to similarity_projection
    :param n_iter: number of optimization iterations
    :param lr: Adam learning rate
    :param gpu: whether to keep the iterate on the GPU
    :param verbose: print the entropy loss every 100 iterations
    :param k: number of top-confidence classes to include (defaults to all classes)
    :return: the perturbed sample (numpy array) with the highest entropy found
    '''
    x.requires_grad = True
    x_origin = x.detach().cpu().numpy()
    model.eval()
    optimizer = torch.optim.Adam(params=[x], lr=lr)
    k = data_info.num_classes if k is None else k  # default k is the number of classes
    best_x = None
    loss_min = float('inf')
    for i in range(n_iter):
        logits = torch.squeeze(model(x))
        softmax_out = F.softmax(logits, dim=-1)
        # Indices of the k classes with the highest confidence.
        _, indices = torch.topk(logits, k=k, dim=-1, largest=True)
        loss_shannon_entropy = shannon_entropy(softmax_out[indices])
        loss = -loss_shannon_entropy  # maximizing entropy = minimizing its negative
        optimizer.zero_grad()  # clear gradients accumulated in the previous step
        loss.backward()
        optimizer.step()
        x_new = x.detach().cpu().numpy()
        if similarity_constraint:
            # Project the iterate back into the eps-ball around the original x.
            x_new = similarity_projection(data_info, x_origin, x_new, eps, similarity_mode=similarity_mode)
        if verbose and i % 100 == 0:
            print('Iteration %d, shannon entropy loss: %f' % (i, loss_shannon_entropy))
        x.data = torch.from_numpy(x_new).cuda() if gpu else torch.from_numpy(x_new)
        # Keep the iterate with the lowest loss, i.e. the highest entropy.
        if loss < loss_min:
            loss_min = loss
            best_x = x.detach().cpu().numpy()
    return best_x
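
# A minimal usage sketch (shapes assumed for CIFAR-10; not part of the
# entry point below):
#   x = torch.from_numpy(x_org[0:1]).float().cuda()   # (1, 3, 32, 32)
#   x_ss = sensitive_sample_gen(dataset_info, x, protect_model, eps=1.0, k=10)
#   # x_ss is a (1, 3, 32, 32) numpy array within the L2 eps-ball around x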
if __name__ == '__main__':
    plt.rc('font', family='Times New Roman')
    torch.backends.cudnn.benchmark = True
    np.set_printoptions(formatter={'float': '{: 0.3f}'.format})
    args = parse_args()
    reserve_gpu(args.gpu)
    np.random.seed(1999)  # fixed seed for reproducibility
    dataset_info = DatasetInfo(args.dataset)
    # load the protected model
    protect_model = load_protect_model(args.dataset)
    eps = args.eps
    # load the original inputs
    x_org = load_original_x(args.dataset)
    # generate sensitive samples one input at a time
    n_iter = 5000
    k = args.k  # top-k classes with highest confidence whose self-entropy is maximized
    assert 2 <= k <= dataset_info.num_classes, 'k should be in [2, num_classes]'
    ss_path = 'outputs/fingerprint/%s/k=%d/IBSF_%f_%d_n=%d.npy' % (args.dataset, k, eps, n_iter, args.num)
    ss_org_path = 'outputs/fingerprint/%s/k=%d/IBSF_%f_%d_org_n=%d.npy' % (args.dataset, k, eps, n_iter, args.num)
    os.makedirs('outputs/fingerprint/%s/k=%d' % (args.dataset, k), exist_ok=True)
    if os.path.exists(ss_path):
        # reuse previously generated fingerprints
        sensitive_samples = np.load(ss_path)[:args.num]
        org_image = np.load(ss_org_path)[:args.num]
    else:
        sensitive_samples = []
        org_image = []
        for batch_id, x in enumerate(x_org):
            print('-------------------Generating fingerprint %d-------------------' % (len(sensitive_samples) + 1))
            x_ss = sensitive_sample_gen(dataset_info,
                                        torch.from_numpy(x_org[batch_id:batch_id + 1]).float().cuda(),
                                        protect_model,
                                        gpu=True,
                                        similarity_constraint=True,
                                        n_iter=n_iter,
                                        similarity_mode=2,
                                        eps=eps,
                                        verbose=True,
                                        k=k)
            if x_ss is None:
                continue
            sensitive_samples.append(x_ss)
            org_image.append(x_org[batch_id:batch_id + 1])
            if len(sensitive_samples) >= args.num:  # stop once args.num fingerprints exist
                break
        sensitive_samples = np.concatenate(sensitive_samples, axis=0)
        org_image = np.concatenate(org_image, axis=0)
        # save the fingerprints and the original images together in npy format
        np.save(ss_path, sensitive_samples)
        np.save(ss_org_path, org_image)
    # save the fingerprints and the original inputs as PNG grids
    vutils.save_image(torch.from_numpy(sensitive_samples), 'outputs/fingerprint/%s/k=%d/IBSF_%f_%d_n=%d.png' %
                      (args.dataset, k, eps, n_iter, args.num), normalize=False)
    vutils.save_image(torch.from_numpy(org_image), 'outputs/fingerprint/%s/k=%d/IBSF_%f_%d_org_n=%d.png' %
                      (args.dataset, k, eps, n_iter, args.num), normalize=False)