Showing 18 changed files with 494 additions and 0 deletions.
@@ -0,0 +1,4 @@
.vscode/
__pycache__/
data/
sample.png
@@ -0,0 +1,11 @@
FROM ubuntu:artful
MAINTAINER SIYUAN-ZHUANG USTC-1411
ENV LC_ALL=C.UTF-8

RUN apt-get update
RUN apt-get install --yes python3.6 python3.6-dev python3-pip python3-openssl
COPY ./webapp /webapp
WORKDIR /webapp
RUN pip3 install -r requirements.txt
ENTRYPOINT ["python3"]
CMD ["webapp.py"]
@@ -0,0 +1,7 @@
version: '3'
services:
  web:
    build: .
    ports:
      - "12004:5000"
    restart: always
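This compose file presumably drives the web app built from the Dockerfile above; host port 12004 is mapped to container port 5000. A minimal sketch of bringing the service up, assuming the compose file sits in the build context:

```bash
# build the image and start the service in the background;
# "restart: always" keeps it running across restarts
docker-compose up -d --build

# the service should then answer on the host at port 12004
curl http://localhost:12004/
```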
@@ -0,0 +1,11 @@
FROM ubuntu:artful
MAINTAINER SIYUAN-ZHUANG USTC-1411
ENV LC_ALL=C.UTF-8

RUN apt-get update
RUN apt-get install --yes python3.6 python3.6-dev python3-pip python3-openssl
COPY . /solution
WORKDIR /solution
RUN pip3 install -r requirements.txt
ENTRYPOINT ["python3"]
CMD ["solution.py"]
@@ -0,0 +1,10 @@
This directory contains the reference solution `solution.py` and the other files it depends on.
It also includes a `Dockerfile`, so the solution can be checked as follows:

```bash
docker build -t solution .
```

```bash
docker run -it solution
```
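If the solution passes the checks in `solution.py`, the container run should end with the success message printed by `check()`; a minimal end-to-end sketch, assuming this directory is the Docker build context:

```bash
docker build -t solution .
docker run -it solution
# expected last line on success: Cong! You win!!!
```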
@@ -0,0 +1,132 @@
"""
TOP SECRET
"""

from __future__ import print_function
import argparse
import os
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim

from torchvision import datasets, transforms

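# Net below is a small CNN classifier for the (resized) 30x30 MNIST images:
# two conv + batch-norm + max-pool blocks followed by two fully connected
# layers with dropout. With 30x30 inputs the flattened feature map is
# 20 channels * 4 * 4 = 320, matching nn.Linear(320, 50).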
class Net(nn.Module):
    def __init__(self):
        super(Net, self).__init__()
        self.conv1 = nn.Conv2d(1, 10, kernel_size=7)
        self.bn1 = nn.BatchNorm2d(10)
        self.conv2 = nn.Conv2d(10, 20, kernel_size=5)
        self.bn2 = nn.BatchNorm2d(20)
        self.conv2_drop = nn.Dropout2d()
        self.fc1 = nn.Linear(320, 50)
        self.bn3 = nn.BatchNorm1d(50)
        self.fc2 = nn.Linear(50, 10)

    def forward(self, x):
        x = F.relu(F.max_pool2d(self.bn1(self.conv1(x)), 2))
        x = F.relu(F.max_pool2d(self.bn2(self.conv2(x)), 2))
        x = self.conv2_drop(x)
        x = x.view(-1, 320)
        x = F.relu(self.bn3(self.fc1(x)))
        x = F.dropout(x, training=self.training)
        x = self.fc2(x)
        return F.log_softmax(x, dim=1)

def train(args, model, device, train_loader, optimizer, epoch):
    model.train()
    for batch_idx, (data, target) in enumerate(train_loader):
        data, target = data.to(device), target.to(device)
        optimizer.zero_grad()
        output = model(data)
        loss = F.nll_loss(output, target)
        loss.backward()
        optimizer.step()
        if batch_idx % args.log_interval == 0:
            print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
                epoch, batch_idx * len(data), len(train_loader.dataset),
                100. * batch_idx / len(train_loader), loss.item()))

def test(args, model, device, test_loader):
    model.eval()
    test_loss = 0
    correct = 0
    with torch.no_grad():
        for data, target in test_loader:
            data, target = data.to(device), target.to(device)
            output = model(data)
            test_loss += F.nll_loss(output, target, reduction='sum').item()  # sum up batch loss
            pred = output.max(1, keepdim=True)[1]  # get the index of the max log-probability
            correct += pred.eq(target.view_as(pred)).sum().item()

    test_loss /= len(test_loader.dataset)
    print('\nTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)\n'.format(
        test_loss, correct, len(test_loader.dataset),
        100. * correct / len(test_loader.dataset)))

def main():
    # Training settings
    parser = argparse.ArgumentParser(description='PyTorch MNIST Example')
    parser.add_argument('--batch-size', type=int, default=64, metavar='N',
                        help='input batch size for training (default: 64)')
    parser.add_argument('--test-batch-size', type=int, default=1000, metavar='N',
                        help='input batch size for testing (default: 1000)')
    parser.add_argument('--epochs', type=int, default=2, metavar='N',
                        help='number of epochs to train (default: 2)')
    parser.add_argument('--lr', type=float, default=0.02, metavar='LR',
                        help='learning rate (default: 0.02)')
    parser.add_argument('--no-cuda', action='store_true', default=False,
                        help='disables CUDA training')
    parser.add_argument('--seed', type=int, default=1, metavar='S',
                        help='random seed (default: 1)')
    parser.add_argument('--log-interval', type=int, default=10, metavar='N',
                        help='how many batches to wait before logging training status')
    args = parser.parse_args()
    use_cuda = not args.no_cuda and torch.cuda.is_available()

    torch.manual_seed(args.seed)

    device = torch.device("cuda" if use_cuda else "cpu")

    kwargs = {'num_workers': 1, 'pin_memory': True} if use_cuda else {}
    train_loader = torch.utils.data.DataLoader(
        datasets.MNIST('data', train=True, download=True,
                       transform=transforms.Compose([
                           transforms.Resize((30, 30)),
                           transforms.ToTensor(),
                           transforms.Normalize((0.1307,), (0.3081,)),
                       ])),
        batch_size=args.batch_size, shuffle=True, **kwargs)
    test_loader = torch.utils.data.DataLoader(
        datasets.MNIST('data', train=False, transform=transforms.Compose([
            transforms.Resize((30, 30)),
            transforms.ToTensor(),
            transforms.Normalize((0.1307,), (0.3081,)),
        ])),
        batch_size=args.test_batch_size, shuffle=True, **kwargs)

    model = Net().to(device)
    optimizer = optim.Adam(model.parameters(), lr=args.lr, weight_decay=1e-5)

    for epoch in range(1, args.epochs + 1):
        train(args, model, device, train_loader, optimizer, epoch)
        test(args, model, device, test_loader)

    with open('model.pth', 'wb') as f:
        torch.save(model.to(torch.device("cpu")).state_dict(), f)


if __name__ == '__main__':
    assert not os.path.exists('model.pth'), "There already exists a model file. DO NOT OVERRIDE IT UNLESS YOU KNOW WHAT YOU ARE DOING."
    main()
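This training script is imported as `main` by `solution.py`, so it is presumably saved as `main.py`. A minimal sketch of running it directly, assuming the packages from `requirements.txt` are installed and MNIST can be downloaded into `data/`:

```bash
# trains for the default 2 epochs with Adam (lr 0.02, weight decay 1e-5)
# and writes model.pth into the current directory
python3 main.py --epochs 2 --lr 0.02 --batch-size 64
```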
Binary file not shown.
@@ -0,0 +1,23 @@
# This file is not part of the answer; it only shows how target.png was generated.
from __future__ import print_function
import torch
from torchvision import datasets, transforms
import numpy as np
from PIL import Image

test_loader = torch.utils.data.DataLoader(
    datasets.MNIST('data', train=False, transform=transforms.Compose([
        transforms.Resize((30, 30)),
        transforms.ToTensor(),
        # transforms.Normalize((0.1307,), (0.3081,)),
    ])), batch_size=1, shuffle=False)

for i, (data, target) in enumerate(test_loader):
    if i != 534:
        continue
    else:
        data = (np.array(data).reshape(30, 30) * 255).astype(np.uint8)
        img = Image.fromarray(data, mode='L')
        img = img.resize((600, 600), resample=Image.BILINEAR)
        img.save('target.png')
        break
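As a sanity check, the generated image can be fed back through the trained model. A minimal sketch, assuming this script sits next to `solution.py`, `main.py`, and the trained `model.pth`, and reusing the helpers defined in `solution.py`:

```python
# Load target.png, downsample it back to 30x30 the same way solution.py does,
# normalize with the training statistics, and print the model's prediction.
import torch
from PIL import Image

from solution import load_model, convert2tensor, preprocess_image

model = load_model()
target = preprocess_image(convert2tensor(Image.open('target.png')))
target = (target - 0.1307) / 0.3081  # same normalization as in training
print(int(torch.argmax(model(target))))  # should print 6, the label solve() treats as the true class
```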
@@ -0,0 +1,2 @@
http://download.pytorch.org/whl/cpu/torch-0.4.1-cp36-cp36m-linux_x86_64.whl
torchvision==0.2.1
@@ -0,0 +1,85 @@
"""
TOP SECRET
"""

from __future__ import print_function, division
import torch
import numpy as np
from PIL import Image
import torch.nn.functional as F

from main import Net

def solve():
    model = load_model()
    image = Image.open('target.png')
    image = convert2tensor(image)
    inputs = preprocess_image(image)
    label = torch.tensor([6], dtype=torch.long)

    # A solution for part 1
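    # (This is the fast gradient sign method, FGSM: take the sign of the
    # gradient of the loss with respect to the input, step the input by
    # epsilon in that direction to increase the loss, then clamp back to
    # the valid pixel range [0, 1].)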
    inputs.requires_grad = True
    x = (inputs - 0.1307) / 0.3081  # scale mean & std
    output = model(x)
    loss = F.nll_loss(output, label)
    loss.backward()
    x_grad = torch.sign(inputs.grad.data)
    epsilon = 0.18
    inputs = torch.clamp(inputs + epsilon * x_grad, 0, 1)

    inputs = inputs.reshape(30, 30)

    for i in range(30):
        for j in range(30):
            image[int((i + 0.5) * 20), int((j + 0.5) * 20)] = inputs[i, j]

    return image

def convert2tensor(image):
    return torch.from_numpy(np.array(image).astype('f') / 255.)

def convert2image(arr):
    return Image.fromarray((arr.detach().numpy() * 255).astype(np.uint8), mode='L')

def preprocess_image(arr):
    image = convert2image(arr)
    image = image.resize((30, 30), resample=Image.NEAREST)
    return convert2tensor(image).reshape(1, 1, 30, 30)

def load_model():
    model = Net()
    model.load_state_dict(torch.load('model.pth'))
    model.eval()
    return model

def check():
    model = load_model()

    target = convert2tensor(Image.open('target.png'))
    sample = convert2tensor(Image.open('sample.png'))
    assert torch.sum(target != sample) < 600 * 600 * 0.002, 'Input hacking is detected! Game over.'
    assert F.l1_loss(target, sample) < 0.001, 'Input hacking is detected! Game over.'
    assert (torch.max(torch.abs(target - sample))) < 0.2, 'Input hacking is detected! Game over.'
    target = preprocess_image(target)
    sample = preprocess_image(sample)
    # A solution for part 2
    target = (target - 0.1307) / 0.3081
    sample = (sample - 0.1307) / 0.3081

    truth = torch.argmax(model(target))
    hack = torch.argmax(model(sample))
    assert int(truth) != int(hack), 'Fail to hack. Game over.'
    print('Cong! You win!!!')


if __name__ == '__main__':
    output = solve()
    convert2image(output).save('sample.png')
    # You have to pass this check!
    check()
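Outside of Docker, the solution can presumably also be run in place; a minimal sketch, assuming `model.pth` and `target.png` sit in the same directory:

```bash
python3 solution.py   # writes sample.png, then runs check()
# on success the script prints: Cong! You win!!!
```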
@@ -0,0 +1 @@
This directory contains the code for the web interface.