# ExGANRecLoss.py: latent-space reconstruction loss for a trained ExGAN generator.
import torch
import torch.nn as nn
from scipy.stats import genpareto
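# Upsampling block: ConvTranspose2d followed by InstanceNorm2d and
# LeakyReLU(0.2). Note that despite the "BN" in the name, the block uses
# instance normalization, not batch normalization.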
def convTBNReLU(in_channels, out_channels, kernel_size=4, stride=2, padding=1):
    return nn.Sequential(
        nn.ConvTranspose2d(
            in_channels,
            out_channels,
            kernel_size=kernel_size,
            stride=stride,
            padding=padding,
        ),
        nn.InstanceNorm2d(out_channels),
        nn.LeakyReLU(0.2, True),
    )
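# DCGAN-style conditional generator: the latent vector is concatenated with
# a one-channel continuous code, then five transposed-convolution stages
# upsample the 1x1 input to a single-channel 64x64 map in [-1, 1] via tanh.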
class Generator(nn.Module):
    def __init__(self, in_channels, out_channels):
        super(Generator, self).__init__()
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.block1 = convTBNReLU(in_channels + 1, 512, 4, 1, 0)
        self.block2 = convTBNReLU(512, 256)
        self.block3 = convTBNReLU(256, 128)
        self.block4 = convTBNReLU(128, 64)
        self.block5 = nn.ConvTranspose2d(64, out_channels, 4, 2, 1)

    def forward(self, latent, continuous_code):
        inp = torch.cat((latent, continuous_code), 1)
        out = self.block1(inp)
        out = self.block2(out)
        out = self.block3(out)
        out = self.block4(out)
        return torch.tanh(self.block5(out))
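# Load the trained generator checkpoint and freeze it; only the latent
# codes are optimized below. genpareto_params and threshold are the
# generalized Pareto fit used elsewhere in ExGAN; they are kept here but
# unused by this script.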
latentdim = 20
G = Generator(in_channels=latentdim, out_channels=1).cuda()
genpareto_params = (1.33, 0, 0.0075761900937239765)
threshold = -0.946046018600464
rv = genpareto(*genpareto_params)
G.load_state_dict(torch.load('ExGAN/G999.pt'))
G.eval()
num = 57
# Freeze the generator so that gradients flow only into z.
G.requires_grad_(False)
real = torch.load('data/real.pt').cuda()
z = torch.zeros((num, latentdim, 1, 1)).cuda()
# Continuous conditioning code: per-sample mean over the 64x64 = 4096 pixels.
code = (real.sum((1, 2, 3)) / 4096).view((num, 1, 1, 1))
z.requires_grad = True
optimizer = torch.optim.Adam([z], lr=1e-2)
criterion = nn.MSELoss()
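# Latent-space reconstruction: treat the latent codes z as the only free
# parameters and minimize the MSE between G(z, code) and the real extreme
# samples; the loss after 2000 Adam steps serves as the reconstruction
# error for the trained generator.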
for i in range(2000):
    pred = G(z, code)
    loss = criterion(pred, real)
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()
# Report the final reconstruction loss.
print(loss.item())