Skip to content

Commit

Permalink
ok
Browse files Browse the repository at this point in the history
  • Loading branch information
johndpope committed Aug 11, 2024
1 parent b819e8e commit 21f043b
Show file tree
Hide file tree
Showing 5 changed files with 8 additions and 52 deletions.
Binary file modified __pycache__/helper.cpython-311.pyc
Binary file not shown.
Binary file modified __pycache__/stylegan.cpython-311.pyc
Binary file not shown.
3 changes: 2 additions & 1 deletion helper.py
Original file line number Diff line number Diff line change
Expand Up @@ -124,8 +124,9 @@ def log_grad_flow(named_parameters, global_step):
# Normalize gradients
max_grad = max(grads)
if max_grad == 0:
print("☠☠☠ Warning: All gradients are zero. ☠☠☠")
print("👿👿👿 Warning: All gradients are zero. 👿👿👿")
normalized_grads = grads # Use unnormalized grads if max is zero
raise ValueError(f"👿👿👿 Warning: All gradients are zero. 👿👿👿")
else:
normalized_grads = [g / max_grad for g in grads]

Expand Down
45 changes: 0 additions & 45 deletions stylegan.py
Original file line number Diff line number Diff line change
Expand Up @@ -56,51 +56,6 @@ def __init__(self, in_dim, out_dim):
def forward(self, input):
return self.linear(input)

class ConvBlock(nn.Module):
    """Two-stage convolution block: EqualConv2d + LeakyReLU, twice.

    The second stage optionally inserts an AvgPool2d(2) between the
    convolution and the activation when ``downsample`` is True.  The
    second convolution may use its own kernel size / padding via
    ``kernel_size2`` / ``padding2``; when omitted they default to the
    first stage's values.  ``fused`` is accepted for interface
    compatibility but is not used by this implementation.
    """

    def __init__(
        self,
        in_channel,
        out_channel,
        kernel_size,
        padding,
        kernel_size2=None,
        padding2=None,
        downsample=False,
        fused=False,
    ):
        super().__init__()

        # Second-stage kernel/padding fall back to the first stage's values.
        second_kernel = kernel_size if kernel_size2 is None else kernel_size2
        second_pad = padding if padding2 is None else padding2

        self.conv1 = nn.Sequential(
            EqualConv2d(in_channel, out_channel, kernel_size, padding=padding),
            nn.LeakyReLU(0.2),
        )

        # Build the second stage as a list so the optional pooling layer
        # slots in between the conv and the activation.
        stage2 = [EqualConv2d(out_channel, out_channel, second_kernel, padding=second_pad)]
        if downsample:
            stage2.append(nn.AvgPool2d(2))
        stage2.append(nn.LeakyReLU(0.2))
        self.conv2 = nn.Sequential(*stage2)

    def forward(self, input):
        """Apply both convolution stages in sequence."""
        return self.conv2(self.conv1(input))



Expand Down
12 changes: 6 additions & 6 deletions train.py
Original file line number Diff line number Diff line change
def get_layer_wise_learning_rates(model):
    """Build per-submodule optimizer parameter groups with tailored LRs.

    Returns a list of six dicts (one per model component) in the format
    expected by torch.optim optimizers: ``{'params': ..., 'lr': ...}``.
    The mapping network gets the smallest rate; the latent token
    encoder/decoder the largest.

    Note: the source contained each append twice with conflicting rates —
    a leftover of a merged diff.  Only the updated (1e-5-scale) set is
    kept, so each submodule appears in exactly one group.
    """
    params = []
    params.append({'params': model.dense_feature_encoder.parameters(), 'lr': 1e-5})
    params.append({'params': model.latent_token_encoder.parameters(), 'lr': 5e-5})
    params.append({'params': model.latent_token_decoder.parameters(), 'lr': 5e-5})
    params.append({'params': model.implicit_motion_alignment.parameters(), 'lr': 2e-5})
    params.append({'params': model.frame_decoder.parameters(), 'lr': 1e-5})
    params.append({'params': model.mapping_network.parameters(), 'lr': 1e-6})
    return params


Expand Down

0 comments on commit 21f043b

Please sign in to comment.