# This is an example configuration file that contains most useful parameter settings.
model:
  name: resnet2b  # This model is defined in model_defs.py. Add your own model definitions there.
  path: models/cifar10_resnet/resnet2b.pth  # Path to PyTorch checkpoint.
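  # A checkpoint at this path would typically be produced with something like
  # torch.save(model.state_dict(), "models/cifar10_resnet/resnet2b.pth") and
  # loaded back via load_state_dict() (an illustrative sketch, not necessarily
  # this repository's exact loading code).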
data:
  dataset: CIFAR  # Dataset name. This is the standard CIFAR-10 test set defined in the "load_verification_dataset()" function in utils.py.
  mean: [0.4914, 0.4822, 0.4465]  # Per-channel mean for input normalization.
  std: [0.2471, 0.2435, 0.2616]  # Per-channel standard deviation for input normalization.
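  # Inputs are normalized per channel as x_norm = (x - mean) / std; the values
  # above are the commonly used CIFAR-10 channel statistics.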
  start: 0  # Index of the first example to verify.
  end: 100  # Index one past the last example to verify (exclusive), so examples 0-99 (100 in total) are verified in this test.
specification:
  norm: .inf  # Linf norm (can also be 2 or 1).
  epsilon: 0.00784313725490196  # epsilon = 2/255.
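  # Worked value: 2/255 = 0.00784313725490196. By common convention (an
  # assumption here; check the data loading code), epsilon is specified in the
  # original [0, 1] pixel scale, before the mean/std normalization above.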
attack:  # Currently, the attack is implemented only for the Linf norm.
  pgd_steps: 100  # Number of PGD steps; increase for a stronger attack. A PGD attack runs before verification to filter out non-robust examples.
  pgd_restarts: 30  # Number of restarts; increase for a stronger attack.
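  # With these settings, the attack budget is pgd_steps * pgd_restarts =
  # 100 * 30 = 3000 gradient steps per example before verification begins.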
solver:
  batch_size: 2048  # Number of subdomains to compute in parallel in the bound solver. Decrease this if you run out of memory.
  alpha-crown:
    iteration: 100  # Number of iterations for alpha-CROWN optimization. Alpha-CROWN is used to compute all intermediate-layer bounds before branch and bound starts.
    lr_alpha: 0.1  # Learning rate for alpha in alpha-CROWN. The default (0.1) is typically ok.
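    # Background note: in alpha-CROWN, the alpha variables parameterize the
    # slopes of the linear ReLU relaxation and are optimized by gradient ascent
    # to tighten the bounds (see the alpha-CROWN paper for details).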
  beta-crown:
    lr_alpha: 0.01  # Learning rate for optimizing the alpha parameters; the default (0.01) is typically ok, but you can tune it for a better lower bound.
    lr_beta: 0.05  # Learning rate for optimizing the beta parameters; the default (0.05) is typically ok, but you can tune it for a better lower bound.
    iteration: 20  # Number of iterations for beta-CROWN optimization. 20 is often sufficient; 50 or 100 can also be used.
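    # Rough cost note: each branch-and-bound round runs up to iteration = 20
    # beta optimization steps over up to batch_size = 2048 subdomains in parallel.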
bab:
  timeout: 120  # Timeout (in seconds) for branch and bound on each example. Increase it to verify more (harder) examples.
  branching:  # Parameters for branching heuristics.
    reduceop: min  # Reduction function for the branching heuristic scores (min or max). Using max can be better on some models.
    method: kfsb  # babsr is fast but less accurate; fsb is slow but most accurate; kfsb is usually a good balance.
    candidates: 3  # Number of candidates to consider in fsb and kfsb. More candidates lead to slower but better branching; 3 is typically good enough.
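# Usage sketch (assuming the repository's standard command-line entry point;
# the script name and the path to this file may differ in your checkout):
#   python abcrown.py --config exp_configs/cifar_resnet_2b.yaml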