tda_runner.py
import random
import argparse
import wandb
from tqdm import tqdm
from datetime import datetime
import torch
import torch.nn.functional as F
import operator
import clip
from utils import *


def get_arguments():
    """Get arguments of the test-time adaptation."""
    parser = argparse.ArgumentParser()
    parser.add_argument('--config', dest='config', required=True, help='settings of TDA on specific dataset in yaml format.')
    parser.add_argument('--wandb-log', dest='wandb', action='store_true', help='Whether you want to log to wandb. Include this flag to enable logging.')
    parser.add_argument('--datasets', dest='datasets', type=str, required=True, help="Datasets to process, separated by a slash (/). Example: I/A/V/R/S")
    parser.add_argument('--data-root', dest='data_root', type=str, default='./dataset/', help='Path to the datasets directory. Default is ./dataset/')
    parser.add_argument('--backbone', dest='backbone', type=str, choices=['RN50', 'ViT-B/16'], required=True, help='CLIP model backbone to use: RN50 or ViT-B/16.')
    args = parser.parse_args()
    return args
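

# A minimal sketch of how this script is typically invoked; the config path
# below is an assumption, while the dataset letters follow the help string
# above:
#
#   python tda_runner.py --config configs --datasets I/A --backbone ViT-B/16 --wandb-log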


def update_cache(cache, pred, features_loss, shot_capacity, include_prob_map=False):
    """Update cache with new features and loss, maintaining the maximum shot capacity."""
    with torch.no_grad():
        # Cache entries are [features, loss] and, for the negative cache, [features, loss, prob_map].
        item = features_loss if not include_prob_map else features_loss[:2] + [features_loss[2]]
        if pred in cache:
            if len(cache[pred]) < shot_capacity:
                cache[pred].append(item)
            elif features_loss[1] < cache[pred][-1][1]:
                # Replace the highest-loss entry when the new sample is more confident.
                cache[pred][-1] = item
            cache[pred] = sorted(cache[pred], key=operator.itemgetter(1))
        else:
            cache[pred] = [item]
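

# A hypothetical demo (not part of the original file) showing the eviction
# policy of update_cache: at most `shot_capacity` entries per predicted class
# are kept, sorted by ascending loss, so the lowest-entropy samples survive.
def _demo_update_cache():
    cache = {}
    feat = torch.randn(1, 512)  # stand-in for a [1, D] CLIP image feature
    for loss in [0.9, 0.3, 0.7, 0.1]:
        update_cache(cache, pred=0, features_loss=[feat, loss], shot_capacity=2)
    # Only the two lowest losses remain, in ascending order.
    assert [entry[1] for entry in cache[0]] == [0.1, 0.3]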


def compute_cache_logits(image_features, cache, alpha, beta, clip_weights, neg_mask_thresholds=None):
    """Compute logits using positive/negative cache."""
    with torch.no_grad():
        cache_keys = []
        cache_values = []
        for class_index in sorted(cache.keys()):
            for item in cache[class_index]:
                cache_keys.append(item[0])
                if neg_mask_thresholds:
                    cache_values.append(item[2])
                else:
                    cache_values.append(class_index)

        # Keys: [D, N] feature matrix; values: one-hot class labels (positive
        # cache) or binarized probability maps (negative cache).
        cache_keys = torch.cat(cache_keys, dim=0).permute(1, 0)
        if neg_mask_thresholds:
            cache_values = torch.cat(cache_values, dim=0)
            cache_values = (((cache_values > neg_mask_thresholds[0]) & (cache_values < neg_mask_thresholds[1])).type(torch.int8)).cuda().half()
        else:
            cache_values = (F.one_hot(torch.Tensor(cache_values).to(torch.int64), num_classes=clip_weights.size(1))).cuda().half()

        # Affinity-weighted aggregation: exp(beta * (affinity - 1)) acts as a
        # sharpened similarity kernel between the query and cached features.
        affinity = image_features @ cache_keys
        cache_logits = ((-1) * (beta - beta * affinity)).exp() @ cache_values
        return alpha * cache_logits
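

# A hypothetical demo (not part of the original file) of the positive-cache
# path: one cached feature per class yields one-hot values, and a query is
# scored by the exp(beta * (affinity - 1)) kernel. Features are L2-normalized,
# as CLIP features would be, and a CUDA device is required by the .cuda()
# calls above.
def _demo_compute_cache_logits():
    D, num_classes = 512, 10
    clip_weights = torch.randn(D, num_classes).cuda().half()
    cache = {
        c: [[F.normalize(torch.randn(1, D), dim=1).cuda().half(), 0.5]]
        for c in range(num_classes)
    }
    query = F.normalize(torch.randn(1, D), dim=1).cuda().half()
    logits = compute_cache_logits(query, cache, alpha=2.0, beta=5.0, clip_weights=clip_weights)
    assert logits.shape == (1, num_classes)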


def run_test_tda(pos_cfg, neg_cfg, loader, clip_model, clip_weights):
    """Run test-time adaptation with positive/negative caches; return the average accuracy."""
    with torch.no_grad():
        pos_cache, neg_cache, accuracies = {}, {}, []

        # Unpack all hyperparameters
        pos_enabled, neg_enabled = pos_cfg['enabled'], neg_cfg['enabled']
        if pos_enabled:
            pos_params = {k: pos_cfg[k] for k in ['shot_capacity', 'alpha', 'beta']}
        if neg_enabled:
            neg_params = {k: neg_cfg[k] for k in ['shot_capacity', 'alpha', 'beta', 'entropy_threshold', 'mask_threshold']}

        # Test-time adaptation
        for i, (images, target) in enumerate(tqdm(loader, desc='Processed test images: ')):
            image_features, clip_logits, loss, prob_map, pred = get_clip_logits(images, clip_model, clip_weights)
            target, prop_entropy = target.cuda(), get_entropy(loss, clip_weights)

            if pos_enabled:
                update_cache(pos_cache, pred, [image_features, loss], pos_params['shot_capacity'])

            if neg_enabled and neg_params['entropy_threshold']['lower'] < prop_entropy < neg_params['entropy_threshold']['upper']:
                update_cache(neg_cache, pred, [image_features, loss, prob_map], neg_params['shot_capacity'], True)

            final_logits = clip_logits.clone()
            if pos_enabled and pos_cache:
                final_logits += compute_cache_logits(image_features, pos_cache, pos_params['alpha'], pos_params['beta'], clip_weights)
            if neg_enabled and neg_cache:
                final_logits -= compute_cache_logits(image_features, neg_cache, neg_params['alpha'], neg_params['beta'], clip_weights, (neg_params['mask_threshold']['lower'], neg_params['mask_threshold']['upper']))

            acc = cls_acc(final_logits, target)
            accuracies.append(acc)

            # Log only when a wandb run is active (i.e. --wandb-log was passed);
            # calling wandb.log without wandb.init raises an error otherwise.
            if wandb.run is not None:
                wandb.log({"Averaged test accuracy": sum(accuracies)/len(accuracies)}, commit=True)

            if i % 1000 == 0:
                print("---- TDA's test accuracy: {:.2f}. ----\n".format(sum(accuracies)/len(accuracies)))
        print("---- TDA's test accuracy: {:.2f}. ----\n".format(sum(accuracies)/len(accuracies)))
        return sum(accuracies)/len(accuracies)
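

# For reference, a configuration sketch consistent with the keys read by
# run_test_tda above. The numeric values are illustrative assumptions, not the
# repository's tuned defaults:
#
#   positive:
#     enabled: True
#     shot_capacity: 3
#     alpha: 2.0
#     beta: 5.0
#   negative:
#     enabled: True
#     shot_capacity: 2
#     alpha: 0.117
#     beta: 1.0
#     entropy_threshold:
#       lower: 0.2
#       upper: 0.5
#     mask_threshold:
#       lower: 0.03
#       upper: 1.0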


def main():
    args = get_arguments()
    config_path = args.config

    # Initialize CLIP model
    clip_model, preprocess = clip.load(args.backbone)
    clip_model.eval()

    # Set random seed
    random.seed(1)
    torch.manual_seed(1)

    if args.wandb:
        date = datetime.now().strftime("%b%d_%H-%M-%S")
        group_name = f"{args.backbone}_{args.datasets}_{date}"

    # Run TDA on each dataset
    datasets = args.datasets.split('/')
    for dataset_name in datasets:
        print(f"Processing {dataset_name} dataset.")

        cfg = get_config_file(config_path, dataset_name)
        print("\nRunning dataset configurations:")
        print(cfg, "\n")

        test_loader, classnames, template = build_test_data_loader(dataset_name, args.data_root, preprocess)
        clip_weights = clip_classifier(classnames, template, clip_model)

        if args.wandb:
            run_name = f"{dataset_name}"
            run = wandb.init(project="ETTA-CLIP", config=cfg, group=group_name, name=run_name)

        acc = run_test_tda(cfg['positive'], cfg['negative'], test_loader, clip_model, clip_weights)

        if args.wandb:
            wandb.log({f"{dataset_name}": acc})
            run.finish()


if __name__ == "__main__":
    main()