cifar10c.py
import logging

import torch
import torch.optim as optim

from robustbench.data import load_cifar10c
from robustbench.model_zoo.enums import ThreatModel
from robustbench.utils import load_model
from robustbench.utils import clean_accuracy as accuracy

import tent
import norm

from conf import cfg, load_cfg_fom_args

logger = logging.getLogger(__name__)


def evaluate(description):
    load_cfg_fom_args(description)
    # configure model
    base_model = load_model(cfg.MODEL.ARCH, cfg.CKPT_DIR,
                            cfg.CORRUPTION.DATASET, ThreatModel.corruptions).cuda()
    if cfg.MODEL.ADAPTATION == "source":
        logger.info("test-time adaptation: NONE")
        model = setup_source(base_model)
    if cfg.MODEL.ADAPTATION == "norm":
        logger.info("test-time adaptation: NORM")
        model = setup_norm(base_model)
    if cfg.MODEL.ADAPTATION == "tent":
        logger.info("test-time adaptation: TENT")
        model = setup_tent(base_model)
    # evaluate on each severity and type of corruption in turn
    for severity in cfg.CORRUPTION.SEVERITY:
        for corruption_type in cfg.CORRUPTION.TYPE:
            # reset adaptation for each combination of corruption x severity
            # note: resetting follows the evaluation protocol, but is not strictly necessary
            try:
                model.reset()
                logger.info("resetting model")
            except Exception:
                logger.warning("not resetting model")
            x_test, y_test = load_cifar10c(cfg.CORRUPTION.NUM_EX,
                                           severity, cfg.DATA_DIR, False,
                                           [corruption_type])
            x_test, y_test = x_test.cuda(), y_test.cuda()
            acc = accuracy(model, x_test, y_test, cfg.TEST.BATCH_SIZE)
            err = 1. - acc
            logger.info(f"error % [{corruption_type}{severity}]: {err:.2%}")


def setup_source(model):
    """Set up the baseline source model without adaptation."""
    model.eval()
    logger.info("model for evaluation: %s", model)
    return model


def setup_norm(model):
    """Set up test-time normalization adaptation.

    Adapt by normalizing features with test batch statistics.
    The statistics are measured independently for each batch;
    no running average or other cross-batch estimation is used.
    """
    norm_model = norm.Norm(model)
    logger.info("model for adaptation: %s", model)
    stats, stat_names = norm.collect_stats(model)
    logger.info("stats for adaptation: %s", stat_names)
    return norm_model
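

# For reference, a minimal sketch of how normalizing with test batch statistics
# can be realized; the actual logic lives in the norm module, and the helper
# name below is illustrative rather than part of this repository. Assuming the
# network uses BatchNorm2d layers, each layer is switched to per-batch
# statistics by dropping its running estimates.
def _sketch_use_test_batch_stats(model):
    import torch.nn as nn
    for m in model.modules():
        if isinstance(m, nn.BatchNorm2d):
            m.track_running_stats = False  # stop relying on running estimates
            m.running_mean = None          # normalize with the current batch only
            m.running_var = None
            m.train()                      # use batch statistics in the forward pass
    return model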


def setup_tent(model):
    """Set up tent adaptation.

    Configure the model for training + feature modulation by batch statistics,
    collect the parameters for feature modulation by gradient optimization,
    set up the optimizer, and then tent the model.
    """
    model = tent.configure_model(model)
    params, param_names = tent.collect_params(model)
    optimizer = setup_optimizer(params)
    tent_model = tent.Tent(model, optimizer,
                           steps=cfg.OPTIM.STEPS,
                           episodic=cfg.MODEL.EPISODIC)
    logger.info("model for adaptation: %s", model)
    logger.info("params for adaptation: %s", param_names)
    logger.info("optimizer for adaptation: %s", optimizer)
    return tent_model
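

# For reference, a minimal sketch of the kind of parameter collection tent
# relies on: only the affine scale (weight) and shift (bias) of the batch
# normalization layers are optimized at test time. The helper name is
# illustrative; tent.collect_params is the function actually used above.
def _sketch_collect_bn_affine_params(model):
    import torch.nn as nn
    params, names = [], []
    for module_name, m in model.named_modules():
        if isinstance(m, nn.BatchNorm2d):
            for param_name, p in m.named_parameters():
                if param_name in ('weight', 'bias'):  # affine scale and shift
                    params.append(p)
                    names.append(f"{module_name}.{param_name}")
    return params, names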


def setup_optimizer(params):
    """Set up optimizer for tent adaptation.

    Tent needs an optimizer for test-time entropy minimization.
    In principle, tent could make use of any gradient optimizer.
    In practice, we advise choosing Adam or SGD+momentum.
    For optimization settings, we advise using the settings from the end of
    training, if known, or starting with a low learning rate (like 0.001) if not.
    For best results, try tuning the learning rate and batch size.
    """
    if cfg.OPTIM.METHOD == 'Adam':
        return optim.Adam(params,
                          lr=cfg.OPTIM.LR,
                          betas=(cfg.OPTIM.BETA, 0.999),
                          weight_decay=cfg.OPTIM.WD)
    elif cfg.OPTIM.METHOD == 'SGD':
        return optim.SGD(params,
                         lr=cfg.OPTIM.LR,
                         momentum=cfg.OPTIM.MOMENTUM,
                         dampening=cfg.OPTIM.DAMPENING,
                         weight_decay=cfg.OPTIM.WD,
                         nesterov=cfg.OPTIM.NESTEROV)
    else:
        raise NotImplementedError
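

# For reference, a minimal sketch of the test-time entropy objective that the
# optimizer above minimizes: the Shannon entropy of the model's softmax
# predictions, averaged over the batch. The helper name is illustrative; the
# loss used during adaptation is defined in the tent module.
def _sketch_softmax_entropy(logits):
    log_probs = logits.log_softmax(dim=1)
    probs = logits.softmax(dim=1)
    return -(probs * log_probs).sum(dim=1).mean()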


if __name__ == '__main__':
    evaluate('CIFAR-10-C evaluation.')