test.py
import os, time
from options.test_options import TestOptions
from data import create_dataset
from models import create_model
from tqdm import tqdm
from util.evaluator import Evaluator


if __name__ == '__main__':
    # Parse test-time options and force deterministic, one-image-at-a-time loading.
    opt = TestOptions().parse()
    opt.num_threads = 1        # test code only supports a single data-loading worker
    opt.batch_size = 1         # test code only supports batch_size = 1
    opt.serial_batches = True  # disable shuffling; load images in a fixed order

    # Build the test dataset and evaluate on every image it contains.
    test_dataset = create_dataset(opt)
    test_size = len(test_dataset)
    print('The number of test images = %d. Testset: %s' % (test_size, opt.dataroot))
    opt.num_test = test_size

    evaluator = Evaluator(opt)
    model = create_model(opt)
    model.setup(opt)  # load networks and prepare the model for testing

    # save_dir = os.path.join(os.getcwd(), opt.results_dir, opt.name, opt.dataroot.split('/')[-1], '%s_%s' % (opt.phase, opt.epoch))
    # if not os.path.exists(save_dir):
    #     os.makedirs(save_dir)

    model.eval()
    evaluator.reset()
    eval_start_time = time.time()
    for data in tqdm(test_dataset):
        model.set_input(data)    # unpack a sample from the data loader
        preds = model.test()     # run a forward pass and return predictions
        evaluator.update(preds)  # accumulate predictions for the metrics
    eval_time = time.time() - eval_start_time

    # Summarize the metrics and report them together with the total evaluation time.
    res = '==>Evaluation time: {:.0f},\n'.format(eval_time)
    metric, select_score = evaluator.summary(eval_mode='edge_rel | lloc')
    res += metric
    print(res)
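
# A hypothetical example invocation, shown only as a sketch: the flag names below
# (--dataroot, --name, --model) are assumptions and the actual arguments depend on
# what options/test_options.py defines in this repository.
#
#   python test.py --dataroot ./datasets/<test_set> --name <experiment_name> --model <model_name>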