loss.py
import numpy as np
import torch
import torch.nn as nn
from scipy.spatial.distance import cdist

def obtain_label(loader, netF, netB, netC, args):
    """Generate refined pseudo-labels for the target data via centroid-based
    clustering in feature space, seeded by the network's softmax predictions."""
    start_test = True
    with torch.no_grad():
        # Accumulate features, logits, and ground-truth labels over the loader.
        iter_test = iter(loader)
        for _ in range(len(loader)):
            data = next(iter_test)
            inputs = data[0]
            labels = data[1]
            # inputs = inputs.cuda()
            feas = netB(netF(inputs))
            outputs = netC(feas)
            if start_test:
                all_fea = feas.float().cpu()
                all_output = outputs.float().cpu()
                all_label = labels.float()
                start_test = False
            else:
                all_fea = torch.cat((all_fea, feas.float().cpu()), 0)
                all_output = torch.cat((all_output, outputs.float().cpu()), 0)
                all_label = torch.cat((all_label, labels.float()), 0)

    all_output = nn.Softmax(dim=1)(all_output)
    ent = torch.sum(-all_output * torch.log(all_output + args.epsilon), dim=1)
    unknown_weight = 1 - ent / np.log(args.class_num)  # computed but not used below
    _, predict = torch.max(all_output, 1)

    # Accuracy of the raw network predictions, before pseudo-label refinement.
    accuracy = torch.sum(torch.squeeze(predict).float() == all_label).item() / float(all_label.size()[0])
    if args.distance == 'cosine':
        # Append a bias dimension and L2-normalize rows so cosine distance is meaningful.
        all_fea = torch.cat((all_fea, torch.ones(all_fea.size(0), 1)), 1)
        all_fea = (all_fea.t() / torch.norm(all_fea, p=2, dim=1)).t()

    all_fea = all_fea.float().cpu().numpy()
    K = all_output.size(1)
    aff = all_output.float().cpu().numpy()
    # Initial class centroids: softmax-weighted means of the features.
    initc = aff.transpose().dot(all_fea)
    initc = initc / (1e-8 + aff.sum(axis=0)[:, None])
    # Keep only classes predicted more than args.threshold times.
    cls_count = np.eye(K)[predict].sum(axis=0)
    labelset = np.where(cls_count > args.threshold)[0]

    # Assign each sample to its nearest surviving centroid.
    dd = cdist(all_fea, initc[labelset], args.distance)
    pred_label = dd.argmin(axis=1)
    pred_label = labelset[pred_label]

    # One refinement round: recompute centroids from the hard assignments, then reassign.
    for _ in range(1):
        aff = np.eye(K)[pred_label]
        initc = aff.transpose().dot(all_fea)
        initc = initc / (1e-8 + aff.sum(axis=0)[:, None])
        dd = cdist(all_fea, initc[labelset], args.distance)
        pred_label = dd.argmin(axis=1)
        pred_label = labelset[pred_label]

    acc = np.sum(pred_label == all_label.float().numpy()) / len(all_fea)
    log_str = 'Accuracy = {:.2f}% -> {:.2f}%'.format(accuracy * 100, acc * 100)
    args.out_file.write(log_str + '\n')
    args.out_file.flush()
    print(log_str + '\n')

    return pred_label.astype('int')
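
# Illustrative call site (an addition, not part of the original file):
# pseudo-labels are typically refreshed periodically with the networks in eval
# mode, then used as targets for self-training. `test_loader` and `interval`
# are hypothetical names, not defined in this file.
#
# if iter_num % interval == 0:
#     netF.eval(); netB.eval()
#     mem_label = torch.from_numpy(obtain_label(test_loader, netF, netB, netC, args))
#     netF.train(); netB.train()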
def Entropy(input_):
    # Per-sample Shannon entropy of a batch of probability vectors (shape [B, C]).
    epsilon = 1e-5
    entropy = -input_ * torch.log(input_ + epsilon)
    entropy = torch.sum(entropy, dim=1)
    return entropy
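
# Sanity-check sketch (an addition, not part of the original file): the entropy
# of a uniform distribution over C classes is log(C), so for C = 4 each row
# should come out near log(4) ~= 1.386 (up to the 1e-5 epsilon).
#
# probs = torch.full((2, 4), 0.25)
# print(Entropy(probs))  # approximately tensor([1.3863, 1.3863])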
def op_copy(optimizer):
    # Record each parameter group's initial learning rate in 'lr0' so that
    # lr_scheduler can decay from it on every call.
    for param_group in optimizer.param_groups:
        param_group['lr0'] = param_group['lr']
    return optimizer
def lr_scheduler(optimizer, iter_num, max_iter, gamma=10, power=0.75):
    # Polynomial decay: lr = lr0 * (1 + gamma * iter_num / max_iter) ** (-power).
    # Also (re)sets weight decay and Nesterov momentum on every call.
    decay = (1 + gamma * iter_num / max_iter) ** (-power)
    for param_group in optimizer.param_groups:
        param_group['lr'] = param_group['lr0'] * decay
        param_group['weight_decay'] = 1e-3
        param_group['momentum'] = 0.9
        param_group['nesterov'] = True
    return optimizer
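
# Minimal wiring sketch (an addition, not part of the original file): op_copy
# is applied once after building the optimizer, and lr_scheduler is called at
# each iteration. The SGD hyperparameters and `max_iter` here are assumptions.
#
# optimizer = torch.optim.SGD(netC.parameters(), lr=1e-2, momentum=0.9)
# optimizer = op_copy(optimizer)
# for iter_num in range(max_iter):
#     lr_scheduler(optimizer, iter_num, max_iter)
#     # ... one training step ...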