block_constant.py
from base_classes import ODEblock
import torch
from utils import get_rw_adj, gcn_norm_fill_val


class ConstantODEblock(ODEblock):
  """ODE block that wraps an ODE function with a precomputed, fixed graph
  (edge_index / edge_weight) and integrates it with torchdiffeq."""

  def __init__(self, odefunc, regularization_fns, opt, data, device, t=torch.tensor([0, 1])):
    super(ConstantODEblock, self).__init__(odefunc, regularization_fns, opt, data, device, t)

    # Double the hidden dimension when state augmentation is enabled.
    self.aug_dim = 2 if opt['augment'] else 1
    self.odefunc = odefunc(self.aug_dim * opt['hidden_dim'], self.aug_dim * opt['hidden_dim'], opt, data, device)

    # Normalise the adjacency once, up front: random-walk normalisation or
    # GCN-style normalisation, both with self-loops of weight opt['self_loop_weight'].
    if opt['data_norm'] == 'rw':
      edge_index, edge_weight = get_rw_adj(data.edge_index, edge_weight=data.edge_attr, norm_dim=1,
                                           fill_value=opt['self_loop_weight'],
                                           num_nodes=data.num_nodes,
                                           dtype=data.x.dtype)
    else:
      edge_index, edge_weight = gcn_norm_fill_val(data.edge_index, edge_weight=data.edge_attr,
                                                  fill_value=opt['self_loop_weight'],
                                                  num_nodes=data.num_nodes,
                                                  dtype=data.x.dtype)
    self.odefunc.edge_index = edge_index.to(device)
    self.odefunc.edge_weight = edge_weight.to(device)
    # The regularised wrapper shares the same graph as the plain ODE function.
    self.reg_odefunc.odefunc.edge_index, self.reg_odefunc.odefunc.edge_weight = self.odefunc.edge_index, self.odefunc.edge_weight

    # Use the adjoint solver for memory-efficient backpropagation if requested.
    if opt['adjoint']:
      from torchdiffeq import odeint_adjoint as odeint
    else:
      from torchdiffeq import odeint

    self.train_integrator = odeint
    self.test_integrator = odeint
    self.set_tol()

  def forward(self, x):
    t = self.t.type_as(x)

    integrator = self.train_integrator if self.training else self.test_integrator

    # One scalar regularisation accumulator per node for each regularisation term.
    reg_states = tuple(torch.zeros(x.size(0)).to(x) for i in range(self.nreg))

    # During training with active regularisers, integrate the augmented state
    # (features plus regularisation accumulators); otherwise just the features.
    func = self.reg_odefunc if self.training and self.nreg > 0 else self.odefunc
    state = (x,) + reg_states if self.training and self.nreg > 0 else x

    if self.opt["adjoint"] and self.training:
      state_dt = integrator(
        func, state, t,
        method=self.opt['method'],
        options=dict(step_size=self.opt['step_size'], max_iters=self.opt['max_iters']),
        adjoint_method=self.opt['adjoint_method'],
        adjoint_options=dict(step_size=self.opt['adjoint_step_size'], max_iters=self.opt['max_iters']),
        atol=self.atol,
        rtol=self.rtol,
        adjoint_atol=self.atol_adjoint,
        adjoint_rtol=self.rtol_adjoint)
    else:
      state_dt = integrator(
        func, state, t,
        method=self.opt['method'],
        options=dict(step_size=self.opt['step_size'], max_iters=self.opt['max_iters']),
        atol=self.atol,
        rtol=self.rtol)

    # The integrator returns the state at every time in t; index 1 is the end time.
    if self.training and self.nreg > 0:
      z = state_dt[0][1]
      reg_states = tuple(st[1] for st in state_dt[1:])
      return z, reg_states
    else:
      z = state_dt[1]
      return z

  def __repr__(self):
    return self.__class__.__name__ + '( Time Interval ' + str(self.t[0].item()) + ' -> ' + str(self.t[1].item()) \
           + ")"