"""
FlowNet3D model with up convolution
"""
import tensorflow as tf
import numpy as np
import math
import sys
import os
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
sys.path.append(os.path.join(BASE_DIR, 'utils'))
import tf_util
from pointnet_util import *

def placeholder_inputs(batch_size, num_point):
    pointclouds_pl = tf.placeholder(tf.float32, shape=(batch_size, num_point * 2, 6))
    labels_pl = tf.placeholder(tf.float32, shape=(batch_size, num_point, 3))
    masks_pl = tf.placeholder(tf.float32, shape=(batch_size, num_point))
    return pointclouds_pl, labels_pl, masks_pl
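
# Feed example (a minimal sketch, not part of the original file; `frame1`,
# `frame2`, `sess` and `pred` are hypothetical). The two frames are packed
# along the point axis -- xyz in channels 0:3, extra point features in 3:6:
#   pc_pl, labels_pl, masks_pl = placeholder_inputs(batch_size=8, num_point=16384)
#   feed = np.concatenate([frame1, frame2], axis=1)   # each frame: (8, 16384, 6)
#   sess.run(pred, feed_dict={pc_pl: feed})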

def get_model(point_cloud, is_training, bn_decay=None, reuse=False):
    """ FlowNet3D, for evaluating on KITTI
        input: Bx(N1+N2)x6 (xyz in channels 0:3, point features in 3:6),
        output: BxN1x3 """
    end_points = {}
    batch_size = point_cloud.get_shape()[0].value
    num_point = point_cloud.get_shape()[1].value // 2

    l0_xyz_f1 = point_cloud[:, :num_point, 0:3]
    l0_points_f1 = point_cloud[:, :num_point, 3:]
    l0_xyz_f2 = point_cloud[:, num_point:, 0:3]
    l0_points_f2 = point_cloud[:, num_point:, 3:]

    RADIUS1 = 0.5
    RADIUS2 = 1.0
    RADIUS3 = 2.0
    RADIUS4 = 4.0

    # Set abstraction layers 1-2 are siamese: frame 2 goes through the same
    # 'sa1/layer1' and 'sa1/layer2' weights as frame 1 via scope.reuse_variables().
    with tf.variable_scope('sa1') as scope:
        if reuse:
            scope.reuse_variables()
        # Frame 1, Layer 1
        l1_xyz_f1, l1_points_f1, l1_indices_f1 = pointnet_sa_module(l0_xyz_f1, l0_points_f1, npoint=8192, radius=RADIUS1, nsample=256, mlp=[32,32,64], mlp2=None, group_all=False, is_training=is_training, bn_decay=bn_decay, scope='layer1')
        end_points['l1_indices_f1'] = l1_indices_f1
        # Frame 1, Layer 2
        l2_xyz_f1, l2_points_f1, l2_indices_f1 = pointnet_sa_module(l1_xyz_f1, l1_points_f1, npoint=2048, radius=RADIUS2, nsample=256, mlp=[64,64,128], mlp2=None, group_all=False, is_training=is_training, bn_decay=bn_decay, scope='layer2')
        end_points['l2_indices_f1'] = l2_indices_f1

        if not reuse:
            scope.reuse_variables()
        # Frame 2, Layer 1
        l1_xyz_f2, l1_points_f2, l1_indices_f2 = pointnet_sa_module(l0_xyz_f2, l0_points_f2, npoint=8192, radius=RADIUS1, nsample=256, mlp=[32,32,64], mlp2=None, group_all=False, is_training=is_training, bn_decay=bn_decay, scope='layer1')
        # Frame 2, Layer 2
        l2_xyz_f2, l2_points_f2, l2_indices_f2 = pointnet_sa_module(l1_xyz_f2, l1_points_f2, npoint=2048, radius=RADIUS2, nsample=256, mlp=[64,64,128], mlp2=None, group_all=False, is_training=is_training, bn_decay=bn_decay, scope='layer2')

    # Mix the two frames at the l2 level with a flow embedding
    _, l2_points_f1_new = flow_embedding_module(l2_xyz_f1, l2_xyz_f2, l2_points_f1, l2_points_f2, radius=3.0, nsample=256, mlp=[128,128,128], is_training=is_training, bn_decay=bn_decay, scope='flow_embedding', bn=True, pooling='max', knn=True, corr_func='concat')

    # Layer 3
    l3_xyz_f1, l3_points_f1, l3_indices_f1 = pointnet_sa_module(l2_xyz_f1, l2_points_f1_new, npoint=512, radius=RADIUS3, nsample=64, mlp=[128,128,256], mlp2=None, group_all=False, is_training=is_training, bn_decay=bn_decay, scope='layer3')
    end_points['l3_indices_f1'] = l3_indices_f1
    # Layer 4
    l4_xyz_f1, l4_points_f1, l4_indices_f1 = pointnet_sa_module(l3_xyz_f1, l3_points_f1, npoint=256, radius=RADIUS4, nsample=64, mlp=[256,256,512], mlp2=None, group_all=False, is_training=is_training, bn_decay=bn_decay, scope='layer4')
    end_points['l4_indices_f1'] = l4_indices_f1

    # Feature propagation back to the full resolution of frame 1
    l3_feat_f1 = set_upconv_module(l3_xyz_f1, l4_xyz_f1, l3_points_f1, l4_points_f1, nsample=4, radius=2.4, mlp=[], mlp2=[256,256], scope='up_sa_layer1', is_training=is_training, bn_decay=bn_decay, knn=True)
    l2_feat_f1 = set_upconv_module(l2_xyz_f1, l3_xyz_f1, tf.concat(axis=-1, values=[l2_points_f1, l2_points_f1_new]), l3_feat_f1, nsample=4, radius=1.2, mlp=[128,128,256], mlp2=[256], scope='up_sa_layer2', is_training=is_training, bn_decay=bn_decay, knn=True)
    l1_feat_f1 = set_upconv_module(l1_xyz_f1, l2_xyz_f1, l1_points_f1, l2_feat_f1, nsample=4, radius=0.6, mlp=[128,128,256], mlp2=[256], scope='up_sa_layer3', is_training=is_training, bn_decay=bn_decay, knn=True)
    l0_feat_f1 = pointnet_fp_module(l0_xyz_f1, l1_xyz_f1, l0_points_f1, l1_feat_f1, [256,256], is_training, bn_decay, scope='fa_layer4')

    # FC layers
    net = tf_util.conv1d(l0_feat_f1, 128, 1, padding='VALID', bn=True, is_training=is_training, scope='fc1', bn_decay=bn_decay)
    net = tf_util.conv1d(net, 3, 1, padding='VALID', activation_fn=None, scope='fc2')
    return net, end_points
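
# Shape flow for one frame (illustrative summary derived from the configured
# npoint/mlp values above; N = points per frame):
#   l0 (N pts, 3 feat) -> l1 (8192 pts, 64) -> l2 (2048 pts, 128)
#   -> flow embedding at l2 (128) -> l3 (512 pts, 256) -> l4 (256 pts, 512)
#   -> upconv l4->l3->l2->l1->l0 -> two 1x1 convs -> N x 3 scene flow.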

def huber_loss(error, delta):
    abs_error = tf.abs(error)
    quadratic = tf.minimum(abs_error, delta)
    linear = (abs_error - quadratic)
    losses = 0.5 * quadratic**2 + delta * linear
    return tf.reduce_mean(losses)
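
# Sanity check (illustrative, not part of the original file; values assume a
# TF 1.x session evaluation). For |error| <= delta the loss is quadratic,
# 0.5 * error**2; beyond delta it grows linearly, delta * |error| - 0.5 * delta**2:
#   huber_loss(tf.constant([0.5]), 1.0)   # -> 0.125 (= 0.5 * 0.5**2)
#   huber_loss(tf.constant([3.0]), 1.0)   # -> 2.5   (= 1.0 * 3.0 - 0.5)
# Note that huber_loss is defined here but unused; get_loss below uses a
# masked L2 loss instead.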

def get_loss(pred, label, mask, end_points):
    """ pred: BxNx3,
        label: BxNx3,
        mask: BxN
    """
    batch_size = pred.get_shape()[0].value
    num_point = pred.get_shape()[1].value
    l2_loss = tf.reduce_mean(mask * tf.reduce_sum((pred-label) * (pred-label), axis=2) / 2.0)
    tf.summary.scalar('l2 loss', l2_loss)
    tf.add_to_collection('losses', l2_loss)
    return l2_loss
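
# Worked example (a minimal sketch): the per-point term is ||pred - label||^2 / 2,
# zeroed where mask == 0, then averaged over all B*N points (valid or not):
#   pred  = tf.constant([[[1., 2., 2.]]])   # B=1, N=1
#   label = tf.zeros((1, 1, 3))
#   mask  = tf.ones((1, 1))
#   get_loss(pred, label, mask, {})         # -> (1 + 4 + 4) / 2 = 4.5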

if __name__=='__main__':
    with tf.Graph().as_default():
        inputs = tf.zeros((32,1024*2,6))
        outputs = get_model(inputs, tf.constant(True))
        print(outputs)
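        # Expected result (a sketch, assuming TF 1.x): `outputs` is the tuple
        # (net, end_points), where net has shape (32, 1024, 3) -- one 3-D flow
        # vector per point of the first frame.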