# ops.py
import numpy as np
import tensorflow as tf

log2pi = np.log(2 * np.pi)  # constant used by the log-likelihood helpers below

conv_size = 5           # kernel size for all encoder convolutions
deconv_size_first = 4   # unused in this file
deconv_size_second = 3  # kernel size for the first three decoder deconvolutions
deconv_size = 5         # kernel size for the last two decoder deconvolutions
def encoder(input_tensor, output_size):
    '''
    Map a batch of input images to a batch of latent codes.

    input_tensor: (batch, height, width, channels) image batch
    output_size: dimensionality of the latent code
    '''
    output = tf.contrib.layers.conv2d(
        input_tensor, 32, conv_size, scope='convlayer1', stride=2,
        activation_fn=tf.nn.elu, normalizer_fn=tf.contrib.layers.batch_norm,
        normalizer_params={'scale': True})
    output = tf.contrib.layers.conv2d(
        output, 64, conv_size, scope='convlayer2', stride=2,
        activation_fn=tf.nn.elu, normalizer_fn=tf.contrib.layers.batch_norm,
        normalizer_params={'scale': True})
    output = tf.contrib.layers.conv2d(
        output, 128, conv_size, scope='convlayer3', stride=2, padding='VALID',
        activation_fn=tf.nn.elu, normalizer_fn=tf.contrib.layers.batch_norm,
        normalizer_params={'scale': True})
    output = tf.contrib.layers.dropout(output, 0.9, scope='dropout1')
    output = tf.contrib.layers.flatten(output)
    return tf.contrib.layers.fully_connected(output, output_size, activation_fn=None)
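# A minimal usage sketch (an assumption, not stated in this file): with
# 32x32x3 inputs, which the 3-channel tanh output of decoder() below suggests,
# the spatial map shrinks 32x32 -> 16x16 -> 8x8 -> 2x2 (the last conv is
# VALID), so flatten() yields 2*2*128 = 512 features before the final
# projection:
#   images = tf.placeholder(tf.float32, [None, 32, 32, 3])  # hypothetical input
#   z = encoder(images, output_size=128)                     # shape (batch, 128)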
def decoder(input_tensor):
    '''
    Map a batch of latent codes back to a batch of images.

    input_tensor: (batch, code_size) latent batch; it is reshaped to a
    (batch, 1, 1, code_size) feature map before the transposed convolutions.
    '''
    output = tf.expand_dims(input_tensor, 1)
    output = tf.expand_dims(output, 1)
    output = tf.contrib.layers.conv2d_transpose(
        output, 128, deconv_size_second, scope='deconv1', padding='VALID',
        activation_fn=tf.nn.elu, normalizer_fn=tf.contrib.layers.batch_norm,
        normalizer_params={'scale': True})
    output = tf.contrib.layers.conv2d_transpose(
        output, 64, deconv_size_second, scope='deconv2', stride=2,
        activation_fn=tf.nn.elu, normalizer_fn=tf.contrib.layers.batch_norm,
        normalizer_params={'scale': True})
    output = tf.contrib.layers.conv2d_transpose(
        output, 32, deconv_size_second, scope='deconv3', padding='VALID',
        activation_fn=tf.nn.elu, normalizer_fn=tf.contrib.layers.batch_norm,
        normalizer_params={'scale': True})
    output = tf.contrib.layers.conv2d_transpose(
        output, 16, deconv_size, scope='deconv4', stride=2,
        activation_fn=tf.nn.elu, normalizer_fn=tf.contrib.layers.batch_norm,
        normalizer_params={'scale': True})
    # tanh keeps the generated pixels in [-1, 1]
    output = tf.contrib.layers.conv2d_transpose(
        output, 3, deconv_size, scope='deconv6', stride=2,
        activation_fn=tf.nn.tanh, normalizer_fn=tf.contrib.layers.batch_norm,
        normalizer_params={'scale': True})
    return output
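# Shape walkthrough (derived from the layer parameters above): a (batch, d)
# code becomes (batch, 1, 1, d), then
#   1x1 -> 3x3 (deconv1, VALID) -> 6x6 (deconv2, stride 2)
#       -> 8x8 (deconv3, VALID) -> 16x16 (deconv4, stride 2)
#       -> 32x32x3 (deconv6, stride 2),
# matching the 32x32x3 input assumed for encoder() above.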
def log_likelihood_gaussian(sample, mean, sigma):
    '''
    Compute log p(sample) under a diagonal Gaussian N(mean, sigma^2),
    summed over the feature dimension.
    '''
    return -log2pi*tf.cast(sample.shape[1].value, tf.float32)/2 \
        - tf.reduce_sum(tf.square((sample - mean)/sigma) + 2*tf.log(sigma), 1)/2
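# Derivation sketch for the expression above: a diagonal Gaussian factorizes as
#   N(x; mu, sigma^2) = prod_i exp(-((x_i - mu_i)/sigma_i)^2 / 2) / (sigma_i*sqrt(2*pi)),
# so taking logs over d dimensions gives
#   log p(x) = -d/2*log(2*pi) - sum_i(((x_i - mu_i)/sigma_i)^2 + 2*log(sigma_i))/2,
# which is exactly what the function returns.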
def log_likelihood_prior(sample):
    '''
    Compute log p(sample) under the standard Gaussian prior N(0, I);
    this is log_likelihood_gaussian with mean 0 and sigma 1.
    '''
    return -log2pi*tf.cast(sample.shape[1].value, tf.float32)/2 \
        - tf.reduce_sum(tf.square(sample), 1)/2
def parzen_cpu_batch(x_batch, samples, sigma, batch_size, num_of_samples, data_size):
    '''
    Parzen-window log-likelihood of a data batch under a Gaussian kernel
    density estimate fit to generated samples.

    x_batch: data batch (batch_size, data_size), data_size = h*w*c for images
    samples: generated data (num_of_samples, data_size)
    sigma: kernel standard deviation (float32)
    Returns a (batch_size, 1) array of log-likelihoods.
    '''
    # a = (x - x_i)/sigma, via broadcasting
    x = x_batch.reshape((batch_size, 1, data_size))
    mu = samples.reshape((1, num_of_samples, data_size))
    a = (x - mu)/sigma  # (batch_size, num_of_samples, data_size)
    # sum of -0.5*a^2 over the feature dimension
    tmp = -0.5*(a**2).sum(2)  # (batch_size, num_of_samples)
    # log-mean-exp trick for numerical stability
    max_ = np.amax(tmp, axis=1, keepdims=True)  # (batch_size, 1)
    E = max_ + np.log(np.mean(np.exp(tmp - max_), axis=1, keepdims=True))  # (batch_size, 1)
    # normalizer: Z = data_size * log(sigma * sqrt(2*pi))
    Z = data_size * np.log(sigma * np.sqrt(np.pi * 2))
    return E - Z
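# A minimal smoke test (not part of the original pipeline): random data scored
# against random "samples", purely to sanity-check shapes; the sizes here are
# hypothetical.
if __name__ == '__main__':
    rng = np.random.RandomState(0)
    x = rng.randn(10, 784).astype(np.float32)   # fake data batch
    s = rng.randn(100, 784).astype(np.float32)  # fake generated samples
    ll = parzen_cpu_batch(x, s, sigma=0.2, batch_size=10,
                          num_of_samples=100, data_size=784)
    print(ll.shape)  # (10, 1): one log-likelihood per data point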