Merge pull request #205 from npuichigo/master
Fix the bug of dropout and prenet.
keithito authored Aug 31, 2018
2 parents d715298 + bc20c40 commit 231d6d7
Showing 2 changed files with 5 additions and 2 deletions.
models/modules.py: 2 changes (1 addition, 1 deletion)
@@ -8,7 +8,7 @@ def prenet(inputs, is_training, layer_sizes, scope=None):
   with tf.variable_scope(scope or 'prenet'):
     for i, size in enumerate(layer_sizes):
       dense = tf.layers.dense(x, units=size, activation=tf.nn.relu, name='dense_%d' % (i+1))
-      x = tf.layers.dropout(dense, rate=drop_rate, name='dropout_%d' % (i+1))
+      x = tf.layers.dropout(dense, rate=drop_rate, training=is_training, name='dropout_%d' % (i+1))
   return x
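
For reference, a minimal sketch of the corrected prenet under TensorFlow 1.x (the drop_rate value here is illustrative; the repository defines its own). tf.layers.dropout defaults to training=False, in which case it returns its input untouched regardless of rate, so passing training=is_training is what actually enables dropout during training:

import tensorflow as tf

def prenet(inputs, is_training, layer_sizes, scope=None):
  x = inputs
  drop_rate = 0.5  # illustrative rate; the repository sets its own value
  with tf.variable_scope(scope or 'prenet'):
    for i, size in enumerate(layer_sizes):
      dense = tf.layers.dense(x, units=size, activation=tf.nn.relu, name='dense_%d' % (i+1))
      # Dropout is active only when is_training is True; with the default
      # (training=False) this call was a no-op even during training.
      x = tf.layers.dropout(dense, rate=drop_rate, training=is_training, name='dropout_%d' % (i+1))
  return x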


models/tacotron.py: 5 changes (4 additions, 1 deletion)
@@ -49,10 +49,13 @@ def initialize(self, inputs, input_lengths, mel_targets=None, linear_targets=Non

     # Attention
     attention_cell = AttentionWrapper(
-      DecoderPrenetWrapper(GRUCell(hp.attention_depth), is_training, hp.prenet_depths),
+      GRUCell(hp.attention_depth),
       BahdanauAttention(hp.attention_depth, encoder_outputs),
       alignment_history=True,
       output_attention=False)                             # [N, T_in, attention_depth=256]
 
+    # Apply prenet before concatenation in AttentionWrapper.
+    attention_cell = DecoderPrenetWrapper(attention_cell, is_training, hp.prenet_depths)
+
     # Concatenate attention context vector and RNN cell output into a 2*attention_depth=512D vector.
     concat_cell = ConcatOutputAndAttentionWrapper(attention_cell)   # [N, T_in, 2*attention_depth=512]
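
For context on why the wrapper was moved: tf.contrib.seq2seq.AttentionWrapper, by default, concatenates the cell input with the previous attention context before calling the wrapped cell. Wrapping the whole AttentionWrapper with DecoderPrenetWrapper therefore runs the prenet on the raw decoder input (e.g. the previous frame) before that concatenation, instead of on the already-concatenated vector. A rough sketch of what such a wrapper looks like, assuming TF 1.x RNNCell semantics and the prenet above (the repository's own DecoderPrenetWrapper may differ in detail):

from tensorflow.contrib.rnn import RNNCell
from models.modules import prenet  # the prenet shown above (import path assumed)

class DecoderPrenetWrapper(RNNCell):
  '''Runs the prenet over the cell input before delegating to the wrapped cell.'''

  def __init__(self, cell, is_training, layer_sizes):
    super(DecoderPrenetWrapper, self).__init__()
    self._cell = cell
    self._is_training = is_training
    self._layer_sizes = layer_sizes

  @property
  def state_size(self):
    return self._cell.state_size

  @property
  def output_size(self):
    return self._cell.output_size

  def call(self, inputs, state):
    # Apply the prenet to the raw input, then let the wrapped cell
    # (here the AttentionWrapper) handle attention and concatenation.
    prenet_out = prenet(inputs, self._is_training, self._layer_sizes, scope='decoder_prenet')
    return self._cell(prenet_out, state)

  def zero_state(self, batch_size, dtype):
    return self._cell.zero_state(batch_size, dtype)

With the wrapper applied outside, the GRUCell inside AttentionWrapper receives [prenet(input); attention_context] rather than prenet([input; attention_context]), which is what the new inline comment in the diff describes.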

