# Copyright 2016 Syed Ahmed. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Matching Networks model and training configurations."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function


class ModelConfig(object):
  """Wrapper class for model hyperparameters."""

  def __init__(self):
    """Sets the default model hyperparameters."""
    # File pattern of sharded TFRecord file containing SequenceExample protos.
    # Must be provided in training and val modes.
    self.input_file_pattern = None

    # Sound format ("wav").
    self.sound_format = "wav"

    # Batch sizes.
    self.batch_size_s = 1
    self.batch_size_b = 1

    # Number of classes.
    self.num_classes = 5

    # Files containing Inception v3 checkpoints to initialize the variables of
    # the f and g models. Must be provided when starting training for the
    # first time.
    self.f_checkpoint_file = None
    self.g_checkpoint_file = None

    # Dimensions of model sound inputs.
    self.num_mels = 128
    self.frames = 64
    self.lstm_processing_steps = 10

    # Scale used to initialize model variables.
    self.initializer_scale = 0.08

    # LSTM input and output dimensionality, respectively.
    self.embedding_size = 64
    self.num_lstm_units = 64

    # If < 1.0, the dropout keep probability applied to LSTM variables.
    self.lstm_dropout_keep_prob = 0.7


class TrainingConfig(object):
  """Wrapper class for training hyperparameters."""

  def __init__(self):
    """Sets the default training hyperparameters."""
    # Number of examples per epoch of training data.
    self.num_examples_per_epoch = 586363

    # Optimizer for training the model.
    self.optimizer = "SGD"

    # Learning rate for the initial phase of training.
    self.initial_learning_rate = 2.0
    self.learning_rate_decay_factor = 0.5
    self.num_epochs_per_decay = 8.0

    # Learning rate when fine tuning the Inception v3 parameters.
    self.train_inception_learning_rate = 0.0005

    # If not None, clip gradients to this value.
    self.clip_gradients = 5.0

    # How many model checkpoints to keep.
    self.max_checkpoints_to_keep = 5
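

# Minimal usage sketch (illustrative only, not part of the original module): a
# training script would typically instantiate these configs and override
# individual fields before building the model. The input file pattern below is
# a hypothetical placeholder, and the decay-step arithmetic is a plain-Python
# illustration of how these fields are assumed to combine.
if __name__ == "__main__":
  model_config = ModelConfig()
  training_config = TrainingConfig()

  # Hypothetical override of the sharded TFRecord input pattern.
  model_config.input_file_pattern = "data/train-?????-of-00016.tfrecord"

  # Assumed decay schedule: the learning rate is multiplied by
  # learning_rate_decay_factor every num_epochs_per_decay epochs.
  steps_per_epoch = (training_config.num_examples_per_epoch //
                     model_config.batch_size_b)
  decay_steps = int(steps_per_epoch * training_config.num_epochs_per_decay)

  print("Steps per epoch:", steps_per_epoch)
  print("Decay learning rate by", training_config.learning_rate_decay_factor,
        "every", decay_steps, "steps")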