# forked from kevinhughes27/TensorKart
# train.py -- 67 lines (53 loc), 2.15 KB
#!/usr/bin/env python
import numpy as np
from keras.models import Sequential
from keras.layers import Dense, Dropout, Flatten
from keras.layers import Conv2D
from keras import optimizers
from keras import backend as K
from utils import Sample
# Global variable
OUT_SHAPE = 5
INPUT_SHAPE = (Sample.IMG_H, Sample.IMG_W, Sample.IMG_D)
def customized_loss(y_true, y_pred, loss='euclidean'):
# Simply a mean squared error that penalizes large joystick summed values
if loss == 'L2':
L2_norm_cost = 0.001
val = K.mean(K.square((y_pred - y_true)), axis=-1) \
+ K.sum(K.square(y_pred), axis=-1)/2 * L2_norm_cost
# euclidean distance loss
elif loss == 'euclidean':
val = K.sqrt(K.sum(K.square(y_pred-y_true), axis=-1))
return val
def create_model(keep_prob = 0.8):
model = Sequential()
# NVIDIA's model
model.add(Conv2D(24, kernel_size=(5, 5), strides=(2, 2), activation='relu', input_shape= INPUT_SHAPE))
model.add(Conv2D(36, kernel_size=(5, 5), strides=(2, 2), activation='relu'))
model.add(Conv2D(48, kernel_size=(5, 5), strides=(2, 2), activation='relu'))
model.add(Conv2D(64, kernel_size=(3, 3), activation='relu'))
model.add(Conv2D(64, kernel_size=(3, 3), activation='relu'))
model.add(Flatten())
model.add(Dense(1164, activation='relu'))
drop_out = 1 - keep_prob
model.add(Dropout(drop_out))
model.add(Dense(100, activation='relu'))
model.add(Dropout(drop_out))
model.add(Dense(50, activation='relu'))
model.add(Dropout(drop_out))
model.add(Dense(10, activation='relu'))
model.add(Dropout(drop_out))
model.add(Dense(OUT_SHAPE, activation='softsign'))
return model
if __name__ == '__main__':
# Load Training Data
x_train = np.load("data/X.npy")
y_train = np.load("data/y.npy")
print(x_train.shape[0], 'train samples')
# Training loop variables
epochs = 100
batch_size = 50
model = create_model()
model.compile(loss=customized_loss, optimizer=optimizers.adam())
model.fit(x_train, y_train, batch_size=batch_size, epochs=epochs, shuffle=True, validation_split=0.1)
model.save_weights('model_weights.h5')