#!/usr/bin/env python3
"""
Scripts to drive a donkey 2 car
Usage:
manage.py (drive) [--model=<model>] [--js] [--type=(linear|categorical|rnn|imu|behavior|3d|localizer|latent)] [--camera=(single|stereo)] [--meta=<key:value> ...] [--myconfig=<filename>]
manage.py (train) [--tub=<tub1,tub2,..tubn>] [--file=<file> ...] (--model=<model>) [--transfer=<model>] [--type=(linear|categorical|rnn|imu|behavior|3d|localizer)] [--continuous] [--aug] [--myconfig=<filename>]
Options:
-h --help Show this screen.
--js Use physical joystick.
-f --file=<file> A text file containing paths to tub files, one per line. Option may be used more than once.
--meta=<key:value> Key/Value strings describing a piece of meta data about this drive. Option may be used more than once.
--myconfig=filename Specify myconfig file to use.
[default: myconfig.py]
"""
import os
import time
from docopt import docopt
import numpy as np
import donkeycar as dk
#import parts
from donkeycar.parts.transform import Lambda, TriggeredCallback, DelayedTrigger
from donkeycar.parts.datastore import TubHandler
from donkeycar.parts.controller import LocalWebController, \
JoystickController, WebFpv
from donkeycar.parts.throttle_filter import ThrottleFilter
from donkeycar.parts.behavior import BehaviorPart
from donkeycar.parts.file_watcher import FileWatcher
from donkeycar.parts.launch import AiLaunch
from donkeycar.utils import *
from donkeypart_ps3_controller import PS3JoystickController
def drive(cfg, model_path=None, use_joystick=False, model_type=None, camera_type='single', meta=[]):
'''
Construct a working robotic vehicle from many parts.
Each part runs as a job in the Vehicle loop, calling either
its run or run_threaded method depending on the constructor flag `threaded`.
All parts are updated one after another at the framerate given in
cfg.DRIVE_LOOP_HZ assuming each part finishes processing in a timely manner.
Parts may have named outputs and inputs. The framework handles passing named outputs
to parts requesting the same named input.
'''
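# Illustrative example (SomePart is hypothetical, not defined in this script):
#   V.add(SomePart(), inputs=['cam/image_array'], outputs=['some/value'])
# would receive the latest 'cam/image_array' on each loop pass and publish
# 'some/value' for any later part that lists it as an input.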
if cfg.DONKEY_GYM:
#the simulator will use cuda and then we usually run out of resources
#if we also try to use cuda. so disable for donkey_gym.
os.environ["CUDA_VISIBLE_DEVICES"]="-1"
if model_type is None:
if cfg.TRAIN_LOCALIZER:
model_type = "localizer"
elif cfg.TRAIN_BEHAVIORS:
model_type = "behavior"
else:
model_type = cfg.DEFAULT_MODEL_TYPE
#Initialize car
V = dk.vehicle.Vehicle()
print("cfg.CAMERA_TYPE", cfg.CAMERA_TYPE)
if camera_type == "stereo":
if cfg.CAMERA_TYPE == "WEBCAM":
from donkeycar.parts.camera import Webcam
camA = Webcam(image_w=cfg.IMAGE_W, image_h=cfg.IMAGE_H, image_d=cfg.IMAGE_DEPTH, iCam = 0)
camB = Webcam(image_w=cfg.IMAGE_W, image_h=cfg.IMAGE_H, image_d=cfg.IMAGE_DEPTH, iCam = 1)
elif cfg.CAMERA_TYPE == "CVCAM":
from donkeycar.parts.cv import CvCam
camA = CvCam(image_w=cfg.IMAGE_W, image_h=cfg.IMAGE_H, image_d=cfg.IMAGE_DEPTH, iCam = 0)
camB = CvCam(image_w=cfg.IMAGE_W, image_h=cfg.IMAGE_H, image_d=cfg.IMAGE_DEPTH, iCam = 1)
else:
raise(Exception("Unsupported camera type: %s" % cfg.CAMERA_TYPE))
V.add(camA, outputs=['cam/image_array_a'], threaded=True)
V.add(camB, outputs=['cam/image_array_b'], threaded=True)
from donkeycar.parts.image import StereoPair
V.add(StereoPair(), inputs=['cam/image_array_a', 'cam/image_array_b'],
outputs=['cam/image_array'])
elif cfg.CAMERA_TYPE == "D435":
from donkeycar.parts.realsense435i import RealSense435i
cam = RealSense435i(
enable_rgb=cfg.REALSENSE_D435_RGB,
enable_depth=cfg.REALSENSE_D435_DEPTH,
enable_imu=cfg.REALSENSE_D435_IMU,
device_id=cfg.REALSENSE_D435_ID)
V.add(cam, inputs=[],
outputs=['cam/image_array', 'cam/depth_array',
'imu/acl_x', 'imu/acl_y', 'imu/acl_z',
'imu/gyr_x', 'imu/gyr_y', 'imu/gyr_z'],
threaded=True)
else:
inputs = []
threaded = True
if cfg.DONKEY_GYM:
from donkeycar.parts.dgym import DonkeyGymEnv
cam = DonkeyGymEnv(cfg.DONKEY_SIM_PATH, host=cfg.SIM_HOST, env_name=cfg.DONKEY_GYM_ENV_NAME, conf=cfg.GYM_CONF, delay=cfg.SIM_ARTIFICIAL_LATENCY)
threaded = True
inputs = ['angle', 'throttle']
elif cfg.CAMERA_TYPE == "PICAM":
from donkeycar.parts.camera import PiCamera
cam = PiCamera(image_w=cfg.IMAGE_W, image_h=cfg.IMAGE_H, image_d=cfg.IMAGE_DEPTH, framerate=cfg.CAMERA_FRAMERATE, vflip=cfg.CAMERA_VFLIP, hflip=cfg.CAMERA_HFLIP)
elif cfg.CAMERA_TYPE == "WEBCAM":
from donkeycar.parts.camera import Webcam
cam = Webcam(image_w=cfg.IMAGE_W, image_h=cfg.IMAGE_H, image_d=cfg.IMAGE_DEPTH)
elif cfg.CAMERA_TYPE == "CVCAM":
from donkeycar.parts.cv import CvCam
cam = CvCam(image_w=cfg.IMAGE_W, image_h=cfg.IMAGE_H, image_d=cfg.IMAGE_DEPTH)
elif cfg.CAMERA_TYPE == "CSIC":
from donkeycar.parts.camera import CSICamera
cam = CSICamera(image_w=cfg.IMAGE_W, image_h=cfg.IMAGE_H, image_d=cfg.IMAGE_DEPTH, framerate=cfg.CAMERA_FRAMERATE, gstreamer_flip=cfg.CSIC_CAM_GSTREAMER_FLIP_PARM)
elif cfg.CAMERA_TYPE == "V4L":
from donkeycar.parts.camera import V4LCamera
cam = V4LCamera(image_w=cfg.IMAGE_W, image_h=cfg.IMAGE_H, image_d=cfg.IMAGE_DEPTH, framerate=cfg.CAMERA_FRAMERATE)
elif cfg.CAMERA_TYPE == "MOCK":
from donkeycar.parts.camera import MockCamera
cam = MockCamera(image_w=cfg.IMAGE_W, image_h=cfg.IMAGE_H, image_d=cfg.IMAGE_DEPTH)
elif cfg.CAMERA_TYPE == "IMAGE_LIST":
from donkeycar.parts.camera import ImageListCamera
cam = ImageListCamera(path_mask=cfg.PATH_MASK)
else:
raise(Exception("Unkown camera type: %s" % cfg.CAMERA_TYPE))
V.add(cam, inputs=inputs, outputs=['cam/image_array'], threaded=threaded)
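# Whichever camera branch ran above, frames end up on 'cam/image_array'
# (the stereo branch via the StereoPair part), which the controller,
# preprocessing and tub-writing parts below all consume.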
if use_joystick or cfg.USE_JOYSTICK_AS_DEFAULT:
#modify max_throttle closer to 1.0 to have more power
#modify steering_scale lower than 1.0 to have less responsive steering
if cfg.CONTROLLER_TYPE == "MM1":
from donkeycar.parts.robohat import RoboHATController
ctr = RoboHATController(cfg)
elif "custom" == cfg.CONTROLLER_TYPE:
#
# custom controller created with `donkey createjs` command
#
from my_joystick import MyJoystickController
ctr = MyJoystickController(
throttle_dir=cfg.JOYSTICK_THROTTLE_DIR,
throttle_scale=cfg.JOYSTICK_MAX_THROTTLE,
steering_scale=cfg.JOYSTICK_STEERING_SCALE,
auto_record_on_throttle=cfg.AUTO_RECORD_ON_THROTTLE)
ctr.set_deadzone(cfg.JOYSTICK_DEADZONE)
else:
#from donkeycar.parts.controller import get_js_controller
#ctr = get_js_controller(cfg)
ctr = PS3JoystickController(
throttle_scale=cfg.JOYSTICK_MAX_THROTTLE,
steering_scale=cfg.JOYSTICK_STEERING_SCALE, auto_record_on_throttle=cfg.AUTO_RECORD_ON_THROTTLE)
if cfg.USE_NETWORKED_JS:
from donkeycar.parts.controller import JoyStickSub
netwkJs = JoyStickSub(cfg.NETWORK_JS_SERVER_IP)
V.add(netwkJs, threaded=True)
ctr.js = netwkJs
V.add(ctr,
inputs=['cam/image_array'],
outputs=['user/angle', 'user/throttle', 'user/mode', 'recording'],
threaded=True)
else:
#This web controller will create a web server that is capable
#of managing steering, throttle, modes, and more.
ctr = LocalWebController(port=cfg.WEB_CONTROL_PORT, mode=cfg.WEB_INIT_MODE)
V.add(ctr,
inputs=['cam/image_array', 'tub/num_records'],
outputs=['user/angle', 'user/throttle', 'user/mode', 'recording'],
threaded=True)
"""
ctr = PS3JoystickController(
throttle_scale=cfg.JOYSTICK_MAX_THROTTLE,
steering_scale=cfg.JOYSTICK_STEERING_SCALE, auto_record_on_throttle=cfg.AUTO_RECORD_ON_THROTTLE)
V.add(ctr,
inputs=['cam/image_array'],
outputs=['user/angle', 'user/throttle', 'user/mode', 'recording'],
threaded=True)
"""
#this throttle filter will allow one tap back for esc reverse
th_filter = ThrottleFilter()
V.add(th_filter, inputs=['user/throttle'], outputs=['user/throttle'])
#See if we should even run the pilot module.
#This is only needed because the part run_condition only accepts boolean
class PilotCondition:
def run(self, mode):
if mode == 'user':
return False
else:
return True
V.add(PilotCondition(), inputs=['user/mode'], outputs=['run_pilot'])
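# 'run_pilot' is used below as a run_condition so the image preprocessing and
# the Keras pilot part only execute when the mode is not 'user'.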
class LedConditionLogic:
def __init__(self, cfg):
self.cfg = cfg
def run(self, mode, recording, recording_alert, behavior_state, model_file_changed, track_loc):
#returns a blink rate. 0 for off. -1 for on. positive for rate.
if track_loc is not None:
led.set_rgb(*self.cfg.LOC_COLORS[track_loc])
return -1
if model_file_changed:
led.set_rgb(self.cfg.MODEL_RELOADED_LED_R, self.cfg.MODEL_RELOADED_LED_G, self.cfg.MODEL_RELOADED_LED_B)
return 0.1
else:
led.set_rgb(self.cfg.LED_R, self.cfg.LED_G, self.cfg.LED_B)
if recording_alert:
led.set_rgb(*recording_alert)
return self.cfg.REC_COUNT_ALERT_BLINK_RATE
else:
led.set_rgb(self.cfg.LED_R, self.cfg.LED_G, self.cfg.LED_B)
if behavior_state is not None and model_type == 'behavior':
r, g, b = self.cfg.BEHAVIOR_LED_COLORS[behavior_state]
led.set_rgb(r, g, b)
return -1 #solid on
if recording:
return -1 #solid on
elif mode == 'user':
return 1
elif mode == 'local_angle':
return 0.5
elif mode == 'local':
return 0.1
return 0
if cfg.HAVE_RGB_LED and not cfg.DONKEY_GYM:
from donkeycar.parts.led_status import RGB_LED
led = RGB_LED(cfg.LED_PIN_R, cfg.LED_PIN_G, cfg.LED_PIN_B, cfg.LED_INVERT)
led.set_rgb(cfg.LED_R, cfg.LED_G, cfg.LED_B)
V.add(LedConditionLogic(cfg), inputs=['user/mode', 'recording', "records/alert", 'behavior/state', 'modelfile/modified', "pilot/loc"],
outputs=['led/blink_rate'])
V.add(led, inputs=['led/blink_rate'])
def get_record_alert_color(num_records):
col = (0, 0, 0)
for count, color in cfg.RECORD_ALERT_COLOR_ARR:
if num_records >= count:
col = color
return col
class RecordTracker:
def __init__(self):
self.last_num_rec_print = 0
self.dur_alert = 0
self.force_alert = 0
def run(self, num_records):
if num_records is None:
return 0
if self.last_num_rec_print != num_records or self.force_alert:
self.last_num_rec_print = num_records
if num_records % 10 == 0:
print("recorded", num_records, "records")
if num_records % cfg.REC_COUNT_ALERT == 0 or self.force_alert:
self.dur_alert = num_records // cfg.REC_COUNT_ALERT * cfg.REC_COUNT_ALERT_CYC
self.force_alert = 0
if self.dur_alert > 0:
self.dur_alert -= 1
if self.dur_alert != 0:
return get_record_alert_color(num_records)
return 0
rec_tracker_part = RecordTracker()
V.add(rec_tracker_part, inputs=["tub/num_records"], outputs=['records/alert'])
if cfg.AUTO_RECORD_ON_THROTTLE and isinstance(ctr, JoystickController):
#then we are not using the circle button. hijack that to force a record count indication
def show_record_count_status():
rec_tracker_part.last_num_rec_print = 0
rec_tracker_part.force_alert = 1
ctr.set_button_down_trigger('circle', show_record_count_status)
#Sombrero
if cfg.HAVE_SOMBRERO:
from donkeycar.parts.sombrero import Sombrero
s = Sombrero()
#IMU
if cfg.HAVE_IMU:
from donkeycar.parts.imu import IMU
imu = IMU(sensor=cfg.IMU_SENSOR, dlp_setting=cfg.IMU_DLP_CONFIG)
V.add(imu, outputs=['imu/acl_x', 'imu/acl_y', 'imu/acl_z',
'imu/gyr_x', 'imu/gyr_y', 'imu/gyr_z'], threaded=True)
class ImgPreProcess():
'''
preprocess camera image for inference.
normalize and crop if needed.
'''
def __init__(self, cfg):
self.cfg = cfg
def run(self, img_arr):
return normalize_and_crop(img_arr, self.cfg)
if "coral" in model_type:
inf_input = 'cam/image_array'
else:
inf_input = 'cam/normalized/cropped'
V.add(ImgPreProcess(cfg),
inputs=['cam/image_array'],
outputs=[inf_input],
run_condition='run_pilot')
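# The pilot reads its image from inf_input: the raw 'cam/image_array' for
# "coral" model types, otherwise the 'cam/normalized/cropped' output of
# ImgPreProcess.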
# Use the FPV preview, which will show the cropped image output, or the full frame.
if cfg.USE_FPV:
V.add(WebFpv(), inputs=['cam/image_array'], threaded=True)
#Behavioral state
if cfg.TRAIN_BEHAVIORS:
bh = BehaviorPart(cfg.BEHAVIOR_LIST)
V.add(bh, outputs=['behavior/state', 'behavior/label', "behavior/one_hot_state_array"])
try:
ctr.set_button_down_trigger('L1', bh.increment_state)
except:
pass
inputs = [inf_input, "behavior/one_hot_state_array"]
#IMU
elif model_type == "imu":
assert(cfg.HAVE_IMU)
#Run the pilot if the mode is not user.
inputs=[inf_input,
'imu/acl_x', 'imu/acl_y', 'imu/acl_z',
'imu/gyr_x', 'imu/gyr_y', 'imu/gyr_z']
else:
inputs=[inf_input]
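# 'inputs' now holds the keys the Keras pilot part will consume: the image,
# plus the behavior one-hot vector or the IMU channels when those modes are enabled.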
def load_model(kl, model_path):
start = time.time()
print('loading model', model_path)
kl.load(model_path)
print('finished loading in %s sec.' % (str(time.time() - start)) )
def load_weights(kl, weights_path):
start = time.time()
try:
print('loading model weights', weights_path)
kl.model.load_weights(weights_path)
print('finished loading in %s sec.' % (str(time.time() - start)) )
except Exception as e:
print(e)
print('ERR>> problems loading weights', weights_path)
def load_model_json(kl, json_fnm):
start = time.time()
print('loading model json', json_fnm)
from tensorflow.python import keras
try:
with open(json_fnm, 'r') as handle:
contents = handle.read()
kl.model = keras.models.model_from_json(contents)
print('finished loading json in %s sec.' % (str(time.time() - start)) )
except Exception as e:
print(e)
print("ERR>> problems loading model json", json_fnm)
if model_path:
#When we have a model, first create an appropriate Keras part
kl = dk.utils.get_model_by_type(model_type, cfg)
model_reload_cb = None
if '.h5' in model_path or '.uff' in model_path or 'tflite' in model_path or '.pkl' in model_path:
#when we have a .h5, .uff, tflite or .pkl model file,
#load everything from the model file
load_model(kl, model_path)
def reload_model(filename):
load_model(kl, filename)
model_reload_cb = reload_model
elif '.json' in model_path:
#when we have a .json extension
#load the model from there and look for a matching
#.weights file with just the weights
load_model_json(kl, model_path)
weights_path = model_path.replace('.json', '.weights')
load_weights(kl, weights_path)
def reload_weights(filename):
weights_path = filename.replace('.json', '.weights')
load_weights(kl, weights_path)
model_reload_cb = reload_weights
else:
print("ERR>> Unknown extension type on model file!!")
return
#this part will signal visual LED, if connected
V.add(FileWatcher(model_path, verbose=True), outputs=['modelfile/modified'])
#these parts will reload the model file, but only when ai is running so we don't interrupt user driving
V.add(FileWatcher(model_path), outputs=['modelfile/dirty'], run_condition="ai_running")
V.add(DelayedTrigger(100), inputs=['modelfile/dirty'], outputs=['modelfile/reload'], run_condition="ai_running")
V.add(TriggeredCallback(model_path, model_reload_cb), inputs=["modelfile/reload"], run_condition="ai_running")
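# Model hot-reload chain: FileWatcher flags 'modelfile/dirty' when the file on
# disk changes, DelayedTrigger converts that into 'modelfile/reload' after a
# delay, and TriggeredCallback then invokes model_reload_cb. All three run only
# while 'ai_running' is True, so user driving is never interrupted.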
outputs=['pilot/angle', 'pilot/throttle']
if cfg.TRAIN_LOCALIZER:
outputs.append("pilot/loc")
V.add(kl, inputs=inputs,
outputs=outputs,
run_condition='run_pilot')
if cfg.STOP_SIGN_DETECTOR:
from donkeycar.parts.object_detector.stop_sign_detector import StopSignDetector
V.add(StopSignDetector(cfg.STOP_SIGN_MIN_SCORE, cfg.STOP_SIGN_SHOW_BOUNDING_BOX), inputs=['cam/image_array', 'pilot/throttle'], outputs=['pilot/throttle', 'cam/image_array'])
#Choose what inputs should change the car.
class DriveMode:
def run(self, mode,
user_angle, user_throttle,
pilot_angle, pilot_throttle):
if mode == 'user':
return user_angle, user_throttle
elif mode == 'local_angle':
return pilot_angle if pilot_angle else 0.0, user_throttle
else:
return pilot_angle if pilot_angle else 0.0, pilot_throttle * cfg.AI_THROTTLE_MULT if pilot_throttle else 0.0
V.add(DriveMode(),
inputs=['user/mode', 'user/angle', 'user/throttle',
'pilot/angle', 'pilot/throttle'],
outputs=['angle', 'throttle'])
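# DriveMode selects the final 'angle'/'throttle' pair: user values in 'user'
# mode, pilot angle with user throttle in 'local_angle', and full pilot control
# (throttle scaled by cfg.AI_THROTTLE_MULT) otherwise.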
#to give the car a boost when starting ai mode in a race.
aiLauncher = AiLaunch(cfg.AI_LAUNCH_DURATION, cfg.AI_LAUNCH_THROTTLE, cfg.AI_LAUNCH_KEEP_ENABLED)
V.add(aiLauncher,
inputs=['user/mode', 'throttle'],
outputs=['throttle'])
if isinstance(ctr, JoystickController):
ctr.set_button_down_trigger(cfg.AI_LAUNCH_ENABLE_BUTTON, aiLauncher.enable_ai_launch)
class AiRunCondition:
'''
A bool part to let us know when ai is running.
'''
def run(self, mode):
if mode == "user":
return False
return True
V.add(AiRunCondition(), inputs=['user/mode'], outputs=['ai_running'])
#Ai Recording
class AiRecordingCondition:
'''
return True when in ai mode, otherwise respect the user mode recording flag
'''
def run(self, mode, recording):
if mode == 'user':
return recording
return True
if cfg.RECORD_DURING_AI:
V.add(AiRecordingCondition(), inputs=['user/mode', 'recording'], outputs=['recording'])
#Drive train setup
if cfg.DONKEY_GYM or cfg.DRIVE_TRAIN_TYPE == "MOCK":
pass
elif cfg.DRIVE_TRAIN_TYPE == "SERVO_ESC":
from donkeycar.parts.actuator import PCA9685, PWMSteering, PWMThrottle
steering_controller = PCA9685(cfg.STEERING_CHANNEL, cfg.PCA9685_I2C_ADDR, busnum=cfg.PCA9685_I2C_BUSNUM)
steering = PWMSteering(controller=steering_controller,
left_pulse=cfg.STEERING_LEFT_PWM,
right_pulse=cfg.STEERING_RIGHT_PWM)
throttle_controller = PCA9685(cfg.THROTTLE_CHANNEL, cfg.PCA9685_I2C_ADDR, busnum=cfg.PCA9685_I2C_BUSNUM)
throttle = PWMThrottle(controller=throttle_controller,
max_pulse=cfg.THROTTLE_FORWARD_PWM,
zero_pulse=cfg.THROTTLE_STOPPED_PWM,
min_pulse=cfg.THROTTLE_REVERSE_PWM)
V.add(steering, inputs=['angle'], threaded=True)
V.add(throttle, inputs=['throttle'], threaded=True)
elif cfg.DRIVE_TRAIN_TYPE == "DC_STEER_THROTTLE":
from donkeycar.parts.actuator import Mini_HBridge_DC_Motor_PWM
steering = Mini_HBridge_DC_Motor_PWM(cfg.HBRIDGE_PIN_LEFT, cfg.HBRIDGE_PIN_RIGHT)
throttle = Mini_HBridge_DC_Motor_PWM(cfg.HBRIDGE_PIN_FWD, cfg.HBRIDGE_PIN_BWD)
V.add(steering, inputs=['angle'])
V.add(throttle, inputs=['throttle'])
elif cfg.DRIVE_TRAIN_TYPE == "DC_TWO_WHEEL":
from donkeycar.parts.actuator import TwoWheelSteeringThrottle, Mini_HBridge_DC_Motor_PWM
left_motor = Mini_HBridge_DC_Motor_PWM(cfg.HBRIDGE_PIN_LEFT_FWD, cfg.HBRIDGE_PIN_LEFT_BWD)
right_motor = Mini_HBridge_DC_Motor_PWM(cfg.HBRIDGE_PIN_RIGHT_FWD, cfg.HBRIDGE_PIN_RIGHT_BWD)
two_wheel_control = TwoWheelSteeringThrottle()
V.add(two_wheel_control,
inputs=['throttle', 'angle'],
outputs=['left_motor_speed', 'right_motor_speed'])
V.add(left_motor, inputs=['left_motor_speed'])
V.add(right_motor, inputs=['right_motor_speed'])
elif cfg.DRIVE_TRAIN_TYPE == "SERVO_HBRIDGE_PWM":
from donkeycar.parts.actuator import ServoBlaster, PWMSteering
steering_controller = ServoBlaster(cfg.STEERING_CHANNEL) #for ServoBlaster this value is really a GPIO pin number
#PWM pulse values should be in the range of 100 to 200
assert(cfg.STEERING_LEFT_PWM <= 200)
assert(cfg.STEERING_RIGHT_PWM <= 200)
steering = PWMSteering(controller=steering_controller,
left_pulse=cfg.STEERING_LEFT_PWM,
right_pulse=cfg.STEERING_RIGHT_PWM)
from donkeycar.parts.actuator import Mini_HBridge_DC_Motor_PWM
motor = Mini_HBridge_DC_Motor_PWM(cfg.HBRIDGE_PIN_FWD, cfg.HBRIDGE_PIN_BWD)
V.add(steering, inputs=['angle'], threaded=True)
V.add(motor, inputs=["throttle"])
elif cfg.DRIVE_TRAIN_TYPE == "MM1":
from donkeycar.parts.robohat import RoboHATDriver
V.add(RoboHATDriver(cfg), inputs=['angle', 'throttle'])
elif cfg.DRIVE_TRAIN_TYPE == "PIGPIO_PWM":
from donkeycar.parts.actuator import PWMSteering, PWMThrottle, PiGPIO_PWM
steering_controller = PiGPIO_PWM(cfg.STEERING_PWM_PIN, freq=cfg.STEERING_PWM_FREQ, inverted=cfg.STEERING_PWM_INVERTED)
steering = PWMSteering(controller=steering_controller,
left_pulse=cfg.STEERING_LEFT_PWM,
right_pulse=cfg.STEERING_RIGHT_PWM)
throttle_controller = PiGPIO_PWM(cfg.THROTTLE_PWM_PIN, freq=cfg.THROTTLE_PWM_FREQ, inverted=cfg.THROTTLE_PWM_INVERTED)
throttle = PWMThrottle(controller=throttle_controller,
max_pulse=cfg.THROTTLE_FORWARD_PWM,
zero_pulse=cfg.THROTTLE_STOPPED_PWM,
min_pulse=cfg.THROTTLE_REVERSE_PWM)
V.add(steering, inputs=['angle'], threaded=True)
V.add(throttle, inputs=['throttle'], threaded=True)
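# Every drive train branch above consumes the final 'angle' and 'throttle'
# values produced by DriveMode (and possibly boosted by AiLaunch).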
# OLED setup
if cfg.USE_SSD1306_128_32:
from donkeycar.parts.oled import OLEDPart
auto_record_on_throttle = cfg.USE_JOYSTICK_AS_DEFAULT and cfg.AUTO_RECORD_ON_THROTTLE
oled_part = OLEDPart(cfg.SSD1306_128_32_I2C_BUSNUM, auto_record_on_throttle=auto_record_on_throttle)
V.add(oled_part, inputs=['recording', 'tub/num_records', 'user/mode'], outputs=[], threaded=True)
#add tub to save data
inputs=['cam/image_array',
'user/angle', 'user/throttle',
'user/mode']
types=['image_array',
'float', 'float',
'str']
if cfg.TRAIN_BEHAVIORS:
inputs += ['behavior/state', 'behavior/label', "behavior/one_hot_state_array"]
types += ['int', 'str', 'vector']
if cfg.CAMERA_TYPE == "D435" and cfg.REALSENSE_D435_DEPTH:
inputs += ['cam/depth_array']
types += ['gray16_array']
if cfg.HAVE_IMU or (cfg.CAMERA_TYPE == "D435" and cfg.REALSENSE_D435_IMU):
inputs += ['imu/acl_x', 'imu/acl_y', 'imu/acl_z',
'imu/gyr_x', 'imu/gyr_y', 'imu/gyr_z']
types +=['float', 'float', 'float',
'float', 'float', 'float']
if cfg.RECORD_DURING_AI:
inputs += ['pilot/angle', 'pilot/throttle']
types += ['float', 'float']
th = TubHandler(path=cfg.DATA_PATH)
tub = th.new_tub_writer(inputs=inputs, types=types, user_meta=meta)
V.add(tub, inputs=inputs, outputs=["tub/num_records"], run_condition='recording')
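# The tub writer runs only while 'recording' is True and reports the running
# count on 'tub/num_records', which feeds the web controller, the record
# tracker and the optional OLED display.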
if cfg.PUB_CAMERA_IMAGES:
from donkeycar.parts.network import TCPServeValue
from donkeycar.parts.image import ImgArrToJpg
pub = TCPServeValue("camera")
V.add(ImgArrToJpg(), inputs=['cam/image_array'], outputs=['jpg/bin'])
V.add(pub, inputs=['jpg/bin'])
if type(ctr) is LocalWebController:
if cfg.DONKEY_GYM:
print("You can now go to http://localhost:%d to drive your car." % cfg.WEB_CONTROL_PORT)
else:
print("You can now go to <your hostname.local>:%d to drive your car." % cfg.WEB_CONTROL_PORT)
elif isinstance(ctr, JoystickController):
print("You can now move your joystick to drive your car.")
#tell the controller about the tub
ctr.set_tub(tub)
if cfg.BUTTON_PRESS_NEW_TUB:
def new_tub_dir():
V.parts.pop()
tub = th.new_tub_writer(inputs=inputs, types=types, user_meta=meta)
V.add(tub, inputs=inputs, outputs=["tub/num_records"], run_condition='recording')
ctr.set_tub(tub)
ctr.set_button_down_trigger('cross', new_tub_dir)
ctr.print_controls()
#start the vehicle loop at cfg.DRIVE_LOOP_HZ, running up to cfg.MAX_LOOPS iterations
V.start(rate_hz=cfg.DRIVE_LOOP_HZ,
max_loop_count=cfg.MAX_LOOPS)
if __name__ == '__main__':
args = docopt(__doc__)
cfg = dk.load_config(myconfig=args['--myconfig'])
if args['drive']:
model_type = args['--type']
camera_type = args['--camera']
drive(cfg, model_path=args['--model'], use_joystick=args['--js'],
model_type=model_type, camera_type=camera_type,
meta=args['--meta'])
if args['train']:
from train import multi_train, preprocessFileList
tub = args['--tub']
model = args['--model']
transfer = args['--transfer']
model_type = args['--type']
continuous = args['--continuous']
aug = args['--aug']
dirs = preprocessFileList( args['--file'] )
if tub is not None:
tub_paths = [os.path.expanduser(n) for n in tub.split(',')]
dirs.extend( tub_paths )
if model_type is None:
model_type = cfg.DEFAULT_MODEL_TYPE
print("using default model type of", model_type)
multi_train(cfg, dirs, model, transfer, model_type, continuous, aug)
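# Example invocations (paths and model names are illustrative only):
#   python sample_manage.py drive --model models/mypilot.h5 --js
#   python sample_manage.py train --tub data/tub_1,data/tub_2 --model models/mypilot.h5 --type linear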