// What it does:
//
// This example shows how to use a pretrained SSD (Single Shot Detector) network in GoCV.
// Here, we detect human faces from the camera, but the setup is similar for any other kind of object detection.
//
// Download the (small, ~5.1 MB) Caffe model file from:
// https://raw.githubusercontent.com/opencv/opencv_3rdparty/dnn_samples_face_detector_20180205_fp16/res10_300x300_ssd_iter_140000_fp16.caffemodel
//
// Also, you will need the prototxt file:
// https://raw.githubusercontent.com/opencv/opencv/master/samples/dnn/face_detector/deploy.prototxt
//
// How to run:
//
// go run ./cmd/ssd-facedetect/main.go 0 [protofile] [modelfile]
//
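// For example, using the files downloaded above:
//
// go run ./cmd/ssd-facedetect/main.go 0 deploy.prototxt res10_300x300_ssd_iter_140000_fp16.caffemodel
//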
package main

import (
	"fmt"
	"image"
	"image/color"
	"os"

	"gocv.io/x/gocv"
)
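
// min and max are small helpers used below to clamp the detection box
// coordinates to the frame bounds.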
func min(a, b float32) float32 {
	if a < b {
		return a
	}
	return b
}

func max(a, b float32) float32 {
	if a > b {
		return a
	}
	return b
}
func main() {
	if len(os.Args) < 4 {
		fmt.Println("How to run:\nssd-facedetect [camera ID] [protofile] [modelfile]")
		return
	}

	// parse args
	deviceID := os.Args[1]
	proto := os.Args[2]
	model := os.Args[3]

	// open capture device
	webcam, err := gocv.OpenVideoCapture(deviceID)
	if err != nil {
		fmt.Printf("Error opening video capture device: %v\n", deviceID)
		return
	}
	defer webcam.Close()

	window := gocv.NewWindow("SSD Face Detection")
	defer window.Close()

	img := gocv.NewMat()
	defer img.Close()

	// open DNN face detector
	net := gocv.ReadNetFromCaffe(proto, model)
	if net.Empty() {
		fmt.Printf("Error reading network model from: %v %v\n", proto, model)
		return
	}
	defer net.Close()

	green := color.RGBA{0, 255, 0, 0}
	fmt.Printf("Start reading device: %v\n", deviceID)

	for {
		if ok := webcam.Read(&img); !ok {
			fmt.Printf("Device closed: %v\n", deviceID)
			return
		}
		if img.Empty() {
			continue
		}

		W := float32(img.Cols())
		H := float32(img.Rows())

		// convert image Mat to a 128x96 (width x height) blob that the detector can analyze
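		// (the scalar 104, 177, 123 below is the per-channel BGR mean subtracted
		// from the input, the values commonly used with this Caffe face detector)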
		blob := gocv.BlobFromImage(img, 1.0, image.Pt(128, 96), gocv.NewScalar(104.0, 177.0, 123.0, 0), false, false)

		// feed the blob into the detector
		net.SetInput(blob, "data")

		// run a forward pass through the network
		detBlob := net.Forward("detection_out")

		// extract the detections.
		// for each object detected, there will be 7 float features:
		// objid, classid, confidence, left, top, right, bottom.
		detections := gocv.GetBlobChannel(detBlob, 0, 0)

		for r := 0; r < detections.Rows(); r++ {
			// you would want the classid for general object detection,
			// but we do not need it here.
			// classid := detections.GetFloatAt(r, 1)
			confidence := detections.GetFloatAt(r, 2)
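			// skip weak detections below a 0.5 confidence threshold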
			if confidence < 0.5 {
				continue
			}

			left := detections.GetFloatAt(r, 3) * W
			top := detections.GetFloatAt(r, 4) * H
			right := detections.GetFloatAt(r, 5) * W
			bottom := detections.GetFloatAt(r, 6) * H
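
			// the box coordinates are reported in the 0..1 range,
			// so they are scaled to pixel coordinates by W and H above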
			// clamp the box to the frame bounds
			left = min(max(0, left), W-1)
			right = min(max(0, right), W-1)
			bottom = min(max(0, bottom), H-1)
			top = min(max(0, top), H-1)

			// draw it
			rect := image.Rect(int(left), int(top), int(right), int(bottom))
			gocv.Rectangle(&img, rect, green, 3)
		}

		// close the per-frame Mats here rather than deferring them inside the
		// loop, so they are released every iteration instead of accumulating
		// until main returns
		detections.Close()
		detBlob.Close()
		blob.Close()

		window.IMShow(img)
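		// any key press ends the program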
		if window.WaitKey(1) >= 0 {
			break
		}
	}
}