test.py
import numpy as np
import cv2
import face_recognition
# Load each known person's reference photo and compute its 128-d face encoding.
srajan_image = face_recognition.load_image_file("Database/srajan/srajan.jpeg")
srajan_face_encoding = face_recognition.face_encodings(srajan_image)[0]

atharva_image = face_recognition.load_image_file("Database/aniket/aniket.jpeg")
atharva_face_encoding = face_recognition.face_encodings(atharva_image)[0]

sharan_image = face_recognition.load_image_file("Database/sharan/sharan.jpeg")
sharan_face_encoding = face_recognition.face_encodings(sharan_image)[0]

vivek_image = face_recognition.load_image_file("Database/vivek/vivek.jpeg")
vivek_face_encoding = face_recognition.face_encodings(vivek_image)[0]

# Known face encodings and the names that go with them, in the same order.
known_face_encodings = [
    srajan_face_encoding,
    atharva_face_encoding,
    sharan_face_encoding,
    vivek_face_encoding
]
known_face_names = [
    "Srajan Chourasia",
    "atharva",
    "sharan",
    "vivek"
]

print('Learned encoding for', len(known_face_encodings), 'images.')
# Haar cascade detector used to find face regions in each webcam frame.
faceCascade = cv2.CascadeClassifier('Models/haarcascade_frontalface_default.xml')

cap = cv2.VideoCapture(0)
cap.set(3, 1000)  # set frame width
cap.set(4, 640)   # set frame height

while True:
    ret, img = cap.read()
    if not ret:
        break

    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    faces = faceCascade.detectMultiScale(
        gray,
        scaleFactor=1.5,
        minNeighbors=5,
        minSize=(20, 20)
    )

    for (x, y, w, h) in faces:
        # Mark the detected face and crop it out of the frame.
        cv2.rectangle(img, (x, y), (x + w, y + h), (255, 0, 0), 3)
        roi_color = img[y:y + h, x:x + w]

        # face_recognition expects RGB images, while OpenCV frames are BGR.
        rgb_roi = cv2.cvtColor(roi_color, cv2.COLOR_BGR2RGB)
        face_locations = face_recognition.face_locations(rgb_roi)
        face_encodings = face_recognition.face_encodings(rgb_roi, face_locations)

        name = "Unknown"
        if len(face_encodings) >= 1:
            face_encoding = face_encodings[0]
            # Compare against the known encodings and keep the closest match.
            matches = face_recognition.compare_faces(known_face_encodings, face_encoding)
            face_distances = face_recognition.face_distance(known_face_encodings, face_encoding)
            best_match_index = np.argmin(face_distances)
            if matches[best_match_index]:
                name = known_face_names[best_match_index]

        # Draw the recognized name on the frame.
        font = cv2.FONT_HERSHEY_SIMPLEX
        org = (50, 50)
        fontScale = 1
        color = (255, 0, 0)  # blue in BGR
        thickness = 2
        cv2.putText(img, name, org, font, fontScale, color, thickness, cv2.LINE_AA)

    cv2.imshow('video', img)
    k = cv2.waitKey(30) & 0xff
    if k == 27:  # press 'ESC' to quit
        break

cap.release()
cv2.destroyAllWindows()