Hi,
I decided to make a tutorial to include OpenCV with HA.
My work is based on this website : http://www.tundra-it.com/fr/raspberry-pi-reconocimiento-facial/ (thanks to the author)
To begin you need to install these modules and dependencies
sudo apt-get install build-essential cmake pkg-config python-dev libgtk2.0-dev libgtk2.0 zlib1g-dev libpng-dev libjpeg-dev libtiff-dev libjasper-dev libavcodec-dev swig unzip vim sudo apt-get install python-numpy python-opencv sudo apt-get install python-pip sudo apt-get install python-dev sudo pip install picamera sudo pip install rpio sudo apt-get install v4l2ucp v4l-utils libv4l-dev
No need to compile OpenCV from source — the packaged version (python-opencv, installed above) is enough.
so, after that you need to get the sources and data
mkdir /home/pi/recoFacial cd /home/pi/recoFacial wget http://www.cl.cam.ac.uk/Research/DTG/attarchive/pub/data/att_faces.zip unzip att_faces.zip wget http://www.bujarra.com/wp-content/uploads/2016/08/haarcascade_frontalface_alt.zip unzip haarcascade_frontalface_alt.zip
There are two scripts; I modified them to be better adapted to HA.
The first script is capture.py
size = 4
fn_haar = 'haarcascade_frontalface_alt.xml'
fn_dir = 'person'
fn_name = sys.argv[1]
path = os.path.join(fn_dir, fn_name)
if not os.path.isdir(path):
os.mkdir(path)
(im_width, im_height) = (112, 92)
haar_cascade = cv2.CascadeClassifier(fn_haar)
webcam = cv2.VideoCapture(0)
count = 0
while count < 100:
(rval, im) = webcam.read()
im = cv2.flip(im, 1, 0)
gray = cv2.cvtColor(im, cv2.COLOR_BGR2GRAY)
mini = cv2.resize(gray, (gray.shape[1] / size, gray.shape[0] / size))
faces = haar_cascade.detectMultiScale(mini)
faces = sorted(faces, key=lambda x: x[3])
if faces:
face_i = faces[0]
(x, y, w, h) = [v * size for v in face_i]
face = gray[y:y + h, x:x + w]
face_resize = cv2.resize(face, (im_width, im_height))
pin=sorted([int(n[:n.find('.')]) for n in os.listdir(path)
if n[0]!='.' ]+[0])[-1] + 1
cv2.imwrite('%s/%s.png' % (path, pin), face_resize)
cv2.rectangle(im, (x, y), (x + w, y + h), (0, 255, 0), 3)
cv2.putText(im, fn_name, (x - 10, y - 10), cv2.FONT_HERSHEY_PLAIN,
1,(0, 255, 0))
count += 1
cv2.imshow('OpenCV', im)
key = cv2.waitKey(10)
if key == 27:
break
You need to create a directory called “person”.
Every person you create will get a subdirectory in it.
Now you can create a first person like that:
python capture.py nameperson
(eg : python capture.py johndoe)
The script will take 100 photos to learn your face (the number can be changed).
I had to run the capture a second time with another name (and glasses) before recognition worked.
This makes sense: the Fisher face recognizer needs at least two different people (classes) to train, so with only one person the recognition script fails…
Finally the second script reco.py
size = 4
fn_haar = 'haarcascade_frontalface_alt.xml'
fn_dir = 'person'
print('...Preparing...')
(images, lables, names, id) = ([], [], {}, 0)
for (subdirs, dirs, files) in os.walk(fn_dir):
for subdir in dirs:
names[id] = subdir
subjectpath = os.path.join(fn_dir, subdir)
for filename in os.listdir(subjectpath):
path = subjectpath + '/' + filename
lable = id
images.append(cv2.imread(path, 0))
lables.append(int(lable))
id += 1
(im_width, im_height) = (112, 92)
(images, lables) = [numpy.array(lis) for lis in [images, lables]]
model = cv2.createFisherFaceRecognizer()
model.train(images, lables)
haar_cascade = cv2.CascadeClassifier(fn_haar)
webcam = cv2.VideoCapture(0)
while True:
(rval, frame) = webcam.read()
frame=cv2.flip(frame,1,0)
gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
mini = cv2.resize(gray, (gray.shape[1] / size, gray.shape[0] / size))
faces = haar_cascade.detectMultiScale(mini)
for i in range(len(faces)):
face_i = faces[i]
(x, y, w, h) = [v * size for v in face_i]
face = gray[y:y + h, x:x + w]
face_resize = cv2.resize(face, (im_width, im_height))
prediction = model.predict(face_resize)
cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 3)
if prediction[1] < 500:
cv2.putText(frame,
'%s - %.0f' % (names[prediction[0]],prediction[1]),
(x-10, y-10), cv2.FONT_HERSHEY_PLAIN,1,(0, 255, 0))
cara = '%s' % (names[prediction[0]])
if cara == "johndoe":
cv2.imwrite ("detection.jpg",frame)
elif cara == "autre":
# Do nothing
else:
cv2.putText(frame,
'Inconnu',
(x-10, y-10), cv2.FONT_HERSHEY_PLAIN,1,(0, 255, 0))
#Load Webcam with
cv2.imshow('OpenCV', frame)
key = cv2.waitKey(10)
if key == 27:
break
You may need to give execution rights to the scripts:
chmod +x reco.py capture.py
And execute reco.py
to operate with HA, you can write file like that
file.open ("detection.txt",w)
(...)
if prediction[1] < 500:
cv2.putText(frame,
'%s - %.0f' % (names[prediction[0]],prediction[1]),
(x-10, y-10), cv2.FONT_HERSHEY_PLAIN,1,(0, 255, 0))
cara = '%s' % (names[prediction[0]])
if cara == "johndoe":
cv2.imwrite ("detection.jpg",frame)
file.write ("johndoe")
elif cara == "autre":
# Do nothing
else:
cv2.putText(frame,
'Inconnu',
(x-10, y-10), cv2.FONT_HERSHEY_PLAIN,1,(0, 255, 0))
#Load Webcam with
file.write ("nobody")
cv2.imshow('OpenCV', frame)
key = cv2.waitKey(10)
if key == 27:
break
file.close()
And a command-line sensor in HA which runs:
cat detection.txt
And an automation which triggers an action when the sensor state changes from “nobody” to “johndoe”.
That works fine, but on a Raspberry Pi 3 it’s too slow; I’ll try installing it on a more powerful server soon to test performance…