Hi,
I’m using Double Take with CodeProject.AI as the detector for facial recognition. I run everything in Docker, and this is my Docker Compose setup for the containers:
# Double Take UI/API; config and storage are bind-mounted from the host.
double-take:
  container_name: double-take
  image: skrashevich/double-take:latest
  restart: unless-stopped
  volumes:
    - ./double-take/config:/double-take/config
    # NOTE(review): Double Take keeps its data in /.storage inside the
    # container; mounting the parent of the config dir here is unusual —
    # confirm this is intentional and not meant to be ./double-take/storage.
    - ./double-take:/.storage
  ports:
    # HOST:CONTAINER — quoted so YAML never tries to parse the
    # colon-separated digits as a number (Compose best practice).
    - "3030:3000"

# CodeProject.AI server used by Double Take as the face detector.
codeproject.ai:
  container_name: codeproject.ai
  image: codeproject/ai-server
  # memory limits are quoted strings so the "m" suffix survives parsing
  mem_limit: "2048m"
  mem_reservation: "1024m"
  memswap_limit: "4096m"
  restart: unless-stopped
  volumes:
    - /opt/codeproject.ai/data:/etc/codeproject/ai
    - /opt/codeproject.ai/modules:/app/modules
  ports:
    - "32168:32168"
This is my Double Take config file:
auth: false

# enable mqtt subscribing and publishing (default: shown below)
mqtt:
  host: 192.168.31.103
  username: xxx
  password: xxxxx
  client_id: doubletake
  topics:
    # mqtt topic for frigate message subscription
    frigate: frigate
    # mqtt topic for home assistant discovery subscription
    homeassistant: homeassistant
    # mqtt topic where matches are published by name
    matches: double-take/matches
    # mqtt topic where matches are published by camera name
    cameras: double-take/cameras

# global detect settings (default: shown below)
detect:
  match:
    # save match images
    save: true
    # include base64 encoded string in api results and mqtt messages
    # options: true, false, box
    base64: false
    # minimum confidence needed to consider a result a match
    confidence: 60
    # hours to keep match images until they are deleted
    purge: 168
    # minimum area in pixels to consider a result a match
    min_area: 10000
  unknown:
    # save unknown images
    save: true
    # include base64 encoded string in api results and mqtt messages
    # options: true, false, box
    base64: false
    # minimum confidence needed before classifying a name as unknown
    confidence: 40
    # hours to keep unknown images until they are deleted
    purge: 8
    # minimum area in pixels to keep an unknown result
    min_area: 0

frigate:
  url: http://192.168.31.103:5000/
  # if double take should send matches back to frigate as a sub label
  # NOTE: requires frigate 0.11.0+
  update_sub_labels: false
  # stop the processing loop if a match is found
  # if set to false all image attempts will be processed before determining the best match
  stop_on_match: true
  # object labels that are allowed for facial recognition
  labels:
    - person
  attempts:
    # number of times double take will request a frigate latest.jpg for facial recognition
    latest: 10
    # number of times double take will request a frigate snapshot.jpg for facial recognition
    snapshot: 10
    # process frigate images from frigate/+/person/snapshot topics
    mqtt: true
    # add a delay expressed in seconds between each detection loop
    delay: 0
  image:
    # height of frigate image passed for facial recognition
    height: 500
  # only process images from specific cameras; this must be a LIST of
  # camera names ("- doorbell"), not a mapping ("doorbell:")
  cameras:
    - doorbell

detectors:
  aiserver:
    url: http://192.168.31.103:32168
    # number of seconds before the request times out and is aborted
    timeout: 15
    # require opencv to find a face before processing with detector
    opencv_face_required: false
    # only process images from specific cameras, if omitted then all cameras will be processed
    # cameras:
    #   - front-door
    #   - garage

# time settings (default: shown below)
time:
  # defaults to iso 8601 format with support for token-based formatting
  # https://github.com/moment/luxon/blob/1b8679ef011c7f538c29062934ccc564c9900df3/docs/formatting.md#table-of-tokens
  # format:  # left unset -> iso 8601 (a bare "format:" parses as null)
  # time zone used in logs — IANA zone names are Region/City, so this must
  # be Europe/Vienna ("Vienna/Europe" is not a valid zone)
  timezone: Europe/Vienna
and this is my Frigate config file:
mqtt:
  host: 192.168.31.103
  user: xxx
  password: xxxx
  port: 1883

#### Cameras and binary sensors configuration
# NOTE(review): the paste lost the original indentation; the sections below
# are nested at camera level, which is valid Frigate config — confirm this
# matches the file on disk.
cameras:
  doorbell:
    ffmpeg:
      hwaccel_args: -hwaccel vaapi -hwaccel_device /dev/dri/renderD128 #-hwaccel_output_format yuvj420p #yuv420p
      inputs:
        ### High definition video 1920 x 1080
        - path: rtsp://user:[email protected]:554/Streaming/Channels/101/
          roles:
            - record
        ### Low definition video 704x576
        - path: rtsp://user:[email protected]:554/Streaming/Channels/102/
          roles:
            - detect
      output_args:
        rtmp: -aspect 720:480 -c copy -f flv
    ###
    mqtt:
      # booleans written canonically as true/false (YAML 1.2 / yamllint truthy)
      timestamp: false
      bounding_box: false
      crop: true
      quality: 100
      height: 700
    detect:
      # NOTE(review): the detect role is on the 704x576 substream, but
      # detect is configured for 1280x720 — these should match; verify.
      width: 1280
      height: 720
      fps: 25
    record:
      enabled: true
    motion:
      threshold: 50
      # mask:
      #   - 876,208,1090,211,1136,541,861,582
      #   - 1435,750,1713,731,1721,336,1603,298
      #   - 1798,371,1920,399,1920,606,1818,620
      #   - 633,212,800,211,783,440,787,641,591,628
      #   - 1655,295,1562,451,1340,416,1234,211
    objects:
      track:
        - person
        - dog
        - car
    snapshots:
      enabled: true
      timestamp: true
      crop: true
    # rtmp:
    #   enabled: true
    live:
      quality: 8
    # zones:
    #   front_door:
    #     coordinates: 209,1080,1920,1080,1363,454,798,439,202,428
    #   parking:
    #     coordinates: 1356,250,1331,458,606,410,654,221

database:
  path: /media/frigate/frigate.db
The problem I have is a Double Take error that appears while an image is being processed for recognition. This is the error:
23-10-13 15:37:56 error: Error [ERR_HTTP_HEADERS_SENT]: Cannot set headers after they are sent to the client
at new NodeError (node:internal/errors:387:5)
at ServerResponse.setHeader (node:_http_outgoing:644:11)
at ServerResponse.header (/double-take/api/node_modules/express/lib/response.js:794:10)
at ServerResponse.send (/double-take/api/node_modules/express/lib/response.js:174:12)
at ServerResponse.res.send (/double-take/api/src/middlewares/respond.js:41:18)
at ServerResponse.json (/double-take/api/node_modules/express/lib/response.js:278:15)
at ServerResponse.send (/double-take/api/node_modules/express/lib/response.js:162:21)
at ServerResponse.res.send (/double-take/api/src/middlewares/respond.js:41:18)
at /double-take/api/src/app.js:46:38
at newFn (/double-take/api/node_modules/express-async-errors/index.js:16:20)
I googled this error, and it basically means that the server is trying to send a response twice for the same request — likely because the same picture is being submitted twice for facial recognition — if I got this right.
I checked the MQTT topics using MQTT Explorer, and I see that I have two topics for the camera:
camera → manual → person and
camera → doorbell → person
I presume the problem lies in the fact that I have two topics for the same camera, but I’m not sure. And I don’t know how the first topic, camera/manual, is created.
Does anyone see something out of place that could trigger this error?