# Configuration file for object detection
# NOTE: ALL parameters here can be overridden
# on a per monitor basis if you want. Just
# duplicate it inside the correct [monitor-<num>] section
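# For example (monitor id 3 here is hypothetical):
# [monitor-3]
# detect_pattern=(person)
# resize=800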
[general]
portal=https://server/zm
user=admin
password=password
allow_self_signed=yes
# if yes, the last detection will be stored per monitor,
# and bounding boxes (along with labels) that match it
# will be discarded for new detections. This may be helpful
# in getting rid of static objects that get detected
# due to some motion.
#match_past_detections=yes
match_past_detections=no
# The max difference in area between the objects if match_past_detections is on.
# Can also be specified in px, like 300px. Default is 5%. Basically, bounding boxes of the same
# object can differ ever so slightly between detections. Contributor @neillbell put in this PR
# to calculate the difference in areas and, based on his tests, 5% worked well. YMMV. Change it if needed.
past_det_max_diff_area=5%
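# e.g. as an absolute pixel area instead of a percentage:
#past_det_max_diff_area=300px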
# sequence of models to run for detection
models=yolo,face
# if all, then we will loop through all models
# if first then the first success will break out
detection_mode=all
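# e.g. with models=yolo,face and detection_mode=first,
# face is skipped whenever yolo already returns a match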
# If you need basic auth to access ZM
#basic_user=user
#basic_password=password
# path to store downloaded images
# needs to be RW by www-data/apache
image_path=/var/lib/zmeventnotification/images
# this is the global detection pattern used for all monitors.
# choose any set of classes from here https://github.com/pjreddie/darknet/blob/master/data/coco.names
# for everything, make it .*
detect_pattern=(person|car|motorbike|bus|truck|boat)
#detect_pattern=.*
# global setting for which frame to analyze:
# bestmatch, alarm, snapshot OR a specific frame ID
frame_id=bestmatch
# this is the size to resize the image to before analysis is done
resize=1200
# set to yes, if you want to remove images after analysis
# setting to yes is recommended to avoid filling up space
# keep to no while debugging/inspecting masks
# Note this does NOT delete debug images later
delete_after_analyze=yes
# If yes, will write an image called <filename>-bbox.jpg as well
# which contains the bounding boxes. This has NO relation to
# write_image_to_zm
# Typically, if you enable delete_after_analyze you may
# also want to set write_debug_image to no.
write_debug_image=no
# if yes, will write an image with bounding boxes
# this needs to be yes to be able to write a bounding box
# image to ZoneMinder that is visible from its console
write_image_to_zm=yes
# Adds the detection confidence percentage to labels
# hog/face always show 100%
show_percent=no
# color to be used to draw the polygons you specified
poly_color=(255,255,255)
#import_zm_zones=no
## Monitor specific settings
#
# - Format: [monitor-<mid>]
#
# Parameters:
# polygon areas where object detection will be done.
# You can name them anything except the keywords defined in the optional
# params below. You can put as many polygons as you want per [monitor-<mid>]
# (see examples).
#
# detect_pattern: overrides the detection patterns used for this monitor.
#
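# Each polygon value is a space separated list of x,y vertex pairs
# (see the examples below), e.g. (hypothetical):
# front_yard=0,0 1280,0 1280,720 0,720
#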
# Examples:
[monitor-8]
# my driveway
detect_pattern=(person|car|motorbike|bus|truck|boat)
delete_after_analyze=no
#detect_pattern=.*
#import_zm_zones=yes
my_driveway_perimeter=306,356 1003,341 1074,683 154,715
# use license plate recognition for my driveway
# see alpr section later for more data needed
resize=no
models=yolo,alpr
# tiny switches to tiny yolo weights, instead of full Yolo. Much faster, but less accurate
#yolo_type=tiny
[monitor-10]
# my front lawn
# here we want anything except a potted plant
# exclusion in regular expressions is not
# as straightforward as you may think, so
# follow this pattern:
# detect_pattern=^(?!object1|object2|objectN)
# the leading characters implement what is
# called a negative lookahead
detect_pattern=^(?!potted plant|pottedplant|bench|broccoli)
#detect_pattern=.*
# local model overrides global
models=yolo
# setting import_zm_zones to yes will import ZM defined zones
#import_zm_zones=yes
[monitor-5]
# my basement
detect_pattern=(person)
#detect_pattern=.*
#poly_color=(255,0,0)
#detect_pattern=^(?!chair|bed)
param=219,304 1113,278 1066,863 177,852
models=yolo,face
[monitor-6]
# deck
detect_pattern=^(?!chair|table|bench|bird)
models=yolo
#yolo_type=tiny
boundary=100,100 2988,10 2988,2220 10,2220
[monitor-2]
#doorbell
detect_pattern=(person)
# try face, if it works, don't do yolo
detection_mode=first
models=face,yolo
frame_id=bestmatch
# try diff. sizes. In my case, 600 was enough
#resize=600
# My doorbell camera needs more accurate face detection
# cnn did a much better job than HOG, but it's _much_ slower
face_model=cnn
#if you hard code a frame, you need to make sure it is created
#before we access it. wait (sec) helps
#frame_id=32
#wait=3
#[monitor-4]
# detect_pattern=(cat|dog)
# kitchen_door=313,221 392,210 418,592 367,659
# No 'detect_pattern', so the global value would be used.
# [monitor-7]
# entrance_door=313,221 392,210 418,592 367,659
# config files for yolo
[yolo]
yolo_type=full
#yolo_type=tiny
yolo_min_confidence=0.5
config=/var/lib/zmeventnotification/models/yolov3/yolov3.cfg
weights=/var/lib/zmeventnotification/models/yolov3/yolov3.weights
labels=/var/lib/zmeventnotification/models/yolov3/yolov3_classes.txt
tiny_config=/var/lib/zmeventnotification/models/tinyyolo/yolov3-tiny.cfg
tiny_weights=/var/lib/zmeventnotification/models/tinyyolo/yolov3-tiny.weights
tiny_labels=/var/lib/zmeventnotification/models/tinyyolo/yolov3-tiny.txt
# config params for HOG
[hog]
stride=(4,4)
padding=(8,8)
scale=1.05
mean_shift=-1
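# a rough sketch of how these typically map to OpenCV's HOG person
# detector (an assumption; verify against the detection code):
# rects, weights = hog.detectMultiScale(img, winStride=(4,4),
#                                       padding=(8,8), scale=1.05)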
[face]
known_images_path=/var/lib/zmeventnotification/known_faces
# read https://github.com/ageitgey/face_recognition/wiki/Face-Recognition-Accuracy-Problems
# read https://github.com/ageitgey/face_recognition#automatically-find-all-the-faces-in-an-image
# and play around
# quick overview:
# num_jitters is how many times to distort images
# upsample_times is how many times to upsample input images (for small faces, for example)
# model can be hog or cnn. cnn may be more accurate, but I haven't found it to be
face_num_jitters=1
face_model=hog
face_upsample_times=1
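# known_images_path typically holds one image per person, with the
# filename used as that person's name, e.g. (hypothetical layout,
# verify against your install):
# /var/lib/zmeventnotification/known_faces/jane_doe.jpg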
#if a face doesn't match known names, we will detect it as 'unknown face'
# you can change that to something that suits your personality better ;-)
#unknown_face_name=invader
[alpr]
alpr_use_after_detection_only=yes
# -----| If you are using plate recognizer | ------
alpr_service=plate_recognizer
# If you want to host a local SDK https://app.platerecognizer.com/sdk/
#alpr_url=https://localhost:8080
# Plate Recognizer: replace with your API key
alpr_key=KEY
# same logic as detect_pattern. But only applied to ALPR plates
alpr_pattern=.*
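# e.g. to only report plates starting with ABC (hypothetical):
#alpr_pattern=^ABC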
# if yes, then it will log usage statistics of the ALPR service
platerec_stats=no
# If you want to specify regions. See http://docs.platerecognizer.com/#regions-supported
#platerec_regions=['us','cn','kr']
# minimum confidence for actually detecting a plate
platerec_min_dscore=0.1
# minimum confidence for the translated text
platerec_min_score=0.2
# ----| If you are using openALPR |-----
#alpr_service=open_alpr
#alpr_key=SECRET
# For an explanation of params, see http://doc.openalpr.com/api/?api=cloudapi
#openalpr_recognize_vehicle=1
#openalpr_country=us
#openalpr_state=ca
# openalpr returns percentages, but we convert them to a 0-1 range
#openalpr_min_confidence=0.3
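# e.g. a plate the API reports at 85% is compared against
# openalpr_min_confidence as 0.85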