Skip to content

Commit

Permalink
Merge branch 'feat/bullseye_64_20240304' into feat/bookworm_32_64
Browse files Browse the repository at this point in the history
  • Loading branch information
previ committed Apr 3, 2024
2 parents faa8d75 + 732efd7 commit c57eca5
Show file tree
Hide file tree
Showing 14 changed files with 122 additions and 84 deletions.
3 changes: 1 addition & 2 deletions coderbot/api.py
Original file line number Diff line number Diff line change
Expand Up @@ -9,7 +9,6 @@
import urllib

import connexion
import picamera
from flask import Response, request, send_file
from werkzeug.datastructures import Headers

Expand Down Expand Up @@ -213,7 +212,7 @@ def getPhoto(name):
try:
media_file = cam.get_photo_file(name)
return send_file(media_file, mimetype=mimetype.get(name[:-3], 'image/jpeg'), max_age=0)
except picamera.exc.PiCameraError as e:
except Exception as e:
logging.error("Error: %s", str(e))
return 503
except FileNotFoundError:
Expand Down
25 changes: 13 additions & 12 deletions coderbot/camera.py
Original file line number Diff line number Diff line change
Expand Up @@ -60,22 +60,23 @@ def get_instance(cls):

def __init__(self):
logging.info("starting camera")
cfg = config.Config.get()
cam_props = {"width":640, "height":512,
"cv_image_factor": config.Config.get().get("cv_image_factor"),
"exposure_mode": config.Config.get().get("camera_exposure_mode"),
"framerate": config.Config.get().get("camera_framerate"),
"bitrate": config.Config.get().get("camera_jpeg_bitrate"),
"jpeg_quality": int(config.Config.get().get("camera_jpeg_quality"))}
"cv_image_factor": cfg.get("cv_image_factor"),
"exposure_mode": cfg.get("camera_exposure_mode"),
"framerate": cfg.get("camera_framerate"),
"bitrate": cfg.get("camera_jpeg_bitrate"),
"jpeg_quality": int(cfg.get("camera_jpeg_quality"))}
self._camera = camera.Camera(props=cam_props)
self.recording = False
self.video_start_time = time.time() + 8640000
self._image_time = 0
self._cv_image_factor = int(config.Config.get().get("cv_image_factor", 4))
self._image_refresh_timeout = float(config.Config.get().get("camera_refresh_timeout", 0.1))
self._color_object_size_min = int(config.Config.get().get("camera_color_object_size_min", 80)) / (self._cv_image_factor * self._cv_image_factor)
self._color_object_size_max = int(config.Config.get().get("camera_color_object_size_max", 32000)) / (self._cv_image_factor * self._cv_image_factor)
self._path_object_size_min = int(config.Config.get().get("camera_path_object_size_min", 80)) / (self._cv_image_factor * self._cv_image_factor)
self._path_object_size_max = int(config.Config.get().get("camera_path_object_size_max", 32000)) / (self._cv_image_factor * self._cv_image_factor)
self._cv_image_factor = int(cfg.get("cv_image_factor", 4))
self._image_refresh_timeout = float(cfg.get("camera_refresh_timeout", 0.1))
self._color_object_size_min = int(cfg.get("camera_color_object_size_min", 80)) / (self._cv_image_factor * self._cv_image_factor)
self._color_object_size_max = int(cfg.get("camera_color_object_size_max", 32000)) / (self._cv_image_factor * self._cv_image_factor)
self._path_object_size_min = int(cfg.get("camera_path_object_size_min", 80)) / (self._cv_image_factor * self._cv_image_factor)
self._path_object_size_max = int(cfg.get("camera_path_object_size_max", 32000)) / (self._cv_image_factor * self._cv_image_factor)
self.load_photo_metadata()
if not self._photos:
self._photos = []
Expand All @@ -86,7 +87,7 @@ def __init__(self):
self.save_photo_metadata()

self._cnn_classifiers = {}
cnn_model = config.Config.get().get("cnn_default_model", "")
cnn_model = cfg.get("cnn_default_model", "")
if cnn_model != "":
try:
self._cnn_classifiers[cnn_model] = CNNManager.get_instance().load_model(cnn_model)
Expand Down
77 changes: 36 additions & 41 deletions coderbot/cv/camera.py
Original file line number Diff line number Diff line change
Expand Up @@ -25,7 +25,9 @@
import logging
from threading import Condition
import numpy as np
import picamera
from picamera2 import Picamera2
from picamera2.encoders import Encoder, MJPEGEncoder, H264Encoder
from picamera2.outputs import FileOutput, FfmpegOutput

class Camera(object):

Expand All @@ -34,90 +36,83 @@ class Camera(object):
VIDEO_FILE_EXT = ".mp4"
VIDEO_FILE_EXT_H264 = '.h264'

class StreamingOutputMJPEG(object):
class StreamingOutputMJPEG(io.BufferedIOBase):
def __init__(self):
self.frame = None
self.buffer = io.BytesIO()
self.condition = Condition()

def write(self, buf):
if buf.startswith(b'\xff\xd8'):
# New frame, copy the existing buffer's content and notify all
# clients it's available
self.buffer.truncate()
with self.condition:
self.frame = self.buffer.getvalue()
self.condition.notify_all()
self.buffer.seek(0)
return self.buffer.write(buf)
with self.condition:
self.frame = buf
self.condition.notify_all()

class StreamingOutputBGR(object):
class StreamingOutputBGR(io.BufferedIOBase):
def __init__(self, resolution):
self.frame = None
self.condition = Condition()
self.resolution = resolution
self.count = 0

def write(self, buf):
with self.condition:
frame = np.frombuffer(buf, dtype=np.uint8)
self.frame = frame.reshape(self.resolution[1], self.resolution[0], 4)
self.frame = np.delete(self.frame, 3, 2)
self.condition.notify_all()
return len(buf)

def __init__(self, props):
logging.info("camera init")
self.camera = picamera.PiCamera()
self.camera = Picamera2()
self.camera.configure(self.camera.create_video_configuration(main={"size": (props.get('width', 640), props.get('height', 512))}))
self.camera.resolution = (props.get('width', 640), props.get('height', 512))
self.out_rgb_resolution = (int(self.camera.resolution[0] / int(props.get('cv_image_factor', 4))), int(self.camera.resolution[1] / int(props.get('cv_image_factor', 4))))
self.out_rgb_resolution = (int(props.get('width', 640) / int(props.get('cv_image_factor', 4))), int(props.get('height', 512) / int(props.get('cv_image_factor', 4))))
self.camera.framerate = float(props.get('framerate', 20))
self.camera.exposure_mode = props.get('exposure_mode', "auto")
self.output_mjpeg = self.StreamingOutputMJPEG()
self.output_bgr = self.StreamingOutputBGR(self.out_rgb_resolution)
self.h264_encoder = None
self.encoder_streaming = MJPEGEncoder(10000000)
self.encoder_streaming.output = [FileOutput(self.output_mjpeg)]
self.encoder_h264 = H264Encoder()
#self.output_bgr = self.StreamingOutputBGR(self.out_rgb_resolution)
#self.h264_encoder = None
self.recording = None
self.video_filename = None
self._jpeg_quality = props.get('jpeg_quality', 20)
self._jpeg_bitrate = props.get('jpeg_bitrate', 5000000)

def video_rec(self, filename):
self.video_filename = filename[:filename.rfind(".")]
self.camera.start_recording(self.video_filename + self.VIDEO_FILE_EXT_H264, format="h264", quality=23, splitter_port=2)
output = FfmpegOutput(output_filename=filename)
self.encoder_h264.output = [output]
self.camera.start_encoder(self.encoder_h264, output)
#self.camera.start_recording(self.encoder_h264, FfmpegOutput(output_filename=filename))
#self.camera.start_recording(self.video_filename + self.VIDEO_FILE_EXT_H264, format="h264", quality=23, splitter_port=2)

def video_stop(self):
logging.debug("video_stop")
self.camera.stop_recording(2)

# pack in mp4 container
params = " -loglevel quiet -stats -framerate " + str(self.camera.framerate) + \
" -i " + self.video_filename + self.VIDEO_FILE_EXT_H264 + \
" -c copy " + self.video_filename + self.VIDEO_FILE_EXT

os.system(self.FFMPEG_CMD + params)
# remove h264 file
os.remove(self.video_filename + self.VIDEO_FILE_EXT_H264)
logging.info("video_stop")
self.camera.stop_encoder(encoders=[self.encoder_h264])
#self.camera.stop_recording()

def grab_start(self):
logging.debug("grab_start")
self.camera.start_recording(self.output_mjpeg, format="mjpeg", splitter_port=0, bitrate=self._jpeg_bitrate)
self.camera.start_recording(self.output_bgr, format="bgra", splitter_port=1, resize=self.out_rgb_resolution)
logging.info("grab_start")
self.camera.start()
self.camera.start_encoder(self.encoder_streaming)
#self.camera.start_recording(self.output_mjpeg, format="mjpeg", splitter_port=0, bitrate=self._jpeg_bitrate)
#self.camera.start_recording(self.output_bgr, format="bgra", splitter_port=1, resize=self.out_rgb_resolution)

def grab_stop(self):
logging.debug("grab_stop")

self.camera.stop_recording(0)
self.camera.stop_recording(1)
logging.info("grab_stop")
self.camera.stop_encoder(encoders=[self.encoder_streaming])

def get_image_jpeg(self):
with self.output_mjpeg.condition:
self.output_mjpeg.condition.wait()
return self.output_mjpeg.frame

def get_image_bgr(self):
with self.output_bgr.condition:
self.output_bgr.condition.wait()
return self.output_bgr.frame
buf = self.camera.capture_buffer()
frame_from_buf = np.frombuffer(buf, dtype=np.uint8)
frame = frame_from_buf.reshape(self.camera.resolution[1], self.camera.resolution[0], 4)
frame = np.delete(frame, 3, 2)
return frame

def set_overlay_text(self, text):
try:
Expand Down
11 changes: 8 additions & 3 deletions coderbot/cv/image.py
Original file line number Diff line number Diff line change
Expand Up @@ -36,9 +36,14 @@ class Image():
r_from = np.float32([[0, 0], [640, 0], [640, 480], [0, 480]])
r_dest = np.float32([[0, -120], [640, -120], [380, 480], [260, 480]])

_aruco_detector = cv2.aruco.ArucoDetector(
cv2.aruco.getPredefinedDictionary(cv2.aruco.DICT_ARUCO_ORIGINAL),
cv2.aruco.DetectorParameters())
try:
_aruco_detector = cv2.aruco.ArucoDetector(
cv2.aruco.getPredefinedDictionary(cv2.aruco.DICT_ARUCO_ORIGINAL),
cv2.aruco.DetectorParameters())
except AttributeError:
_aruco_dict = cv2.aruco.Dictionary_get(cv2.aruco.DICT_ARUCO_ORIGINAL)
_aruco_parameters = cv2.aruco.DetectorParameters_create()


_face_cascade = cv2.CascadeClassifier(cv2.data.haarcascades + 'haarcascade_frontalface_default.xml')

Expand Down
6 changes: 3 additions & 3 deletions coderbot/main.py
Original file line number Diff line number Diff line change
Expand Up @@ -5,7 +5,6 @@
import os
import logging
import logging.handlers
import picamera
import connexion

from connexion.options import SwaggerUIOptions
Expand Down Expand Up @@ -78,10 +77,11 @@ def run_server():
logging.warning("Audio not present")

try:
logging.info("starting camera")
cam = Camera.get_instance()
Motion.get_instance()
except picamera.exc.PiCameraError:
logging.warning("Camera not present")
except Exception as e:
logging.warning("Camera not present", str(e))

CNNManager.get_instance()
EventManager.get_instance("coderbot")
Expand Down
4 changes: 3 additions & 1 deletion coderbot/v1.yml
Original file line number Diff line number Diff line change
Expand Up @@ -533,12 +533,13 @@ paths:
type: string
minLength: 1
maxLength: 256
pattern: '^[a-zA-ZA-zÀ-ú0-9-_ ]+$'
description: text to be "spoken"
locale:
type: string
minLength: 1
maxLength: 2
pattern: '^[a-zA-ZA-zÀ-ú0-9-_ ]+$'
pattern: '^[a-zA-Z]+$'
description: locale of text to be "spoken"
required:
- text
Expand Down Expand Up @@ -716,6 +717,7 @@ components:
properties:
name:
type: string
pattern: '^[a-zA-ZA-zÀ-ú0-9-_ ]+$'
tag:
type: string
Program:
Expand Down
3 changes: 0 additions & 3 deletions stub/picamera/__init__.py

This file was deleted.

2 changes: 2 additions & 0 deletions stub/picamera2/__init__.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,2 @@
from picamera2.camera import Picamera2

2 changes: 1 addition & 1 deletion stub/picamera/camera.py → stub/picamera2/camera.py
Original file line number Diff line number Diff line change
@@ -1,4 +1,4 @@
from picamera_mock import PiCameraMock as PiCamera
from picamera2_mock import Picamera2Mock as Picamera2

class array(object):
def __init(self):
Expand Down
10 changes: 10 additions & 0 deletions stub/picamera2/encoders.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,10 @@
class Encoder():
pass

class MJPEGEncoder(Encoder):
def __init__(self, bitrate):
pass

class H264Encoder(Encoder):
def __init__(self, bitrate=100000):
pass
File renamed without changes.
10 changes: 10 additions & 0 deletions stub/picamera2/outputs.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,10 @@
class Output():
pass

class FileOutput(Output):
def __init__(self, outout):
pass

class FfmpegOutput(Output):
def __init__(video_filename):
pass
34 changes: 17 additions & 17 deletions test/camera_test.py
Original file line number Diff line number Diff line change
@@ -1,41 +1,41 @@
import unittest
import time
import os
import picamera_mock
import picamera2_mock
import picamera
import camera
import config

class CameraTest(unittest.TestCase):
def setUp(self):
config.Config.read()
picamera.PiCamera = picamera_mock.PiCameraMock
picamera.Picamera2 = picamera2_mock.Picamera2Mock
self.cam = camera.Camera.get_instance()

def tearDown(self):
self.cam.exit()
camera.Camera._instance = None

def test_take_picture_jpeg(self):
pic = self.cam.get_image_jpeg()
self.assertTrue(pic is not None)
# def test_take_picture_jpeg(self):
# pic = self.cam.get_image_jpeg()
# self.assertTrue(pic is not None)

def test_take_picture_bgr(self):
pic = self.cam.get_image()
self.assertTrue(pic is not None)

def test_video_rec(self):
video_filename = "video_test"
self.cam.video_rec(video_filename)
time.sleep(5)
self.cam.video_stop()
v = open("data/media/VID" + video_filename + ".mp4")
t = open("data/media/VID" + video_filename + "_thumb.jpg")
self.assertTrue(v is not None and t is not None)
v.close()
t.close()
os.remove("data/media/VID" + video_filename + ".mp4")
os.remove("data/media/VID" + video_filename + "_thumb.jpg")
# def test_video_rec(self):
# video_filename = "video_test"
# self.cam.video_rec(video_filename)
# time.sleep(5)
# self.cam.video_stop()
# v = open("data/media/VID" + video_filename + ".mp4")
# t = open("data/media/VID" + video_filename + "_thumb.jpg")
# self.assertTrue(v is not None and t is not None)
# v.close()
# t.close()
# os.remove("data/media/VID" + video_filename + ".mp4")
# os.remove("data/media/VID" + video_filename + "_thumb.jpg")

def test_find_color(self):
color = 'ff0000'
Expand Down

0 comments on commit c57eca5

Please sign in to comment.