From b62ca859c8806109cb1e6874588c393cab3f658f Mon Sep 17 00:00:00 2001 From: SCA075 <82227818+sca075@users.noreply.github.com> Date: Thu, 2 Oct 2025 18:18:54 +0200 Subject: [PATCH 01/25] change local _LOGGER to LOGGER and shared.py optimized parsed zone clean tested for rand. Signed-off-by: Sandro Cantarella --- .../config/rand256_parser.py | 69 ++++++++++++++++++- SCR/valetudo_map_parser/config/shared.py | 17 +++-- SCR/valetudo_map_parser/config/types.py | 10 ++- SCR/valetudo_map_parser/config/utils.py | 32 +++++---- SCR/valetudo_map_parser/rand256_handler.py | 28 ++++---- SCR/valetudo_map_parser/reimg_draw.py | 31 ++++----- pyproject.toml | 2 +- 7 files changed, 129 insertions(+), 60 deletions(-) diff --git a/SCR/valetudo_map_parser/config/rand256_parser.py b/SCR/valetudo_map_parser/config/rand256_parser.py index b328e29..65b6a98 100644 --- a/SCR/valetudo_map_parser/config/rand256_parser.py +++ b/SCR/valetudo_map_parser/config/rand256_parser.py @@ -67,6 +67,52 @@ def _get_int32_signed(data: bytes, address: int) -> int: value = RRMapParser._get_int32(data, address) return value if value < 0x80000000 else value - 0x100000000 + @staticmethod + def _parse_area(header: bytes, data: bytes) -> list: + area_pairs = RRMapParser._get_int16(header, 0x08) + areas = [] + for area_start in range(0, area_pairs * 16, 16): + x0 = RRMapParser._get_int16(data, area_start + 0) + y0 = RRMapParser._get_int16(data, area_start + 2) + x1 = RRMapParser._get_int16(data, area_start + 4) + y1 = RRMapParser._get_int16(data, area_start + 6) + x2 = RRMapParser._get_int16(data, area_start + 8) + y2 = RRMapParser._get_int16(data, area_start + 10) + x3 = RRMapParser._get_int16(data, area_start + 12) + y3 = RRMapParser._get_int16(data, area_start + 14) + areas.append( + [ + x0, + RRMapParser.Tools.DIMENSION_MM - y0, + x1, + RRMapParser.Tools.DIMENSION_MM - y1, + x2, + RRMapParser.Tools.DIMENSION_MM - y2, + x3, + RRMapParser.Tools.DIMENSION_MM - y3, + ] + ) + return areas + + @staticmethod + def 
_parse_zones(data: bytes, header: bytes) -> list: + zone_pairs = RRMapParser._get_int16(header, 0x08) + zones = [] + for zone_start in range(0, zone_pairs * 8, 8): + x0 = RRMapParser._get_int16(data, zone_start + 0) + y0 = RRMapParser._get_int16(data, zone_start + 2) + x1 = RRMapParser._get_int16(data, zone_start + 4) + y1 = RRMapParser._get_int16(data, zone_start + 6) + zones.append( + [ + x0, + RRMapParser.Tools.DIMENSION_MM - y0, + x1, + RRMapParser.Tools.DIMENSION_MM - y1, + ] + ) + return zones + @staticmethod def _parse_object_position(block_data_length: int, data: bytes) -> Dict[str, Any]: """Parse object position using Xiaomi method.""" @@ -159,6 +205,12 @@ def parse_blocks(self, raw: bytes, pixels: bool = True) -> Dict[int, Any]: blocks[block_type] = self._parse_path_block( raw, block_start_position, block_data_length ) + elif block_type == self.Types.CURRENTLY_CLEANED_ZONES.value: + blocks[block_type] = {"zones": self._parse_zones(data, header)} + elif block_type == self.Types.FORBIDDEN_ZONES.value: + blocks[block_type] = { + "forbidden_zones": self._parse_area(header, data) + } elif block_type == self.Types.GOTO_TARGET.value: blocks[block_type] = {"position": self._parse_goto_target(data)} elif block_type == self.Types.IMAGE.value: @@ -365,8 +417,21 @@ def parse_rrm_data( ] # Add missing fields to match expected JSON format - parsed_map_data["forbidden_zones"] = [] - parsed_map_data["virtual_walls"] = [] + parsed_map_data["currently_cleaned_zones"] = ( + blocks[self.Types.CURRENTLY_CLEANED_ZONES.value]["zones"] + if self.Types.CURRENTLY_CLEANED_ZONES.value in blocks + else [] + ) + parsed_map_data["forbidden_zones"] = ( + blocks[self.Types.FORBIDDEN_ZONES.value]["forbidden_zones"] + if self.Types.FORBIDDEN_ZONES.value in blocks + else [] + ) + parsed_map_data["virtual_walls"] = ( + blocks[self.Types.VIRTUAL_WALLS.value]["virtual_walls"] + if self.Types.VIRTUAL_WALLS.value in blocks + else [] + ) return parsed_map_data diff --git 
a/SCR/valetudo_map_parser/config/shared.py b/SCR/valetudo_map_parser/config/shared.py index 3a29d77..8ecd4ae 100755 --- a/SCR/valetudo_map_parser/config/shared.py +++ b/SCR/valetudo_map_parser/config/shared.py @@ -12,12 +12,13 @@ from .types import ( ATTR_CALIBRATION_POINTS, ATTR_CAMERA_MODE, + ATTR_CONTENT_TYPE, ATTR_MARGINS, ATTR_OBSTACLES, ATTR_POINTS, ATTR_ROOMS, ATTR_ROTATE, - ATTR_SNAPSHOT, + ATTR_IMAGE_LAST_UPDATED, ATTR_VACUUM_BATTERY, ATTR_VACUUM_CHARGING, ATTR_VACUUM_JSON_ID, @@ -179,12 +180,14 @@ async def batch_get(self, *args): def generate_attributes(self) -> dict: """Generate and return the shared attribute's dictionary.""" attrs = { + ATTR_IMAGE_LAST_UPDATED: self.image_last_updated, + ATTR_CONTENT_TYPE: self.image_format, + ATTR_VACUUM_JSON_ID: self.vac_json_id, ATTR_CAMERA_MODE: self.camera_mode, + ATTR_VACUUM_STATUS: self.vacuum_state, ATTR_VACUUM_BATTERY: f"{self.vacuum_battery}%", ATTR_VACUUM_CHARGING: self.vacuum_bat_charged(), ATTR_VACUUM_POSITION: self.current_room, - ATTR_VACUUM_STATUS: self.vacuum_state, - ATTR_VACUUM_JSON_ID: self.vac_json_id, ATTR_CALIBRATION_POINTS: self.attr_calibration_points, } if self.obstacles_pos and self.vacuum_ips: @@ -193,8 +196,6 @@ def generate_attributes(self) -> dict: ) attrs[ATTR_OBSTACLES] = self.obstacles_data - attrs[ATTR_SNAPSHOT] = self.snapshot_take if self.enable_snapshots else False - shared_attrs = { ATTR_ROOMS: self.map_rooms, ATTR_ZONES: self.map_pred_zones, @@ -211,10 +212,8 @@ def to_dict(self) -> dict: return { "image": { "binary": self.binary_image, - "pil_image_size": self.new_image.size, - "size": self.new_image.size if self.new_image else None, - "format": self.image_format, - "updated": self.image_last_updated, + "pil_image": self.new_image, + "size": self.new_image.size if self.new_image else (10, 10), }, "attributes": self.generate_attributes(), } diff --git a/SCR/valetudo_map_parser/config/types.py b/SCR/valetudo_map_parser/config/types.py index 8624a56..6be8f0c 100644 --- 
a/SCR/valetudo_map_parser/config/types.py +++ b/SCR/valetudo_map_parser/config/types.py @@ -18,23 +18,29 @@ LOGGER = logging.getLogger(__package__) + class Spot(TypedDict): name: str coordinates: List[int] # [x, y] + class Zone(TypedDict): name: str coordinates: List[List[int]] # [[x1, y1, x2, y2, repeats], ...] + class Room(TypedDict): name: str id: int + +# list[dict[str, str | list[int]]] | list[dict[str, str | list[list[int]]]] | list[dict[str, str | int]] | int]' class Destinations(TypedDict, total=False): spots: NotRequired[Optional[List[Spot]]] zones: NotRequired[Optional[List[Zone]]] rooms: NotRequired[Optional[List[Room]]] - updated: NotRequired[Optional[int]] + updated: NotRequired[Optional[float]] + class RoomProperty(TypedDict): number: int @@ -227,9 +233,11 @@ async def async_set_vacuum_json(self, vacuum_id: str, json_data: Any) -> None: Point = Tuple[int, int] CAMERA_STORAGE = "valetudo_camera" +ATTR_IMAGE_LAST_UPDATED = "image_last_updated" ATTR_ROTATE = "rotate_image" ATTR_CROP = "crop_image" ATTR_MARGINS = "margins" +ATTR_CONTENT_TYPE = "content_type" CONF_OFFSET_TOP = "offset_top" CONF_OFFSET_BOTTOM = "offset_bottom" CONF_OFFSET_LEFT = "offset_left" diff --git a/SCR/valetudo_map_parser/config/utils.py b/SCR/valetudo_map_parser/config/utils.py index 456b59e..21a2473 100644 --- a/SCR/valetudo_map_parser/config/utils.py +++ b/SCR/valetudo_map_parser/config/utils.py @@ -23,7 +23,7 @@ NumpyArray, PilPNG, RobotPosition, - Destinations + Destinations, ) from ..map_data import HyperMapData from .async_utils import AsyncNumPy @@ -197,26 +197,27 @@ async def _async_update_shared_data(self, destinations: Destinations | None = No """Update the shared data with the latest information.""" if hasattr(self, "get_rooms_attributes") and ( - self.shared.map_rooms is None and destinations is not None + self.shared.map_rooms is None and destinations is not None ): - ( - self.shared.map_rooms, - self.shared.map_pred_zones, - self.shared.map_pred_points, - ) = await 
self.get_rooms_attributes(destinations) + (self.shared.map_rooms,) = await self.get_rooms_attributes(destinations) if self.shared.map_rooms: LOGGER.debug("%s: Rand256 attributes rooms updated", self.file_name) if hasattr(self, "async_get_rooms_attributes") and ( - self.shared.map_rooms is None + self.shared.map_rooms is None ): if self.shared.map_rooms is None: self.shared.map_rooms = await self.async_get_rooms_attributes() if self.shared.map_rooms: LOGGER.debug("%s: Hyper attributes rooms updated", self.file_name) - if hasattr(self, "get_calibration_data") and self.shared.attr_calibration_points is None: - self.shared.attr_calibration_points = self.get_calibration_data(self.shared.image_rotate) + if ( + hasattr(self, "get_calibration_data") + and self.shared.attr_calibration_points is None + ): + self.shared.attr_calibration_points = self.get_calibration_data( + self.shared.image_rotate + ) if not self.shared.image_size: self.shared.image_size = self.get_img_size() @@ -228,14 +229,19 @@ async def _async_update_shared_data(self, destinations: Destinations | None = No self.shared.current_room = self.get_robot_position() - def prepare_resize_params(self, pil_img: PilPNG, rand: bool=False) -> ResizeParams: + def prepare_resize_params( + self, pil_img: PilPNG, rand: bool = False + ) -> ResizeParams: """Prepare resize parameters for image resizing.""" if self.shared.image_rotate in [0, 180]: width, height = pil_img.size else: height, width = pil_img.size - LOGGER.debug("Shared PIL image size: %s x %s", self.shared.image_ref_width, - self.shared.image_ref_height) + LOGGER.debug( + "Shared PIL image size: %s x %s", + self.shared.image_ref_width, + self.shared.image_ref_height, + ) return ResizeParams( pil_img=pil_img, width=width, diff --git a/SCR/valetudo_map_parser/rand256_handler.py b/SCR/valetudo_map_parser/rand256_handler.py index 7a342ca..0f9a157 100644 --- a/SCR/valetudo_map_parser/rand256_handler.py +++ b/SCR/valetudo_map_parser/rand256_handler.py @@ -7,7 +7,6 @@ 
from __future__ import annotations -import logging import uuid from typing import Any @@ -15,7 +14,6 @@ from .config.async_utils import AsyncPIL -# from .config.auto_crop import AutoCrop from mvcrender.autocrop import AutoCrop from .config.drawable_elements import DrawableElement from .config.types import ( @@ -28,6 +26,7 @@ RobotPosition, RoomsProperties, RoomStore, + LOGGER, ) from .config.utils import ( BaseHandler, @@ -39,9 +38,6 @@ from .rooms_handler import RandRoomsHandler -_LOGGER = logging.getLogger(__name__) - - # noinspection PyTypeChecker class ReImageHandler(BaseHandler, AutoCrop): """ @@ -112,17 +108,17 @@ async def extract_room_properties( self.shared.map_rooms = room_ids # get the zones and points data - zone_properties = await self.async_zone_propriety(zones_data) + self.shared.map_pred_zones = await self.async_zone_propriety(zones_data) # get the points data - point_properties = await self.async_points_propriety(points_data) + self.shared.map_pred_points = await self.async_points_propriety(points_data) - if not (room_properties or zone_properties): + if not (room_properties or self.shared.map_pred_zones): self.rooms_pos = None rooms = RoomStore(self.file_name, room_properties) - return room_properties, zone_properties, point_properties + return room_properties except (RuntimeError, ValueError) as e: - _LOGGER.warning( + LOGGER.warning( "No rooms Data or Error in extract_room_properties: %s", e, exc_info=True, @@ -146,12 +142,12 @@ async def get_image_from_rrm( try: if (m_json is not None) and (not isinstance(m_json, tuple)): - _LOGGER.info("%s: Composing the image for the camera.", self.file_name) + LOGGER.info("%s: Composing the image for the camera.", self.file_name) self.json_data = m_json size_x, size_y = self.data.get_rrm_image_size(m_json) self.img_size = DEFAULT_IMAGE_SIZE self.json_id = str(uuid.uuid4()) # image id - _LOGGER.info("Vacuum Data ID: %s", self.json_id) + LOGGER.info("Vacuum Data ID: %s", self.json_id) ( img_np_array, @@ -178,7 
+174,7 @@ async def get_image_from_rrm( return await self._finalize_image(pil_img) except (RuntimeError, RuntimeWarning) as e: - _LOGGER.warning( + LOGGER.warning( "%s: Runtime Error %s during image creation.", self.file_name, str(e), @@ -214,7 +210,7 @@ async def _setup_robot_and_image( colors["background"], DEFAULT_PIXEL_SIZE, ) - _LOGGER.info("%s: Completed base Layers", self.file_name) + LOGGER.info("%s: Completed base Layers", self.file_name) # Update element map for rooms if 0 < room_id <= 15: @@ -362,7 +358,7 @@ async def _draw_map_elements( async def _finalize_image(self, pil_img): if not self.shared.image_ref_width or not self.shared.image_ref_height: - _LOGGER.warning( + LOGGER.warning( "Image finalization failed: Invalid image dimensions. Returning original image." ) return pil_img @@ -515,7 +511,7 @@ def get_calibration_data(self, rotation_angle: int = 0) -> Any: """Return the map calibration data.""" if not self.calibration_data and self.crop_img_size: self.calibration_data = [] - _LOGGER.info( + LOGGER.info( "%s: Getting Calibrations points %s", self.file_name, str(self.crop_area), diff --git a/SCR/valetudo_map_parser/reimg_draw.py b/SCR/valetudo_map_parser/reimg_draw.py index bc82dac..7ec6649 100644 --- a/SCR/valetudo_map_parser/reimg_draw.py +++ b/SCR/valetudo_map_parser/reimg_draw.py @@ -6,17 +6,12 @@ from __future__ import annotations -import logging - from .config.drawable import Drawable from .config.drawable_elements import DrawableElement -from .config.types import Color, JsonType, NumpyArray +from .config.types import Color, JsonType, NumpyArray, LOGGER from .map_data import ImageData, RandImageData -_LOGGER = logging.getLogger(__name__) - - class ImageDraw: """Class to handle the image creation.""" @@ -48,7 +43,7 @@ async def async_draw_go_to_flag( ) return np_array except KeyError as e: - _LOGGER.warning( + LOGGER.warning( "%s: Error in extraction of go-to target: %s", self.file_name, e, @@ -70,7 +65,7 @@ async def async_segment_data( ) 
except ValueError as e: self.img_h.segment_data = None - _LOGGER.info("%s: No segments data found: %s", self.file_name, e) + LOGGER.info("%s: No segments data found: %s", self.file_name, e) async def async_draw_base_layer( self, @@ -87,13 +82,13 @@ async def async_draw_base_layer( walls_data = self.data.get_rrm_walls(m_json) floor_data = self.data.get_rrm_floor(m_json) - _LOGGER.info("%s: Empty image with background color", self.file_name) + LOGGER.info("%s: Empty image with background color", self.file_name) img_np_array = await self.draw.create_empty_image( self.img_h.img_size["x"], self.img_h.img_size["y"], color_background ) room_id = 0 if self.img_h.frame_number == 0: - _LOGGER.info("%s: Overlapping Layers", self.file_name) + LOGGER.info("%s: Overlapping Layers", self.file_name) # checking if there are segments too (sorted pixels in the raw data). await self.async_segment_data(m_json, size_x, size_y, pos_top, pos_left) @@ -148,10 +143,10 @@ async def _draw_segments( room_id = 0 rooms_list = [color_wall] if not segment_data: - _LOGGER.info("%s: No segments data found.", self.file_name) + LOGGER.info("%s: No segments data found.", self.file_name) return room_id, img_np_array - _LOGGER.info("%s: Drawing segments.", self.file_name) + LOGGER.info("%s: Drawing segments.", self.file_name) for pixels in segment_data: room_color = self.img_h.shared.rooms_colors[room_id] rooms_list.append(room_color) @@ -211,7 +206,7 @@ async def async_draw_charger( self.data.get_rrm_charger_position(m_json) ) except KeyError as e: - _LOGGER.warning("%s: No charger position found: %s", self.file_name, e) + LOGGER.warning("%s: No charger position found: %s", self.file_name, e) else: if charger_pos: charger_pos_dictionary = { @@ -238,7 +233,7 @@ async def async_draw_zones( zone_clean = None if zone_clean: - _LOGGER.info("%s: Got zones.", self.file_name) + LOGGER.info("%s: Got zones.", self.file_name) return await self.draw.zones(np_array, zone_clean, color_zone_clean) return np_array @@ 
-252,7 +247,7 @@ async def async_draw_virtual_restrictions( virtual_walls = None if virtual_walls: - _LOGGER.info("%s: Got virtual walls.", self.file_name) + LOGGER.info("%s: Got virtual walls.", self.file_name) np_array = await self.draw.draw_virtual_walls( np_array, virtual_walls, color_no_go ) @@ -280,7 +275,7 @@ async def async_draw_path( self.data.rrm_valetudo_path_array(path_pixel["points"]), 2 ) except KeyError as e: - _LOGGER.warning( + LOGGER.warning( "%s: Error extracting paths data: %s", self.file_name, str(e) ) finally: @@ -297,7 +292,7 @@ async def async_get_entity_data(self, m_json: JsonType) -> dict or None: except (ValueError, KeyError): entity_dict = None else: - _LOGGER.info("%s: Got the points in the json.", self.file_name) + LOGGER.info("%s: Got the points in the json.", self.file_name) return entity_dict async def async_get_robot_position(self, m_json: JsonType) -> tuple | None: @@ -310,7 +305,7 @@ async def async_get_robot_position(self, m_json: JsonType) -> tuple | None: robot_pos = self.data.rrm_coordinates_to_valetudo(robot_pos_data) angle = self.data.get_rrm_robot_angle(m_json) except (ValueError, KeyError): - _LOGGER.warning("%s No robot position found.", self.file_name) + LOGGER.warning("%s No robot position found.", self.file_name) return None, None, None finally: robot_position_angle = round(angle[0], 0) diff --git a/pyproject.toml b/pyproject.toml index b9a2747..9d1359f 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,6 +1,6 @@ [tool.poetry] name = "valetudo-map-parser" -version = "0.1.10rc6" +version = "0.1.10rc7" description = "A Python library to parse Valetudo map data returning a PIL Image object." authors = ["Sandro Cantarella "] license = "Apache-2.0" From 537417cdf3075535ac70500065f6ee287eab0d95 Mon Sep 17 00:00:00 2001 From: SCA075 <82227818+sca075@users.noreply.github.com> Date: Fri, 3 Oct 2025 14:23:25 +0200 Subject: [PATCH 02/25] typing and other parser touches. 
Signed-off-by: Sandro Cantarella --- .../config/rand256_parser.py | 119 ++++++++++-------- SCR/valetudo_map_parser/hypfer_handler.py | 1 - SCR/valetudo_map_parser/map_data.py | 5 +- 3 files changed, 71 insertions(+), 54 deletions(-) diff --git a/SCR/valetudo_map_parser/config/rand256_parser.py b/SCR/valetudo_map_parser/config/rand256_parser.py index 65b6a98..b6b618d 100644 --- a/SCR/valetudo_map_parser/config/rand256_parser.py +++ b/SCR/valetudo_map_parser/config/rand256_parser.py @@ -24,6 +24,14 @@ class Types(Enum): VIRTUAL_WALLS = 10 CURRENTLY_CLEANED_BLOCKS = 11 FORBIDDEN_MOP_ZONES = 12 + OBSTACLES = 13 + IGNORED_OBSTACLES = 14 + OBSTACLES_WITH_PHOTO = 15 + IGNORED_OBSTACLES_WITH_PHOTO = 16 + CARPET_MAP = 17 + MOP_PATH = 18 + NO_CARPET_AREAS = 19 + DIGEST = 1024 class Tools: """Tools for coordinate transformations.""" @@ -33,6 +41,7 @@ class Tools: def __init__(self): """Initialize the parser.""" + self.is_valid = False self.map_data: Dict[str, Any] = {} # Xiaomi/Roborock style byte extraction methods @@ -67,6 +76,15 @@ def _get_int32_signed(data: bytes, address: int) -> int: value = RRMapParser._get_int32(data, address) return value if value < 0x80000000 else value - 0x100000000 + @staticmethod + def _parse_carpet_map(data: bytes) -> set[int]: + carpet_map = set() + + for i, v in enumerate(data): + if v: + carpet_map.add(i) + return carpet_map + @staticmethod def _parse_area(header: bytes, data: bytes) -> list: area_pairs = RRMapParser._get_int16(header, 0x08) @@ -128,6 +146,19 @@ def _parse_object_position(block_data_length: int, data: bytes) -> Dict[str, Any angle = raw_angle return {"position": [x, y], "angle": angle} + + @staticmethod + def _parse_walls(data: bytes, header: bytes) -> list: + wall_pairs = RRMapParser._get_int16(header, 0x08) + walls = [] + for wall_start in range(0, wall_pairs * 8, 8): + x0 = RRMapParser._get_int16(data, wall_start + 0) + y0 = RRMapParser._get_int16(data, wall_start + 2) + x1 = RRMapParser._get_int16(data, wall_start + 4) + 
y1 = RRMapParser._get_int16(data, wall_start + 6) + walls.append([x0, RRMapParser.Tools.DIMENSION_MM - y0, x1, RRMapParser.Tools.DIMENSION_MM - y1]) + return walls + @staticmethod def _parse_path_block(buf: bytes, offset: int, length: int) -> Dict[str, Any]: """Parse path block using EXACT same method as working parser.""" @@ -173,65 +204,45 @@ def parse(self, map_buf: bytes) -> Dict[str, Any]: return {} def parse_blocks(self, raw: bytes, pixels: bool = True) -> Dict[int, Any]: - """Parse all blocks using Xiaomi method.""" blocks = {} map_header_length = self._get_int16(raw, 0x02) block_start_position = map_header_length - while block_start_position < len(raw): try: - # Parse block header using Xiaomi method block_header_length = self._get_int16(raw, block_start_position + 0x02) header = self._get_bytes(raw, block_start_position, block_header_length) block_type = self._get_int16(header, 0x00) block_data_length = self._get_int32(header, 0x04) block_data_start = block_start_position + block_header_length data = self._get_bytes(raw, block_data_start, block_data_length) - - # Parse different block types - if block_type == self.Types.ROBOT_POSITION.value: - blocks[block_type] = self._parse_object_position( - block_data_length, data - ) - elif block_type == self.Types.CHARGER_LOCATION.value: - blocks[block_type] = self._parse_object_position( - block_data_length, data - ) - elif block_type == self.Types.PATH.value: - blocks[block_type] = self._parse_path_block( - raw, block_start_position, block_data_length - ) - elif block_type == self.Types.GOTO_PREDICTED_PATH.value: - blocks[block_type] = self._parse_path_block( - raw, block_start_position, block_data_length - ) - elif block_type == self.Types.CURRENTLY_CLEANED_ZONES.value: - blocks[block_type] = {"zones": self._parse_zones(data, header)} - elif block_type == self.Types.FORBIDDEN_ZONES.value: - blocks[block_type] = { - "forbidden_zones": self._parse_area(header, data) - } - elif block_type == 
self.Types.GOTO_TARGET.value: - blocks[block_type] = {"position": self._parse_goto_target(data)} - elif block_type == self.Types.IMAGE.value: - # Get header length for Gen1/Gen3 detection - header_length = self._get_int8(header, 2) - blocks[block_type] = self._parse_image_block( - raw, - block_start_position, - block_data_length, - header_length, - pixels, - ) - - # Move to next block using Xiaomi method - block_start_position = ( - block_start_position + block_data_length + self._get_int8(header, 2) - ) - + match block_type: + case self.Types.DIGEST.value: + self.is_valid = True + case self.Types.ROBOT_POSITION.value | self.Types.CHARGER_LOCATION.value: + blocks[block_type] = self._parse_object_position(block_data_length, data) + case self.Types.PATH.value | self.Types.GOTO_PREDICTED_PATH.value: + blocks[block_type] = self._parse_path_block(raw, block_start_position, block_data_length) + case self.Types.CURRENTLY_CLEANED_ZONES.value: + blocks[block_type] = {"zones": self._parse_zones(data, header)} + case self.Types.FORBIDDEN_ZONES.value: + blocks[block_type] = {"forbidden_zones": self._parse_area(header, data)} + case self.Types.FORBIDDEN_MOP_ZONES.value: + blocks[block_type] = {"forbidden_mop_zones": self._parse_area(header, data)} + case self.Types.GOTO_TARGET.value: + blocks[block_type] = {"position": self._parse_goto_target(data)} + case self.Types.VIRTUAL_WALLS.value: + blocks[block_type] = {"virtual_walls": self._parse_walls(data, header)} + case self.Types.CARPET_MAP.value: + data = RRMapParser._get_bytes(raw, block_data_start, block_data_length) + blocks[block_type] = {"carpet_map": self._parse_carpet_map(data)} + case self.Types.IMAGE.value: + header_length = self._get_int8(header, 2) + blocks[block_type] = self._parse_image_block( + raw, block_start_position, block_data_length, header_length, pixels) + + block_start_position = block_start_position + block_data_length + self._get_int8(header, 2) except (struct.error, IndexError): break - return blocks 
def _parse_image_block( @@ -427,11 +438,22 @@ def parse_rrm_data( if self.Types.FORBIDDEN_ZONES.value in blocks else [] ) + parsed_map_data["forbidden_mop_zones"] = ( + blocks[self.Types.FORBIDDEN_MOP_ZONES.value]["forbidden_mop_zones"] + if self.Types.FORBIDDEN_MOP_ZONES.value in blocks + else [] + ) parsed_map_data["virtual_walls"] = ( blocks[self.Types.VIRTUAL_WALLS.value]["virtual_walls"] if self.Types.VIRTUAL_WALLS.value in blocks else [] ) + parsed_map_data["carpet_areas"] = ( + blocks[self.Types.CARPET_MAP.value]["carpet_map"] + if self.Types.CARPET_MAP.value in blocks + else [] + ) + parsed_map_data["is_valid"] = self.is_valid return parsed_map_data @@ -453,8 +475,3 @@ def parse_data( except (struct.error, IndexError, ValueError): return None return self.map_data - - @staticmethod - def get_int32(data: bytes, address: int) -> int: - """Get a 32-bit integer from the data - kept for compatibility.""" - return struct.unpack_from(" Any: return None @staticmethod - def get_rrm_currently_cleaned_zones(json_data: JsonType) -> dict: + def get_rrm_currently_cleaned_zones(json_data: JsonType) -> list[dict[str, Any]]: """Get the currently cleaned zones from the json.""" re_zones = json_data.get("currently_cleaned_zones", []) formatted_zones = RandImageData._rrm_valetudo_format_zone(re_zones) return formatted_zones @staticmethod - def get_rrm_forbidden_zones(json_data: JsonType) -> dict: + def get_rrm_forbidden_zones(json_data: JsonType) -> list[dict[str, Any]]: """Get the forbidden zones from the json.""" re_zones = json_data.get("forbidden_zones", []) + re_zones.extend(json_data.get("forbidden_mop_zones", [])) formatted_zones = RandImageData._rrm_valetudo_format_zone(re_zones) return formatted_zones From 6dc6b5d472b81c9db242bd0ec38f8991d26a4173 Mon Sep 17 00:00:00 2001 From: SCA075 <82227818+sca075@users.noreply.github.com> Date: Fri, 3 Oct 2025 14:34:36 +0200 Subject: [PATCH 03/25] bump mvcrender and change the async to sync usage. 
Signed-off-by: Sandro Cantarella --- SCR/valetudo_map_parser/hypfer_handler.py | 3 +- .../hypfer_rooms_handler.py | 599 ------------------ SCR/valetudo_map_parser/rand256_handler.py | 2 +- pyproject.toml | 2 +- 4 files changed, 3 insertions(+), 603 deletions(-) delete mode 100644 SCR/valetudo_map_parser/hypfer_rooms_handler.py diff --git a/SCR/valetudo_map_parser/hypfer_handler.py b/SCR/valetudo_map_parser/hypfer_handler.py index e9aca09..4b62699 100644 --- a/SCR/valetudo_map_parser/hypfer_handler.py +++ b/SCR/valetudo_map_parser/hypfer_handler.py @@ -14,7 +14,6 @@ from .config.async_utils import AsyncPIL -# from .config.auto_crop import AutoCrop from mvcrender.autocrop import AutoCrop from .config.drawable_elements import DrawableElement from .config.shared import CameraShared @@ -361,7 +360,7 @@ async def async_get_image_from_json( self.zooming = self.imd.img_h.zooming # Resize the image - img_np_array = self.async_auto_trim_and_zoom_image( + img_np_array = self.auto_trim_and_zoom_image( img_np_array, colors["background"], int(self.shared.margins), diff --git a/SCR/valetudo_map_parser/hypfer_rooms_handler.py b/SCR/valetudo_map_parser/hypfer_rooms_handler.py deleted file mode 100644 index 91de957..0000000 --- a/SCR/valetudo_map_parser/hypfer_rooms_handler.py +++ /dev/null @@ -1,599 +0,0 @@ -""" -Hipfer Rooms Handler Module. -Handles room data extraction and processing for Valetudo Hipfer vacuum maps. -Provides async methods for room outline extraction and properties management. -Version: 0.1.9 -""" - -from __future__ import annotations - -from math import sqrt -from typing import Any, Dict, Optional, List, Tuple - -import numpy as np - -from .config.drawable_elements import DrawableElement, DrawingConfig -from .config.types import LOGGER, RoomsProperties, RoomStore - - -class HypferRoomsHandler: - """ - Handler for extracting and managing room data from Hipfer vacuum maps. 
- - This class provides methods to: - - Extract room outlines using the Ramer-Douglas-Peucker algorithm - - Process room properties from JSON data - - Generate room masks and extract contours - - All methods are async for better integration with the rest of the codebase. - """ - - def __init__(self, vacuum_id: str, drawing_config: Optional[DrawingConfig] = None): - """ - Initialize the HipferRoomsHandler. - - Args: - vacuum_id: Identifier for the vacuum - drawing_config: Configuration for which elements to draw (optional) - """ - self.vacuum_id = vacuum_id - self.drawing_config = drawing_config - self.current_json_data = None # Will store the current JSON data being processed - - @staticmethod - def sublist(data: list, chunk_size: int) -> list: - return [data[i : i + chunk_size] for i in range(0, len(data), chunk_size)] - - # Cache for RDP results - _rdp_cache = {} - - @staticmethod - def perpendicular_distance( - point: tuple[int, int], line_start: tuple[int, int], line_end: tuple[int, int] - ) -> float: - """Calculate the perpendicular distance from a point to a line. - Optimized for performance. - """ - # Fast path for point-to-point distance - if line_start == line_end: - dx = point[0] - line_start[0] - dy = point[1] - line_start[1] - return sqrt(dx*dx + dy*dy) - - x, y = point - x1, y1 = line_start - x2, y2 = line_end - - # Precompute differences for efficiency - dx = x2 - x1 - dy = y2 - y1 - - # Calculate the line length squared (avoid sqrt until needed) - line_length_sq = dx*dx + dy*dy - if line_length_sq == 0: - return 0 - - # Calculate the distance from the point to the line - # Using the formula: |cross_product| / |line_vector| - # This is more efficient than the original formula - cross_product = abs(dy * x - dx * y + x2 * y1 - y2 * x1) - return cross_product / sqrt(line_length_sq) - - async def rdp( - self, points: List[Tuple[int, int]], epsilon: float - ) -> List[Tuple[int, int]]: - """Ramer-Douglas-Peucker algorithm for simplifying a curve. 
- Optimized with caching and better performance. - """ - # Create a hashable key for caching - # Convert points to a tuple for hashing - points_tuple = tuple(points) - cache_key = (points_tuple, epsilon) - - # Check cache first - if cache_key in self._rdp_cache: - return self._rdp_cache[cache_key] - - # Base case - if len(points) <= 2: - return points - - # For very small point sets, process directly without recursion - if len(points) <= 5: - # Find the point with the maximum distance - dmax = 0 - index = 0 - for i in range(1, len(points) - 1): - d = self.perpendicular_distance(points[i], points[0], points[-1]) - if d > dmax: - index = i - dmax = d - - # If max distance is greater than epsilon, keep the point - if dmax > epsilon: - result = [points[0]] + [points[index]] + [points[-1]] - else: - result = [points[0], points[-1]] - - # Cache and return - self._rdp_cache[cache_key] = result - return result - - # For larger point sets, use numpy for faster distance calculation - if len(points) > 20: - # Convert to numpy arrays for vectorized operations - points_array = np.array(points) - start = points_array[0] - end = points_array[-1] - - # Calculate perpendicular distances in one vectorized operation - line_vector = end - start - line_length = np.linalg.norm(line_vector) - - if line_length == 0: - # If start and end are the same, use direct distance - distances = np.linalg.norm(points_array[1:-1] - start, axis=1) - else: - # Normalize line vector - line_vector = line_vector / line_length - # Calculate perpendicular distances using vector operations - vectors_to_points = points_array[1:-1] - start - # Project vectors onto line vector - projections = np.dot(vectors_to_points, line_vector) - # Calculate projected points on line - projected_points = start + np.outer(projections, line_vector) - # Calculate distances from points to their projections - distances = np.linalg.norm(points_array[1:-1] - projected_points, axis=1) - - # Find the point with maximum distance - if 
len(distances) > 0: - max_idx = np.argmax(distances) - dmax = distances[max_idx] - index = max_idx + 1 # +1 because we skipped the first point - else: - dmax = 0 - index = 0 - else: - # For medium-sized point sets, use the original algorithm - dmax = 0 - index = 0 - for i in range(1, len(points) - 1): - d = self.perpendicular_distance(points[i], points[0], points[-1]) - if d > dmax: - index = i - dmax = d - - # If max distance is greater than epsilon, recursively simplify - if dmax > epsilon: - # Recursive call - first_segment = await self.rdp(points[: index + 1], epsilon) - second_segment = await self.rdp(points[index:], epsilon) - - # Build the result list (avoiding duplicating the common point) - result = first_segment[:-1] + second_segment - else: - result = [points[0], points[-1]] - - # Limit cache size - if len(self._rdp_cache) > 100: # Keep only 100 most recent items - try: - self._rdp_cache.pop(next(iter(self._rdp_cache))) - except (StopIteration, KeyError): - pass - - # Cache the result - self._rdp_cache[cache_key] = result - return result - - # Cache for corner results - _corners_cache = {} - - async def async_get_corners( - self, mask: np.ndarray, epsilon_factor: float = 0.05 - ) -> List[Tuple[int, int]]: - """ - Get the corners of a room shape as a list of (x, y) tuples. - Uses contour detection and Douglas-Peucker algorithm to simplify the contour. - Optimized with caching and faster calculations. 
- - Args: - mask: Binary mask of the room (1 for room, 0 for background) - epsilon_factor: Controls the level of simplification (higher = fewer points) - - Returns: - List of (x, y) tuples representing the corners of the room - """ - # Create a hash of the mask and epsilon factor for caching - mask_hash = hash((mask.tobytes(), epsilon_factor)) - - # Check if we have a cached result - if mask_hash in self._corners_cache: - return self._corners_cache[mask_hash] - - # Fast path for empty masks - if not np.any(mask): - return [] - - # Find contours in the mask - this uses our optimized method with caching - contour = await self.async_moore_neighbor_trace(mask) - - if not contour: - # Fallback to bounding box if contour detection fails - y_indices, x_indices = np.where(mask > 0) - if len(x_indices) == 0 or len(y_indices) == 0: - return [] - - x_min, x_max = np.min(x_indices), np.max(x_indices) - y_min, y_max = np.min(y_indices), np.max(y_indices) - - result = [ - (x_min, y_min), # Top-left - (x_max, y_min), # Top-right - (x_max, y_max), # Bottom-right - (x_min, y_max), # Bottom-left - (x_min, y_min), # Back to top-left to close the polygon - ] - - # Cache the result - self._corners_cache[mask_hash] = result - return result - - # For small contours (less than 10 points), skip simplification - if len(contour) <= 10: - # Ensure the contour is closed - if contour[0] != contour[-1]: - contour.append(contour[0]) - - # Cache and return - self._corners_cache[mask_hash] = contour - return contour - - # For larger contours, calculate perimeter more efficiently using numpy - points = np.array(contour) - # Calculate differences between consecutive points - diffs = np.diff(points, axis=0) - # Calculate squared distances - squared_dists = np.sum(diffs**2, axis=1) - # Calculate perimeter as sum of distances - perimeter = np.sum(np.sqrt(squared_dists)) - - # Apply Douglas-Peucker algorithm to simplify the contour - epsilon = epsilon_factor * perimeter - simplified_contour = await 
self.rdp(contour, epsilon=epsilon) - - # Ensure the contour has at least 3 points to form a polygon - if len(simplified_contour) < 3: - # Fallback to bounding box - y_indices, x_indices = np.where(mask > 0) - x_min, x_max = int(np.min(x_indices)), int(np.max(x_indices)) - y_min, y_max = int(np.min(y_indices)), int(np.max(y_indices)) - - LOGGER.debug( - f"{self.vacuum_id}: Too few points in contour, using bounding box" - ) - result = [ - (x_min, y_min), # Top-left - (x_max, y_min), # Top-right - (x_max, y_max), # Bottom-right - (x_min, y_max), # Bottom-left - (x_min, y_min), # Back to top-left to close the polygon - ] - - # Cache the result - self._corners_cache[mask_hash] = result - return result - - # Ensure the contour is closed - if simplified_contour[0] != simplified_contour[-1]: - simplified_contour.append(simplified_contour[0]) - - # Limit cache size - if len(self._corners_cache) > 50: # Keep only 50 most recent items - try: - self._corners_cache.pop(next(iter(self._corners_cache))) - except (StopIteration, KeyError): - pass - - # Cache the result - self._corners_cache[mask_hash] = simplified_contour - return simplified_contour - - # Cache for labeled arrays to avoid redundant calculations - _label_cache = {} - _hull_cache = {} - - @staticmethod - async def async_moore_neighbor_trace(mask: np.ndarray) -> List[Tuple[int, int]]: - """ - Trace the contour of a binary mask using an optimized approach. - Uses caching and simplified algorithms for better performance. 
- - Args: - mask: Binary mask of the room (1 for room, 0 for background) - - Returns: - List of (x, y) tuples representing the contour - """ - # Create a hash of the mask for caching - mask_hash = hash(mask.tobytes()) - - # Check if we have a cached result - if mask_hash in HypferRoomsHandler._hull_cache: - return HypferRoomsHandler._hull_cache[mask_hash] - - # Fast path for empty masks - if not np.any(mask): - return [] - - # Find bounding box of non-zero elements (much faster than full labeling for simple cases) - y_indices, x_indices = np.where(mask > 0) - if len(x_indices) == 0 or len(y_indices) == 0: - return [] - - # For very small rooms (less than 100 pixels), just use bounding box - if len(x_indices) < 100: - x_min, x_max = np.min(x_indices), np.max(x_indices) - y_min, y_max = np.min(y_indices), np.max(y_indices) - - # Create a simple rectangle - hull_vertices = [ - (int(x_min), int(y_min)), # Top-left - (int(x_max), int(y_min)), # Top-right - (int(x_max), int(y_max)), # Bottom-right - (int(x_min), int(y_max)), # Bottom-left - (int(x_min), int(y_min)), # Back to top-left to close the polygon - ] - - # Cache and return the result - HypferRoomsHandler._hull_cache[mask_hash] = hull_vertices - return hull_vertices - - # For larger rooms, use convex hull but with optimizations - try: - # Import here to avoid overhead for small rooms - from scipy import ndimage - from scipy.spatial import ConvexHull - - # Use cached labeled array if available - if mask_hash in HypferRoomsHandler._label_cache: - labeled_array = HypferRoomsHandler._label_cache[mask_hash] - else: - # Find connected components - this is expensive - labeled_array, _ = ndimage.label(mask) - # Cache the result for future use - HypferRoomsHandler._label_cache[mask_hash] = labeled_array - - # Limit cache size to prevent memory issues - if len(HypferRoomsHandler._label_cache) > 50: # Keep only 50 most recent items - # Remove oldest item (first key) - try: - 
HypferRoomsHandler._label_cache.pop(next(iter(HypferRoomsHandler._label_cache))) - except (StopIteration, KeyError): - # Handle edge case of empty cache - pass - - # Create a mask with all components - all_components_mask = (labeled_array > 0) - - # Sample points instead of using all points for large masks - # This significantly reduces computation time for ConvexHull - if len(x_indices) > 1000: - # Sample every 10th point for very large rooms - step = 10 - elif len(x_indices) > 500: - # Sample every 5th point for medium-sized rooms - step = 5 - else: - # Use all points for smaller rooms - step = 1 - - # Sample points using the step size - sampled_y = y_indices[::step] - sampled_x = x_indices[::step] - - # Create a list of points - points = np.column_stack((sampled_x, sampled_y)) - - # Compute the convex hull - hull = ConvexHull(points) - - # Extract the vertices of the convex hull - hull_vertices = [(int(points[v, 0]), int(points[v, 1])) for v in hull.vertices] - - # Ensure the hull is closed - if hull_vertices[0] != hull_vertices[-1]: - hull_vertices.append(hull_vertices[0]) - - # Cache and return the result - HypferRoomsHandler._hull_cache[mask_hash] = hull_vertices - - # Limit hull cache size - if len(HypferRoomsHandler._hull_cache) > 50: - try: - HypferRoomsHandler._hull_cache.pop(next(iter(HypferRoomsHandler._hull_cache))) - except (StopIteration, KeyError): - pass - - return hull_vertices - - except Exception as e: - LOGGER.warning(f"Failed to compute convex hull: {e}. 
Falling back to bounding box.") - - # Fallback to bounding box if convex hull fails - x_min, x_max = np.min(x_indices), np.max(x_indices) - y_min, y_max = np.min(y_indices), np.max(y_indices) - - # Create a simple rectangle - hull_vertices = [ - (int(x_min), int(y_min)), # Top-left - (int(x_max), int(y_min)), # Top-right - (int(x_max), int(y_max)), # Bottom-right - (int(x_min), int(y_max)), # Bottom-left - (int(x_min), int(y_min)), # Back to top-left to close the polygon - ] - - # Cache and return the result - HypferRoomsHandler._hull_cache[mask_hash] = hull_vertices - return hull_vertices - - - - async def async_extract_room_properties( - self, json_data: Dict[str, Any] - ) -> RoomsProperties: - """ - Extract room properties from the JSON data. - - Args: - json_data: JSON data from the vacuum - - Returns: - Dictionary of room properties - """ - room_properties = {} - pixel_size = json_data.get("pixelSize", 5) - height = json_data["size"]["y"] - width = json_data["size"]["x"] - vacuum_id = self.vacuum_id - room_id_counter = 0 - - # Store the JSON data for reference in other methods - self.current_json_data = json_data - - for layer in json_data.get("layers", []): - if layer.get("__class") == "MapLayer" and layer.get("type") == "segment": - meta_data = layer.get("metaData", {}) - segment_id = meta_data.get("segmentId") - name = meta_data.get("name", f"Room {segment_id}") - - # Check if this room is disabled in the drawing configuration - # The room_id_counter is 0-based, but DrawableElement.ROOM_X is 1-based - current_room_id = room_id_counter + 1 - room_id_counter = ( - room_id_counter + 1 - ) % 16 # Cycle room_id back to 0 after 15 - - if 1 <= current_room_id <= 15 and self.drawing_config is not None: - room_element = getattr( - DrawableElement, f"ROOM_{current_room_id}", None - ) - if room_element and not self.drawing_config.is_enabled( - room_element - ): - LOGGER.debug( - "%s: Room %d is disabled and will be skipped", - self.vacuum_id, - current_room_id, - ) - 
continue - - compressed_pixels = layer.get("compressedPixels", []) - pixels = self.sublist(compressed_pixels, 3) - - # Create a binary mask for the room - if not pixels: - LOGGER.warning(f"Skipping segment {segment_id}: no pixels found") - continue - - mask = np.zeros((height, width), dtype=np.uint8) - for x, y, length in pixels: - if 0 <= y < height and 0 <= x < width and x + length <= width: - mask[y, x : x + length] = 1 - - # Find the room outline using the improved get_corners function - # Adjust epsilon_factor to control the level of simplification (higher = fewer points) - outline = await self.async_get_corners(mask, epsilon_factor=0.05) - - if not outline: - LOGGER.warning( - f"Skipping segment {segment_id}: failed to generate outline" - ) - continue - - # Calculate the center of the room - xs, ys = zip(*outline) - x_min, x_max = min(xs), max(xs) - y_min, y_max = min(ys), max(ys) - - # Scale coordinates by pixel_size - scaled_outline = [(x * pixel_size, y * pixel_size) for x, y in outline] - - room_id = str(segment_id) - room_properties[room_id] = { - "number": segment_id, - "outline": scaled_outline, # Already includes the closing point - "name": name, - "x": ((x_min + x_max) * pixel_size) // 2, - "y": ((y_min + y_max) * pixel_size) // 2, - } - - RoomStore(vacuum_id, room_properties) - return room_properties - - async def get_room_at_position( - self, x: int, y: int, room_properties: Optional[RoomsProperties] = None - ) -> Optional[Dict[str, Any]]: - """ - Get the room at a specific position. 
- - Args: - x: X coordinate - y: Y coordinate - room_properties: Room properties dictionary (optional) - - Returns: - Room data dictionary or None if no room at position - """ - if room_properties is None: - room_store = RoomStore(self.vacuum_id) - room_properties = room_store.get_rooms() - - if not room_properties: - return None - - for room_id, room_data in room_properties.items(): - outline = room_data.get("outline", []) - if not outline or len(outline) < 3: - continue - - # Check if point is inside the polygon - if self.point_in_polygon(x, y, outline): - return { - "id": room_id, - "name": room_data.get("name", f"Room {room_id}"), - "x": room_data.get("x", 0), - "y": room_data.get("y", 0), - } - - return None - - @staticmethod - def point_in_polygon(x: int, y: int, polygon: List[Tuple[int, int]]) -> bool: - """ - Check if a point is inside a polygon using ray casting algorithm. - - Args: - x: X coordinate of the point - y: Y coordinate of the point - polygon: List of (x, y) tuples forming the polygon - - Returns: - True if the point is inside the polygon, False otherwise - """ - n = len(polygon) - inside = False - - p1x, p1y = polygon[0] - xinters = None # Initialize with default value - for i in range(1, n + 1): - p2x, p2y = polygon[i % n] - if y > min(p1y, p2y): - if y <= max(p1y, p2y): - if x <= max(p1x, p2x): - if p1y != p2y: - xinters = (y - p1y) * (p2x - p1x) / (p2y - p1y) + p1x - if p1x == p2x or x <= xinters: - inside = not inside - p1x, p1y = p2x, p2y - - return inside diff --git a/SCR/valetudo_map_parser/rand256_handler.py b/SCR/valetudo_map_parser/rand256_handler.py index 0f9a157..f5e6f65 100644 --- a/SCR/valetudo_map_parser/rand256_handler.py +++ b/SCR/valetudo_map_parser/rand256_handler.py @@ -346,7 +346,7 @@ async def _draw_map_elements( else: self.zooming = False - img_np_array = self.async_auto_trim_and_zoom_image( + img_np_array = self.auto_trim_and_zoom_image( img_np_array, detect_colour=colors["background"], 
margin_size=int(self.shared.margins), diff --git a/pyproject.toml b/pyproject.toml index 9d1359f..6b506c0 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -18,7 +18,7 @@ python = ">=3.13" numpy = ">=1.26.4" Pillow = ">=10.3.0" scipy = ">=1.12.0" -mvcrender = ">=0.0.2" +mvcrender = ">=0.0.4" [tool.poetry.group.dev.dependencies] ruff = "*" From d76d7c16effa82526df1073c7b5b846ec60f0562 Mon Sep 17 00:00:00 2001 From: SCA075 <82227818+sca075@users.noreply.github.com> Date: Sat, 4 Oct 2025 22:26:37 +0200 Subject: [PATCH 04/25] bump mvcrender isort and ruff and replace blending and some drawing function with C based from mcvrender. Signed-off-by: Sandro Cantarella --- SCR/valetudo_map_parser/__init__.py | 18 +- SCR/valetudo_map_parser/config/color_utils.py | 7 +- SCR/valetudo_map_parser/config/drawable.py | 125 ++----- .../config/drawable_elements.py | 2 - .../config/enhanced_drawable.py | 324 ------------------ .../config/rand256_parser.py | 58 +++- SCR/valetudo_map_parser/config/shared.py | 5 +- .../config/status_text/status_text.py | 1 + SCR/valetudo_map_parser/config/types.py | 6 +- SCR/valetudo_map_parser/config/utils.py | 24 +- SCR/valetudo_map_parser/hypfer_handler.py | 24 +- SCR/valetudo_map_parser/map_data.py | 20 +- SCR/valetudo_map_parser/rand256_handler.py | 23 +- SCR/valetudo_map_parser/reimg_draw.py | 2 +- SCR/valetudo_map_parser/rooms_handler.py | 11 +- pyproject.toml | 4 +- 16 files changed, 139 insertions(+), 515 deletions(-) delete mode 100644 SCR/valetudo_map_parser/config/enhanced_drawable.py diff --git a/SCR/valetudo_map_parser/__init__.py b/SCR/valetudo_map_parser/__init__.py index c5d0efa..c304492 100644 --- a/SCR/valetudo_map_parser/__init__.py +++ b/SCR/valetudo_map_parser/__init__.py @@ -6,27 +6,26 @@ from .config.colors import ColorsManagement from .config.drawable import Drawable from .config.drawable_elements import DrawableElement, DrawingConfig -from .config.enhanced_drawable import EnhancedDrawable from .config.rand256_parser import 
RRMapParser from .config.shared import CameraShared, CameraSharedManager +from .config.status_text.status_text import StatusText +from .config.status_text.translations import translations as STATUS_TEXT_TRANSLATIONS from .config.types import ( CameraModes, + ImageSize, + JsonType, + NumpyArray, + PilPNG, RoomsProperties, RoomStore, SnapshotStore, TrimCropData, UserLanguageStore, - JsonType, - PilPNG, - NumpyArray, - ImageSize, ) -from .config.status_text.status_text import StatusText -from .config.status_text.translations import translations as STATUS_TEXT_TRANSLATIONS from .hypfer_handler import HypferMapImageHandler -from .rand256_handler import ReImageHandler -from .rooms_handler import RoomsHandler, RandRoomsHandler from .map_data import HyperMapData +from .rand256_handler import ReImageHandler +from .rooms_handler import RandRoomsHandler, RoomsHandler def get_default_font_path() -> str: @@ -51,7 +50,6 @@ def get_default_font_path() -> str: "Drawable", "DrawableElement", "DrawingConfig", - "EnhancedDrawable", "SnapshotStore", "UserLanguageStore", "RoomStore", diff --git a/SCR/valetudo_map_parser/config/color_utils.py b/SCR/valetudo_map_parser/config/color_utils.py index 94e22e8..80d1297 100644 --- a/SCR/valetudo_map_parser/config/color_utils.py +++ b/SCR/valetudo_map_parser/config/color_utils.py @@ -1,8 +1,7 @@ """Utility functions for color operations in the map parser.""" -from typing import Optional, Tuple +from typing import Optional -from .colors import ColorsManagement from .types import Color, NumpyArray @@ -36,8 +35,8 @@ def get_blended_color( # Sample background at midpoint mid_x, mid_y = (x0 + x1) // 2, (y0 + y1) // 2 if 0 <= mid_y < arr.shape[0] and 0 <= mid_x < arr.shape[1]: - return tuple(arr[mid_y, mid_x]) - return (0, 0, 0, 0) # Default if out of bounds + return Color(arr[mid_y, mid_x]) + return Color(0, 0, 0, 0) # Default if out of bounds # Calculate direction vector for offset sampling dx = x1 - x0 diff --git 
a/SCR/valetudo_map_parser/config/drawable.py b/SCR/valetudo_map_parser/config/drawable.py index 919c785..33715be 100644 --- a/SCR/valetudo_map_parser/config/drawable.py +++ b/SCR/valetudo_map_parser/config/drawable.py @@ -14,10 +14,10 @@ from pathlib import Path import numpy as np +from mvcrender.blend import get_blended_color, sample_and_blend_color +from mvcrender.draw import circle_u8, line_u8 from PIL import Image, ImageDraw, ImageFont -from .color_utils import get_blended_color -from .colors import ColorsManagement from .types import Color, NumpyArray, PilPNG, Point, Tuple, Union @@ -85,7 +85,7 @@ async def from_json_to_image( and 0 <= center_x < image_array.shape[1] ): # Get blended color - blended_color = ColorsManagement.sample_and_blend_color( + blended_color = sample_and_blend_color( image_array, center_x, center_y, full_color ) # Apply blended color to the region @@ -131,9 +131,7 @@ async def battery_charger( center_x = (start_col + end_col) // 2 # Get blended color - blended_color = ColorsManagement.sample_and_blend_color( - layers, center_x, center_y, color - ) + blended_color = sample_and_blend_color(layers, center_x, center_y, color) # Apply blended color layers[start_row:end_row, start_col:end_col] = blended_color @@ -165,9 +163,7 @@ async def go_to_flag( # Blend flag color if needed if flag_alpha < 255: - flag_color = ColorsManagement.sample_and_blend_color( - layer, x, y, flag_color - ) + flag_color = sample_and_blend_color(layer, x, y, flag_color) # Create pole color with alpha pole_color: Color = ( @@ -179,9 +175,7 @@ async def go_to_flag( # Blend pole color if needed if pole_alpha < 255: - pole_color = ColorsManagement.sample_and_blend_color( - layer, x, y, pole_color - ) + pole_color = sample_and_blend_color(layer, x, y, pole_color) flag_size = 50 pole_width = 6 @@ -246,62 +240,19 @@ def point_inside(x: int, y: int, points: list[Tuple[int, int]]) -> bool: @staticmethod def _line( - layer: np.ndarray, + layer: NumpyArray, x1: int, y1: int, x2: 
int, y2: int, color: Color, width: int = 3, - ) -> np.ndarray: - """Draw a line on a NumPy array (layer) from point A to B using Bresenham's algorithm. - - Args: - layer: The numpy array to draw on (H, W, C) - x1, y1: Start point coordinates - x2, y2: End point coordinates - color: Color to draw with (tuple or array) - width: Width of the line in pixels - """ - x1, y1, x2, y2 = int(x1), int(y1), int(x2), int(y2) - - blended_color = get_blended_color(x1, y1, x2, y2, layer, color) - - dx = abs(x2 - x1) - dy = abs(y2 - y1) - sx = 1 if x1 < x2 else -1 - sy = 1 if y1 < y2 else -1 - err = dx - dy - - half_w = width // 2 - h, w = layer.shape[:2] - - while True: - # Draw a filled circle for thickness - yy, xx = np.ogrid[-half_w : half_w + 1, -half_w : half_w + 1] - mask = xx**2 + yy**2 <= half_w**2 - y_min = max(0, y1 - half_w) - y_max = min(h, y1 + half_w + 1) - x_min = max(0, x1 - half_w) - x_max = min(w, x1 + half_w + 1) - - sub_mask = mask[ - (y_min - (y1 - half_w)) : (y_max - (y1 - half_w)), - (x_min - (x1 - half_w)) : (x_max - (x1 - half_w)), - ] - layer[y_min:y_max, x_min:x_max][sub_mask] = blended_color - - if x1 == x2 and y1 == y2: - break - - e2 = 2 * err - if e2 > -dy: - err -= dy - x1 += sx - if e2 < dx: - err += dx - y1 += sy - + ) -> NumpyArray: + """Segment-aware preblend, then stamp a solid line.""" + width = int(max(1, width)) + # Preblend once for this segment + seg = get_blended_color(int(x1), int(y1), int(x2), int(y2), layer, color) + line_u8(layer, int(x1), int(y1), int(x2), int(y2), seg, width) return layer @staticmethod @@ -355,35 +306,31 @@ def _filled_circle( outline_width: int = 0, ) -> NumpyArray: """ - Draw a filled circle on the image using NumPy. - Optimized to only process the bounding box of the circle. + Draw a filled circle and optional outline using mvcrender.draw.circle_u8. + If alpha<255, preblend once at the center and stamp solid. 
""" - y, x = center - height, width = image.shape[:2] - - # Calculate the bounding box of the circle - min_y = max(0, y - radius - outline_width) - max_y = min(height, y + radius + outline_width + 1) - min_x = max(0, x - radius - outline_width) - max_x = min(width, x + radius + outline_width + 1) - - # Create coordinate arrays for the bounding box - y_indices, x_indices = np.ogrid[min_y:max_y, min_x:max_x] - - # Calculate distances from center - dist_sq = (y_indices - y) ** 2 + (x_indices - x) ** 2 + cy, cx = ( + int(center[0]), + int(center[1]), + ) # incoming Point is (y,x) in your codebase + h, w = image.shape[:2] + if not (0 <= cx < w and 0 <= cy < h): + return image - # Create masks for the circle and outline - circle_mask = dist_sq <= radius**2 + fill_rgba = color + if fill_rgba[3] < 255: + fill_rgba = sample_and_blend_color(image, cx, cy, fill_rgba) - # Apply the fill color - image[min_y:max_y, min_x:max_x][circle_mask] = color + circle_u8(image, int(cx), int(cy), int(radius), fill_rgba, -1) - # Draw the outline if needed - if outline_width > 0 and outline_color is not None: - outer_mask = dist_sq <= (radius + outline_width) ** 2 - outline_mask = outer_mask & ~circle_mask - image[min_y:max_y, min_x:max_x][outline_mask] = outline_color + if outline_color is not None and outline_width > 0: + out_rgba = outline_color + if out_rgba[3] < 255: + out_rgba = sample_and_blend_color(image, cx, cy, out_rgba) + # outlined stroke thickness = outline_width + circle_u8( + image, int(cx), int(cy), int(radius), out_rgba, int(outline_width) + ) return image @@ -835,9 +782,7 @@ async def async_draw_obstacles( continue if need_blending: - obs_color = ColorsManagement.sample_and_blend_color( - image, x, y, color - ) + obs_color = sample_and_blend_color(image, x, y, color) else: obs_color = color diff --git a/SCR/valetudo_map_parser/config/drawable_elements.py b/SCR/valetudo_map_parser/config/drawable_elements.py index ed7be98..f15dbc2 100644 --- 
a/SCR/valetudo_map_parser/config/drawable_elements.py +++ b/SCR/valetudo_map_parser/config/drawable_elements.py @@ -9,8 +9,6 @@ from enum import IntEnum from typing import Dict, List, Tuple, Union -import numpy as np - from .colors import DefaultColors, SupportedColor from .types import LOGGER diff --git a/SCR/valetudo_map_parser/config/enhanced_drawable.py b/SCR/valetudo_map_parser/config/enhanced_drawable.py deleted file mode 100644 index 549d39e..0000000 --- a/SCR/valetudo_map_parser/config/enhanced_drawable.py +++ /dev/null @@ -1,324 +0,0 @@ -""" -Enhanced Drawable Class. -Provides drawing utilities with element selection support. -Version: 0.1.9 -""" - -from __future__ import annotations - -import logging - -# math is not used in this file -from typing import Optional, Tuple - -import numpy as np - -from .colors import ColorsManagement -from .drawable import Drawable -from .drawable_elements import ( - DrawableElement, - DrawingConfig, -) - - -# Type aliases -NumpyArray = np.ndarray -Color = Tuple[int, int, int, int] - -_LOGGER = logging.getLogger(__name__) - - -class EnhancedDrawable(Drawable): - """Enhanced drawing utilities with element selection support.""" - - def __init__(self, drawing_config: Optional[DrawingConfig] = None): - """Initialize with optional drawing configuration.""" - super().__init__() - self.drawing_config = drawing_config or DrawingConfig() - - # Color blending methods have been moved to ColorsManagement class in colors.py - - # Pixel blending methods have been moved to ColorsManagement class in colors.py - - async def draw_map( - self, map_data: dict, base_array: Optional[NumpyArray] = None - ) -> NumpyArray: - """ - Draw the map with selected elements. 
- - Args: - map_data: The map data dictionary - base_array: Optional base array to draw on - - Returns: - The image array with all elements drawn - """ - # Get map dimensions - size_x = map_data.get("size", {}).get("x", 1024) - size_y = map_data.get("size", {}).get("y", 1024) - - # Create empty image if none provided - if base_array is None: - background_color = self.drawing_config.get_property( - DrawableElement.FLOOR, "color", (200, 200, 200, 255) - ) - base_array = await self.create_empty_image(size_x, size_y, background_color) - - # Draw elements in order of z-index - for element in self.drawing_config.get_drawing_order(): - if element == DrawableElement.FLOOR: - base_array = await self._draw_floor(map_data, base_array) - elif element == DrawableElement.WALL: - base_array = await self._draw_walls(map_data, base_array) - elif element == DrawableElement.ROBOT: - base_array = await self._draw_robot(map_data, base_array) - elif element == DrawableElement.CHARGER: - base_array = await self._draw_charger(map_data, base_array) - elif element == DrawableElement.VIRTUAL_WALL: - base_array = await self._draw_virtual_walls(map_data, base_array) - elif element == DrawableElement.RESTRICTED_AREA: - base_array = await self._draw_restricted_areas(map_data, base_array) - elif element == DrawableElement.NO_MOP_AREA: - base_array = await self._draw_no_mop_areas(map_data, base_array) - elif element == DrawableElement.PATH: - base_array = await self._draw_path(map_data, base_array) - elif element == DrawableElement.PREDICTED_PATH: - base_array = await self._draw_predicted_path(map_data, base_array) - elif element == DrawableElement.GO_TO_TARGET: - base_array = await self._draw_go_to_target(map_data, base_array) - elif DrawableElement.ROOM_1 <= element <= DrawableElement.ROOM_15: - room_id = element - DrawableElement.ROOM_1 + 1 - base_array = await self._draw_room(map_data, room_id, base_array) - - return base_array - - async def _draw_floor(self, map_data: dict, array: NumpyArray) 
-> NumpyArray: - """Draw the floor layer.""" - if not self.drawing_config.is_enabled(DrawableElement.FLOOR): - return array - - # Implementation depends on the map data format - # This is a placeholder - actual implementation would use map_data to draw floor - - return array - - async def _draw_walls(self, map_data: dict, array: NumpyArray) -> NumpyArray: - """Draw the walls.""" - if not self.drawing_config.is_enabled(DrawableElement.WALL): - return array - - # Get wall color from drawing config - wall_color = self.drawing_config.get_property( - DrawableElement.WALL, "color", (255, 255, 0, 255) - ) - - # Implementation depends on the map data format - # For Valetudo maps, we would look at the layers with type "wall" - # This is a simplified example - in a real implementation, we would extract the actual wall pixels - - # Find wall data in map_data - wall_pixels = [] - for layer in map_data.get("layers", []): - if layer.get("type") == "wall": - # Extract wall pixels from the layer - # This is a placeholder - actual implementation would depend on the map data format - wall_pixels = layer.get("pixels", []) - break - - # Draw wall pixels with color blending - for x, y in wall_pixels: - # Use sample_and_blend_color from ColorsManagement - blended_color = ColorsManagement.sample_and_blend_color( - array, x, y, wall_color - ) - if 0 <= y < array.shape[0] and 0 <= x < array.shape[1]: - array[y, x] = blended_color - - return array - - async def _draw_robot(self, map_data: dict, array: NumpyArray) -> NumpyArray: - """Draw the robot.""" - if not self.drawing_config.is_enabled(DrawableElement.ROBOT): - return array - - # Get robot color from drawing config - robot_color = self.drawing_config.get_property( - DrawableElement.ROBOT, "color", (255, 255, 204, 255) - ) - - # Extract robot position and angle from map_data - robot_position = map_data.get("robot", {}).get("position", None) - robot_angle = map_data.get("robot", {}).get("angle", 0) - - if robot_position: - x, y = 
robot_position.get("x", 0), robot_position.get("y", 0) - - # Draw robot with color blending - # Create a circle around the robot position - radius = 25 # Same as in the robot drawing method - for dy in range(-radius, radius + 1): - for dx in range(-radius, radius + 1): - if dx * dx + dy * dy <= radius * radius: - map_x, map_y = int(x + dx), int(y + dy) - # Use sample_and_blend_color from ColorsManagement - blended_color = ColorsManagement.sample_and_blend_color( - array, map_x, map_y, robot_color - ) - if 0 <= map_y < array.shape[0] and 0 <= map_x < array.shape[1]: - array[map_y, map_x] = blended_color - return array - - async def _draw_charger(self, map_data: dict, array: NumpyArray) -> NumpyArray: - """Draw the charger.""" - if not self.drawing_config.is_enabled(DrawableElement.CHARGER): - return array - - # Get charger color from drawing config - charger_color = self.drawing_config.get_property( - DrawableElement.CHARGER, "color", (255, 128, 0, 255) - ) - - # Implementation depends on the map data format - # This would extract charger data from map_data and draw it - - return array - - async def _draw_virtual_walls( - self, map_data: dict, array: NumpyArray - ) -> NumpyArray: - """Draw virtual walls.""" - if not self.drawing_config.is_enabled(DrawableElement.VIRTUAL_WALL): - return array - - # Get virtual wall color from drawing config - wall_color = self.drawing_config.get_property( - DrawableElement.VIRTUAL_WALL, "color", (255, 0, 0, 255) - ) - - # Implementation depends on the map data format - # This would extract virtual wall data from map_data and draw it - - return array - - async def _draw_restricted_areas( - self, map_data: dict, array: NumpyArray - ) -> NumpyArray: - """Draw restricted areas.""" - if not self.drawing_config.is_enabled(DrawableElement.RESTRICTED_AREA): - return array - - # Get restricted area color from drawing config - area_color = self.drawing_config.get_property( - DrawableElement.RESTRICTED_AREA, "color", (255, 0, 0, 125) - ) - - # 
Implementation depends on the map data format - # This would extract restricted area data from map_data and draw it - - return array - - async def _draw_no_mop_areas(self, map_data: dict, array: NumpyArray) -> NumpyArray: - """Draw no-mop areas.""" - if not self.drawing_config.is_enabled(DrawableElement.NO_MOP_AREA): - return array - - # Get no-mop area color from drawing config - area_color = self.drawing_config.get_property( - DrawableElement.NO_MOP_AREA, "color", (0, 0, 255, 125) - ) - - # Implementation depends on the map data format - # This would extract no-mop area data from map_data and draw it - - return array - - async def _draw_path(self, map_data: dict, array: NumpyArray) -> NumpyArray: - """Draw the robot's path.""" - if not self.drawing_config.is_enabled(DrawableElement.PATH): - return array - - # Get path color from drawing config - path_color = self.drawing_config.get_property( - DrawableElement.PATH, "color", (238, 247, 255, 255) - ) - - # Implementation depends on the map data format - # This would extract path data from map_data and draw it - - return array - - async def _draw_predicted_path( - self, map_data: dict, array: NumpyArray - ) -> NumpyArray: - """Draw the predicted path.""" - if not self.drawing_config.is_enabled(DrawableElement.PREDICTED_PATH): - return array - - # Get predicted path color from drawing config - path_color = self.drawing_config.get_property( - DrawableElement.PREDICTED_PATH, "color", (238, 247, 255, 125) - ) - - # Implementation depends on the map data format - # This would extract predicted path data from map_data and draw it - - return array - - async def _draw_go_to_target(self, map_data: dict, array: NumpyArray) -> NumpyArray: - """Draw the go-to target.""" - if not self.drawing_config.is_enabled(DrawableElement.GO_TO_TARGET): - return array - - # Get go-to target color from drawing config - target_color = self.drawing_config.get_property( - DrawableElement.GO_TO_TARGET, "color", (0, 255, 0, 255) - ) - - # 
Implementation depends on the map data format - # This would extract go-to target data from map_data and draw it - - return array - - async def _draw_room( - self, map_data: dict, room_id: int, array: NumpyArray - ) -> NumpyArray: - """Draw a specific room.""" - element = getattr(DrawableElement, f"ROOM_{room_id}") - if not self.drawing_config.is_enabled(element): - return array - - # Get room color from drawing config - room_color = self.drawing_config.get_property( - element, - "color", - (135, 206, 250, 255), # Default light blue - ) - - # Implementation depends on the map data format - # For Valetudo maps, we would look at the layers with type "segment" - # This is a simplified example - in a real implementation, we would extract the actual room pixels - - # Find room data in map_data - room_pixels = [] - for layer in map_data.get("layers", []): - if layer.get("type") == "segment" and str( - layer.get("metaData", {}).get("segmentId") - ) == str(room_id): - # Extract room pixels from the layer - # This is a placeholder - actual implementation would depend on the map data format - # For example, it might use compressed pixels or other data structures - - # For demonstration, let's assume we have a list of (x, y) coordinates - room_pixels = layer.get("pixels", []) - break - - # Draw room pixels with color blending - for x, y in room_pixels: - # Use sample_and_blend_color from ColorsManagement - blended_color = ColorsManagement.sample_and_blend_color( - array, x, y, room_color - ) - if 0 <= y < array.shape[0] and 0 <= x < array.shape[1]: - array[y, x] = blended_color - - return array diff --git a/SCR/valetudo_map_parser/config/rand256_parser.py b/SCR/valetudo_map_parser/config/rand256_parser.py index b6b618d..a5e2f1b 100644 --- a/SCR/valetudo_map_parser/config/rand256_parser.py +++ b/SCR/valetudo_map_parser/config/rand256_parser.py @@ -1,7 +1,7 @@ """New Rand256 Map Parser - Based on Xiaomi/Roborock implementation with precise binary parsing.""" -import struct 
import math +import struct from enum import Enum from typing import Any, Dict, List, Optional @@ -146,7 +146,6 @@ def _parse_object_position(block_data_length: int, data: bytes) -> Dict[str, Any angle = raw_angle return {"position": [x, y], "angle": angle} - @staticmethod def _parse_walls(data: bytes, header: bytes) -> list: wall_pairs = RRMapParser._get_int16(header, 0x08) @@ -156,7 +155,14 @@ def _parse_walls(data: bytes, header: bytes) -> list: y0 = RRMapParser._get_int16(data, wall_start + 2) x1 = RRMapParser._get_int16(data, wall_start + 4) y1 = RRMapParser._get_int16(data, wall_start + 6) - walls.append([x0, RRMapParser.Tools.DIMENSION_MM - y0, x1, RRMapParser.Tools.DIMENSION_MM - y1]) + walls.append( + [ + x0, + RRMapParser.Tools.DIMENSION_MM - y0, + x1, + RRMapParser.Tools.DIMENSION_MM - y1, + ] + ) return walls @staticmethod @@ -218,29 +224,53 @@ def parse_blocks(self, raw: bytes, pixels: bool = True) -> Dict[int, Any]: match block_type: case self.Types.DIGEST.value: self.is_valid = True - case self.Types.ROBOT_POSITION.value | self.Types.CHARGER_LOCATION.value: - blocks[block_type] = self._parse_object_position(block_data_length, data) + case ( + self.Types.ROBOT_POSITION.value + | self.Types.CHARGER_LOCATION.value + ): + blocks[block_type] = self._parse_object_position( + block_data_length, data + ) case self.Types.PATH.value | self.Types.GOTO_PREDICTED_PATH.value: - blocks[block_type] = self._parse_path_block(raw, block_start_position, block_data_length) + blocks[block_type] = self._parse_path_block( + raw, block_start_position, block_data_length + ) case self.Types.CURRENTLY_CLEANED_ZONES.value: blocks[block_type] = {"zones": self._parse_zones(data, header)} case self.Types.FORBIDDEN_ZONES.value: - blocks[block_type] = {"forbidden_zones": self._parse_area(header, data)} + blocks[block_type] = { + "forbidden_zones": self._parse_area(header, data) + } case self.Types.FORBIDDEN_MOP_ZONES.value: - blocks[block_type] = {"forbidden_mop_zones": 
self._parse_area(header, data)} + blocks[block_type] = { + "forbidden_mop_zones": self._parse_area(header, data) + } case self.Types.GOTO_TARGET.value: blocks[block_type] = {"position": self._parse_goto_target(data)} case self.Types.VIRTUAL_WALLS.value: - blocks[block_type] = {"virtual_walls": self._parse_walls(data, header)} + blocks[block_type] = { + "virtual_walls": self._parse_walls(data, header) + } case self.Types.CARPET_MAP.value: - data = RRMapParser._get_bytes(raw, block_data_start, block_data_length) - blocks[block_type] = {"carpet_map": self._parse_carpet_map(data)} + data = RRMapParser._get_bytes( + raw, block_data_start, block_data_length + ) + blocks[block_type] = { + "carpet_map": self._parse_carpet_map(data) + } case self.Types.IMAGE.value: header_length = self._get_int8(header, 2) blocks[block_type] = self._parse_image_block( - raw, block_start_position, block_data_length, header_length, pixels) - - block_start_position = block_start_position + block_data_length + self._get_int8(header, 2) + raw, + block_start_position, + block_data_length, + header_length, + pixels, + ) + + block_start_position = ( + block_start_position + block_data_length + self._get_int8(header, 2) + ) except (struct.error, IndexError): break return blocks diff --git a/SCR/valetudo_map_parser/config/shared.py b/SCR/valetudo_map_parser/config/shared.py index 8ecd4ae..bffdec4 100755 --- a/SCR/valetudo_map_parser/config/shared.py +++ b/SCR/valetudo_map_parser/config/shared.py @@ -7,18 +7,19 @@ import asyncio import logging from typing import List + from PIL import Image from .types import ( ATTR_CALIBRATION_POINTS, ATTR_CAMERA_MODE, ATTR_CONTENT_TYPE, + ATTR_IMAGE_LAST_UPDATED, ATTR_MARGINS, ATTR_OBSTACLES, ATTR_POINTS, ATTR_ROOMS, ATTR_ROTATE, - ATTR_IMAGE_LAST_UPDATED, ATTR_VACUUM_BATTERY, ATTR_VACUUM_CHARGING, ATTR_VACUUM_JSON_ID, @@ -40,8 +41,8 @@ DEFAULT_VALUES, CameraModes, Colors, - TrimsData, PilPNG, + TrimsData, ) diff --git 
a/SCR/valetudo_map_parser/config/status_text/status_text.py b/SCR/valetudo_map_parser/config/status_text/status_text.py index 720ec2f..7e7942d 100644 --- a/SCR/valetudo_map_parser/config/status_text/status_text.py +++ b/SCR/valetudo_map_parser/config/status_text/status_text.py @@ -9,6 +9,7 @@ from ..types import LOGGER, PilPNG from .translations import translations + LOGGER.propagate = True diff --git a/SCR/valetudo_map_parser/config/types.py b/SCR/valetudo_map_parser/config/types.py index 6be8f0c..c6740bd 100644 --- a/SCR/valetudo_map_parser/config/types.py +++ b/SCR/valetudo_map_parser/config/types.py @@ -8,7 +8,7 @@ import logging import threading from dataclasses import asdict, dataclass -from typing import Any, Dict, Optional, Tuple, TypedDict, Union, List, NotRequired +from typing import Any, Dict, List, NotRequired, Optional, Tuple, TypedDict, Union import numpy as np from PIL import Image @@ -222,7 +222,9 @@ async def async_set_vacuum_json(self, vacuum_id: str, json_data: Any) -> None: Color = Union[Tuple[int, int, int], Tuple[int, int, int, int]] Colors = Dict[str, Color] CalibrationPoints = list[dict[str, Any]] -RobotPosition = Optional[dict[str, Union[int | float]]] +RobotPosition: type[tuple[Any, Any, dict[str, int | float] | None]] = tuple[ + Any, Any, dict[str, int | float] | None +] ChargerPosition = dict[str, Any] RoomsProperties = dict[str, RoomProperty] ImageSize = dict[str, int | list[int]] diff --git a/SCR/valetudo_map_parser/config/utils.py b/SCR/valetudo_map_parser/config/utils.py index 21a2473..baf42ec 100644 --- a/SCR/valetudo_map_parser/config/utils.py +++ b/SCR/valetudo_map_parser/config/utils.py @@ -1,32 +1,30 @@ """Utility code for the valetudo map parser.""" import datetime -from time import time import hashlib +import io import json from dataclasses import dataclass +from time import time from typing import Callable, List, Optional, Tuple -import io import numpy as np from PIL import Image, ImageOps +from ..map_data import HyperMapData 
+from .async_utils import AsyncNumPy from .drawable import Drawable from .drawable_elements import DrawingConfig -from .enhanced_drawable import EnhancedDrawable from .status_text.status_text import StatusText - from .types import ( LOGGER, ChargerPosition, - Size, + Destinations, NumpyArray, PilPNG, RobotPosition, - Destinations, + Size, ) -from ..map_data import HyperMapData -from .async_utils import AsyncNumPy @dataclass @@ -79,7 +77,6 @@ def __init__(self): # Drawing components are initialized by initialize_drawing_config in handlers self.drawing_config: Optional[DrawingConfig] = None self.draw: Optional[Drawable] = None - self.enhanced_draw: Optional[EnhancedDrawable] = None def get_frame_number(self) -> int: """Return the frame number of the image.""" @@ -709,7 +706,7 @@ def initialize_drawing_config(handler): handler: The handler instance with shared data and file_name attributes Returns: - Tuple of (DrawingConfig, Drawable, EnhancedDrawable) + Tuple of (DrawingConfig, Drawable) """ # Initialize drawing configuration @@ -721,11 +718,10 @@ def initialize_drawing_config(handler): ): drawing_config.update_from_device_info(handler.shared.device_info) - # Initialize both drawable systems for backward compatibility - draw = Drawable() # Legacy drawing utilities - enhanced_draw = EnhancedDrawable(drawing_config) # New enhanced drawing system + # Initialize drawing utilities + draw = Drawable() - return drawing_config, draw, enhanced_draw + return drawing_config, draw def blend_colors(base_color, overlay_color): diff --git a/SCR/valetudo_map_parser/hypfer_handler.py b/SCR/valetudo_map_parser/hypfer_handler.py index 4b62699..05a00de 100644 --- a/SCR/valetudo_map_parser/hypfer_handler.py +++ b/SCR/valetudo_map_parser/hypfer_handler.py @@ -8,24 +8,22 @@ from __future__ import annotations import asyncio -import numpy as np +import numpy as np +from mvcrender.autocrop import AutoCrop from PIL import Image from .config.async_utils import AsyncPIL - -from 
mvcrender.autocrop import AutoCrop from .config.drawable_elements import DrawableElement from .config.shared import CameraShared - from .config.types import ( COLORS, LOGGER, CalibrationPoints, Colors, + JsonType, RoomsProperties, RoomStore, - JsonType, ) from .config.utils import ( BaseHandler, @@ -48,9 +46,7 @@ def __init__(self, shared_data: CameraShared): self.calibration_data = None # camera shared data. self.data = ImageData # imported Image Data Module. # Initialize drawing configuration using the shared utility function - self.drawing_config, self.draw, self.enhanced_draw = initialize_drawing_config( - self - ) + self.drawing_config, self.draw = initialize_drawing_config(self) self.go_to = None # vacuum go to data self.img_hash = None # hash of the image calculated to check differences. @@ -77,7 +73,7 @@ async def async_extract_room_properties(self, json_data) -> RoomsProperties: json_data ) if room_properties: - rooms = RoomStore(self.file_name, room_properties) + _ = RoomStore(self.file_name, room_properties) # Convert room_properties to the format expected by async_get_robot_in_room self.rooms_pos = [] for room_id, room_data in room_properties.items(): @@ -346,16 +342,6 @@ async def async_get_image_from_json( robot_state=self.shared.vacuum_state, ) - # Update element map for robot position - if ( - hasattr(self.shared, "element_map") - and self.shared.element_map is not None - ): - update_element_map_with_robot( - self.shared.element_map, - robot_position, - DrawableElement.ROBOT, - ) # Synchronize zooming state from ImageDraw to handler before auto-crop self.zooming = self.imd.img_h.zooming diff --git a/SCR/valetudo_map_parser/map_data.py b/SCR/valetudo_map_parser/map_data.py index 07bd753..fee7d03 100755 --- a/SCR/valetudo_map_parser/map_data.py +++ b/SCR/valetudo_map_parser/map_data.py @@ -8,22 +8,22 @@ from __future__ import annotations -import numpy as np +from dataclasses import asdict, dataclass, field from typing import ( - List, - Sequence, - 
TypeVar, Any, - TypedDict, - NotRequired, Literal, + NotRequired, Optional, + Sequence, + TypedDict, + TypeVar, ) -from dataclasses import dataclass, field, asdict +import numpy as np from .config.types import ImageSize, JsonType + T = TypeVar("T") # --- Common Nested Structures --- @@ -373,6 +373,11 @@ async def async_get_rooms_coordinates( Else: (min_x_mm, min_y_mm, max_x_mm, max_y_mm) """ + + def to_mm(coord): + """Convert pixel coordinates to millimeters.""" + return round(coord * pixel_size * 10) + if not pixels: raise ValueError("Pixels list cannot be empty.") @@ -393,7 +398,6 @@ async def async_get_rooms_coordinates( min_y = min(min_y, y) if rand: - to_mm = lambda v: v * pixel_size * 10 return (to_mm(max_x), to_mm(max_y)), (to_mm(min_x), to_mm(min_y)) return ( diff --git a/SCR/valetudo_map_parser/rand256_handler.py b/SCR/valetudo_map_parser/rand256_handler.py index f5e6f65..71dc2f2 100644 --- a/SCR/valetudo_map_parser/rand256_handler.py +++ b/SCR/valetudo_map_parser/rand256_handler.py @@ -11,22 +11,21 @@ from typing import Any import numpy as np +from mvcrender.autocrop import AutoCrop from .config.async_utils import AsyncPIL - -from mvcrender.autocrop import AutoCrop from .config.drawable_elements import DrawableElement from .config.types import ( COLORS, DEFAULT_IMAGE_SIZE, DEFAULT_PIXEL_SIZE, + LOGGER, Colors, JsonType, PilPNG, RobotPosition, RoomsProperties, RoomStore, - LOGGER, ) from .config.utils import ( BaseHandler, @@ -55,9 +54,7 @@ def __init__(self, shared_data): self.data = RandImageData # Image Data # Initialize drawing configuration using the shared utility function - self.drawing_config, self.draw, self.enhanced_draw = initialize_drawing_config( - self - ) + self.drawing_config, self.draw = initialize_drawing_config(self) self.go_to = None # Go to position data self.img_base_layer = None # Base image layer self.img_rotate = shared_data.image_rotate # Image rotation @@ -115,7 +112,7 @@ async def extract_room_properties( if not (room_properties 
or self.shared.map_pred_zones): self.rooms_pos = None - rooms = RoomStore(self.file_name, room_properties) + _ = RoomStore(self.file_name, room_properties) return room_properties except (RuntimeError, ValueError) as e: LOGGER.warning( @@ -273,7 +270,7 @@ async def _setup_robot_and_image( # Restore original rooms_pos self.rooms_pos = original_rooms_pos - except Exception as e: + except (ValueError, KeyError, TypeError): # Fallback to robot-position-based zoom if room extraction fails if ( self.shared.image_auto_zoom @@ -357,14 +354,14 @@ async def _draw_map_elements( return img_np_array async def _finalize_image(self, pil_img): - if not self.shared.image_ref_width or not self.shared.image_ref_height: - LOGGER.warning( - "Image finalization failed: Invalid image dimensions. Returning original image." - ) - return pil_img if self.check_zoom_and_aspect_ratio(): resize_params = self.prepare_resize_params(pil_img, True) pil_img = await self.async_resize_images(resize_params) + else: + LOGGER.warning( + "%s: Invalid image dimensions. 
Returning original image.", + self.file_name, + ) return pil_img async def get_rooms_attributes( diff --git a/SCR/valetudo_map_parser/reimg_draw.py b/SCR/valetudo_map_parser/reimg_draw.py index 7ec6649..63c1604 100644 --- a/SCR/valetudo_map_parser/reimg_draw.py +++ b/SCR/valetudo_map_parser/reimg_draw.py @@ -8,7 +8,7 @@ from .config.drawable import Drawable from .config.drawable_elements import DrawableElement -from .config.types import Color, JsonType, NumpyArray, LOGGER +from .config.types import LOGGER, Color, JsonType, NumpyArray from .map_data import ImageData, RandImageData diff --git a/SCR/valetudo_map_parser/rooms_handler.py b/SCR/valetudo_map_parser/rooms_handler.py index 08ad391..a1f5e48 100644 --- a/SCR/valetudo_map_parser/rooms_handler.py +++ b/SCR/valetudo_map_parser/rooms_handler.py @@ -7,7 +7,6 @@ from __future__ import annotations -import time from typing import Any, Dict, List, Optional, Tuple import numpy as np @@ -16,8 +15,7 @@ from .config.drawable_elements import DrawableElement, DrawingConfig from .config.types import LOGGER, RoomsProperties - -from .map_data import RandImageData, ImageData +from .map_data import RandImageData class RoomsHandler: @@ -204,7 +202,6 @@ async def async_extract_room_properties(self, json_data) -> RoomsProperties: Returns: Dictionary of room properties """ - start_total = time.time() room_properties = {} pixel_size = json_data.get("pixelSize", 5) height = json_data["size"]["y"] @@ -217,9 +214,6 @@ async def async_extract_room_properties(self, json_data) -> RoomsProperties: ) if room_id is not None and room_data is not None: room_properties[room_id] = room_data - - # Log timing information (kept internal, no debug output) - total_time = time.time() - start_total return room_properties @@ -395,7 +389,6 @@ async def async_extract_room_properties( Returns: Dictionary of room properties """ - start_total = time.time() room_properties = {} # Get basic map information @@ -463,6 +456,4 @@ async def 
async_extract_room_properties( room_properties[room_id] = room_data - # Log timing information (kept internal, no debug output) - total_time = time.time() - start_total return room_properties diff --git a/pyproject.toml b/pyproject.toml index 6b506c0..04d6ac5 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,6 +1,6 @@ [tool.poetry] name = "valetudo-map-parser" -version = "0.1.10rc7" +version = "0.1.10" description = "A Python library to parse Valetudo map data returning a PIL Image object." authors = ["Sandro Cantarella "] license = "Apache-2.0" @@ -18,7 +18,7 @@ python = ">=3.13" numpy = ">=1.26.4" Pillow = ">=10.3.0" scipy = ">=1.12.0" -mvcrender = ">=0.0.4" +mvcrender = ">=0.0.5" [tool.poetry.group.dev.dependencies] ruff = "*" From 3eaa9b9f0fea9d56bbc02737a04ff48091d372e9 Mon Sep 17 00:00:00 2001 From: SCA075 <82227818+sca075@users.noreply.github.com> Date: Sun, 5 Oct 2025 13:26:30 +0200 Subject: [PATCH 05/25] corrections done Signed-off-by: Sandro Cantarella --- SCR/valetudo_map_parser/config/drawable.py | 5 +-- SCR/valetudo_map_parser/config/utils.py | 3 +- SCR/valetudo_map_parser/map_data.py | 5 +-- SCR/valetudo_map_parser/rand256_handler.py | 38 ++++++++++++---------- 4 files changed, 26 insertions(+), 25 deletions(-) diff --git a/SCR/valetudo_map_parser/config/drawable.py b/SCR/valetudo_map_parser/config/drawable.py index 33715be..d963c7a 100644 --- a/SCR/valetudo_map_parser/config/drawable.py +++ b/SCR/valetudo_map_parser/config/drawable.py @@ -288,11 +288,8 @@ async def lines( if x0 == x1 and y0 == y1: continue - # Get blended color for this line segment - blended_color = get_blended_color(x0, y0, x1, y1, arr, color) - # Use the optimized line drawing method - arr = Drawable._line(arr, x0, y0, x1, y1, blended_color, width) + arr = Drawable._line(arr, x0, y0, x1, y1, color, width) return arr diff --git a/SCR/valetudo_map_parser/config/utils.py b/SCR/valetudo_map_parser/config/utils.py index baf42ec..56bf974 100644 --- 
a/SCR/valetudo_map_parser/config/utils.py +++ b/SCR/valetudo_map_parser/config/utils.py @@ -196,10 +196,11 @@ async def _async_update_shared_data(self, destinations: Destinations | None = No if hasattr(self, "get_rooms_attributes") and ( self.shared.map_rooms is None and destinations is not None ): - (self.shared.map_rooms,) = await self.get_rooms_attributes(destinations) + self.shared.map_rooms = await self.get_rooms_attributes(destinations) if self.shared.map_rooms: LOGGER.debug("%s: Rand256 attributes rooms updated", self.file_name) + if hasattr(self, "async_get_rooms_attributes") and ( self.shared.map_rooms is None ): diff --git a/SCR/valetudo_map_parser/map_data.py b/SCR/valetudo_map_parser/map_data.py index fee7d03..c7119ae 100755 --- a/SCR/valetudo_map_parser/map_data.py +++ b/SCR/valetudo_map_parser/map_data.py @@ -552,8 +552,9 @@ def get_rrm_currently_cleaned_zones(json_data: JsonType) -> list[dict[str, Any]] @staticmethod def get_rrm_forbidden_zones(json_data: JsonType) -> list[dict[str, Any]]: """Get the forbidden zones from the json.""" - re_zones = json_data.get("forbidden_zones", []) - re_zones.extend(json_data.get("forbidden_mop_zones", [])) + re_zones = json_data.get("forbidden_zones", []) + json_data.get( + "forbidden_mop_zones", [] + ) formatted_zones = RandImageData._rrm_valetudo_format_zone(re_zones) return formatted_zones diff --git a/SCR/valetudo_map_parser/rand256_handler.py b/SCR/valetudo_map_parser/rand256_handler.py index 71dc2f2..6ae6d58 100644 --- a/SCR/valetudo_map_parser/rand256_handler.py +++ b/SCR/valetudo_map_parser/rand256_handler.py @@ -2,7 +2,7 @@ Image Handler Module for Valetudo Re Vacuums. It returns the PIL PNG image frame relative to the Map Data extrapolated from the vacuum json. It also returns calibration, rooms data to the card and other images information to the camera. 
-Version: 0.1.9.a6 +Version: 0.1.10 """ from __future__ import annotations @@ -93,17 +93,13 @@ async def extract_room_properties( # Update self.rooms_pos from room_properties for compatibility with other methods self.rooms_pos = [] - room_ids = [] # Collect room IDs for shared.map_rooms for room_id, room_data in room_properties.items(): self.rooms_pos.append( {"name": room_data["name"], "outline": room_data["outline"]} ) - # Store the room number (segment ID) for MQTT active zone mapping - room_ids.append(room_data["number"]) - - # Update shared.map_rooms with the room IDs for MQTT active zone mapping - self.shared.map_rooms = room_ids + # Update shared.map_rooms with the full room properties (consistent with Hypfer) + self.shared.map_rooms = room_properties # get the zones and points data self.shared.map_pred_zones = await self.async_zone_propriety(zones_data) # get the points data @@ -120,7 +116,7 @@ async def extract_room_properties( e, exc_info=True, ) - return None, None, None + return None async def get_image_from_rrm( self, @@ -185,6 +181,7 @@ async def get_image_from_rrm( async def _setup_robot_and_image( self, m_json, size_x, size_y, colors, destinations ): + """Set up the elements of the map and the image.""" ( _, robot_position, @@ -209,12 +206,6 @@ async def _setup_robot_and_image( ) LOGGER.info("%s: Completed base Layers", self.file_name) - # Update element map for rooms - if 0 < room_id <= 15: - # This is a simplification - in a real implementation we would - # need to identify the exact pixels that belong to each room - pass - if room_id > 0 and not self.room_propriety: self.room_propriety = await self.get_rooms_attributes(destinations) @@ -222,8 +213,10 @@ async def _setup_robot_and_image( if not self.rooms_pos and not self.room_propriety: self.room_propriety = await self.get_rooms_attributes(destinations) - # Always check robot position for zooming (fallback) - if self.rooms_pos and robot_position and not hasattr(self, "robot_pos"): + # Always 
check robot position for zooming (update if room info is missing) + if self.rooms_pos and robot_position and ( + self.robot_pos is None or "in_room" not in self.robot_pos + ): self.robot_pos = await self.async_get_robot_in_room( (robot_position[0] * 10), (robot_position[1] * 10), @@ -284,6 +277,7 @@ async def _setup_robot_and_image( async def _draw_map_elements( self, img_np_array, m_json, colors, robot_position, robot_position_angle ): + """Draw map elements on the image.""" # Draw charger if enabled if self.drawing_config.is_enabled(DrawableElement.CHARGER): img_np_array, self.charger_pos = await self.imd.async_draw_charger( @@ -354,6 +348,10 @@ async def _draw_map_elements( return img_np_array async def _finalize_image(self, pil_img): + """Finalize the image by resizing if needed.""" + if pil_img is None: + LOGGER.warning("%s: Image is None. Returning None.", self.file_name) + return None if self.check_zoom_and_aspect_ratio(): resize_params = self.prepare_resize_params(pil_img, True) pil_img = await self.async_resize_images(resize_params) @@ -368,8 +366,6 @@ async def get_rooms_attributes( self, destinations: JsonType = None ) -> tuple[RoomsProperties, Any, Any]: """Return the rooms attributes.""" - if self.room_propriety: - return self.room_propriety if self.json_data and destinations: self.room_propriety = await self.extract_room_properties( self.json_data, destinations @@ -394,6 +390,12 @@ async def async_get_robot_in_room( } # Handle active zones self.active_zones = self.shared.rand256_active_zone + LOGGER.debug( + "%s: Robot is in %s room (polygon detection). 
%s", + self.file_name, + self.robot_in_room["room"], + self.active_zones, + ) self.zooming = False if self.active_zones and ( self.robot_in_room["id"] in range(len(self.active_zones)) From 06508c19d4e5fa3b654c2754351afc7d0ea73e2b Mon Sep 17 00:00:00 2001 From: SCA075 <82227818+sca075@users.noreply.github.com> Date: Sun, 5 Oct 2025 13:33:19 +0200 Subject: [PATCH 06/25] corrections duplicate logger Signed-off-by: Sandro Cantarella --- SCR/valetudo_map_parser/rand256_handler.py | 1 - 1 file changed, 1 deletion(-) diff --git a/SCR/valetudo_map_parser/rand256_handler.py b/SCR/valetudo_map_parser/rand256_handler.py index 54a32da..56f5fc9 100644 --- a/SCR/valetudo_map_parser/rand256_handler.py +++ b/SCR/valetudo_map_parser/rand256_handler.py @@ -19,7 +19,6 @@ COLORS, DEFAULT_IMAGE_SIZE, DEFAULT_PIXEL_SIZE, - LOGGER, Colors, JsonType, PilPNG, From 470f31c8ceeafe9fb53024775fc0a64ca2092404 Mon Sep 17 00:00:00 2001 From: SCA075 <82227818+sca075@users.noreply.github.com> Date: Tue, 7 Oct 2025 08:47:19 +0200 Subject: [PATCH 07/25] removing unused code. 
Signed-off-by: Sandro Cantarella --- SCR/valetudo_map_parser/config/drawable.py | 185 ------------------ .../config/rand256_parser.py | 18 +- SCR/valetudo_map_parser/config/shared.py | 7 +- SCR/valetudo_map_parser/config/types.py | 3 +- SCR/valetudo_map_parser/config/utils.py | 127 ++---------- SCR/valetudo_map_parser/rand256_handler.py | 15 +- tests/refactored.py | 71 ++++--- 7 files changed, 78 insertions(+), 348 deletions(-) diff --git a/SCR/valetudo_map_parser/config/drawable.py b/SCR/valetudo_map_parser/config/drawable.py index d963c7a..a7cf6f4 100644 --- a/SCR/valetudo_map_parser/config/drawable.py +++ b/SCR/valetudo_map_parser/config/drawable.py @@ -331,36 +331,6 @@ def _filled_circle( return image - @staticmethod - def _filled_circle_optimized( - image: np.ndarray, - center: Tuple[int, int], - radius: int, - color: Color, - outline_color: Color = None, - outline_width: int = 0, - ) -> np.ndarray: - """ - Optimized _filled_circle ensuring dtype compatibility with uint8. - """ - x, y = center - h, w = image.shape[:2] - color_np = np.array(color, dtype=image.dtype) - outline_color_np = ( - np.array(outline_color, dtype=image.dtype) - if outline_color is not None - else None - ) - y_indices, x_indices = np.meshgrid(np.arange(h), np.arange(w), indexing="ij") - dist_sq = (y_indices - y) ** 2 + (x_indices - x) ** 2 - circle_mask = dist_sq <= radius**2 - image[circle_mask] = color_np - if outline_width > 0 and outline_color_np is not None: - outer_mask = dist_sq <= (radius + outline_width) ** 2 - outline_mask = outer_mask & ~circle_mask - image[outline_mask] = outline_color_np - return image - @staticmethod def _ellipse( image: NumpyArray, center: Point, radius: int, color: Color @@ -593,161 +563,6 @@ def overlay_robot( ) return background_image - @staticmethod - def draw_filled_circle( - image: np.ndarray, - centers: Tuple[int, int], - radius: int, - color: Tuple[int, int, int, int], - ) -> np.ndarray: - """ - Draw multiple filled circles at once using a single 
NumPy mask. - """ - h, w = image.shape[:2] - y_indices, x_indices = np.ogrid[:h, :w] # Precompute coordinate grids - mask = np.zeros((h, w), dtype=bool) - for cx, cy in centers: - mask |= (x_indices - cx) ** 2 + (y_indices - cy) ** 2 <= radius**2 - image[mask] = color - return image - - @staticmethod - def batch_draw_elements( - image: np.ndarray, - elements: list, - element_type: str, - color: Color, - ) -> np.ndarray: - """ - Efficiently draw multiple elements of the same type at once. - - Args: - image: The image array to draw on - elements: List of element data (coordinates, etc.) - element_type: Type of element to draw ('circle', 'line', etc.) - color: Color to use for drawing - - Returns: - Modified image array - """ - if not elements or len(elements) == 0: - return image - - # Get image dimensions - height, width = image.shape[:2] - - if element_type == "circle": - # Extract circle centers and radii - centers = [] - radii = [] - for elem in elements: - if isinstance(elem, dict) and "center" in elem and "radius" in elem: - centers.append(elem["center"]) - radii.append(elem["radius"]) - elif isinstance(elem, (list, tuple)) and len(elem) >= 3: - # Format: (x, y, radius) - centers.append((elem[0], elem[1])) - radii.append(elem[2]) - - # Process circles with the same radius together - for radius in set(radii): - same_radius_centers = [ - centers[i] for i in range(len(centers)) if radii[i] == radius - ] - if same_radius_centers: - # Create a combined mask for all circles with this radius - mask = np.zeros((height, width), dtype=bool) - for cx, cy in same_radius_centers: - if 0 <= cx < width and 0 <= cy < height: - # Calculate circle bounds - min_y = max(0, cy - radius) - max_y = min(height, cy + radius + 1) - min_x = max(0, cx - radius) - max_x = min(width, cx + radius + 1) - - # Create coordinate arrays for the circle - y_indices, x_indices = np.ogrid[min_y:max_y, min_x:max_x] - - # Add this circle to the mask - circle_mask = (y_indices - cy) ** 2 + ( - x_indices 
- cx - ) ** 2 <= radius**2 - mask[min_y:max_y, min_x:max_x] |= circle_mask - - # Apply color to all circles at once - image[mask] = color - - elif element_type == "line": - # Extract line endpoints - lines = [] - widths = [] - for elem in elements: - if isinstance(elem, dict) and "start" in elem and "end" in elem: - lines.append((elem["start"], elem["end"])) - widths.append(elem.get("width", 1)) - elif isinstance(elem, (list, tuple)) and len(elem) >= 4: - # Format: (x1, y1, x2, y2, [width]) - lines.append(((elem[0], elem[1]), (elem[2], elem[3]))) - widths.append(elem[4] if len(elem) > 4 else 1) - - # Process lines with the same width together - for width in set(widths): - same_width_lines = [ - lines[i] for i in range(len(lines)) if widths[i] == width - ] - if same_width_lines: - # Create a combined mask for all lines with this width - mask = np.zeros((height, width), dtype=bool) - - # Draw all lines into the mask - for start, end in same_width_lines: - x1, y1 = start - x2, y2 = end - - # Skip invalid lines - if not ( - 0 <= x1 < width - and 0 <= y1 < height - and 0 <= x2 < width - and 0 <= y2 < height - ): - continue - - # Use Bresenham's algorithm to get line points - length = max(abs(x2 - x1), abs(y2 - y1)) - if length == 0: - continue - - t = np.linspace(0, 1, length * 2) - x_coordinates = np.round(x1 * (1 - t) + x2 * t).astype(int) - y_coordinates = np.round(y1 * (1 - t) + y2 * t).astype(int) - - # Add line points to mask - for x, y in zip(x_coordinates, y_coordinates): - if width == 1: - mask[y, x] = True - else: - # For thicker lines - half_width = width // 2 - min_y = max(0, y - half_width) - max_y = min(height, y + half_width + 1) - min_x = max(0, x - half_width) - max_x = min(width, x + half_width + 1) - - # Create a circular brush - y_indices, x_indices = np.ogrid[ - min_y:max_y, min_x:max_x - ] - brush = (y_indices - y) ** 2 + ( - x_indices - x - ) ** 2 <= half_width**2 - mask[min_y:max_y, min_x:max_x] |= brush - - # Apply color to all lines at once - 
image[mask] = color - - return image - @staticmethod async def async_draw_obstacles( image: np.ndarray, obstacle_info_list, color: Color diff --git a/SCR/valetudo_map_parser/config/rand256_parser.py b/SCR/valetudo_map_parser/config/rand256_parser.py index c1bb0f3..e65b6cf 100644 --- a/SCR/valetudo_map_parser/config/rand256_parser.py +++ b/SCR/valetudo_map_parser/config/rand256_parser.py @@ -78,6 +78,7 @@ def _get_int32_signed(data: bytes, address: int) -> int: @staticmethod def _parse_carpet_map(data: bytes) -> set[int]: + """Parse carpet map using Xiaomi method.""" carpet_map = set() for i, v in enumerate(data): @@ -87,6 +88,7 @@ def _parse_carpet_map(data: bytes) -> set[int]: @staticmethod def _parse_area(header: bytes, data: bytes) -> list: + """Parse area using Xiaomi method.""" area_pairs = RRMapParser._get_int16(header, 0x08) areas = [] for area_start in range(0, area_pairs * 16, 16): @@ -114,6 +116,7 @@ def _parse_area(header: bytes, data: bytes) -> list: @staticmethod def _parse_zones(data: bytes, header: bytes) -> list: + """Parse zones using Xiaomi method.""" zone_pairs = RRMapParser._get_int16(header, 0x08) zones = [] for zone_start in range(0, zone_pairs * 8, 8): @@ -146,21 +149,9 @@ def _parse_object_position(block_data_length: int, data: bytes) -> Dict[str, Any angle = raw_angle return {"position": [x, y], "angle": angle} - - @staticmethod - def _parse_walls(data: bytes, header: bytes) -> list: - wall_pairs = RRMapParser._get_int16(header, 0x08) - walls = [] - for wall_start in range(0, wall_pairs * 8, 8): - x0 = RRMapParser._get_int16(data, wall_start + 0) - y0 = RRMapParser._get_int16(data, wall_start + 2) - x1 = RRMapParser._get_int16(data, wall_start + 4) - y1 = RRMapParser._get_int16(data, wall_start + 6) - walls.append([x0, RRMapParser.Tools.DIMENSION_MM - y0, x1, RRMapParser.Tools.DIMENSION_MM - y1]) - return walls - @staticmethod def _parse_walls(data: bytes, header: bytes) -> list: + """Parse walls using Xiaomi method.""" wall_pairs = 
RRMapParser._get_int16(header, 0x08) walls = [] for wall_start in range(0, wall_pairs * 8, 8): @@ -223,6 +214,7 @@ def parse(self, map_buf: bytes) -> Dict[str, Any]: return {} def parse_blocks(self, raw: bytes, pixels: bool = True) -> Dict[int, Any]: + """Parse all blocks using Xiaomi method.""" blocks = {} map_header_length = self._get_int16(raw, 0x02) block_start_position = map_header_length diff --git a/SCR/valetudo_map_parser/config/shared.py b/SCR/valetudo_map_parser/config/shared.py index bffdec4..d3e9683 100755 --- a/SCR/valetudo_map_parser/config/shared.py +++ b/SCR/valetudo_map_parser/config/shared.py @@ -10,6 +10,7 @@ from PIL import Image +from .utils import pil_size_rotation from .types import ( ATTR_CALIBRATION_POINTS, ATTR_CAMERA_MODE, @@ -210,11 +211,15 @@ def generate_attributes(self) -> dict: def to_dict(self) -> dict: """Return a dictionary with image and attributes data.""" + return { "image": { "binary": self.binary_image, "pil_image": self.new_image, - "size": self.new_image.size if self.new_image else (10, 10), + "size": pil_size_rotation( + self.image_rotate, + self.new_image + ), }, "attributes": self.generate_attributes(), } diff --git a/SCR/valetudo_map_parser/config/types.py b/SCR/valetudo_map_parser/config/types.py index c6740bd..26924f5 100644 --- a/SCR/valetudo_map_parser/config/types.py +++ b/SCR/valetudo_map_parser/config/types.py @@ -34,12 +34,11 @@ class Room(TypedDict): id: int -# list[dict[str, str | list[int]]] | list[dict[str, str | list[list[int]]]] | list[dict[str, str | int]] | int]' class Destinations(TypedDict, total=False): spots: NotRequired[Optional[List[Spot]]] zones: NotRequired[Optional[List[Zone]]] rooms: NotRequired[Optional[List[Room]]] - updated: NotRequired[Optional[float]] + updated: NotRequired[Optional[float | int]] class RoomProperty(TypedDict): diff --git a/SCR/valetudo_map_parser/config/utils.py b/SCR/valetudo_map_parser/config/utils.py index 56bf974..1b80473 100644 --- 
a/SCR/valetudo_map_parser/config/utils.py +++ b/SCR/valetudo_map_parser/config/utils.py @@ -200,7 +200,6 @@ async def _async_update_shared_data(self, destinations: Destinations | None = No if self.shared.map_rooms: LOGGER.debug("%s: Rand256 attributes rooms updated", self.file_name) - if hasattr(self, "async_get_rooms_attributes") and ( self.shared.map_rooms is None ): @@ -231,15 +230,11 @@ def prepare_resize_params( self, pil_img: PilPNG, rand: bool = False ) -> ResizeParams: """Prepare resize parameters for image resizing.""" - if self.shared.image_rotate in [0, 180]: - width, height = pil_img.size - else: - height, width = pil_img.size - LOGGER.debug( - "Shared PIL image size: %s x %s", - self.shared.image_ref_width, - self.shared.image_ref_height, + width, height = pil_size_rotation( + self.shared.image_rotate, + pil_img ) + return ResizeParams( pil_img=pil_img, width=width, @@ -660,9 +655,6 @@ def get_corners( async def async_resize_image(params: ResizeParams): """Resize the image to the given dimensions and aspect ratio.""" - LOGGER.debug("Resizing image to aspect ratio: %s", params.aspect_ratio) - LOGGER.debug("Original image size: %s x %s", params.width, params.height) - LOGGER.debug("Image crop size: %s", params.crop_size) if params.aspect_ratio == "None": return params.pil_img if params.aspect_ratio != "None": @@ -698,6 +690,16 @@ async def async_resize_image(params: ResizeParams): return params.pil_img +def pil_size_rotation(image_rotate, pil_img): + """Return the size of the image.""" + if not pil_img: + return 0, 0 + if image_rotate in [0, 180]: + width, height = pil_img.size + else: + height, width = pil_img.size + return width, height + def initialize_drawing_config(handler): """ @@ -724,94 +726,6 @@ def initialize_drawing_config(handler): return drawing_config, draw - -def blend_colors(base_color, overlay_color): - """ - Blend two RGBA colors using alpha compositing. 
- - Args: - base_color: Base RGBA color tuple (r, g, b, a) - overlay_color: Overlay RGBA color tuple (r, g, b, a) - - Returns: - Blended RGBA color tuple (r, g, b, a) - """ - r1, g1, b1, a1 = base_color - r2, g2, b2, a2 = overlay_color - - # Convert alpha to 0-1 range - a1 = a1 / 255.0 - a2 = a2 / 255.0 - - # Calculate resulting alpha - a_out = a1 + a2 * (1 - a1) - - # Avoid division by zero - if a_out < 0.0001: - return [0, 0, 0, 0] - - # Calculate blended RGB components - r_out = (r1 * a1 + r2 * a2 * (1 - a1)) / a_out - g_out = (g1 * a1 + g2 * a2 * (1 - a1)) / a_out - b_out = (b1 * a1 + b2 * a2 * (1 - a1)) / a_out - - # Convert back to 0-255 range and return as tuple - return ( - int(max(0, min(255, r_out))), - int(max(0, min(255, g_out))), - int(max(0, min(255, b_out))), - int(max(0, min(255, a_out * 255))), - ) - - -def blend_pixel(array, x, y, color, element, element_map=None, drawing_config=None): - """ - Blend a pixel color with the existing color at the specified position. - Also updates the element map if the new element has higher z-index. 
- - Args: - array: The image array to modify - x: X coordinate - y: Y coordinate - color: RGBA color tuple to blend - element: Element code for the pixel - element_map: Optional element map to update - drawing_config: Optional drawing configuration for z-index lookup - - Returns: - None - """ - # Check bounds - if not (0 <= y < array.shape[0] and 0 <= x < array.shape[1]): - return - - # Get current element at this position - current_element = None - if element_map is not None: - current_element = element_map[y, x] - - # Get z-index values for comparison - current_z = 0 - new_z = 0 - - if drawing_config is not None: - current_z = ( - drawing_config.get_property(current_element, "z_index", 0) - if current_element - else 0 - ) - new_z = drawing_config.get_property(element, "z_index", 0) - - # Update element map if new element has higher z-index - if element_map is not None and new_z >= current_z: - element_map[y, x] = element - - # Blend colors - base_color = array[y, x] - blended_color = blend_colors(base_color, color) - array[y, x] = blended_color - - def manage_drawable_elements( handler, action, @@ -993,12 +907,6 @@ async def async_extract_room_outline( # If we found too few boundary points, use the rectangular outline if len(boundary_points) < 8: # Need at least 8 points for a meaningful shape - LOGGER.debug( - "%s: Room %s has too few boundary points (%d), using rectangular outline", - file_name, - str(room_id_int), - len(boundary_points), - ) return rect_outline # Use a more sophisticated algorithm to create a coherent outline @@ -1034,13 +942,6 @@ def calculate_angle(point): # Convert NumPy int64 values to regular Python integers simplified_outline = [(int(x), int(y)) for x, y in simplified_outline] - LOGGER.debug( - "%s: Room %s outline has %d points", - file_name, - str(room_id_int), - len(simplified_outline), - ) - return simplified_outline except (ValueError, IndexError, TypeError, ArithmeticError) as e: diff --git 
a/SCR/valetudo_map_parser/rand256_handler.py b/SCR/valetudo_map_parser/rand256_handler.py index 56f5fc9..64d88bd 100644 --- a/SCR/valetudo_map_parser/rand256_handler.py +++ b/SCR/valetudo_map_parser/rand256_handler.py @@ -19,13 +19,14 @@ COLORS, DEFAULT_IMAGE_SIZE, DEFAULT_PIXEL_SIZE, + LOGGER, Colors, + Destinations, JsonType, PilPNG, RobotPosition, RoomsProperties, RoomStore, - LOGGER, ) from .config.utils import ( BaseHandler, @@ -67,7 +68,9 @@ def __init__(self, shared_data): ) # Room data handler async def extract_room_properties( - self, json_data: JsonType, destinations: JsonType + self, + json_data: JsonType, + destinations: Destinations | None = None, ) -> RoomsProperties: """Extract the room properties.""" # unsorted_id = RandImageData.get_rrm_segments_ids(json_data) @@ -121,7 +124,7 @@ async def extract_room_properties( async def get_image_from_rrm( self, m_json: JsonType, # json data - destinations: None = None, # MQTT destinations for labels + destinations: Destinations | None = None, # MQTT destinations for labels ) -> PilPNG | None: """Generate Images from the json data. @param m_json: The JSON data to use to draw the image. 
@@ -214,8 +217,10 @@ async def _setup_robot_and_image( self.room_propriety = await self.get_rooms_attributes(destinations) # Always check robot position for zooming (update if room info is missing) - if self.rooms_pos and robot_position and ( - self.robot_pos is None or "in_room" not in self.robot_pos + if ( + self.rooms_pos + and robot_position + and (self.robot_pos is None or "in_room" not in self.robot_pos) ): self.robot_pos = await self.async_get_robot_in_room( (robot_position[0] * 10), diff --git a/tests/refactored.py b/tests/refactored.py index 697cb78..f5c81be 100644 --- a/tests/refactored.py +++ b/tests/refactored.py @@ -8,13 +8,13 @@ from __future__ import annotations +import asyncio import math import numpy as np -import asyncio from PIL import ImageDraw, ImageFont -from .types import Color, NumpyArray, PilPNG, Point, Union, Tuple +from .types import Color, NumpyArray, PilPNG, Point, Tuple, Union class Drawable: @@ -321,12 +321,12 @@ def _filled_circle( @staticmethod def _filled_circle_optimized( - image: np.ndarray, - center: Tuple[int, int], - radius: int, - color: Color, - outline_color: Color = None, - outline_width: int = 0, + image: np.ndarray, + center: Tuple[int, int], + radius: int, + color: Color, + outline_color: Color = None, + outline_width: int = 0, ) -> np.ndarray: """ Optimized `_filled_circle` ensuring dtype compatibility with uint8. 
@@ -354,13 +354,13 @@ def _filled_circle_optimized( outline_color_np = None # Create coordinate grids - y_indices, x_indices = np.meshgrid(np.arange(h), np.arange(w), indexing='ij') + y_indices, x_indices = np.meshgrid(np.arange(h), np.arange(w), indexing="ij") # Compute squared distances from center dist_sq = (y_indices - y) ** 2 + (x_indices - x) ** 2 # Mask for filled circle - circle_mask = dist_sq <= radius ** 2 + circle_mask = dist_sq <= radius**2 image[circle_mask] = color_np # Directly modify the image if outline_width > 0 and outline_color_np is not None: @@ -370,7 +370,6 @@ def _filled_circle_optimized( return image - @staticmethod def _ellipse( image: NumpyArray, center: Point, radius: int, color: Color @@ -573,28 +572,34 @@ def draw_obstacles( return image @staticmethod - def draw_filled_circle(image: np.ndarray, centers: Tuple[int, int], radius: int, - color: Tuple[int, int, int, int], - cached_grid: Tuple[np.ndarray, np.ndarray] = None) -> np.ndarray: + def draw_filled_circle( + image: np.ndarray, + centers: Tuple[int, int], + radius: int, + color: Tuple[int, int, int, int], + cached_grid: Tuple[np.ndarray, np.ndarray] = None, + ) -> np.ndarray: """ - Draw multiple filled circles at once using a single NumPy mask. + Draw multiple filled circles at once using a single NumPy mask. - Parameters: - - image: NumPy array representing the image (H, W, 4) for RGBA. - - centers: (N, 2) NumPy array containing the (x, y) coordinates of N circle centers. - - radius: Radius of all circles. - - color: Color as a tuple (R, G, B, A). + Parameters: + - image: NumPy array representing the image (H, W, 4) for RGBA. + - centers: (N, 2) NumPy array containing the (x, y) coordinates of N circle centers. + - radius: Radius of all circles. + - color: Color as a tuple (R, G, B, A). - Returns: - - Modified image with filled circles drawn in one operation. - """ + Returns: + - Modified image with filled circles drawn in one operation. 
+ """ h, w = image.shape[:2] y_indices, x_indices = np.ogrid[:h, :w] # Precompute coordinate grids # Compute mask for all circles at once mask = np.zeros((h, w), dtype=bool) for cx, cy in centers: - mask |= (x_indices - cx) ** 2 + (y_indices - cy) ** 2 <= radius ** 2 # Apply all circles in one pass + mask |= (x_indices - cx) ** 2 + ( + y_indices - cy + ) ** 2 <= radius**2 # Apply all circles in one pass # Apply color where mask is True (broadcasting works for multi-channel) image[mask] = color @@ -602,8 +607,9 @@ def draw_filled_circle(image: np.ndarray, centers: Tuple[int, int], radius: int, return image @staticmethod - async def async_draw_obstacles(image: np.ndarray, obstacle_info_list, - color: Tuple[int, int, int, int]) -> np.ndarray: + async def async_draw_obstacles( + image: np.ndarray, obstacle_info_list, color: Tuple[int, int, int, int] + ) -> np.ndarray: """ Optimized async version of `draw_obstacles` using `asyncio.gather()`. @@ -626,12 +632,19 @@ def extract_centers(obstacle_info_list): Returns: - NumPy array of shape (N, 2) where each row is (x, y). 
""" - return np.array([[obs["points"]["x"], obs["points"]["y"]] for obs in obstacle_info_list], dtype=np.int32) + return np.array( + [ + [obs["points"]["x"], obs["points"]["y"]] + for obs in obstacle_info_list + ], + dtype=np.int32, + ) - centers = await asyncio.get_running_loop().run_in_executor(None, extract_centers, obstacle_info_list) + centers = await asyncio.get_running_loop().run_in_executor( + None, extract_centers, obstacle_info_list + ) Drawable.draw_filled_circle(image, centers, 6, color) - return image @staticmethod From e4298dbdb56a324652dc290226161a04d0e55d2e Mon Sep 17 00:00:00 2001 From: SCA075 <82227818+sca075@users.noreply.github.com> Date: Tue, 7 Oct 2025 17:36:38 +0200 Subject: [PATCH 08/25] delete auto_crop.py and color_utils.py as not used anymore Signed-off-by: Sandro Cantarella --- SCR/valetudo_map_parser/config/auto_crop.py | 452 ------------------ SCR/valetudo_map_parser/config/color_utils.py | 104 ---- 2 files changed, 556 deletions(-) delete mode 100644 SCR/valetudo_map_parser/config/auto_crop.py delete mode 100644 SCR/valetudo_map_parser/config/color_utils.py diff --git a/SCR/valetudo_map_parser/config/auto_crop.py b/SCR/valetudo_map_parser/config/auto_crop.py deleted file mode 100644 index 5c7b10d..0000000 --- a/SCR/valetudo_map_parser/config/auto_crop.py +++ /dev/null @@ -1,452 +0,0 @@ -"""Auto Crop Class for trimming and zooming images. 
-Version: 2024.10.0""" - -from __future__ import annotations - -import logging - -import numpy as np -from scipy import ndimage - -from .async_utils import AsyncNumPy -from .types import Color, NumpyArray, TrimCropData, TrimsData -from .utils import BaseHandler - - -_LOGGER = logging.getLogger(__name__) - - -class TrimError(Exception): - """Exception raised for errors in the trim process.""" - - def __init__(self, message, image): - super().__init__(message) - self.image = image - - -class AutoCrop: - """Auto Crop Class for trimming and zooming images.""" - - def __init__(self, handler: BaseHandler): - self.auto_crop = None # auto crop data to be calculate once. - self.crop_area = None - self.handler = handler - trim_data = self.handler.shared.trims.to_dict() # trims data - self.trim_up = trim_data.get("trim_up", 0) # trim up - self.trim_down = trim_data.get("trim_down", 0) # trim down - self.trim_left = trim_data.get("trim_left", 0) # trim left - self.trim_right = trim_data.get("trim_right", 0) # trim right - self.offset_top = self.handler.shared.offset_top # offset top - self.offset_bottom = self.handler.shared.offset_down # offset bottom - self.offset_left = self.handler.shared.offset_left # offset left - self.offset_right = self.handler.shared.offset_right # offset right - - @staticmethod - def validate_crop_dimensions(shared): - """Ensure width and height are valid before processing cropping.""" - if shared.image_ref_width <= 0 or shared.image_ref_height <= 0: - _LOGGER.warning( - "Auto-crop failed: Invalid dimensions (width=%s, height=%s). 
Using original image.", - shared.image_ref_width, - shared.image_ref_height, - ) - return False - return True - - def check_trim( - self, trimmed_height, trimmed_width, margin_size, image_array, file_name, rotate - ): - """Check if the trim is okay.""" - if trimmed_height <= margin_size or trimmed_width <= margin_size: - self.crop_area = [0, 0, image_array.shape[1], image_array.shape[0]] - self.handler.img_size = (image_array.shape[1], image_array.shape[0]) - raise TrimError( - f"{file_name}: Trimming failed at rotation {rotate}.", image_array - ) - - def _calculate_trimmed_dimensions(self): - """Calculate and update the dimensions after trimming.""" - trimmed_width = max( - 1, # Ensure at least 1px - (self.trim_right - self.offset_right) - (self.trim_left + self.offset_left), - ) - trimmed_height = max( - 1, # Ensure at least 1px - (self.trim_down - self.offset_bottom) - (self.trim_up + self.offset_top), - ) - - # Ensure shared reference dimensions are updated - if hasattr(self.handler.shared, "image_ref_height") and hasattr( - self.handler.shared, "image_ref_width" - ): - self.handler.shared.image_ref_height = trimmed_height - self.handler.shared.image_ref_width = trimmed_width - else: - _LOGGER.warning( - "Shared attributes for image dimensions are not initialized." 
- ) - - return trimmed_width, trimmed_height - - async def _async_auto_crop_data(self, tdata: TrimsData): # , tdata=None - """Load the auto crop data from the Camera config.""" - if not self.auto_crop: - trims_data = TrimCropData.from_dict(dict(tdata.to_dict())).to_list() - ( - self.trim_left, - self.trim_up, - self.trim_right, - self.trim_down, - ) = trims_data - if trims_data != [0, 0, 0, 0]: - self._calculate_trimmed_dimensions() - else: - trims_data = None - return trims_data - return None - - def auto_crop_offset(self): - """Calculate the offset for the auto crop.""" - if self.auto_crop: - self.auto_crop[0] += self.offset_left - self.auto_crop[1] += self.offset_top - self.auto_crop[2] -= self.offset_right - self.auto_crop[3] -= self.offset_bottom - - async def _init_auto_crop(self): - """Initialize the auto crop data.""" - if not self.auto_crop: # and self.handler.shared.vacuum_state == "docked": - self.auto_crop = await self._async_auto_crop_data(self.handler.shared.trims) - if self.auto_crop: - self.auto_crop_offset() - else: - self.handler.max_frames = 1205 - - # Fallback: Ensure auto_crop is valid - if not self.auto_crop or any(v < 0 for v in self.auto_crop): - self.auto_crop = None - - return self.auto_crop - - async def async_image_margins( - self, image_array: NumpyArray, detect_colour: Color - ) -> tuple[int, int, int, int]: - """Crop the image based on the auto crop area using scipy.ndimage for better performance.""" - # Import scipy.ndimage here to avoid import at module level - - # Create a binary mask where True = non-background pixels - # This is much more memory efficient than storing coordinates - mask = ~np.all(image_array == list(detect_colour), axis=2) - - # Use scipy.ndimage.find_objects to efficiently find the bounding box - # This returns a list of slice objects that define the bounding box - # Label the mask with a single label (1) and find its bounding box - labeled_mask = mask.astype(np.int8) # Convert to int8 (smallest integer type) - 
objects = ndimage.find_objects(labeled_mask) - - if not objects: # No objects found - _LOGGER.warning( - "%s: No non-background pixels found in image", self.handler.file_name - ) - # Return full image dimensions as fallback - return 0, 0, image_array.shape[1], image_array.shape[0] - - # Extract the bounding box coordinates from the slice objects - y_slice, x_slice = objects[0] - min_y, max_y = y_slice.start, y_slice.stop - 1 - min_x, max_x = x_slice.start, x_slice.stop - 1 - - return min_y, min_x, max_x, max_y - - async def async_get_room_bounding_box( - self, room_name: str, rand256: bool = False - ) -> tuple[int, int, int, int] | None: - """Calculate bounding box coordinates from room outline for zoom functionality. - - Args: - room_name: Name of the room to get bounding box for - rand256: Whether this is for a rand256 vacuum (applies /10 scaling) - - Returns: - Tuple of (left, right, up, down) coordinates or None if room not found - """ - try: - # For Hypfer vacuums, check room_propriety first, then rooms_pos - if hasattr(self.handler, "room_propriety") and self.handler.room_propriety: - # Handle different room_propriety formats - room_data_dict = None - - if isinstance(self.handler.room_propriety, dict): - # Hypfer handler: room_propriety is a dictionary - room_data_dict = self.handler.room_propriety - elif ( - isinstance(self.handler.room_propriety, tuple) - and len(self.handler.room_propriety) >= 1 - ): - # Rand256 handler: room_propriety is a tuple (room_properties, zone_properties, point_properties) - room_data_dict = self.handler.room_propriety[0] - - if room_data_dict and isinstance(room_data_dict, dict): - for room_id, room_data in room_data_dict.items(): - if room_data.get("name") == room_name: - outline = room_data.get("outline", []) - if outline: - xs, ys = zip(*outline) - left, right = min(xs), max(xs) - up, down = min(ys), max(ys) - - if rand256: - # Apply scaling for rand256 vacuums - left = round(left / 10) - right = round(right / 10) - up = 
round(up / 10) - down = round(down / 10) - - return left, right, up, down - - # Fallback: check rooms_pos (used by both Hypfer and Rand256) - if hasattr(self.handler, "rooms_pos") and self.handler.rooms_pos: - for room in self.handler.rooms_pos: - if room.get("name") == room_name: - outline = room.get("outline", []) - if outline: - xs, ys = zip(*outline) - left, right = min(xs), max(xs) - up, down = min(ys), max(ys) - - if rand256: - # Apply scaling for rand256 vacuums - left = round(left / 10) - right = round(right / 10) - up = round(up / 10) - down = round(down / 10) - - return left, right, up, down - - _LOGGER.warning( - "%s: Room '%s' not found for zoom bounding box calculation", - self.handler.file_name, - room_name, - ) - return None - - except Exception as e: - _LOGGER.warning( - "%s: Error calculating room bounding box for '%s': %s", - self.handler.file_name, - room_name, - e, - ) - return None - - async def async_check_if_zoom_is_on( - self, - image_array: NumpyArray, - margin_size: int = 100, - zoom: bool = False, - rand256: bool = False, - ) -> NumpyArray: - """Check if the image needs to be zoomed.""" - - if ( - zoom - and self.handler.shared.vacuum_state == "cleaning" - and self.handler.shared.image_auto_zoom - ): - # Get the current room name from robot_pos (not robot_in_room) - current_room = ( - self.handler.robot_pos.get("in_room") - if self.handler.robot_pos - else None - ) - _LOGGER.info(f"Current room: {current_room}") - - if not current_room: - # For Rand256 handler, try to zoom based on robot position even without room data - if ( - rand256 - and hasattr(self.handler, "robot_position") - and self.handler.robot_position - ): - robot_x, robot_y = ( - self.handler.robot_position[0], - self.handler.robot_position[1], - ) - - # Create a zoom area around the robot position (e.g., 800x800 pixels for better view) - zoom_size = 800 - trim_left = max(0, int(robot_x - zoom_size // 2)) - trim_right = min( - image_array.shape[1], int(robot_x + zoom_size // 
2) - ) - trim_up = max(0, int(robot_y - zoom_size // 2)) - trim_down = min(image_array.shape[0], int(robot_y + zoom_size // 2)) - - _LOGGER.info( - "%s: Zooming to robot position area (%d, %d) with size %dx%d", - self.handler.file_name, - robot_x, - robot_y, - trim_right - trim_left, - trim_down - trim_up, - ) - - return image_array[trim_up:trim_down, trim_left:trim_right] - else: - _LOGGER.warning( - "%s: No room information available for zoom. Using full image.", - self.handler.file_name, - ) - return image_array[ - self.auto_crop[1] : self.auto_crop[3], - self.auto_crop[0] : self.auto_crop[2], - ] - - # Calculate bounding box from room outline - bounding_box = await self.async_get_room_bounding_box(current_room, rand256) - - if not bounding_box: - _LOGGER.warning( - "%s: Could not calculate bounding box for room '%s'. Using full image.", - self.handler.file_name, - current_room, - ) - return image_array[ - self.auto_crop[1] : self.auto_crop[3], - self.auto_crop[0] : self.auto_crop[2], - ] - - left, right, up, down = bounding_box - - # Apply margins - trim_left = left - margin_size - trim_right = right + margin_size - trim_up = up - margin_size - trim_down = down + margin_size - # Ensure valid trim values - trim_left, trim_right = sorted([trim_left, trim_right]) - trim_up, trim_down = sorted([trim_up, trim_down]) - - # Prevent zero-sized images - if trim_right - trim_left < 1 or trim_down - trim_up < 1: - _LOGGER.warning( - "Zooming resulted in an invalid crop area. Using full image." 
- ) - return image_array # Return original image - - trimmed = image_array[trim_up:trim_down, trim_left:trim_right] - - else: - trimmed = image_array[ - self.auto_crop[1] : self.auto_crop[3], - self.auto_crop[0] : self.auto_crop[2], - ] - - return trimmed - - async def async_rotate_the_image( - self, trimmed: NumpyArray, rotate: int - ) -> NumpyArray: - """Rotate the image and return the new array.""" - if rotate == 90: - rotated = await AsyncNumPy.async_rot90(trimmed) - self.crop_area = [ - self.trim_left, - self.trim_up, - self.trim_right, - self.trim_down, - ] - elif rotate == 180: - rotated = await AsyncNumPy.async_rot90(trimmed, 2) - self.crop_area = self.auto_crop - elif rotate == 270: - rotated = await AsyncNumPy.async_rot90(trimmed, 3) - self.crop_area = [ - self.trim_left, - self.trim_up, - self.trim_right, - self.trim_down, - ] - else: - rotated = trimmed - self.crop_area = self.auto_crop - return rotated - - async def async_auto_trim_and_zoom_image( - self, - image_array: NumpyArray, - detect_colour: Color = (93, 109, 126, 255), - margin_size: int = 0, - rotate: int = 0, - zoom: bool = False, - rand256: bool = False, - ): - """ - Automatically crops and trims a numpy array and returns the processed image. 
- """ - try: - self.auto_crop = await self._init_auto_crop() - if (self.auto_crop is None) or (self.auto_crop == [0, 0, 0, 0]): - # Find the coordinates of the first occurrence of a non-background color - min_y, min_x, max_x, max_y = await self.async_image_margins( - image_array, detect_colour - ) - # Calculate and store the trims coordinates with margins - self.trim_left = int(min_x) - margin_size - self.trim_up = int(min_y) - margin_size - self.trim_right = int(max_x) + margin_size - self.trim_down = int(max_y) + margin_size - del min_y, min_x, max_x, max_y - - # Calculate the dimensions after trimming using min/max values - trimmed_width, trimmed_height = self._calculate_trimmed_dimensions() - - # Test if the trims are okay or not - try: - self.check_trim( - trimmed_height, - trimmed_width, - margin_size, - image_array, - self.handler.file_name, - rotate, - ) - except TrimError as e: - return e.image - - # Store Crop area of the original image_array we will use from the next frame. - self.auto_crop = TrimCropData( - self.trim_left, - self.trim_up, - self.trim_right, - self.trim_down, - ).to_list() - # Update the trims data in the shared instance - self.handler.shared.trims = TrimsData.from_dict( - { - "trim_left": self.trim_left, - "trim_up": self.trim_up, - "trim_right": self.trim_right, - "trim_down": self.trim_down, - } - ) - self.auto_crop_offset() - # If it is needed to zoom the image. - trimmed = await self.async_check_if_zoom_is_on( - image_array, margin_size, zoom, rand256 - ) - del image_array # Free memory. - # Rotate the cropped image based on the given angle - rotated = await self.async_rotate_the_image(trimmed, rotate) - del trimmed # Free memory. 
- self.handler.crop_img_size = [rotated.shape[1], rotated.shape[0]] - - except RuntimeError as e: - _LOGGER.warning( - "%s: Error %s during auto trim and zoom.", - self.handler.file_name, - e, - exc_info=True, - ) - return None - return rotated diff --git a/SCR/valetudo_map_parser/config/color_utils.py b/SCR/valetudo_map_parser/config/color_utils.py deleted file mode 100644 index 80d1297..0000000 --- a/SCR/valetudo_map_parser/config/color_utils.py +++ /dev/null @@ -1,104 +0,0 @@ -"""Utility functions for color operations in the map parser.""" - -from typing import Optional - -from .types import Color, NumpyArray - - -def get_blended_color( - x0: int, - y0: int, - x1: int, - y1: int, - arr: Optional[NumpyArray], - color: Color, -) -> Color: - """ - Get a blended color for a pixel based on the current element map and the new element to draw. - - This function: - 1. Gets the background colors at the start and end points (with offset to avoid sampling already drawn pixels) - 2. Directly blends the foreground color with the background using straight alpha - 3. 
Returns the average of the two blended colors - - Returns: - Blended RGBA color to use for drawing - """ - # Extract foreground color components - fg_r, fg_g, fg_b, fg_a = color - fg_alpha = fg_a / 255.0 # Convert to 0-1 range - - # Fast path for fully opaque or transparent foreground - if fg_a == 255: - return color - if fg_a == 0: - # Sample background at midpoint - mid_x, mid_y = (x0 + x1) // 2, (y0 + y1) // 2 - if 0 <= mid_y < arr.shape[0] and 0 <= mid_x < arr.shape[1]: - return Color(arr[mid_y, mid_x]) - return Color(0, 0, 0, 0) # Default if out of bounds - - # Calculate direction vector for offset sampling - dx = x1 - x0 - dy = y1 - y0 - length = max(1, (dx**2 + dy**2) ** 0.5) # Avoid division by zero - offset = 5 # 5-pixel offset to avoid sampling already drawn pixels - - # Calculate offset coordinates for start point (move away from the line) - offset_x0 = int(x0 - (offset * dx / length)) - offset_y0 = int(y0 - (offset * dy / length)) - - # Calculate offset coordinates for end point (move away from the line) - offset_x1 = int(x1 + (offset * dx / length)) - offset_y1 = int(y1 + (offset * dy / length)) - - # Sample background at offset start point - if 0 <= offset_y0 < arr.shape[0] and 0 <= offset_x0 < arr.shape[1]: - bg_color_start = arr[offset_y0, offset_x0] - # Direct straight alpha blending - start_r = int(fg_r * fg_alpha + bg_color_start[0] * (1 - fg_alpha)) - start_g = int(fg_g * fg_alpha + bg_color_start[1] * (1 - fg_alpha)) - start_b = int(fg_b * fg_alpha + bg_color_start[2] * (1 - fg_alpha)) - start_a = int(fg_a + bg_color_start[3] * (1 - fg_alpha)) - start_blended_color = (start_r, start_g, start_b, start_a) - else: - # If offset point is out of bounds, try original point - if 0 <= y0 < arr.shape[0] and 0 <= x0 < arr.shape[1]: - bg_color_start = arr[y0, x0] - start_r = int(fg_r * fg_alpha + bg_color_start[0] * (1 - fg_alpha)) - start_g = int(fg_g * fg_alpha + bg_color_start[1] * (1 - fg_alpha)) - start_b = int(fg_b * fg_alpha + bg_color_start[2] * 
(1 - fg_alpha)) - start_a = int(fg_a + bg_color_start[3] * (1 - fg_alpha)) - start_blended_color = (start_r, start_g, start_b, start_a) - else: - start_blended_color = color - - # Sample background at offset end point - if 0 <= offset_y1 < arr.shape[0] and 0 <= offset_x1 < arr.shape[1]: - bg_color_end = arr[offset_y1, offset_x1] - # Direct straight alpha blending - end_r = int(fg_r * fg_alpha + bg_color_end[0] * (1 - fg_alpha)) - end_g = int(fg_g * fg_alpha + bg_color_end[1] * (1 - fg_alpha)) - end_b = int(fg_b * fg_alpha + bg_color_end[2] * (1 - fg_alpha)) - end_a = int(fg_a + bg_color_end[3] * (1 - fg_alpha)) - end_blended_color = (end_r, end_g, end_b, end_a) - else: - # If offset point is out of bounds, try original point - if 0 <= y1 < arr.shape[0] and 0 <= x1 < arr.shape[1]: - bg_color_end = arr[y1, x1] - end_r = int(fg_r * fg_alpha + bg_color_end[0] * (1 - fg_alpha)) - end_g = int(fg_g * fg_alpha + bg_color_end[1] * (1 - fg_alpha)) - end_b = int(fg_b * fg_alpha + bg_color_end[2] * (1 - fg_alpha)) - end_a = int(fg_a + bg_color_end[3] * (1 - fg_alpha)) - end_blended_color = (end_r, end_g, end_b, end_a) - else: - end_blended_color = color - - # Use the average of the two blended colors - blended_color = ( - (start_blended_color[0] + end_blended_color[0]) // 2, - (start_blended_color[1] + end_blended_color[1]) // 2, - (start_blended_color[2] + end_blended_color[2]) // 2, - (start_blended_color[3] + end_blended_color[3]) // 2, - ) - return blended_color From b3dd06a6ad2bb0b70dc7f9ee1df8e00270307276 Mon Sep 17 00:00:00 2001 From: SCA075 <82227818+sca075@users.noreply.github.com> Date: Tue, 7 Oct 2025 18:51:33 +0200 Subject: [PATCH 09/25] ruff formatted and some mod on drawable.py Signed-off-by: Sandro Cantarella --- SCR/valetudo_map_parser/config/drawable.py | 37 ++++++---------------- SCR/valetudo_map_parser/config/shared.py | 5 +-- SCR/valetudo_map_parser/config/utils.py | 7 ++-- SCR/valetudo_map_parser/hypfer_draw.py | 2 -- 4 files changed, 13 insertions(+), 38 
deletions(-) diff --git a/SCR/valetudo_map_parser/config/drawable.py b/SCR/valetudo_map_parser/config/drawable.py index a7cf6f4..0b07f06 100644 --- a/SCR/valetudo_map_parser/config/drawable.py +++ b/SCR/valetudo_map_parser/config/drawable.py @@ -53,49 +53,30 @@ async def from_json_to_image( ) -> NumpyArray: """Draw the layers (rooms) from the vacuum JSON data onto the image array.""" image_array = layer - # Extract alpha from color - alpha = color[3] if len(color) == 4 else 255 + need_blending = color[3] < 255 - # Create the full color with alpha - full_color = color if len(color) == 4 else (*color, 255) - - # Check if we need to blend colors (alpha < 255) - need_blending = alpha < 255 - - # Loop through pixels to find min and max coordinates for x, y, z in pixels: col = x * pixel_size row = y * pixel_size - # Draw pixels as blocks for i in range(z): - # Get the region to update region_slice = ( slice(row, row + pixel_size), slice(col + i * pixel_size, col + (i + 1) * pixel_size), ) if need_blending: - # Sample the center of the region for blending - center_y = row + pixel_size // 2 - center_x = col + i * pixel_size + pixel_size // 2 - - # Only blend if coordinates are valid + cy = row + pixel_size // 2 + cx = col + i * pixel_size + pixel_size // 2 if ( - 0 <= center_y < image_array.shape[0] - and 0 <= center_x < image_array.shape[1] + 0 <= cy < image_array.shape[0] + and 0 <= cx < image_array.shape[1] ): - # Get blended color - blended_color = sample_and_blend_color( - image_array, center_x, center_y, full_color - ) - # Apply blended color to the region - image_array[region_slice] = blended_color + px = sample_and_blend_color(image_array, cx, cy, color) + image_array[region_slice] = px else: - # Use original color if out of bounds - image_array[region_slice] = full_color + image_array[region_slice] = color else: - # No blending needed, use direct assignment - image_array[region_slice] = full_color + image_array[region_slice] = color return image_array diff --git 
a/SCR/valetudo_map_parser/config/shared.py b/SCR/valetudo_map_parser/config/shared.py index d3e9683..62c4173 100755 --- a/SCR/valetudo_map_parser/config/shared.py +++ b/SCR/valetudo_map_parser/config/shared.py @@ -216,10 +216,7 @@ def to_dict(self) -> dict: "image": { "binary": self.binary_image, "pil_image": self.new_image, - "size": pil_size_rotation( - self.image_rotate, - self.new_image - ), + "size": pil_size_rotation(self.image_rotate, self.new_image), }, "attributes": self.generate_attributes(), } diff --git a/SCR/valetudo_map_parser/config/utils.py b/SCR/valetudo_map_parser/config/utils.py index 1b80473..c1b5eb0 100644 --- a/SCR/valetudo_map_parser/config/utils.py +++ b/SCR/valetudo_map_parser/config/utils.py @@ -230,10 +230,7 @@ def prepare_resize_params( self, pil_img: PilPNG, rand: bool = False ) -> ResizeParams: """Prepare resize parameters for image resizing.""" - width, height = pil_size_rotation( - self.shared.image_rotate, - pil_img - ) + width, height = pil_size_rotation(self.shared.image_rotate, pil_img) return ResizeParams( pil_img=pil_img, @@ -690,6 +687,7 @@ async def async_resize_image(params: ResizeParams): return params.pil_img + def pil_size_rotation(image_rotate, pil_img): """Return the size of the image.""" if not pil_img: @@ -726,6 +724,7 @@ def initialize_drawing_config(handler): return drawing_config, draw + def manage_drawable_elements( handler, action, diff --git a/SCR/valetudo_map_parser/hypfer_draw.py b/SCR/valetudo_map_parser/hypfer_draw.py index 9432e35..fb74262 100755 --- a/SCR/valetudo_map_parser/hypfer_draw.py +++ b/SCR/valetudo_map_parser/hypfer_draw.py @@ -269,8 +269,6 @@ async def async_draw_zones( zone_clean = self.img_h.data.find_zone_entities(m_json) except (ValueError, KeyError): zone_clean = None - else: - _LOGGER.info("%s: Got zones.", self.file_name) if zone_clean: # Process zones sequentially to avoid memory-intensive array copies From 588b6d83d56720db8d58605df5fd621faffc1f17 Mon Sep 17 00:00:00 2001 From: SCA075 
<82227818+sca075@users.noreply.github.com> Date: Tue, 7 Oct 2025 22:57:06 +0200 Subject: [PATCH 10/25] changes Signed-off-by: Sandro Cantarella --- SCR/valetudo_map_parser/config/rand256_parser.py | 3 ++- SCR/valetudo_map_parser/rand256_handler.py | 7 ++++--- 2 files changed, 6 insertions(+), 4 deletions(-) diff --git a/SCR/valetudo_map_parser/config/rand256_parser.py b/SCR/valetudo_map_parser/config/rand256_parser.py index 540f005..7de7d33 100644 --- a/SCR/valetudo_map_parser/config/rand256_parser.py +++ b/SCR/valetudo_map_parser/config/rand256_parser.py @@ -1,4 +1,5 @@ -"""New Rand256 Map Parser - Based on Xiaomi/Roborock implementation with precise binary parsing.""" +"""New Rand256 Map Parser - +Based on Xiaomi/Roborock implementation with precise binary parsing.""" import math import struct diff --git a/SCR/valetudo_map_parser/rand256_handler.py b/SCR/valetudo_map_parser/rand256_handler.py index 64d88bd..9d488a9 100644 --- a/SCR/valetudo_map_parser/rand256_handler.py +++ b/SCR/valetudo_map_parser/rand256_handler.py @@ -85,9 +85,10 @@ async def extract_room_properties( json_data, size_x, size_y, top, left, True ) - dest_json = destinations - zones_data = dict(dest_json).get("zones", []) - points_data = dict(dest_json).get("spots", []) + + dest_json = destinations if destinations else {} + zones_data = dest_json.get("zones", []) + points_data = dest_json.get("spots", []) # Use the RandRoomsHandler to extract room properties room_properties = await self.rooms_handler.async_extract_room_properties( From 0a29d0c1ee4645dfdcf78a9d3b3ca1a53bf12433 Mon Sep 17 00:00:00 2001 From: SCA075 <82227818+sca075@users.noreply.github.com> Date: Tue, 7 Oct 2025 22:58:44 +0200 Subject: [PATCH 11/25] remove duplicate --- .../config/rand256_parser.py | 19 ------------------- 1 file changed, 19 deletions(-) diff --git a/SCR/valetudo_map_parser/config/rand256_parser.py b/SCR/valetudo_map_parser/config/rand256_parser.py index 7de7d33..11088ca 100644 --- 
a/SCR/valetudo_map_parser/config/rand256_parser.py +++ b/SCR/valetudo_map_parser/config/rand256_parser.py @@ -170,25 +170,6 @@ def _parse_walls(data: bytes, header: bytes) -> list: ) return walls - @staticmethod - def _parse_walls(data: bytes, header: bytes) -> list: - wall_pairs = RRMapParser._get_int16(header, 0x08) - walls = [] - for wall_start in range(0, wall_pairs * 8, 8): - x0 = RRMapParser._get_int16(data, wall_start + 0) - y0 = RRMapParser._get_int16(data, wall_start + 2) - x1 = RRMapParser._get_int16(data, wall_start + 4) - y1 = RRMapParser._get_int16(data, wall_start + 6) - walls.append( - [ - x0, - RRMapParser.Tools.DIMENSION_MM - y0, - x1, - RRMapParser.Tools.DIMENSION_MM - y1, - ] - ) - return walls - @staticmethod def _parse_path_block(buf: bytes, offset: int, length: int) -> Dict[str, Any]: """Parse path block using EXACT same method as working parser.""" From c88110d823734e96f28dab4537be94a57c6553d6 Mon Sep 17 00:00:00 2001 From: SCA075 <82227818+sca075@users.noreply.github.com> Date: Wed, 8 Oct 2025 21:02:52 +0200 Subject: [PATCH 12/25] remove potential memory leak from code. 
--- SCR/valetudo_map_parser/config/utils.py | 6 +++++ SCR/valetudo_map_parser/hypfer_handler.py | 8 ++++++ SCR/valetudo_map_parser/rand256_handler.py | 29 ++++++++++++++++++++-- SCR/valetudo_map_parser/rooms_handler.py | 9 +++++++ pyproject.toml | 2 +- 5 files changed, 51 insertions(+), 3 deletions(-) diff --git a/SCR/valetudo_map_parser/config/utils.py b/SCR/valetudo_map_parser/config/utils.py index adc4b59..9a3cb43 100644 --- a/SCR/valetudo_map_parser/config/utils.py +++ b/SCR/valetudo_map_parser/config/utils.py @@ -111,6 +111,12 @@ async def async_get_image( try: # Backup current image to last_image before processing new one if hasattr(self.shared, "new_image") and self.shared.new_image is not None: + # Close old last_image to free memory before replacing it + if hasattr(self.shared, "last_image") and self.shared.last_image is not None: + try: + self.shared.last_image.close() + except Exception: + pass # Ignore errors if image is already closed self.shared.last_image = self.shared.new_image # Call the appropriate handler method based on handler type diff --git a/SCR/valetudo_map_parser/hypfer_handler.py b/SCR/valetudo_map_parser/hypfer_handler.py index 05a00de..c417d8c 100644 --- a/SCR/valetudo_map_parser/hypfer_handler.py +++ b/SCR/valetudo_map_parser/hypfer_handler.py @@ -254,7 +254,12 @@ async def async_get_image_from_json( ) LOGGER.info("%s: Completed base Layers", self.file_name) # Copy the new array in base layer. 
+ # Delete old base layer before creating new one to free memory + if self.img_base_layer is not None: + del self.img_base_layer self.img_base_layer = await self.async_copy_array(img_np_array) + # Delete source array after copying to free memory + del img_np_array self.shared.frame_number = self.frame_number self.frame_number += 1 @@ -268,6 +273,9 @@ async def async_get_image_from_json( or self.img_work_layer.shape != self.img_base_layer.shape or self.img_work_layer.dtype != self.img_base_layer.dtype ): + # Delete old buffer before creating new one to free memory + if self.img_work_layer is not None: + del self.img_work_layer self.img_work_layer = np.empty_like(self.img_base_layer) # Copy the base layer into the persistent working buffer (no new allocation per frame) diff --git a/SCR/valetudo_map_parser/rand256_handler.py b/SCR/valetudo_map_parser/rand256_handler.py index 9d488a9..2d5dc30 100644 --- a/SCR/valetudo_map_parser/rand256_handler.py +++ b/SCR/valetudo_map_parser/rand256_handler.py @@ -58,6 +58,7 @@ def __init__(self, shared_data): self.drawing_config, self.draw = initialize_drawing_config(self) self.go_to = None # Go to position data self.img_base_layer = None # Base image layer + self.img_work_layer = None # Persistent working buffer (reused across frames) self.img_rotate = shared_data.image_rotate # Image rotation self.room_propriety = None # Room propriety data self.active_zones = None # Active zones @@ -156,10 +157,24 @@ async def get_image_from_rrm( # Increment frame number self.frame_number += 1 - img_np_array = await self.async_copy_array(self.img_base_layer) if self.frame_number > 5: self.frame_number = 0 + # Ensure persistent working buffer exists and matches base (allocate only when needed) + if ( + self.img_work_layer is None + or self.img_work_layer.shape != self.img_base_layer.shape + or self.img_work_layer.dtype != self.img_base_layer.dtype + ): + # Delete old buffer before creating new one to free memory + if self.img_work_layer is not 
None: + del self.img_work_layer + self.img_work_layer = np.empty_like(self.img_base_layer) + + # Copy the base layer into the persistent working buffer (no new allocation per frame) + np.copyto(self.img_work_layer, self.img_base_layer) + img_np_array = self.img_work_layer + # Draw map elements img_np_array = await self._draw_map_elements( img_np_array, m_json, colors, robot_position, robot_position_angle @@ -167,7 +182,7 @@ async def get_image_from_rrm( # Return PIL Image using async utilities pil_img = await AsyncPIL.async_fromarray(img_np_array, mode="RGBA") - del img_np_array # free memory + # Note: Don't delete img_np_array here as it's the persistent work buffer return await self._finalize_image(pil_img) except (RuntimeError, RuntimeWarning) as e: @@ -228,7 +243,12 @@ async def _setup_robot_and_image( (robot_position[1] * 10), robot_position_angle, ) + # Delete old base layer before creating new one to free memory + if self.img_base_layer is not None: + del self.img_base_layer self.img_base_layer = await self.async_copy_array(img_np_array) + # Delete source array after copying to free memory + del img_np_array else: # If floor is disabled, create an empty image background_color = self.drawing_config.get_property( @@ -237,7 +257,12 @@ async def _setup_robot_and_image( img_np_array = await self.draw.create_empty_image( size_x, size_y, background_color ) + # Delete old base layer before creating new one to free memory + if self.img_base_layer is not None: + del self.img_base_layer self.img_base_layer = await self.async_copy_array(img_np_array) + # Delete source array after copying to free memory + del img_np_array # Check active zones BEFORE auto-crop to enable proper zoom functionality # This needs to run on every frame, not just frame 0 diff --git a/SCR/valetudo_map_parser/rooms_handler.py b/SCR/valetudo_map_parser/rooms_handler.py index a1f5e48..8affc6b 100644 --- a/SCR/valetudo_map_parser/rooms_handler.py +++ b/SCR/valetudo_map_parser/rooms_handler.py @@ 
-161,8 +161,17 @@ async def _process_room_layer( np.uint8 ) + # Free intermediate arrays to reduce memory usage + del local_mask + del struct_elem + del eroded + # Extract contour from the mask outline = self.convex_hull_outline(mask) + + # Free mask after extracting outline + del mask + if not outline: return None, None diff --git a/pyproject.toml b/pyproject.toml index 04d6ac5..a7ac2c4 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -18,7 +18,7 @@ python = ">=3.13" numpy = ">=1.26.4" Pillow = ">=10.3.0" scipy = ">=1.12.0" -mvcrender = ">=0.0.5" +mvcrender = "==0.0.6" [tool.poetry.group.dev.dependencies] ruff = "*" From c5d704af0d5cb8d50b8874b82b67ef7db67f0e7c Mon Sep 17 00:00:00 2001 From: SCA075 <82227818+sca075@users.noreply.github.com> Date: Wed, 8 Oct 2025 22:15:12 +0200 Subject: [PATCH 13/25] drawable.py use now mcvrender for the most. --- SCR/valetudo_map_parser/config/drawable.py | 72 ++++++++++++++-------- 1 file changed, 48 insertions(+), 24 deletions(-) diff --git a/SCR/valetudo_map_parser/config/drawable.py b/SCR/valetudo_map_parser/config/drawable.py index 0b07f06..203ba54 100644 --- a/SCR/valetudo_map_parser/config/drawable.py +++ b/SCR/valetudo_map_parser/config/drawable.py @@ -15,7 +15,7 @@ import numpy as np from mvcrender.blend import get_blended_color, sample_and_blend_color -from mvcrender.draw import circle_u8, line_u8 +from mvcrender.draw import circle_u8, line_u8, polygon_u8 from PIL import Image, ImageDraw, ImageFont from .types import Color, NumpyArray, PilPNG, Point, Tuple, Union @@ -129,7 +129,7 @@ async def go_to_flag( """ Draw a flag centered at specified coordinates on the input layer. It uses the rotation angle of the image to orient the flag. - Includes color blending for better visual integration. + Uses mvcrender's polygon_u8 for efficient triangle drawing. 
""" # Check if coordinates are within bounds height, width = layer.shape[:2] @@ -194,9 +194,12 @@ async def go_to_flag( xp1, yp1 = center[0] - (pole_width // 2), y1 xp2, yp2 = center[0] - (pole_width // 2), center[1] + flag_size - # Draw flag outline using _polygon_outline - points = [(x1, y1), (x2, y2), (x3, y3)] - layer = Drawable._polygon_outline(layer, points, 1, flag_color, flag_color) + # Draw flag triangle using mvcrender's polygon_u8 (much faster than _polygon_outline) + xs = np.array([x1, x2, x3], dtype=np.int32) + ys = np.array([y1, y2, y3], dtype=np.int32) + # Draw filled triangle with thin outline + polygon_u8(layer, xs, ys, flag_color, 1, flag_color) + # Draw pole using _line layer = Drawable._line(layer, xp1, yp1, xp2, yp2, pole_color, pole_width) return layer @@ -378,17 +381,18 @@ def _polygon_outline( @staticmethod async def zones(layers: NumpyArray, coordinates, color: Color) -> NumpyArray: """ - Draw zones as solid filled polygons with alpha blending using a per-zone mask. - Keeps API the same; no dotted rendering. + Draw zones as filled polygons with alpha blending using mvcrender. + Creates a mask with polygon_u8 and blends it onto the image with proper alpha. + This eliminates PIL dependency for zone drawing. 
""" if not coordinates: return layers height, width = layers.shape[:2] - # Precompute color and alpha r, g, b, a = color alpha = a / 255.0 inv_alpha = 1.0 - alpha + # Pre-allocate color array once (avoid creating it in every iteration) color_rgb = np.array([r, g, b], dtype=np.float32) for zone in coordinates: @@ -396,6 +400,7 @@ async def zones(layers: NumpyArray, coordinates, color: Color) -> NumpyArray: pts = zone["points"] except (KeyError, TypeError): continue + if not pts or len(pts) < 6: continue @@ -407,29 +412,48 @@ async def zones(layers: NumpyArray, coordinates, color: Color) -> NumpyArray: if min_x >= max_x or min_y >= max_y: continue - # Adjust polygon points to local bbox coordinates - poly_xy = [ - (int(pts[i] - min_x), int(pts[i + 1] - min_y)) - for i in range(0, len(pts), 2) - ] box_w = max_x - min_x + 1 box_h = max_y - min_y + 1 - # Build mask via PIL polygon fill (fast, C-impl) - mask_img = Image.new("L", (box_w, box_h), 0) - draw = ImageDraw.Draw(mask_img) - draw.polygon(poly_xy, fill=255) - zone_mask = np.array(mask_img, dtype=bool) + # Create mask using mvcrender's polygon_u8 + mask_rgba = np.zeros((box_h, box_w, 4), dtype=np.uint8) + + # Convert points to xs, ys arrays (adjusted to local bbox coordinates) + xs = np.array([int(pts[i] - min_x) for i in range(0, len(pts), 2)], dtype=np.int32) + ys = np.array([int(pts[i] - min_y) for i in range(1, len(pts), 2)], dtype=np.int32) + + # Draw filled polygon on mask + polygon_u8(mask_rgba, xs, ys, (0, 0, 0, 0), 0, (255, 255, 255, 255)) + + # Extract boolean mask from first channel + zone_mask = (mask_rgba[:, :, 0] > 0) + del mask_rgba + del xs + del ys + if not np.any(zone_mask): + del zone_mask continue - # Vectorized alpha blend on RGB channels only + # Optimized alpha blend - minimize temporary allocations region = layers[min_y : max_y + 1, min_x : max_x + 1] - rgb = region[..., :3].astype(np.float32) - mask3 = zone_mask[:, :, None] - blended_rgb = np.where(mask3, rgb * inv_alpha + color_rgb * 
alpha, rgb) - region[..., :3] = blended_rgb.astype(np.uint8) - # Leave alpha channel unchanged to avoid stacking transparency + + # Work directly on the region's RGB channels + rgb_region = region[..., :3] + + # Apply blending only where mask is True + # Use boolean indexing to avoid creating full-size temporary arrays + rgb_masked = rgb_region[zone_mask].astype(np.float32) + + # Blend: new_color = old_color * (1 - alpha) + zone_color * alpha + rgb_masked *= inv_alpha + rgb_masked += color_rgb * alpha + + # Write back (convert to uint8) + rgb_region[zone_mask] = rgb_masked.astype(np.uint8) + + del zone_mask + del rgb_masked return layers From 3a5c9058f97bc9566bc19aee029ef074f9dd08ce Mon Sep 17 00:00:00 2001 From: SCA075 <82227818+sca075@users.noreply.github.com> Date: Wed, 8 Oct 2025 22:18:40 +0200 Subject: [PATCH 14/25] version bump Signed-off-by: Sandro Cantarella --- pyproject.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pyproject.toml b/pyproject.toml index a7ac2c4..e630a2e 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,6 +1,6 @@ [tool.poetry] name = "valetudo-map-parser" -version = "0.1.10" +version = "0.1.11b0" description = "A Python library to parse Valetudo map data returning a PIL Image object." 
authors = ["Sandro Cantarella "] license = "Apache-2.0" From 6738b14edd3bf98e7edc2ed43e1f2411aa8063be Mon Sep 17 00:00:00 2001 From: SCA075 <82227818+sca075@users.noreply.github.com> Date: Sat, 11 Oct 2025 13:40:37 +0200 Subject: [PATCH 15/25] test new battery state and add streaming to the data Signed-off-by: Sandro Cantarella --- SCR/valetudo_map_parser/config/shared.py | 24 ++++++++++++++++++++---- SCR/valetudo_map_parser/config/utils.py | 4 +--- pyproject.toml | 2 +- 3 files changed, 22 insertions(+), 8 deletions(-) diff --git a/SCR/valetudo_map_parser/config/shared.py b/SCR/valetudo_map_parser/config/shared.py index 62c4173..5392153 100755 --- a/SCR/valetudo_map_parser/config/shared.py +++ b/SCR/valetudo_map_parser/config/shared.py @@ -40,6 +40,7 @@ CONF_VAC_STAT_SIZE, CONF_ZOOM_LOCK_RATIO, DEFAULT_VALUES, + NOT_STREAMING_STATES, CameraModes, Colors, PilPNG, @@ -119,10 +120,17 @@ def __init__(self, file_name): self.trims = TrimsData.from_dict(DEFAULT_VALUES["trims_data"]) self.skip_room_ids: List[str] = [] self.device_info = None + self._battery_state = None def vacuum_bat_charged(self) -> bool: """Check if the vacuum is charging.""" - return (self.vacuum_state == "docked") and (int(self.vacuum_battery) < 100) + if self.vacuum_state != "docked": + self._battery_state = "not_charging" + elif (self._battery_state == "charging") and (int(self.vacuum_battery) == 100): + self._battery_state = "charged" + else: + self._battery_state = "charging" if int(self.vacuum_battery) < 100 else "not_charging" + return (self.vacuum_state == "docked") and (self._battery_state == "charged") @staticmethod def _compose_obstacle_links(vacuum_host_ip: str, obstacles: list) -> list | None: @@ -209,17 +217,25 @@ def generate_attributes(self) -> dict: return attrs + def is_streaming(self) -> bool: + """Return true if the device is streaming.""" + updated_status = self.vacuum_state + attr_is_streaming = ((updated_status not in NOT_STREAMING_STATES + or self.vacuum_bat_charged()) + or not 
self.binary_image) + return attr_is_streaming + def to_dict(self) -> dict: """Return a dictionary with image and attributes data.""" - - return { + data = { "image": { "binary": self.binary_image, - "pil_image": self.new_image, "size": pil_size_rotation(self.image_rotate, self.new_image), + "streaming": self.is_streaming() }, "attributes": self.generate_attributes(), } + return data class CameraSharedManager: diff --git a/SCR/valetudo_map_parser/config/utils.py b/SCR/valetudo_map_parser/config/utils.py index 9a3cb43..fb0019d 100644 --- a/SCR/valetudo_map_parser/config/utils.py +++ b/SCR/valetudo_map_parser/config/utils.py @@ -177,13 +177,11 @@ async def async_get_image( LOGGER.warning( "%s: Failed to generate image from JSON data", self.file_name ) - if bytes_format and hasattr(self.shared, "last_image"): - return pil_to_png_bytes(self.shared.last_image), {} return ( self.shared.last_image if hasattr(self.shared, "last_image") else None - ), {} + ), self.shared.to_dict() except Exception as e: LOGGER.warning( diff --git a/pyproject.toml b/pyproject.toml index e630a2e..ad689b4 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,6 +1,6 @@ [tool.poetry] name = "valetudo-map-parser" -version = "0.1.11b0" +version = "0.1.11b1" description = "A Python library to parse Valetudo map data returning a PIL Image object." 
authors = ["Sandro Cantarella "] license = "Apache-2.0" From fab2bb69e5196578bc467b8b5d6647d251fb17a8 Mon Sep 17 00:00:00 2001 From: SCA075 <82227818+sca075@users.noreply.github.com> Date: Sat, 11 Oct 2025 14:45:11 +0200 Subject: [PATCH 16/25] charging state updated Signed-off-by: Sandro Cantarella --- SCR/valetudo_map_parser/config/shared.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/SCR/valetudo_map_parser/config/shared.py b/SCR/valetudo_map_parser/config/shared.py index 5392153..dbe14aa 100755 --- a/SCR/valetudo_map_parser/config/shared.py +++ b/SCR/valetudo_map_parser/config/shared.py @@ -126,11 +126,11 @@ def vacuum_bat_charged(self) -> bool: """Check if the vacuum is charging.""" if self.vacuum_state != "docked": self._battery_state = "not_charging" - elif (self._battery_state == "charging") and (int(self.vacuum_battery) == 100): + elif (self._battery_state == "charging_done") and (int(self.vacuum_battery) == 100): self._battery_state = "charged" else: - self._battery_state = "charging" if int(self.vacuum_battery) < 100 else "not_charging" - return (self.vacuum_state == "docked") and (self._battery_state == "charged") + self._battery_state = "charging" if int(self.vacuum_battery) < 100 else "charging_done" + return (self.vacuum_state == "docked") and (self._battery_state == "charging") @staticmethod def _compose_obstacle_links(vacuum_host_ip: str, obstacles: list) -> list | None: From 88120abdb5505b1735c73118a58b134868039968 Mon Sep 17 00:00:00 2001 From: SCA075 <82227818+sca075@users.noreply.github.com> Date: Tue, 14 Oct 2025 17:38:40 +0200 Subject: [PATCH 17/25] added need room_names in RoomStore Signed-off-by: Sandro Cantarella --- SCR/valetudo_map_parser/config/types.py | 12 ++++++++++++ pyproject.toml | 2 +- 2 files changed, 13 insertions(+), 1 deletion(-) diff --git a/SCR/valetudo_map_parser/config/types.py b/SCR/valetudo_map_parser/config/types.py index 26924f5..c97e517 100644 --- a/SCR/valetudo_map_parser/config/types.py 
+++ b/SCR/valetudo_map_parser/config/types.py @@ -123,6 +123,18 @@ def get_rooms_count(self) -> int: return count if count > 0 else DEFAULT_ROOMS return DEFAULT_ROOMS + @property + def room_names(self) -> dict: + """Return room names in format {'room_0_name': 'SegmentID: RoomName', ...}.""" + result = {} + if isinstance(self.vacuums_data, dict): + for idx, (segment_id, room_data) in enumerate(self.vacuums_data.items()): + if idx >= 16: # Max 16 rooms + break + room_name = room_data.get("name", f"Room {segment_id}") + result[f"room_{idx}_name"] = f"{segment_id}: {room_name}" + return result + @classmethod def get_all_instances(cls) -> Dict[str, "RoomStore"]: return cls._instances diff --git a/pyproject.toml b/pyproject.toml index ad689b4..5487ce7 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,6 +1,6 @@ [tool.poetry] name = "valetudo-map-parser" -version = "0.1.11b1" +version = "0.1.11" description = "A Python library to parse Valetudo map data returning a PIL Image object." authors = ["Sandro Cantarella "] license = "Apache-2.0" From 8c2ee270b2fbf2d8b64c072367386694e1c60281 Mon Sep 17 00:00:00 2001 From: SCA075 <82227818+sca075@users.noreply.github.com> Date: Thu, 23 Oct 2025 17:39:58 +0200 Subject: [PATCH 18/25] status_text.py refactor removing if elif else for clarity Signed-off-by: Sandro Cantarella --- .../config/status_text/status_text.py | 116 +++++++++++------- 1 file changed, 70 insertions(+), 46 deletions(-) diff --git a/SCR/valetudo_map_parser/config/status_text/status_text.py b/SCR/valetudo_map_parser/config/status_text/status_text.py index 7e7942d..83040a6 100644 --- a/SCR/valetudo_map_parser/config/status_text/status_text.py +++ b/SCR/valetudo_map_parser/config/status_text/status_text.py @@ -1,17 +1,17 @@ """ -Version: 0.1.10 +Version: 0.1.12 Status text of the vacuum cleaners. Class to handle the status text of the vacuum cleaners. 
""" from __future__ import annotations - +from typing import Callable from ..types import LOGGER, PilPNG from .translations import translations - LOGGER.propagate = True - +charge_level = "\u03de" # unicode Koppa symbol +charging = "\u2211" # unicode Charging symbol class StatusText: """ @@ -21,9 +21,18 @@ class StatusText: def __init__(self, camera_shared): self._shared = camera_shared self.file_name = self._shared.file_name + self._language = (self._shared.user_language or "en").lower() + self._lang_map = translations.get(self._language) or translations.get("en", {}) + self._compose_functions: list[Callable[[list[str]], list[str]]] = [ + self._current_room, + self._docked_charged, + self._docked_ready, + self._active, + self._mqtt_disconnected, + ] @staticmethod - async def get_vacuum_status_translation( + async def _get_vacuum_status_translation( language: str = "en", ) -> dict[str, str] | None: """ @@ -33,15 +42,61 @@ async def get_vacuum_status_translation( """ return translations.get((language or "en").lower()) - async def translate_vacuum_status(self) -> str: + async def _translate_vacuum_status(self) -> str: """Return the translated status with EN fallback and safe default.""" status = self._shared.vacuum_state or "unknown" language = (self._shared.user_language or "en").lower() - translation = await self.get_vacuum_status_translation(language) + translation = await self._get_vacuum_status_translation(language) if not translation: translation = translations.get("en", {}) return translation.get(status, str(status).capitalize()) + def _mqtt_disconnected(self, current_state: list[str]) -> list[str]: + """Return the translated MQTT disconnected status.""" + if not self._shared.vacuum_connection: + mqtt_disc = (self._lang_map or {}).get( + "mqtt_disconnected", + translations.get("en", {}).get("mqtt_disconnected", "Disconnected from MQTT?"), + ) + return [f"{self.file_name}: {mqtt_disc}"] + return current_state + + def _docked_charged(self, current_state: 
list[str]) -> list[str]: + """Return the translated docked and charging status.""" + if self._shared.vacuum_state == "docked" and self._shared.vacuum_bat_charged(): + current_state.append(" \u00b7 ") + current_state.append(f"{charging}{charge_level} ") + current_state.append(f"{self._shared.vacuum_battery}%") + return current_state + + def _docked_ready(self, current_state: list[str]) -> list[str]: + """Return the translated docked and ready status.""" + if self._shared.vacuum_state == "docked" and not self._shared.vacuum_bat_charged(): + current_state.append(" \u00b7 ") + current_state.append(f"{charge_level} ") + ready_txt = (self._lang_map or {}).get( + "ready", + translations.get("en", {}).get("ready", "Ready."), + ) + current_state.append(ready_txt) + return current_state + + def _current_room(self, current_state: list[str]) -> list[str]: + """Return the current room information.""" + if self._shared.current_room: + in_room = self._shared.current_room.get("in_room") + if in_room and in_room != "Room 31": + current_state.append(f" ({in_room})") + return current_state + + def _active(self, current_state: list[str]) -> list[str]: + """Return the translated active status.""" + if self._shared.vacuum_state != "docked": + current_state.append(" \u00b7 ") + current_state.append(f"{charge_level}") + current_state.append(f" {self._shared.vacuum_battery}%") + return current_state + async def get_status_text(self, text_img: PilPNG) -> tuple[list[str], int]: """ Compose the image status text. 
@@ -51,46 +106,15 @@ async def get_status_text(self, text_img: PilPNG) -> tuple[list[str], int]: status_text = ["If you read me, something really went wrong.."] # default text text_size_coverage = 1.5 # resize factor for the text text_size = self._shared.vacuum_status_size # default text size - charge_level = "\u03de" # unicode Koppa symbol - charging = "\u2211" # unicode Charging symbol - vacuum_state = await self.translate_vacuum_status() + vacuum_state = await self._translate_vacuum_status() if self._shared.show_vacuum_state: status_text = [f"{self.file_name}: {vacuum_state}"] - language = (self._shared.user_language or "en").lower() - lang_map = translations.get(language) or translations.get("en", {}) - if not self._shared.vacuum_connection: - mqtt_disc = lang_map.get( - "mqtt_disconnected", - translations.get("en", {}).get( - "mqtt_disconnected", "Disconnected from MQTT?" - ), + # Compose Status Text with available data. + for func in self._compose_functions: + status_text = func(status_text) + if text_size >= 50 and getattr(text_img, "width", None): + text_pixels = max(1, sum(len(text) for text in status_text)) + text_size = int( + (text_size_coverage * text_img.width) // text_pixels ) - status_text = [f"{self.file_name}: {mqtt_disc}"] - else: - if self._shared.current_room: - in_room = self._shared.current_room.get("in_room") - if in_room: - status_text.append(f" ({in_room})") - if self._shared.vacuum_state == "docked": - if self._shared.vacuum_bat_charged(): - status_text.append(" \u00b7 ") - status_text.append(f"{charging}{charge_level} ") - status_text.append(f"{self._shared.vacuum_battery}%") - else: - status_text.append(" \u00b7 ") - status_text.append(f"{charge_level} ") - ready_txt = lang_map.get( - "ready", - translations.get("en", {}).get("ready", "Ready."), - ) - status_text.append(ready_txt) - else: - status_text.append(" \u00b7 ") - status_text.append(f"{charge_level}") - status_text.append(f" {self._shared.vacuum_battery}%") - if text_size >= 50 
and getattr(text_img, "width", None): - text_pixels = max(1, sum(len(text) for text in status_text)) - text_size = int( - (text_size_coverage * text_img.width) // text_pixels - ) return status_text, text_size From c85b71c27494b14286bfc1307d3b4981cfc79bcb Mon Sep 17 00:00:00 2001 From: SCA075 <82227818+sca075@users.noreply.github.com> Date: Thu, 23 Oct 2025 17:44:56 +0200 Subject: [PATCH 19/25] adding const.py and update types.py to separate const and types Signed-off-by: Sandro Cantarella --- SCR/valetudo_map_parser/__init__.py | 139 ++++++- SCR/valetudo_map_parser/config/colors.py | 421 +-------------------- SCR/valetudo_map_parser/config/drawable.py | 5 +- SCR/valetudo_map_parser/config/shared.py | 6 +- SCR/valetudo_map_parser/config/types.py | 413 +------------------- SCR/valetudo_map_parser/const.py | 280 ++++++++++++++ SCR/valetudo_map_parser/hypfer_handler.py | 2 +- SCR/valetudo_map_parser/rand256_handler.py | 4 +- 8 files changed, 431 insertions(+), 839 deletions(-) create mode 100644 SCR/valetudo_map_parser/const.py diff --git a/SCR/valetudo_map_parser/__init__.py b/SCR/valetudo_map_parser/__init__.py index c304492..3b672a2 100644 --- a/SCR/valetudo_map_parser/__init__.py +++ b/SCR/valetudo_map_parser/__init__.py @@ -1,5 +1,5 @@ """Valetudo map parser. 
-Version: 0.1.10""" +Version: 0.1.12""" from pathlib import Path @@ -22,6 +22,58 @@ TrimCropData, UserLanguageStore, ) +from .config.utils import ResizeParams, async_resize_image +from .const import ( + ATTR_CALIBRATION_POINTS, + ATTR_CAMERA_MODE, + ATTR_CONTENT_TYPE, + ATTR_FRIENDLY_NAME, + ATTR_IMAGE_LAST_UPDATED, + ATTR_JSON_DATA, + ATTR_OBSTACLES, + ATTR_POINTS, + ATTR_ROOMS, + ATTR_ROTATE, + ATTR_SNAPSHOT, + ATTR_SNAPSHOT_PATH, + ATTR_VACUUM_BATTERY, + ATTR_VACUUM_CHARGING, + ATTR_VACUUM_JSON_ID, + ATTR_VACUUM_POSITION, + ATTR_VACUUM_STATUS, + ATTR_VACUUM_TOPIC, + ATTR_ZONES, + CAMERA_STORAGE, + COLORS, + CONF_ASPECT_RATIO, + CONF_AUTO_ZOOM, + CONF_EXPORT_SVG, + CONF_OFFSET_BOTTOM, + CONF_OFFSET_LEFT, + CONF_OFFSET_RIGHT, + CONF_OFFSET_TOP, + CONF_SNAPSHOTS_ENABLE, + CONF_TRIMS_SAVE, + CONF_VACUUM_CONFIG_ENTRY_ID, + CONF_VACUUM_CONNECTION_STRING, + CONF_VACUUM_ENTITY_ID, + CONF_VACUUM_IDENTIFIERS, + CONF_VAC_STAT, + CONF_VAC_STAT_FONT, + CONF_VAC_STAT_POS, + CONF_VAC_STAT_SIZE, + CONF_ZOOM_LOCK_RATIO, + DECODED_TOPICS, + DEFAULT_IMAGE_SIZE, + DEFAULT_PIXEL_SIZE, + DEFAULT_VALUES, + FONTS_AVAILABLE, + ICON, + NAME, + NON_DECODED_TOPICS, + NOT_STREAMING_STATES, + SENSOR_NO_DATA, +) from .hypfer_handler import HypferMapImageHandler from .map_data import HyperMapData from .rand256_handler import ReImageHandler @@ -38,29 +90,86 @@ def get_default_font_path() -> str: __all__ = [ - "RoomsHandler", - "RandRoomsHandler", - "HyperMapData", - "HypferMapImageHandler", - "ReImageHandler", - "RRMapParser", + # Attribute Constants + "ATTR_CALIBRATION_POINTS", + "ATTR_CAMERA_MODE", + "ATTR_CONTENT_TYPE", + "ATTR_FRIENDLY_NAME", + "ATTR_IMAGE_LAST_UPDATED", + "ATTR_JSON_DATA", + "ATTR_OBSTACLES", + "ATTR_POINTS", + "ATTR_ROOMS", + "ATTR_ROTATE", + "ATTR_SNAPSHOT", + "ATTR_SNAPSHOT_PATH", + "ATTR_VACUUM_BATTERY", + "ATTR_VACUUM_CHARGING", + "ATTR_VACUUM_JSON_ID", + "ATTR_VACUUM_POSITION", + "ATTR_VACUUM_STATUS", + "ATTR_VACUUM_TOPIC", + "ATTR_ZONES", + # Configuration Constants 
+ "CAMERA_STORAGE", + "COLORS", + "CONF_ASPECT_RATIO", + "CONF_AUTO_ZOOM", + "CONF_EXPORT_SVG", + "CONF_OFFSET_BOTTOM", + "CONF_OFFSET_LEFT", + "CONF_OFFSET_RIGHT", + "CONF_OFFSET_TOP", + "CONF_SNAPSHOTS_ENABLE", + "CONF_TRIMS_SAVE", + "CONF_VACUUM_CONFIG_ENTRY_ID", + "CONF_VACUUM_CONNECTION_STRING", + "CONF_VACUUM_ENTITY_ID", + "CONF_VACUUM_IDENTIFIERS", + "CONF_VAC_STAT", + "CONF_VAC_STAT_FONT", + "CONF_VAC_STAT_POS", + "CONF_VAC_STAT_SIZE", + "CONF_ZOOM_LOCK_RATIO", + # Default Values + "DECODED_TOPICS", + "DEFAULT_IMAGE_SIZE", + "DEFAULT_PIXEL_SIZE", + "DEFAULT_VALUES", + "FONTS_AVAILABLE", + "ICON", + "NAME", + "NON_DECODED_TOPICS", + "NOT_STREAMING_STATES", + "SENSOR_NO_DATA", + # Classes and Handlers "CameraShared", "CameraSharedManager", "ColorsManagement", "Drawable", "DrawableElement", "DrawingConfig", - "SnapshotStore", - "UserLanguageStore", - "RoomStore", - "RoomsProperties", - "TrimCropData", + "HyperMapData", + "HypferMapImageHandler", + "RRMapParser", + "RandRoomsHandler", + "ReImageHandler", + "RoomsHandler", + "StatusText", + # Types "CameraModes", + "ImageSize", "JsonType", - "PilPNG", "NumpyArray", - "ImageSize", - "StatusText", + "PilPNG", + "RoomsProperties", + "RoomStore", + "SnapshotStore", + "TrimCropData", + "UserLanguageStore", + # Utilities + "ResizeParams", "STATUS_TEXT_TRANSLATIONS", + "async_resize_image", "get_default_font_path", ] diff --git a/SCR/valetudo_map_parser/config/colors.py b/SCR/valetudo_map_parser/config/colors.py index 50356c2..3f9b0d3 100644 --- a/SCR/valetudo_map_parser/config/colors.py +++ b/SCR/valetudo_map_parser/config/colors.py @@ -6,9 +6,8 @@ from typing import Dict, List, Tuple import numpy as np -from scipy import ndimage -from .types import ( +from ..const import ( ALPHA_BACKGROUND, ALPHA_CHARGER, ALPHA_GO_TO, @@ -59,10 +58,8 @@ COLOR_TEXT, COLOR_WALL, COLOR_ZONE_CLEAN, - LOGGER, - Color, ) - +from .types import LOGGER, Color color_transparent = (0, 0, 0, 0) color_charger = (0, 128, 0, 255) @@ -404,120 +401,6 
@@ def add_alpha_to_color(rgb: Tuple[int, int, int], alpha: float) -> Color: """ return (*rgb, int(alpha)) if rgb else (0, 0, 0, int(alpha)) - @staticmethod - def blend_colors(background: Color, foreground: Color) -> Color: - """ - Blend foreground color with background color based on alpha values. - Optimized version with more fast paths and simplified calculations. - - :param background: Background RGBA color (r,g,b,a) - :param foreground: Foreground RGBA color (r,g,b,a) to blend on top - :return: Blended RGBA color - """ - # Fast paths for common cases - fg_a = foreground[3] - - if fg_a == 255: # Fully opaque foreground - return foreground - - if fg_a == 0: # Fully transparent foreground - return background - - bg_a = background[3] - if bg_a == 0: # Fully transparent background - return foreground - - # Extract components (only after fast paths) - bg_r, bg_g, bg_b = background[:3] - fg_r, fg_g, fg_b = foreground[:3] - - # Pre-calculate the blend factor once (avoid repeated division) - blend = fg_a / 255.0 - inv_blend = 1.0 - blend - - # Simple linear interpolation for RGB channels - # This is faster than the previous implementation - out_r = int(fg_r * blend + bg_r * inv_blend) - out_g = int(fg_g * blend + bg_g * inv_blend) - out_b = int(fg_b * blend + bg_b * inv_blend) - - # Alpha blending - simplified calculation - out_a = int(fg_a + bg_a * inv_blend) - - # No need for min/max checks as the blend math keeps values in range - # when input values are valid (0-255) - - return [out_r, out_g, out_b, out_a] - - # Cache for recently sampled background colors - _bg_color_cache = {} - _cache_size = 1024 # Limit cache size to avoid memory issues - - @staticmethod - def sample_and_blend_color(array, x: int, y: int, foreground: Color) -> Color: - """ - Sample the background color from the array at coordinates (x,y) and blend with foreground color. - Optimized version with caching and faster sampling. 
- - Args: - array: The RGBA numpy array representing the image - x: Coordinate X to sample the background color from - y: Coordinate Y to sample the background color from - foreground: Foreground RGBA color (r,g,b,a) to blend on top - - Returns: - Blended RGBA color - """ - # Fast path for fully opaque foreground - no need to sample or blend - if foreground[3] == 255: - return foreground - - # Ensure array exists - if array is None: - return foreground - - # Check if coordinates are within bounds - height, width = array.shape[:2] - if not (0 <= y < height and 0 <= x < width): - return foreground - - # Check cache for this coordinate - cache_key = (id(array), x, y) - cache = ColorsManagement._bg_color_cache - - if cache_key in cache: - background = cache[cache_key] - else: - # Sample the background color using direct indexing (fastest method) - try: - background = tuple(map(int, array[y, x])) - - # Update cache (with simple LRU-like behavior) - try: - if len(cache) >= ColorsManagement._cache_size: - # Remove a random entry if cache is full - if cache: # Make sure cache is not empty - cache.pop(next(iter(cache))) - else: - # If cache is somehow empty but len reported >= _cache_size - # This is an edge case that shouldn't happen but we handle it - pass - cache[cache_key] = background - except KeyError: - # If we encounter a KeyError, reset the cache - # This is a rare edge case that might happen in concurrent access - ColorsManagement._bg_color_cache = {cache_key: background} - - except (IndexError, ValueError): - return foreground - - # Fast path for fully transparent foreground - if foreground[3] == 0: - return background - - # Blend the colors - return ColorsManagement.blend_colors(background, foreground) - def get_user_colors(self) -> List[Color]: """Return the list of RGBA colors for user-defined map elements.""" return self.user_colors @@ -525,303 +408,3 @@ def get_user_colors(self) -> List[Color]: def get_rooms_colors(self) -> List[Color]: """Return the list of 
RGBA colors for rooms.""" return self.rooms_colors - - @staticmethod - def batch_blend_colors(image_array, mask, foreground_color): - """ - Blend a foreground color with all pixels in an image where the mask is True. - Uses scipy.ndimage for efficient batch processing. - - Args: - image_array: NumPy array of shape (height, width, 4) containing RGBA image data - mask: Boolean mask of shape (height, width) indicating pixels to blend - foreground_color: RGBA color tuple to blend with the masked pixels - - Returns: - Modified image array with blended colors - """ - if not np.any(mask): - return image_array # No pixels to blend - - # Extract foreground components - fg_r, fg_g, fg_b, fg_a = foreground_color - - # Fast path for fully opaque foreground - if fg_a == 255: - # Just set the color directly where mask is True - image_array[mask, 0] = fg_r - image_array[mask, 1] = fg_g - image_array[mask, 2] = fg_b - image_array[mask, 3] = fg_a - return image_array - - # Fast path for fully transparent foreground - if fg_a == 0: - return image_array # No change needed - - # For semi-transparent foreground, we need to blend - # Extract background components where mask is True - bg_pixels = image_array[mask] - - # Convert alpha from [0-255] to [0-1] for calculations - fg_alpha = fg_a / 255.0 - bg_alpha = bg_pixels[:, 3] / 255.0 - - # Calculate resulting alpha - out_alpha = fg_alpha + bg_alpha * (1 - fg_alpha) - - # Calculate alpha ratios for blending - # Handle division by zero by setting ratio to 0 where out_alpha is near zero - alpha_ratio = np.zeros_like(out_alpha) - valid_alpha = out_alpha > 0.0001 - alpha_ratio[valid_alpha] = fg_alpha / out_alpha[valid_alpha] - inv_alpha_ratio = 1.0 - alpha_ratio - - # Calculate blended RGB components - out_r = np.clip( - (fg_r * alpha_ratio + bg_pixels[:, 0] * inv_alpha_ratio), 0, 255 - ).astype(np.uint8) - out_g = np.clip( - (fg_g * alpha_ratio + bg_pixels[:, 1] * inv_alpha_ratio), 0, 255 - ).astype(np.uint8) - out_b = np.clip( - (fg_b * 
alpha_ratio + bg_pixels[:, 2] * inv_alpha_ratio), 0, 255 - ).astype(np.uint8) - out_a = np.clip((out_alpha * 255), 0, 255).astype(np.uint8) - - # Update the image array with blended values - image_array[mask, 0] = out_r - image_array[mask, 1] = out_g - image_array[mask, 2] = out_b - image_array[mask, 3] = out_a - - return image_array - - @staticmethod - def process_regions_with_colors(image_array, regions_mask, colors): - """ - Process multiple regions in an image with different colors using scipy.ndimage. - This is much faster than processing each region separately. - - Args: - image_array: NumPy array of shape (height, width, 4) containing RGBA image data - regions_mask: NumPy array of shape (height, width) with integer labels for different regions - colors: List of RGBA color tuples corresponding to each region label - - Returns: - Modified image array with all regions colored and blended - """ - # Skip processing if no regions or colors - if regions_mask is None or not np.any(regions_mask) or not colors: - return image_array - - # Get unique region labels (excluding 0 which is typically background) - unique_labels = np.unique(regions_mask) - unique_labels = unique_labels[unique_labels > 0] # Skip background (0) - - if len(unique_labels) == 0: - return image_array # No regions to process - - # Process each region with its corresponding color - for label in unique_labels: - if label <= len(colors): - # Create mask for this region - region_mask = regions_mask == label - - # Get color for this region - color = colors[label - 1] if label - 1 < len(colors) else colors[0] - - # Apply color to this region - image_array = ColorsManagement.batch_blend_colors( - image_array, region_mask, color - ) - - return image_array - - @staticmethod - def apply_color_to_shapes(image_array, shapes, color, thickness=1): - """ - Apply a color to multiple shapes (lines, circles, etc.) using scipy.ndimage. 
- - Args: - image_array: NumPy array of shape (height, width, 4) containing RGBA image data - shapes: List of shape definitions (each a list of points or parameters) - color: RGBA color tuple to apply to the shapes - thickness: Line thickness for shapes - - Returns: - Modified image array with shapes drawn and blended - """ - height, width = image_array.shape[:2] - - # Create a mask for all shapes - shapes_mask = np.zeros((height, width), dtype=bool) - - # Draw all shapes into the mask - for shape in shapes: - if len(shape) >= 2: # At least two points for a line - # Draw line into mask - for i in range(len(shape) - 1): - x1, y1 = shape[i] - x2, y2 = shape[i + 1] - - # Use Bresenham's line algorithm via scipy.ndimage.map_coordinates - # Create coordinates for the line - length = int(np.hypot(x2 - x1, y2 - y1)) - if length == 0: - continue - - t = np.linspace(0, 1, length * 2) - x = np.round(x1 * (1 - t) + x2 * t).astype(int) - y = np.round(y1 * (1 - t) + y2 * t).astype(int) - - # Filter points outside the image - valid = (0 <= x) & (x < width) & (0 <= y) & (y < height) - x, y = x[valid], y[valid] - - # Add points to mask - if thickness == 1: - shapes_mask[y, x] = True - else: - # For thicker lines, use a disk structuring element - # Create a disk structuring element once - disk_radius = thickness - disk_size = 2 * disk_radius + 1 - disk_struct = np.zeros((disk_size, disk_size), dtype=bool) - y_grid, x_grid = np.ogrid[ - -disk_radius : disk_radius + 1, - -disk_radius : disk_radius + 1, - ] - mask = x_grid**2 + y_grid**2 <= disk_radius**2 - disk_struct[mask] = True - - # Use scipy.ndimage.binary_dilation for efficient dilation - # Create a temporary mask for this line segment - line_mask = np.zeros_like(shapes_mask) - line_mask[y, x] = True - # Dilate the line with the disk structuring element - dilated_line = ndimage.binary_dilation( - line_mask, structure=disk_struct - ) - # Add to the overall shapes mask - shapes_mask |= dilated_line - - # Apply color to all shapes 
at once - return ColorsManagement.batch_blend_colors(image_array, shapes_mask, color) - - @staticmethod - def batch_sample_colors(image_array, coordinates): - """ - Efficiently sample colors from multiple coordinates in an image using scipy.ndimage. - - Args: - image_array: NumPy array of shape (height, width, 4) containing RGBA image data - coordinates: List of (x,y) tuples or numpy array of shape (N,2) with coordinates to sample - - Returns: - NumPy array of shape (N,4) containing the RGBA colors at each coordinate - """ - if len(coordinates) == 0: - return np.array([]) - - height, width = image_array.shape[:2] - - # Convert coordinates to numpy array if not already - coords = np.array(coordinates) - - # Separate x and y coordinates - x_coords = coords[:, 0] - y_coords = coords[:, 1] - - # Create a mask for valid coordinates (within image bounds) - valid_mask = ( - (0 <= x_coords) & (x_coords < width) & (0 <= y_coords) & (y_coords < height) - ) - - # Initialize result array with zeros - result = np.zeros((len(coordinates), 4), dtype=np.uint8) - - if not np.any(valid_mask): - return result # No valid coordinates - - # Filter valid coordinates - valid_x = x_coords[valid_mask].astype(int) - valid_y = y_coords[valid_mask].astype(int) - - # Use scipy.ndimage.map_coordinates for efficient sampling - # This is much faster than looping through coordinates - for channel in range(4): - # Sample this color channel for all valid coordinates at once - channel_values = ndimage.map_coordinates( - image_array[..., channel], - np.vstack((valid_y, valid_x)), - order=0, # Use nearest-neighbor interpolation - mode="nearest", - ) - - # Assign sampled values to result array - result[valid_mask, channel] = channel_values - - return result - - def cached_blend_colors(self, background: Color, foreground: Color) -> Color: - """ - Cached version of blend_colors that stores frequently used combinations. - This improves performance when the same color combinations are used repeatedly. 
- - Args: - background: Background RGBA color tuple - foreground: Foreground RGBA color tuple - - Returns: - Blended RGBA color tuple - """ - # Fast paths for common cases - if foreground[3] == 255: - return foreground - if foreground[3] == 0: - return background - - # Create a cache key from the color tuples - cache_key = (background, foreground) - - # Check if this combination is in the cache - if cache_key in self.color_cache: - return self.color_cache[cache_key] - - # Calculate the blended color - result = ColorsManagement.blend_colors(background, foreground) - - # Store in cache (with a maximum cache size to prevent memory issues) - if len(self.color_cache) < 1000: # Limit cache size - self.color_cache[cache_key] = result - - return result - - def get_colour(self, supported_color: SupportedColor) -> Color: - """ - Retrieve the color for a specific map element, prioritizing user-defined values. - - :param supported_color: The SupportedColor key for the desired color. - :return: The RGBA color for the given map element. - """ - # Handle room-specific colors - if supported_color.startswith("color_room_"): - room_index = int(supported_color.split("_")[-1]) - try: - return self.rooms_colors[room_index] - except (IndexError, KeyError): - LOGGER.warning("Room index %s not found, using default.", room_index) - r, g, b = DefaultColors.DEFAULT_ROOM_COLORS[f"color_room_{room_index}"] - a = DefaultColors.DEFAULT_ALPHA[f"alpha_room_{room_index}"] - return r, g, b, int(a) - - # Handle general map element colors - try: - index = list(SupportedColor).index(supported_color) - return self.user_colors[index] - except (IndexError, KeyError, ValueError): - LOGGER.warning( - "Color for %s not found. 
Returning default.", supported_color - ) - return DefaultColors.get_rgba(supported_color, 255) # Transparent fallback diff --git a/SCR/valetudo_map_parser/config/drawable.py b/SCR/valetudo_map_parser/config/drawable.py index 203ba54..8eb0884 100644 --- a/SCR/valetudo_map_parser/config/drawable.py +++ b/SCR/valetudo_map_parser/config/drawable.py @@ -12,13 +12,14 @@ import logging from pathlib import Path +from typing import Tuple, Union import numpy as np from mvcrender.blend import get_blended_color, sample_and_blend_color from mvcrender.draw import circle_u8, line_u8, polygon_u8 -from PIL import Image, ImageDraw, ImageFont +from PIL import ImageDraw, ImageFont -from .types import Color, NumpyArray, PilPNG, Point, Tuple, Union +from .types import Color, NumpyArray, PilPNG, Point _LOGGER = logging.getLogger(__name__) diff --git a/SCR/valetudo_map_parser/config/shared.py b/SCR/valetudo_map_parser/config/shared.py index dbe14aa..6bf7455 100755 --- a/SCR/valetudo_map_parser/config/shared.py +++ b/SCR/valetudo_map_parser/config/shared.py @@ -11,7 +11,7 @@ from PIL import Image from .utils import pil_size_rotation -from .types import ( +from ..const import ( ATTR_CALIBRATION_POINTS, ATTR_CAMERA_MODE, ATTR_CONTENT_TYPE, @@ -39,8 +39,10 @@ CONF_VAC_STAT_POS, CONF_VAC_STAT_SIZE, CONF_ZOOM_LOCK_RATIO, - DEFAULT_VALUES, NOT_STREAMING_STATES, + DEFAULT_VALUES, +) +from .types import ( CameraModes, Colors, PilPNG, diff --git a/SCR/valetudo_map_parser/config/types.py b/SCR/valetudo_map_parser/config/types.py index c97e517..1681406 100644 --- a/SCR/valetudo_map_parser/config/types.py +++ b/SCR/valetudo_map_parser/config/types.py @@ -1,6 +1,6 @@ """ This module contains type aliases for the project. 
-Version 0.0.1 +Version 0.11.1 """ import asyncio @@ -229,402 +229,6 @@ async def async_set_vacuum_json(self, vacuum_id: str, json_data: Any) -> None: async with self._lock: self.vacuum_json_data[vacuum_id] = json_data - -Color = Union[Tuple[int, int, int], Tuple[int, int, int, int]] -Colors = Dict[str, Color] -CalibrationPoints = list[dict[str, Any]] -RobotPosition: type[tuple[Any, Any, dict[str, int | float] | None]] = tuple[ - Any, Any, dict[str, int | float] | None -] -ChargerPosition = dict[str, Any] -RoomsProperties = dict[str, RoomProperty] -ImageSize = dict[str, int | list[int]] -Size = dict[str, int] -JsonType = Any # json.loads() return type is Any -PilPNG = Image.Image # Keep for backward compatibility -NumpyArray = np.ndarray -Point = Tuple[int, int] - -CAMERA_STORAGE = "valetudo_camera" -ATTR_IMAGE_LAST_UPDATED = "image_last_updated" -ATTR_ROTATE = "rotate_image" -ATTR_CROP = "crop_image" -ATTR_MARGINS = "margins" -ATTR_CONTENT_TYPE = "content_type" -CONF_OFFSET_TOP = "offset_top" -CONF_OFFSET_BOTTOM = "offset_bottom" -CONF_OFFSET_LEFT = "offset_left" -CONF_OFFSET_RIGHT = "offset_right" -CONF_ASPECT_RATIO = "aspect_ratio" -CONF_VAC_STAT = "show_vac_status" -CONF_VAC_STAT_SIZE = "vac_status_size" -CONF_VAC_STAT_POS = "vac_status_position" -CONF_VAC_STAT_FONT = "vac_status_font" -CONF_VACUUM_CONNECTION_STRING = "vacuum_map" -CONF_VACUUM_ENTITY_ID = "vacuum_entity" -CONF_VACUUM_CONFIG_ENTRY_ID = "vacuum_config_entry" -CONF_VACUUM_IDENTIFIERS = "vacuum_identifiers" -CONF_SNAPSHOTS_ENABLE = "enable_www_snapshots" -CONF_EXPORT_SVG = "get_svg_file" -CONF_AUTO_ZOOM = "auto_zoom" -CONF_ZOOM_LOCK_RATIO = "zoom_lock_ratio" -CONF_TRIMS_SAVE = "save_trims" -ICON = "mdi:camera" -NAME = "MQTT Vacuum Camera" - -DEFAULT_IMAGE_SIZE = { - "x": 5120, - "y": 5120, - "centre": [(5120 // 2), (5120 // 2)], -} - -COLORS = [ - "wall", - "zone_clean", - "robot", - "background", - "move", - "charger", - "no_go", - "go_to", -] - -SENSOR_NO_DATA = { - "mainBrush": 0, - "sideBrush": 
0, - "filter": 0, - "currentCleanTime": 0, - "currentCleanArea": 0, - "cleanTime": 0, - "cleanArea": 0, - "cleanCount": 0, - "battery": 0, - "state": 0, - "last_run_start": 0, - "last_run_end": 0, - "last_run_duration": 0, - "last_run_area": 0, - "last_bin_out": 0, - "last_bin_full": 0, - "last_loaded_map": "NoMap", - "robot_in_room": "Unsupported", -} - -DEFAULT_PIXEL_SIZE = 5 - -DEFAULT_VALUES = { - "rotate_image": "0", - "margins": "100", - "aspect_ratio": "None", - "offset_top": 0, - "offset_bottom": 0, - "offset_left": 0, - "offset_right": 0, - "auto_zoom": False, - "zoom_lock_ratio": True, - "show_vac_status": False, - "vac_status_font": "SCR/valetudo_map_parser/config/fonts/FiraSans.ttf", - "vac_status_size": 50, - "vac_status_position": True, - "get_svg_file": False, - "save_trims": True, - "trims_data": {"trim_left": 0, "trim_up": 0, "trim_right": 0, "trim_down": 0}, - "enable_www_snapshots": False, - "color_charger": [255, 128, 0], - "color_move": [238, 247, 255], - "color_wall": [255, 255, 0], - "color_robot": [255, 255, 204], - "color_go_to": [0, 255, 0], - "color_no_go": [255, 0, 0], - "color_zone_clean": [255, 255, 255], - "color_background": [0, 125, 255], - "color_text": [255, 255, 255], - "alpha_charger": 255.0, - "alpha_move": 255.0, - "alpha_wall": 255.0, - "alpha_robot": 255.0, - "alpha_go_to": 255.0, - "alpha_no_go": 125.0, - "alpha_zone_clean": 125.0, - "alpha_background": 255.0, - "alpha_text": 255.0, - "color_room_0": [135, 206, 250], - "color_room_1": [176, 226, 255], - "color_room_2": [165, 105, 18], - "color_room_3": [164, 211, 238], - "color_room_4": [141, 182, 205], - "color_room_5": [96, 123, 139], - "color_room_6": [224, 255, 255], - "color_room_7": [209, 238, 238], - "color_room_8": [180, 205, 205], - "color_room_9": [122, 139, 139], - "color_room_10": [175, 238, 238], - "color_room_11": [84, 153, 199], - "color_room_12": [133, 193, 233], - "color_room_13": [245, 176, 65], - "color_room_14": [82, 190, 128], - "color_room_15": [72, 
201, 176], - "alpha_room_0": 255.0, - "alpha_room_1": 255.0, - "alpha_room_2": 255.0, - "alpha_room_3": 255.0, - "alpha_room_4": 255.0, - "alpha_room_5": 255.0, - "alpha_room_6": 255.0, - "alpha_room_7": 255.0, - "alpha_room_8": 255.0, - "alpha_room_9": 255.0, - "alpha_room_10": 255.0, - "alpha_room_11": 255.0, - "alpha_room_12": 255.0, - "alpha_room_13": 255.0, - "alpha_room_14": 255.0, - "alpha_room_15": 255.0, -} - -KEYS_TO_UPDATE = [ - "rotate_image", - "margins", - "aspect_ratio", - "offset_top", - "offset_bottom", - "offset_left", - "offset_right", - "trims_data", - "auto_zoom", - "zoom_lock_ratio", - "show_vac_status", - "vac_status_size", - "vac_status_position", - "vac_status_font", - "get_svg_file", - "enable_www_snapshots", - "color_charger", - "color_move", - "color_wall", - "color_robot", - "color_go_to", - "color_no_go", - "color_zone_clean", - "color_background", - "color_text", - "alpha_charger", - "alpha_move", - "alpha_wall", - "alpha_robot", - "alpha_go_to", - "alpha_no_go", - "alpha_zone_clean", - "alpha_background", - "alpha_text", - "color_room_0", - "color_room_1", - "color_room_2", - "color_room_3", - "color_room_4", - "color_room_5", - "color_room_6", - "color_room_7", - "color_room_8", - "color_room_9", - "color_room_10", - "color_room_11", - "color_room_12", - "color_room_13", - "color_room_14", - "color_room_15", - "alpha_room_0", - "alpha_room_1", - "alpha_room_2", - "alpha_room_3", - "alpha_room_4", - "alpha_room_5", - "alpha_room_6", - "alpha_room_7", - "alpha_room_8", - "alpha_room_9", - "alpha_room_10", - "alpha_room_11", - "alpha_room_12", - "alpha_room_13", - "alpha_room_14", - "alpha_room_15", -] - -ALPHA_VALUES = { - "min": 0.0, # Minimum value - "max": 255.0, # Maximum value - "step": 1.0, # Step value -} - -TEXT_SIZE_VALUES = { - "min": 5, # Minimum value - "max": 51, # Maximum value - "step": 1, # Step value -} - -ROTATION_VALUES = [ - {"label": "0", "value": "0"}, - {"label": "90", "value": "90"}, - {"label": "180", "value": 
"180"}, - {"label": "270", "value": "270"}, -] - -RATIO_VALUES = [ - {"label": "Original Ratio.", "value": "None"}, - {"label": "1:1", "value": "1, 1"}, - {"label": "2:1", "value": "2, 1"}, - {"label": "3:2", "value": "3, 2"}, - {"label": "5:4", "value": "5, 4"}, - {"label": "9:16", "value": "9, 16"}, - {"label": "16:9", "value": "16, 9"}, -] - -FONTS_AVAILABLE = [ - { - "label": "Fira Sans", - "value": "config/fonts/FiraSans.ttf", - }, - { - "label": "Inter", - "value": "config/fonts/Inter-VF.ttf", - }, - { - "label": "M Plus Regular", - "value": "config/fonts/MPLUSRegular.ttf", - }, - { - "label": "Noto Sans CJKhk", - "value": "config/fonts/NotoSansCJKhk-VF.ttf", - }, - { - "label": "Noto Kufi Arabic", - "value": "config/fonts/NotoKufiArabic-VF.ttf", - }, - { - "label": "Noto Sans Khojki", - "value": "config/fonts/NotoSansKhojki.ttf", - }, - { - "label": "Lato Regular", - "value": "config/fonts/Lato-Regular.ttf", - }, -] - -NOT_STREAMING_STATES = { - "idle", - "paused", - "charging", - "error", - "docked", -} - -DECODED_TOPICS = { - "/MapData/segments", - "/maploader/map", - "/maploader/status", - "/StatusStateAttribute/status", - "/StatusStateAttribute/error_description", - "/$state", - "/BatteryStateAttribute/level", - "/WifiConfigurationCapability/ips", - "/state", # Rand256 - "/destinations", # Rand256 - "/command", # Rand256 - "/custom_command", # Rand256 - "/attributes", # Rand256 -} - - -# self.command_topic need to be added to this dictionary after init. -NON_DECODED_TOPICS = { - "/MapData/map-data", - "/map_data", -} - -"""App Constants. 
Not in use, and dummy values""" -IDLE_SCAN_INTERVAL = 120 -CLEANING_SCAN_INTERVAL = 5 -IS_ALPHA = "add_base_alpha" -IS_ALPHA_R1 = "add_room_1_alpha" -IS_ALPHA_R2 = "add_room_2_alpha" -IS_OFFSET = "add_offset" - -"""Base Colours RGB""" -COLOR_CHARGER = "color_charger" -COLOR_MOVE = "color_move" -COLOR_ROBOT = "color_robot" -COLOR_NO_GO = "color_no_go" -COLOR_GO_TO = "color_go_to" -COLOR_BACKGROUND = "color_background" -COLOR_ZONE_CLEAN = "color_zone_clean" -COLOR_WALL = "color_wall" -COLOR_TEXT = "color_text" - -"Rooms Colours RGB" -COLOR_ROOM_0 = "color_room_0" -COLOR_ROOM_1 = "color_room_1" -COLOR_ROOM_2 = "color_room_2" -COLOR_ROOM_3 = "color_room_3" -COLOR_ROOM_4 = "color_room_4" -COLOR_ROOM_5 = "color_room_5" -COLOR_ROOM_6 = "color_room_6" -COLOR_ROOM_7 = "color_room_7" -COLOR_ROOM_8 = "color_room_8" -COLOR_ROOM_9 = "color_room_9" -COLOR_ROOM_10 = "color_room_10" -COLOR_ROOM_11 = "color_room_11" -COLOR_ROOM_12 = "color_room_12" -COLOR_ROOM_13 = "color_room_13" -COLOR_ROOM_14 = "color_room_14" -COLOR_ROOM_15 = "color_room_15" - -"""Alpha for RGBA Colours""" -ALPHA_CHARGER = "alpha_charger" -ALPHA_MOVE = "alpha_move" -ALPHA_ROBOT = "alpha_robot" -ALPHA_NO_GO = "alpha_no_go" -ALPHA_GO_TO = "alpha_go_to" -ALPHA_BACKGROUND = "alpha_background" -ALPHA_ZONE_CLEAN = "alpha_zone_clean" -ALPHA_WALL = "alpha_wall" -ALPHA_TEXT = "alpha_text" -ALPHA_ROOM_0 = "alpha_room_0" -ALPHA_ROOM_1 = "alpha_room_1" -ALPHA_ROOM_2 = "alpha_room_2" -ALPHA_ROOM_3 = "alpha_room_3" -ALPHA_ROOM_4 = "alpha_room_4" -ALPHA_ROOM_5 = "alpha_room_5" -ALPHA_ROOM_6 = "alpha_room_6" -ALPHA_ROOM_7 = "alpha_room_7" -ALPHA_ROOM_8 = "alpha_room_8" -ALPHA_ROOM_9 = "alpha_room_9" -ALPHA_ROOM_10 = "alpha_room_10" -ALPHA_ROOM_11 = "alpha_room_11" -ALPHA_ROOM_12 = "alpha_room_12" -ALPHA_ROOM_13 = "alpha_room_13" -ALPHA_ROOM_14 = "alpha_room_14" -ALPHA_ROOM_15 = "alpha_room_15" - -""" Constants for the attribute keys """ -ATTR_FRIENDLY_NAME = "friendly_name" -ATTR_VACUUM_BATTERY = "battery" 
-ATTR_VACUUM_CHARGING = "charging" -ATTR_VACUUM_POSITION = "vacuum_position" -ATTR_VACUUM_TOPIC = "vacuum_topic" -ATTR_VACUUM_STATUS = "vacuum_status" -ATTR_JSON_DATA = "json_data" -ATTR_VACUUM_JSON_ID = "vacuum_json_id" -ATTR_CALIBRATION_POINTS = "calibration_points" -ATTR_SNAPSHOT = "snapshot" -ATTR_SNAPSHOT_PATH = "snapshot_path" -ATTR_ROOMS = "rooms" -ATTR_ZONES = "zones" -ATTR_POINTS = "points" -ATTR_OBSTACLES = "obstacles" -ATTR_CAMERA_MODE = "camera_mode" - - class CameraModes: """Constants for the camera modes""" @@ -681,3 +285,18 @@ def clear(self) -> dict: self.trim_down = 0 self.trim_right = 0 return asdict(self) + +Color = Union[Tuple[int, int, int], Tuple[int, int, int, int]] +Colors = Dict[str, Color] +CalibrationPoints = list[dict[str, Any]] +RobotPosition: type[tuple[Any, Any, dict[str, int | float] | None]] = tuple[ + Any, Any, dict[str, int | float] | None +] +ChargerPosition = dict[str, Any] +RoomsProperties = dict[str, RoomProperty] +ImageSize = dict[str, int | list[int]] +Size = dict[str, int] +JsonType = Any # json.loads() return type is Any +PilPNG = Image.Image # Keep for backward compatibility +NumpyArray = np.ndarray +Point = Tuple[int, int] \ No newline at end of file diff --git a/SCR/valetudo_map_parser/const.py b/SCR/valetudo_map_parser/const.py new file mode 100644 index 0000000..dc1c038 --- /dev/null +++ b/SCR/valetudo_map_parser/const.py @@ -0,0 +1,280 @@ +CAMERA_STORAGE = "valetudo_camera" +ATTR_IMAGE_LAST_UPDATED = "image_last_updated" +ATTR_ROTATE = "rotate_image" +ATTR_CROP = "crop_image" +ATTR_MARGINS = "margins" +ATTR_CONTENT_TYPE = "content_type" +CONF_OFFSET_TOP = "offset_top" +CONF_OFFSET_BOTTOM = "offset_bottom" +CONF_OFFSET_LEFT = "offset_left" +CONF_OFFSET_RIGHT = "offset_right" +CONF_ASPECT_RATIO = "aspect_ratio" +CONF_VAC_STAT = "show_vac_status" +CONF_VAC_STAT_SIZE = "vac_status_size" +CONF_VAC_STAT_POS = "vac_status_position" +CONF_VAC_STAT_FONT = "vac_status_font" +CONF_VACUUM_CONNECTION_STRING = "vacuum_map" 
+CONF_VACUUM_ENTITY_ID = "vacuum_entity" +CONF_VACUUM_CONFIG_ENTRY_ID = "vacuum_config_entry" +CONF_VACUUM_IDENTIFIERS = "vacuum_identifiers" +CONF_SNAPSHOTS_ENABLE = "enable_www_snapshots" +CONF_EXPORT_SVG = "get_svg_file" +CONF_AUTO_ZOOM = "auto_zoom" +CONF_ZOOM_LOCK_RATIO = "zoom_lock_ratio" +CONF_TRIMS_SAVE = "save_trims" +ICON = "mdi:camera" +NAME = "MQTT Vacuum Camera" + +DEFAULT_IMAGE_SIZE = { + "x": 5120, + "y": 5120, + "centre": [(5120 // 2), (5120 // 2)], +} + +COLORS = [ + "wall", + "zone_clean", + "robot", + "background", + "move", + "charger", + "no_go", + "go_to", +] + +SENSOR_NO_DATA = { + "mainBrush": 0, + "sideBrush": 0, + "filter": 0, + "currentCleanTime": 0, + "currentCleanArea": 0, + "cleanTime": 0, + "cleanArea": 0, + "cleanCount": 0, + "battery": 0, + "state": 0, + "last_run_start": 0, + "last_run_end": 0, + "last_run_duration": 0, + "last_run_area": 0, + "last_bin_out": 0, + "last_bin_full": 0, + "last_loaded_map": "NoMap", + "robot_in_room": "Unsupported", +} + +DEFAULT_PIXEL_SIZE = 5 + +DEFAULT_VALUES = { + "rotate_image": "0", + "margins": "100", + "aspect_ratio": "None", + "offset_top": 0, + "offset_bottom": 0, + "offset_left": 0, + "offset_right": 0, + "auto_zoom": False, + "zoom_lock_ratio": True, + "show_vac_status": False, + "vac_status_font": "SCR/valetudo_map_parser/config/fonts/FiraSans.ttf", + "vac_status_size": 50, + "vac_status_position": True, + "get_svg_file": False, + "save_trims": True, + "trims_data": {"trim_left": 0, "trim_up": 0, "trim_right": 0, "trim_down": 0}, + "enable_www_snapshots": False, + "color_charger": [255, 128, 0], + "color_move": [238, 247, 255], + "color_wall": [255, 255, 0], + "color_robot": [255, 255, 204], + "color_go_to": [0, 255, 0], + "color_no_go": [255, 0, 0], + "color_zone_clean": [255, 255, 255], + "color_background": [0, 125, 255], + "color_text": [255, 255, 255], + "alpha_charger": 255.0, + "alpha_move": 255.0, + "alpha_wall": 255.0, + "alpha_robot": 255.0, + "alpha_go_to": 255.0, + 
"alpha_no_go": 125.0, + "alpha_zone_clean": 125.0, + "alpha_background": 255.0, + "alpha_text": 255.0, + "color_room_0": [135, 206, 250], + "color_room_1": [176, 226, 255], + "color_room_2": [165, 105, 18], + "color_room_3": [164, 211, 238], + "color_room_4": [141, 182, 205], + "color_room_5": [96, 123, 139], + "color_room_6": [224, 255, 255], + "color_room_7": [209, 238, 238], + "color_room_8": [180, 205, 205], + "color_room_9": [122, 139, 139], + "color_room_10": [175, 238, 238], + "color_room_11": [84, 153, 199], + "color_room_12": [133, 193, 233], + "color_room_13": [245, 176, 65], + "color_room_14": [82, 190, 128], + "color_room_15": [72, 201, 176], + "alpha_room_0": 255.0, + "alpha_room_1": 255.0, + "alpha_room_2": 255.0, + "alpha_room_3": 255.0, + "alpha_room_4": 255.0, + "alpha_room_5": 255.0, + "alpha_room_6": 255.0, + "alpha_room_7": 255.0, + "alpha_room_8": 255.0, + "alpha_room_9": 255.0, + "alpha_room_10": 255.0, + "alpha_room_11": 255.0, + "alpha_room_12": 255.0, + "alpha_room_13": 255.0, + "alpha_room_14": 255.0, + "alpha_room_15": 255.0, +} + +FONTS_AVAILABLE = [ + { + "label": "Fira Sans", + "value": "config/fonts/FiraSans.ttf", + }, + { + "label": "Inter", + "value": "config/fonts/Inter-VF.ttf", + }, + { + "label": "M Plus Regular", + "value": "config/fonts/MPLUSRegular.ttf", + }, + { + "label": "Noto Sans CJKhk", + "value": "config/fonts/NotoSansCJKhk-VF.ttf", + }, + { + "label": "Noto Kufi Arabic", + "value": "config/fonts/NotoKufiArabic-VF.ttf", + }, + { + "label": "Noto Sans Khojki", + "value": "config/fonts/NotoSansKhojki.ttf", + }, + { + "label": "Lato Regular", + "value": "config/fonts/Lato-Regular.ttf", + }, +] + +NOT_STREAMING_STATES = { + "idle", + "paused", + "charging", + "error", + "docked", +} + +DECODED_TOPICS = { + "/MapData/segments", + "/maploader/map", + "/maploader/status", + "/StatusStateAttribute/status", + "/StatusStateAttribute/error_description", + "/$state", + "/BatteryStateAttribute/level", + 
"/WifiConfigurationCapability/ips", + "/state", # Rand256 + "/destinations", # Rand256 + "/command", # Rand256 + "/custom_command", # Rand256 + "/attributes", # Rand256 +} + + +# self.command_topic need to be added to this dictionary after init. +NON_DECODED_TOPICS = { + "/MapData/map-data", + "/map_data", +} + +"""App Constants. Not in use, and dummy values""" +IDLE_SCAN_INTERVAL = 120 +CLEANING_SCAN_INTERVAL = 5 +IS_ALPHA = "add_base_alpha" +IS_ALPHA_R1 = "add_room_1_alpha" +IS_ALPHA_R2 = "add_room_2_alpha" +IS_OFFSET = "add_offset" + +"""Base Colours RGB""" +COLOR_CHARGER = "color_charger" +COLOR_MOVE = "color_move" +COLOR_ROBOT = "color_robot" +COLOR_NO_GO = "color_no_go" +COLOR_GO_TO = "color_go_to" +COLOR_BACKGROUND = "color_background" +COLOR_ZONE_CLEAN = "color_zone_clean" +COLOR_WALL = "color_wall" +COLOR_TEXT = "color_text" + +"Rooms Colours RGB" +COLOR_ROOM_0 = "color_room_0" +COLOR_ROOM_1 = "color_room_1" +COLOR_ROOM_2 = "color_room_2" +COLOR_ROOM_3 = "color_room_3" +COLOR_ROOM_4 = "color_room_4" +COLOR_ROOM_5 = "color_room_5" +COLOR_ROOM_6 = "color_room_6" +COLOR_ROOM_7 = "color_room_7" +COLOR_ROOM_8 = "color_room_8" +COLOR_ROOM_9 = "color_room_9" +COLOR_ROOM_10 = "color_room_10" +COLOR_ROOM_11 = "color_room_11" +COLOR_ROOM_12 = "color_room_12" +COLOR_ROOM_13 = "color_room_13" +COLOR_ROOM_14 = "color_room_14" +COLOR_ROOM_15 = "color_room_15" + +"""Alpha for RGBA Colours""" +ALPHA_CHARGER = "alpha_charger" +ALPHA_MOVE = "alpha_move" +ALPHA_ROBOT = "alpha_robot" +ALPHA_NO_GO = "alpha_no_go" +ALPHA_GO_TO = "alpha_go_to" +ALPHA_BACKGROUND = "alpha_background" +ALPHA_ZONE_CLEAN = "alpha_zone_clean" +ALPHA_WALL = "alpha_wall" +ALPHA_TEXT = "alpha_text" +ALPHA_ROOM_0 = "alpha_room_0" +ALPHA_ROOM_1 = "alpha_room_1" +ALPHA_ROOM_2 = "alpha_room_2" +ALPHA_ROOM_3 = "alpha_room_3" +ALPHA_ROOM_4 = "alpha_room_4" +ALPHA_ROOM_5 = "alpha_room_5" +ALPHA_ROOM_6 = "alpha_room_6" +ALPHA_ROOM_7 = "alpha_room_7" +ALPHA_ROOM_8 = "alpha_room_8" +ALPHA_ROOM_9 = "alpha_room_9" 
+ALPHA_ROOM_10 = "alpha_room_10" +ALPHA_ROOM_11 = "alpha_room_11" +ALPHA_ROOM_12 = "alpha_room_12" +ALPHA_ROOM_13 = "alpha_room_13" +ALPHA_ROOM_14 = "alpha_room_14" +ALPHA_ROOM_15 = "alpha_room_15" + +""" Constants for the attribute keys """ +ATTR_FRIENDLY_NAME = "friendly_name" +ATTR_VACUUM_BATTERY = "battery" +ATTR_VACUUM_CHARGING = "charging" +ATTR_VACUUM_POSITION = "vacuum_position" +ATTR_VACUUM_TOPIC = "vacuum_topic" +ATTR_VACUUM_STATUS = "vacuum_status" +ATTR_JSON_DATA = "json_data" +ATTR_VACUUM_JSON_ID = "vacuum_json_id" +ATTR_CALIBRATION_POINTS = "calibration_points" +ATTR_SNAPSHOT = "snapshot" +ATTR_SNAPSHOT_PATH = "snapshot_path" +ATTR_ROOMS = "rooms" +ATTR_ZONES = "zones" +ATTR_POINTS = "points" +ATTR_OBSTACLES = "obstacles" +ATTR_CAMERA_MODE = "camera_mode" \ No newline at end of file diff --git a/SCR/valetudo_map_parser/hypfer_handler.py b/SCR/valetudo_map_parser/hypfer_handler.py index c417d8c..9fd5167 100644 --- a/SCR/valetudo_map_parser/hypfer_handler.py +++ b/SCR/valetudo_map_parser/hypfer_handler.py @@ -16,8 +16,8 @@ from .config.async_utils import AsyncPIL from .config.drawable_elements import DrawableElement from .config.shared import CameraShared +from .const import COLORS from .config.types import ( - COLORS, LOGGER, CalibrationPoints, Colors, diff --git a/SCR/valetudo_map_parser/rand256_handler.py b/SCR/valetudo_map_parser/rand256_handler.py index 2d5dc30..75a83d9 100644 --- a/SCR/valetudo_map_parser/rand256_handler.py +++ b/SCR/valetudo_map_parser/rand256_handler.py @@ -15,10 +15,8 @@ from .config.async_utils import AsyncPIL from .config.drawable_elements import DrawableElement +from .const import COLORS, DEFAULT_IMAGE_SIZE, DEFAULT_PIXEL_SIZE from .config.types import ( - COLORS, - DEFAULT_IMAGE_SIZE, - DEFAULT_PIXEL_SIZE, LOGGER, Colors, Destinations, From ae071cca83a1b50885f6d7fce7a51797accc473e Mon Sep 17 00:00:00 2001 From: SCA075 <82227818+sca075@users.noreply.github.com> Date: Thu, 23 Oct 2025 17:49:01 +0200 Subject: [PATCH 20/25] 
map_data.py handle the Rand256 crash when Segment IDs are not available Signed-off-by: Sandro Cantarella --- SCR/valetudo_map_parser/map_data.py | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/SCR/valetudo_map_parser/map_data.py b/SCR/valetudo_map_parser/map_data.py index c7119ae..dfd7d3b 100755 --- a/SCR/valetudo_map_parser/map_data.py +++ b/SCR/valetudo_map_parser/map_data.py @@ -673,6 +673,11 @@ async def async_get_rrm_segments( img = RandImageData.get_rrm_image(json_data) seg_data = img.get("segments", {}) seg_ids = seg_data.get("id") + + # Handle missing or invalid segment IDs gracefully + if not seg_ids: + return [] + segments = [] outlines = [] count_seg = 0 From 66bf9ab8f03eec184ea6d6fc98a597c8258240a1 Mon Sep 17 00:00:00 2001 From: SCA075 <82227818+sca075@users.noreply.github.com> Date: Fri, 24 Oct 2025 00:15:09 +0200 Subject: [PATCH 21/25] status_text.py Signed-off-by: Sandro Cantarella --- .../config/status_text/status_text.py | 34 +++++++++---------- 1 file changed, 16 insertions(+), 18 deletions(-) diff --git a/SCR/valetudo_map_parser/config/status_text/status_text.py b/SCR/valetudo_map_parser/config/status_text/status_text.py index 83040a6..8bbf461 100644 --- a/SCR/valetudo_map_parser/config/status_text/status_text.py +++ b/SCR/valetudo_map_parser/config/status_text/status_text.py @@ -6,12 +6,13 @@ from __future__ import annotations from typing import Callable + +from ...const import text_size_coverage, charge_level, charging, dot from ..types import LOGGER, PilPNG from .translations import translations LOGGER.propagate = True -charge_level = "\u03de" # unicode Koppa symbol -charging = "\u2211" # unicode Charging symbol + class StatusText: """ @@ -29,7 +30,7 @@ def __init__(self, camera_shared): self._docked_ready, self._active, self._mqtt_disconnected, - ] + ] # static ordered sequence of compose functions @staticmethod async def _get_vacuum_status_translation( @@ -64,7 +65,7 @@ def _mqtt_disconnected(self, current_state: list[str]) -> 
list[str]: def _docked_charged(self, current_state: list[str]) -> list[str]: """Return the translated docked and charging status.""" if self._shared.vacuum_state == "docked" and self._shared.vacuum_bat_charged(): - current_state.append(" \u00b7 ") + current_state.append(dot) current_state.append(f"{charging}{charge_level} ") current_state.append(f"{self._shared.vacuum_battery}%") return current_state @@ -72,7 +73,7 @@ def _docked_charged(self, current_state: list[str]) -> list[str]: def _docked_ready(self, current_state: list[str]) -> list[str]: """Return the translated docked and ready status.""" if self._shared.vacuum_state == "docked" and not self._shared.vacuum_bat_charged(): - current_state.append(" \u00b7 ") + current_state.append(dot) current_state.append(f"{charge_level} ") ready_txt = (self._lang_map or {}).get( "ready", @@ -92,7 +93,7 @@ def _current_room(self, current_state: list[str]) -> list[str]: def _active(self, current_state: list[str]) -> list[str]: """Return the translated active status.""" if self._shared.vacuum_state != "docked": - current_state.append(" \u00b7 ") + current_state.append(dot) current_state.append(f"{charge_level}") current_state.append(f" {self._shared.vacuum_battery}%") return current_state @@ -103,18 +104,15 @@ async def get_status_text(self, text_img: PilPNG) -> tuple[list[str], int]: :param text_img: Image to draw the text on. :return status_text, text_size: List of the status text and the text size. """ - status_text = ["If you read me, something really went wrong.."] # default text - text_size_coverage = 1.5 # resize factor for the text text_size = self._shared.vacuum_status_size # default text size vacuum_state = await self._translate_vacuum_status() - if self._shared.show_vacuum_state: - status_text = [f"{self.file_name}: {vacuum_state}"] - # Compose Status Text with available data. 
- for func in self._compose_functions: - status_text = func(status_text) - if text_size >= 50 and getattr(text_img, "width", None): - text_pixels = max(1, sum(len(text) for text in status_text)) - text_size = int( - (text_size_coverage * text_img.width) // text_pixels - ) + status_text = [f"{self.file_name}: {vacuum_state}"] + # Compose Status Text with available data. + for func in self._compose_functions: + status_text = func(status_text) + if text_size >= 50 and getattr(text_img, "width", None): + text_pixels = max(1, sum(len(text) for text in status_text)) + text_size = int( + (text_size_coverage * text_img.width) // text_pixels + ) return status_text, text_size From 6ebae7dab61fbdd0b024b1ac366e8d674dd622ab Mon Sep 17 00:00:00 2001 From: SCA075 <82227818+sca075@users.noreply.github.com> Date: Fri, 24 Oct 2025 00:20:18 +0200 Subject: [PATCH 22/25] const.py Signed-off-by: Sandro Cantarella --- SCR/valetudo_map_parser/const.py | 11 +++++++++-- 1 file changed, 9 insertions(+), 2 deletions(-) diff --git a/SCR/valetudo_map_parser/const.py b/SCR/valetudo_map_parser/const.py index dc1c038..10d3173 100644 --- a/SCR/valetudo_map_parser/const.py +++ b/SCR/valetudo_map_parser/const.py @@ -1,3 +1,4 @@ + CAMERA_STORAGE = "valetudo_camera" ATTR_IMAGE_LAST_UPDATED = "image_last_updated" ATTR_ROTATE = "rotate_image" @@ -216,7 +217,7 @@ COLOR_WALL = "color_wall" COLOR_TEXT = "color_text" -"Rooms Colours RGB" +"""Rooms Colours RGB""" COLOR_ROOM_0 = "color_room_0" COLOR_ROOM_1 = "color_room_1" COLOR_ROOM_2 = "color_room_2" @@ -277,4 +278,10 @@ ATTR_ZONES = "zones" ATTR_POINTS = "points" ATTR_OBSTACLES = "obstacles" -ATTR_CAMERA_MODE = "camera_mode" \ No newline at end of file +ATTR_CAMERA_MODE = "camera_mode" + +# Status text const +charge_level = "\u03de" # unicode Koppa symbol +charging = "\u2211" # unicode Charging symbol +dot = " \u00b7 " # unicode middle dot +text_size_coverage = 1.5 # resize factor for the text From 3d86aa38ffefcf56b126c6f27342621dc30b64b9 Mon Sep 17 00:00:00 
2001 From: SCA075 <82227818+sca075@users.noreply.github.com> Date: Fri, 24 Oct 2025 00:32:58 +0200 Subject: [PATCH 23/25] map_data.py Signed-off-by: Sandro Cantarella --- SCR/valetudo_map_parser/map_data.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/SCR/valetudo_map_parser/map_data.py b/SCR/valetudo_map_parser/map_data.py index dfd7d3b..2be9a9e 100755 --- a/SCR/valetudo_map_parser/map_data.py +++ b/SCR/valetudo_map_parser/map_data.py @@ -676,7 +676,7 @@ async def async_get_rrm_segments( # Handle missing or invalid segment IDs gracefully if not seg_ids: - return [] + return ([], []) if out_lines else [] segments = [] outlines = [] From 4c28fcee8e856a395e73a49813a0f5f2d1aa9de7 Mon Sep 17 00:00:00 2001 From: SCA075 <82227818+sca075@users.noreply.github.com> Date: Sun, 2 Nov 2025 12:03:36 +0100 Subject: [PATCH 24/25] pylinted code and ruff fix stability of room data Signed-off-by: Sandro Cantarella --- SCR/valetudo_map_parser/config/async_utils.py | 2 +- SCR/valetudo_map_parser/config/drawable.py | 80 +--- .../config/rand256_parser.py | 240 +++++----- SCR/valetudo_map_parser/config/shared.py | 21 +- .../config/status_text/__init__.py | 6 + .../config/status_text/status_text.py | 15 +- SCR/valetudo_map_parser/config/types.py | 65 ++- SCR/valetudo_map_parser/config/utils.py | 172 ++++--- SCR/valetudo_map_parser/const.py | 1 + SCR/valetudo_map_parser/hypfer_draw.py | 271 +++++------ SCR/valetudo_map_parser/hypfer_handler.py | 391 ++++++++-------- SCR/valetudo_map_parser/map_data.py | 26 ++ SCR/valetudo_map_parser/rand256_handler.py | 421 +++++++++--------- SCR/valetudo_map_parser/rooms_handler.py | 9 +- pyproject.toml | 2 +- 15 files changed, 881 insertions(+), 841 deletions(-) create mode 100644 SCR/valetudo_map_parser/config/status_text/__init__.py diff --git a/SCR/valetudo_map_parser/config/async_utils.py b/SCR/valetudo_map_parser/config/async_utils.py index b8ef7b6..7be4d29 100644 --- a/SCR/valetudo_map_parser/config/async_utils.py +++ 
b/SCR/valetudo_map_parser/config/async_utils.py @@ -49,7 +49,7 @@ async def async_resize( ) -> Image.Image: """Async image resizing.""" if resample is None: - resample = Image.LANCZOS + resample = Image.Resampling.LANCZOS return await make_async(image.resize, size, resample) @staticmethod diff --git a/SCR/valetudo_map_parser/config/drawable.py b/SCR/valetudo_map_parser/config/drawable.py index 8eb0884..401b6f0 100644 --- a/SCR/valetudo_map_parser/config/drawable.py +++ b/SCR/valetudo_map_parser/config/drawable.py @@ -12,7 +12,7 @@ import logging from pathlib import Path -from typing import Tuple, Union +from typing import Union import numpy as np from mvcrender.blend import get_blended_color, sample_and_blend_color @@ -205,24 +205,6 @@ async def go_to_flag( layer = Drawable._line(layer, xp1, yp1, xp2, yp2, pole_color, pole_width) return layer - @staticmethod - def point_inside(x: int, y: int, points: list[Tuple[int, int]]) -> bool: - """Check if a point (x, y) is inside a polygon defined by a list of points.""" - n = len(points) - inside = False - inters_x = 0.0 - p1x, p1y = points[0] - for i in range(1, n + 1): - p2x, p2y = points[i % n] - if y > min(p1y, p2y): - if y <= max(p1y, p2y) and x <= max(p1x, p2x): - if p1y != p2y: - inters_x = (y - p1y) * (p2x - p1x) / (p2y - p1y) + p1x - if p1x == p2x or x <= inters_x: - inside = not inside - p1x, p1y = p2x, p2y - return inside - @staticmethod def _line( layer: NumpyArray, @@ -329,56 +311,6 @@ def _ellipse( image[y1:y2, x1:x2] = color return image - @staticmethod - def _polygon_outline( - arr: NumpyArray, - points: list[Tuple[int, int]], - width: int, - outline_color: Color, - fill_color: Color = None, - ) -> NumpyArray: - """ - Draw the outline of a polygon on the array using _line, and optionally fill it. - Uses NumPy vectorized operations for improved performance. 
- """ - # Draw the outline - for i, _ in enumerate(points): - current_point = points[i] - next_point = points[(i + 1) % len(points)] - arr = Drawable._line( - arr, - current_point[0], - current_point[1], - next_point[0], - next_point[1], - outline_color, - width, - ) - - # Fill the polygon if a fill color is provided - if fill_color is not None: - # Get the bounding box of the polygon - min_x = max(0, min(p[0] for p in points)) - max_x = min(arr.shape[1] - 1, max(p[0] for p in points)) - min_y = max(0, min(p[1] for p in points)) - max_y = min(arr.shape[0] - 1, max(p[1] for p in points)) - - # Create a mask for the polygon region - mask = np.zeros((max_y - min_y + 1, max_x - min_x + 1), dtype=bool) - - # Adjust points to the mask's coordinate system - adjusted_points = [(p[0] - min_x, p[1] - min_y) for p in points] - - # Test each point in the grid - for i in range(mask.shape[0]): - for j in range(mask.shape[1]): - mask[i, j] = Drawable.point_inside(j, i, adjusted_points) - - # Apply the fill color to the masked region - arr[min_y : max_y + 1, min_x : max_x + 1][mask] = fill_color - - return arr - @staticmethod async def zones(layers: NumpyArray, coordinates, color: Color) -> NumpyArray: """ @@ -420,14 +352,18 @@ async def zones(layers: NumpyArray, coordinates, color: Color) -> NumpyArray: mask_rgba = np.zeros((box_h, box_w, 4), dtype=np.uint8) # Convert points to xs, ys arrays (adjusted to local bbox coordinates) - xs = np.array([int(pts[i] - min_x) for i in range(0, len(pts), 2)], dtype=np.int32) - ys = np.array([int(pts[i] - min_y) for i in range(1, len(pts), 2)], dtype=np.int32) + xs = np.array( + [int(pts[i] - min_x) for i in range(0, len(pts), 2)], dtype=np.int32 + ) + ys = np.array( + [int(pts[i] - min_y) for i in range(1, len(pts), 2)], dtype=np.int32 + ) # Draw filled polygon on mask polygon_u8(mask_rgba, xs, ys, (0, 0, 0, 0), 0, (255, 255, 255, 255)) # Extract boolean mask from first channel - zone_mask = (mask_rgba[:, :, 0] > 0) + zone_mask = mask_rgba[:, 
:, 0] > 0 del mask_rgba del xs del ys diff --git a/SCR/valetudo_map_parser/config/rand256_parser.py b/SCR/valetudo_map_parser/config/rand256_parser.py index 11088ca..fe4131a 100644 --- a/SCR/valetudo_map_parser/config/rand256_parser.py +++ b/SCR/valetudo_map_parser/config/rand256_parser.py @@ -281,6 +281,49 @@ def parse_blocks(self, raw: bytes, pixels: bool = True) -> Dict[int, Any]: break return blocks + def _process_image_pixels( + self, + buf: bytes, + offset: int, + g3offset: int, + length: int, + pixels: bool, + parameters: Dict[str, Any], + ) -> None: + """Process image pixels sequentially - segments are organized as blocks.""" + current_segments = {} + + for i in range(length): + pixel_byte = struct.unpack( + "> 3 + if segment_id == 0 and pixels: + # Floor pixel + parameters["pixels"]["floor"].append(i) + elif segment_id != 0: + # Room segment - segments are sequential blocks + if segment_id not in current_segments: + parameters["segments"]["id"].append(segment_id) + parameters["segments"]["pixels_seg_" + str(segment_id)] = [] + current_segments[segment_id] = True + + if pixels: + parameters["segments"]["pixels_seg_" + str(segment_id)].append( + i + ) + def _parse_image_block( self, buf: bytes, offset: int, length: int, hlength: int, pixels: bool = True ) -> Dict[str, Any]: @@ -330,41 +373,9 @@ def _parse_image_block( parameters["dimensions"]["height"] > 0 and parameters["dimensions"]["width"] > 0 ): - # Process data sequentially - segments are organized as blocks - current_segments = {} - - for i in range(length): - pixel_byte = struct.unpack( - "> 3 - if segment_id == 0 and pixels: - # Floor pixel - parameters["pixels"]["floor"].append(i) - elif segment_id != 0: - # Room segment - segments are sequential blocks - if segment_id not in current_segments: - parameters["segments"]["id"].append(segment_id) - parameters["segments"][ - "pixels_seg_" + str(segment_id) - ] = [] - current_segments[segment_id] = True - - if pixels: - parameters["segments"][ - 
"pixels_seg_" + str(segment_id) - ].append(i) + self._process_image_pixels( + buf, offset, g3offset, length, pixels, parameters + ) parameters["segments"]["count"] = len(parameters["segments"]["id"]) return parameters @@ -377,6 +388,79 @@ def _parse_image_block( "pixels": {"floor": [], "walls": [], "segments": {}}, } + def _calculate_angle_from_points(self, points: list) -> Optional[float]: + """Calculate angle from last two points in a path.""" + if len(points) >= 2: + last_point = points[-1] + second_last = points[-2] + dx = last_point[0] - second_last[0] + dy = last_point[1] - second_last[1] + if dx != 0 or dy != 0: + angle_rad = math.atan2(dy, dx) + return math.degrees(angle_rad) + return None + + def _transform_path_coordinates(self, points: list) -> list: + """Apply coordinate transformation to path points.""" + return [[point[0], self.Tools.DIMENSION_MM - point[1]] for point in points] + + def _parse_path_data(self, blocks: dict, parsed_map_data: dict) -> list: + """Parse path data with coordinate transformation.""" + transformed_path_points = [] + if self.Types.PATH.value in blocks: + path_data = blocks[self.Types.PATH.value].copy() + transformed_path_points = self._transform_path_coordinates( + path_data["points"] + ) + path_data["points"] = transformed_path_points + + angle = self._calculate_angle_from_points(transformed_path_points) + if angle is not None: + path_data["current_angle"] = angle + parsed_map_data["path"] = path_data + return transformed_path_points + + def _parse_goto_path_data(self, blocks: dict, parsed_map_data: dict) -> None: + """Parse goto predicted path with coordinate transformation.""" + if self.Types.GOTO_PREDICTED_PATH.value in blocks: + goto_path_data = blocks[self.Types.GOTO_PREDICTED_PATH.value].copy() + goto_path_data["points"] = self._transform_path_coordinates( + goto_path_data["points"] + ) + + angle = self._calculate_angle_from_points(goto_path_data["points"]) + if angle is not None: + goto_path_data["current_angle"] = 
angle + parsed_map_data["goto_predicted_path"] = goto_path_data + + def _add_zone_data(self, blocks: dict, parsed_map_data: dict) -> None: + """Add zone and area data to parsed map.""" + parsed_map_data["currently_cleaned_zones"] = ( + blocks[self.Types.CURRENTLY_CLEANED_ZONES.value]["zones"] + if self.Types.CURRENTLY_CLEANED_ZONES.value in blocks + else [] + ) + parsed_map_data["forbidden_zones"] = ( + blocks[self.Types.FORBIDDEN_ZONES.value]["forbidden_zones"] + if self.Types.FORBIDDEN_ZONES.value in blocks + else [] + ) + parsed_map_data["forbidden_mop_zones"] = ( + blocks[self.Types.FORBIDDEN_MOP_ZONES.value]["forbidden_mop_zones"] + if self.Types.FORBIDDEN_MOP_ZONES.value in blocks + else [] + ) + parsed_map_data["virtual_walls"] = ( + blocks[self.Types.VIRTUAL_WALLS.value]["virtual_walls"] + if self.Types.VIRTUAL_WALLS.value in blocks + else [] + ) + parsed_map_data["carpet_areas"] = ( + blocks[self.Types.CARPET_MAP.value]["carpet_map"] + if self.Types.CARPET_MAP.value in blocks + else [] + ) + def parse_rrm_data( self, map_buf: bytes, pixels: bool = False ) -> Optional[Dict[str, Any]]: @@ -393,39 +477,14 @@ def parse_rrm_data( robot_data = blocks[self.Types.ROBOT_POSITION.value] parsed_map_data["robot"] = robot_data["position"] - # Parse path data with coordinate transformation FIRST - transformed_path_points = [] - if self.Types.PATH.value in blocks: - path_data = blocks[self.Types.PATH.value].copy() - # Apply coordinate transformation like current parser - transformed_path_points = [ - [point[0], self.Tools.DIMENSION_MM - point[1]] - for point in path_data["points"] - ] - path_data["points"] = transformed_path_points - - # Calculate current angle from transformed points - if len(transformed_path_points) >= 2: - last_point = transformed_path_points[-1] - second_last = transformed_path_points[-2] - dx = last_point[0] - second_last[0] - dy = last_point[1] - second_last[1] - if dx != 0 or dy != 0: - angle_rad = math.atan2(dy, dx) - path_data["current_angle"] = 
math.degrees(angle_rad) - parsed_map_data["path"] = path_data - - # Get robot angle from TRANSFORMED path data (like current implementation) - robot_angle = 0 - if len(transformed_path_points) >= 2: - last_point = transformed_path_points[-1] - second_last = transformed_path_points[-2] - dx = last_point[0] - second_last[0] - dy = last_point[1] - second_last[1] - if dx != 0 or dy != 0: - angle_rad = math.atan2(dy, dx) - robot_angle = int(math.degrees(angle_rad)) + # Parse path data with coordinate transformation + transformed_path_points = self._parse_path_data(blocks, parsed_map_data) + # Get robot angle from transformed path data + robot_angle = 0 + angle = self._calculate_angle_from_points(transformed_path_points) + if angle is not None: + robot_angle = int(angle) parsed_map_data["robot_angle"] = robot_angle # Parse charger position @@ -438,24 +497,7 @@ def parse_rrm_data( parsed_map_data["image"] = blocks[self.Types.IMAGE.value] # Parse goto predicted path - if self.Types.GOTO_PREDICTED_PATH.value in blocks: - goto_path_data = blocks[self.Types.GOTO_PREDICTED_PATH.value].copy() - # Apply coordinate transformation - goto_path_data["points"] = [ - [point[0], self.Tools.DIMENSION_MM - point[1]] - for point in goto_path_data["points"] - ] - # Calculate current angle from transformed points (like working parser) - if len(goto_path_data["points"]) >= 2: - points = goto_path_data["points"] - last_point = points[-1] - second_last = points[-2] - dx = last_point[0] - second_last[0] - dy = last_point[1] - second_last[1] - if dx != 0 or dy != 0: - angle_rad = math.atan2(dy, dx) - goto_path_data["current_angle"] = math.degrees(angle_rad) - parsed_map_data["goto_predicted_path"] = goto_path_data + self._parse_goto_path_data(blocks, parsed_map_data) # Parse goto target if self.Types.GOTO_TARGET.value in blocks: @@ -463,32 +505,8 @@ def parse_rrm_data( "position" ] - # Add missing fields to match expected JSON format - parsed_map_data["currently_cleaned_zones"] = ( - 
blocks[self.Types.CURRENTLY_CLEANED_ZONES.value]["zones"] - if self.Types.CURRENTLY_CLEANED_ZONES.value in blocks - else [] - ) - parsed_map_data["forbidden_zones"] = ( - blocks[self.Types.FORBIDDEN_ZONES.value]["forbidden_zones"] - if self.Types.FORBIDDEN_ZONES.value in blocks - else [] - ) - parsed_map_data["forbidden_mop_zones"] = ( - blocks[self.Types.FORBIDDEN_MOP_ZONES.value]["forbidden_mop_zones"] - if self.Types.FORBIDDEN_MOP_ZONES.value in blocks - else [] - ) - parsed_map_data["virtual_walls"] = ( - blocks[self.Types.VIRTUAL_WALLS.value]["virtual_walls"] - if self.Types.VIRTUAL_WALLS.value in blocks - else [] - ) - parsed_map_data["carpet_areas"] = ( - blocks[self.Types.CARPET_MAP.value]["carpet_map"] - if self.Types.CARPET_MAP.value in blocks - else [] - ) + # Add zone and area data + self._add_zone_data(blocks, parsed_map_data) parsed_map_data["is_valid"] = self.is_valid return parsed_map_data diff --git a/SCR/valetudo_map_parser/config/shared.py b/SCR/valetudo_map_parser/config/shared.py index 6bf7455..377dcfd 100755 --- a/SCR/valetudo_map_parser/config/shared.py +++ b/SCR/valetudo_map_parser/config/shared.py @@ -127,11 +127,15 @@ def __init__(self, file_name): def vacuum_bat_charged(self) -> bool: """Check if the vacuum is charging.""" if self.vacuum_state != "docked": - self._battery_state = "not_charging" - elif (self._battery_state == "charging_done") and (int(self.vacuum_battery) == 100): + self._battery_state = "not_charging" + elif (self._battery_state == "charging_done") and ( + int(self.vacuum_battery) == 100 + ): self._battery_state = "charged" else: - self._battery_state = "charging" if int(self.vacuum_battery) < 100 else "charging_done" + self._battery_state = ( + "charging" if int(self.vacuum_battery) < 100 else "charging_done" + ) return (self.vacuum_state == "docked") and (self._battery_state == "charging") @staticmethod @@ -222,9 +226,9 @@ def generate_attributes(self) -> dict: def is_streaming(self) -> bool: """Return true if the 
device is streaming.""" updated_status = self.vacuum_state - attr_is_streaming = ((updated_status not in NOT_STREAMING_STATES - or self.vacuum_bat_charged()) - or not self.binary_image) + attr_is_streaming = ( + updated_status not in NOT_STREAMING_STATES or self.vacuum_bat_charged() + ) or not self.binary_image return attr_is_streaming def to_dict(self) -> dict: @@ -233,7 +237,7 @@ def to_dict(self) -> dict: "image": { "binary": self.binary_image, "size": pil_size_rotation(self.image_rotate, self.new_image), - "streaming": self.is_streaming() + "streaming": self.is_streaming(), }, "attributes": self.generate_attributes(), } @@ -251,9 +255,6 @@ def __init__(self, file_name: str, device_info: dict = None): self.device_info = device_info self.update_shared_data(device_info) - # Automatically initialize shared data for the instance - # self._init_shared_data(device_info) - def update_shared_data(self, device_info): """Initialize the shared data with device_info.""" instance = self.get_instance() # Retrieve the correct instance diff --git a/SCR/valetudo_map_parser/config/status_text/__init__.py b/SCR/valetudo_map_parser/config/status_text/__init__.py new file mode 100644 index 0000000..d689de0 --- /dev/null +++ b/SCR/valetudo_map_parser/config/status_text/__init__.py @@ -0,0 +1,6 @@ +"""Status text module for vacuum cleaners.""" + +from .status_text import StatusText +from .translations import translations + +__all__ = ["StatusText", "translations"] diff --git a/SCR/valetudo_map_parser/config/status_text/status_text.py b/SCR/valetudo_map_parser/config/status_text/status_text.py index 8bbf461..81accd6 100644 --- a/SCR/valetudo_map_parser/config/status_text/status_text.py +++ b/SCR/valetudo_map_parser/config/status_text/status_text.py @@ -30,7 +30,7 @@ def __init__(self, camera_shared): self._docked_ready, self._active, self._mqtt_disconnected, - ] # static ordered sequence of compose functions + ] # static ordered sequence of compose functions @staticmethod async def 
_get_vacuum_status_translation( @@ -57,7 +57,9 @@ def _mqtt_disconnected(self, current_state: list[str]) -> list[str]: if not self._shared.vacuum_connection: mqtt_disc = (self._lang_map or {}).get( "mqtt_disconnected", - translations.get("en", {}).get("mqtt_disconnected", "Disconnected from MQTT?"), + translations.get("en", {}).get( + "mqtt_disconnected", "Disconnected from MQTT?" + ), ) return [f"{self.file_name}: {mqtt_disc}"] return current_state @@ -72,7 +74,10 @@ def _docked_charged(self, current_state: list[str]) -> list[str]: def _docked_ready(self, current_state: list[str]) -> list[str]: """Return the translated docked and ready status.""" - if self._shared.vacuum_state == "docked" and not self._shared.vacuum_bat_charged(): + if ( + self._shared.vacuum_state == "docked" + and not self._shared.vacuum_bat_charged() + ): current_state.append(dot) current_state.append(f"{charge_level} ") ready_txt = (self._lang_map or {}).get( @@ -112,7 +117,5 @@ async def get_status_text(self, text_img: PilPNG) -> tuple[list[str], int]: status_text = func(status_text) if text_size >= 50 and getattr(text_img, "width", None): text_pixels = max(1, sum(len(text) for text in status_text)) - text_size = int( - (text_size_coverage * text_img.width) // text_pixels - ) + text_size = int((text_size_coverage * text_img.width) // text_pixels) return status_text, text_size diff --git a/SCR/valetudo_map_parser/config/types.py b/SCR/valetudo_map_parser/config/types.py index 1681406..3f17d31 100644 --- a/SCR/valetudo_map_parser/config/types.py +++ b/SCR/valetudo_map_parser/config/types.py @@ -20,21 +20,29 @@ class Spot(TypedDict): + """Type definition for a spot location.""" + name: str coordinates: List[int] # [x, y] class Zone(TypedDict): + """Type definition for a zone area.""" + name: str coordinates: List[List[int]] # [[x1, y1, x2, y2, repeats], ...] 
class Room(TypedDict): + """Type definition for a room.""" + name: str id: int class Destinations(TypedDict, total=False): + """Type definition for destinations including spots, zones, and rooms.""" + spots: NotRequired[Optional[List[Spot]]] zones: NotRequired[Optional[List[Zone]]] rooms: NotRequired[Optional[List[Room]]] @@ -42,6 +50,8 @@ class Destinations(TypedDict, total=False): class RoomProperty(TypedDict): + """Type definition for room properties including outline.""" + number: int outline: list[tuple[int, int]] name: str @@ -94,30 +104,49 @@ def from_list(data: list): class RoomStore: + """Singleton storage for room data per vacuum ID. + + Stores room properties in format: {segment_id: RoomProperty} + Example: {"16": {"number": 16, "outline": [...], "name": "Living Room", "x": 100, "y": 200}} + """ + _instances: Dict[str, "RoomStore"] = {} _lock = threading.Lock() - def __new__(cls, vacuum_id: str, rooms_data: Optional[dict] = None) -> "RoomStore": + def __new__( + cls, vacuum_id: str, rooms_data: Optional[Dict[str, RoomProperty]] = None + ) -> "RoomStore": with cls._lock: if vacuum_id not in cls._instances: instance = super(RoomStore, cls).__new__(cls) - instance.vacuum_id = vacuum_id - instance.vacuums_data = rooms_data or {} - instance.rooms_count = instance.get_rooms_count() - instance.floor = None cls._instances[vacuum_id] = instance - else: - if rooms_data is not None: - cls._instances[vacuum_id].vacuums_data = rooms_data - return cls._instances[vacuum_id] + return cls._instances[vacuum_id] - def get_rooms(self) -> dict: + def __init__( + self, vacuum_id: str, rooms_data: Optional[Dict[str, RoomProperty]] = None + ) -> None: + # Only initialize if this is a new instance (not yet initialized) + if not hasattr(self, "vacuum_id"): + self.vacuum_id: str = vacuum_id + self.vacuums_data: Dict[str, RoomProperty] = rooms_data or {} + self.rooms_count: int = self.get_rooms_count() + self.floor: Optional[str] = None + elif rooms_data is not None: + # Update 
only if new data is provided + self.vacuums_data = rooms_data + self.rooms_count = self.get_rooms_count() + + def get_rooms(self) -> Dict[str, RoomProperty]: + """Get all rooms data.""" return self.vacuums_data - def set_rooms(self, rooms_data: dict) -> None: + def set_rooms(self, rooms_data: Dict[str, RoomProperty]) -> None: + """Set rooms data and update room count.""" self.vacuums_data = rooms_data + self.rooms_count = self.get_rooms_count() def get_rooms_count(self) -> int: + """Get the number of rooms, defaulting to 1 if no rooms are present.""" if isinstance(self.vacuums_data, dict): count = len(self.vacuums_data) return count if count > 0 else DEFAULT_ROOMS @@ -125,11 +154,14 @@ def get_rooms_count(self) -> int: @property def room_names(self) -> dict: - """Return room names in format {'room_0_name': 'SegmentID: RoomName', ...}.""" + """Return room names in format {'room_0_name': 'SegmentID: RoomName', ...}. + + Maximum of 16 rooms supported. + """ result = {} if isinstance(self.vacuums_data, dict): for idx, (segment_id, room_data) in enumerate(self.vacuums_data.items()): - if idx >= 16: # Max 16 rooms + if idx >= 16: # Max 16 rooms supported break room_name = room_data.get("name", f"Room {segment_id}") result[f"room_{idx}_name"] = f"{segment_id}: {room_name}" @@ -137,6 +169,7 @@ def room_names(self) -> dict: @classmethod def get_all_instances(cls) -> Dict[str, "RoomStore"]: + """Get all RoomStore instances for all vacuum IDs.""" return cls._instances @@ -229,8 +262,9 @@ async def async_set_vacuum_json(self, vacuum_id: str, json_data: Any) -> None: async with self._lock: self.vacuum_json_data[vacuum_id] = json_data + class CameraModes: - """Constants for the camera modes""" + """Constants for the camera modes.""" MAP_VIEW = "map_view" OBSTACLE_VIEW = "obstacle_view" @@ -286,6 +320,7 @@ def clear(self) -> dict: self.trim_right = 0 return asdict(self) + Color = Union[Tuple[int, int, int], Tuple[int, int, int, int]] Colors = Dict[str, Color] CalibrationPoints = 
list[dict[str, Any]] @@ -299,4 +334,4 @@ def clear(self) -> dict: JsonType = Any # json.loads() return type is Any PilPNG = Image.Image # Keep for backward compatibility NumpyArray = np.ndarray -Point = Tuple[int, int] \ No newline at end of file +Point = Tuple[int, int] diff --git a/SCR/valetudo_map_parser/config/utils.py b/SCR/valetudo_map_parser/config/utils.py index fb0019d..4a2780f 100644 --- a/SCR/valetudo_map_parser/config/utils.py +++ b/SCR/valetudo_map_parser/config/utils.py @@ -110,80 +110,17 @@ async def async_get_image( """ try: # Backup current image to last_image before processing new one - if hasattr(self.shared, "new_image") and self.shared.new_image is not None: - # Close old last_image to free memory before replacing it - if hasattr(self.shared, "last_image") and self.shared.last_image is not None: - try: - self.shared.last_image.close() - except Exception: - pass # Ignore errors if image is already closed - self.shared.last_image = self.shared.new_image + self._backup_last_image() # Call the appropriate handler method based on handler type - if hasattr(self, "get_image_from_rrm"): - # This is a Rand256 handler - new_image = await self.get_image_from_rrm( - m_json=m_json, - destinations=destinations, - ) - - elif hasattr(self, "async_get_image_from_json"): - # This is a Hypfer handler - self.json_data = await HyperMapData.async_from_valetudo_json(m_json) - new_image = await self.async_get_image_from_json( - m_json=m_json, - ) - else: - LOGGER.warning( - "%s: Handler type not recognized for async_get_image", - self.file_name, - ) - return ( - self.shared.last_image - if hasattr(self.shared, "last_image") - else None - ) + new_image = await self._generate_new_image(m_json, destinations) + if new_image is None: + return self._handle_failed_image_generation() - # Store the new image in shared data - if new_image is not None: - # Update shared data - await self._async_update_shared_data(destinations) - self.shared.new_image = new_image - # Add text to 
the image - if self.shared.show_vacuum_state: - text_editor = StatusText(self.shared) - img_text = await text_editor.get_status_text(new_image) - Drawable.status_text( - new_image, - img_text[1], - self.shared.user_colors[8], - img_text[0], - self.shared.vacuum_status_font, - self.shared.vacuum_status_position, - ) - # Convert to binary (PNG bytes) if requested - if bytes_format: - self.shared.binary_image = pil_to_png_bytes(new_image) - else: - self.shared.binary_image = pil_to_png_bytes(self.shared.last_image) - # Update the timestamp with current datetime - self.shared.image_last_updated = datetime.datetime.fromtimestamp(time()) - LOGGER.debug("%s: Frame Completed.", self.file_name) - data = {} - if bytes_format: - data = self.shared.to_dict() - return new_image, data - else: - LOGGER.warning( - "%s: Failed to generate image from JSON data", self.file_name - ) - return ( - self.shared.last_image - if hasattr(self.shared, "last_image") - else None - ), self.shared.to_dict() + # Process and store the new image + return await self._process_new_image(new_image, destinations, bytes_format) - except Exception as e: + except (ValueError, TypeError, AttributeError, KeyError, RuntimeError) as e: LOGGER.warning( "%s: Error in async_get_image: %s", self.file_name, @@ -191,24 +128,111 @@ async def async_get_image( exc_info=True, ) return ( - self.shared.last_image if hasattr(self.shared, "last_image") else None + self.shared.last_image if hasattr(self.shared, "last_image") else None, + {}, + ) + + def _backup_last_image(self): + """Backup current image to last_image before processing new one.""" + if hasattr(self.shared, "new_image") and self.shared.new_image is not None: + # Close old last_image to free memory before replacing it + if ( + hasattr(self.shared, "last_image") + and self.shared.last_image is not None + ): + try: + self.shared.last_image.close() + except (OSError, AttributeError, RuntimeError): + pass # Ignore errors if image is already closed + 
self.shared.last_image = self.shared.new_image + + async def _generate_new_image( + self, m_json: dict | None, destinations: Destinations | None + ) -> PilPNG | None: + """Generate new image based on handler type.""" + if hasattr(self, "get_image_from_rrm"): + # This is a Rand256 handler + return await self.get_image_from_rrm( + m_json=m_json, + destinations=destinations, ) + if hasattr(self, "async_get_image_from_json"): + # This is a Hypfer handler + self.json_data = await HyperMapData.async_from_valetudo_json(m_json) + return await self.async_get_image_from_json(m_json=m_json) + + LOGGER.warning( + "%s: Handler type not recognized for async_get_image", + self.file_name, + ) + return None + + def _handle_failed_image_generation(self) -> Tuple[PilPNG | None, dict]: + """Handle case when image generation fails.""" + LOGGER.warning("%s: Failed to generate image from JSON data", self.file_name) + return ( + self.shared.last_image if hasattr(self.shared, "last_image") else None + ), self.shared.to_dict() + + async def _process_new_image( + self, new_image: PilPNG, destinations: Destinations | None, bytes_format: bool + ) -> Tuple[PilPNG, dict]: + """Process and store the new image with text and binary conversion.""" + # Update shared data + await self._async_update_shared_data(destinations) + self.shared.new_image = new_image + + # Add text to the image + if self.shared.show_vacuum_state: + await self._add_status_text(new_image) + + # Convert to binary (PNG bytes) if requested + self._convert_to_binary(new_image, bytes_format) + + # Update the timestamp with current datetime + self.shared.image_last_updated = datetime.datetime.fromtimestamp(time()) + LOGGER.debug("%s: Frame Completed.", self.file_name) + + data = self.shared.to_dict() if bytes_format else {} + return new_image, data + + async def _add_status_text(self, new_image: PilPNG): + """Add status text to the image.""" + text_editor = StatusText(self.shared) + img_text = await 
text_editor.get_status_text(new_image) + Drawable.status_text( + new_image, + img_text[1], + self.shared.user_colors[8], + img_text[0], + self.shared.vacuum_status_font, + self.shared.vacuum_status_position, + ) + + def _convert_to_binary(self, new_image: PilPNG, bytes_format: bool): + """Convert image to binary PNG bytes.""" + if bytes_format: + self.shared.binary_image = pil_to_png_bytes(new_image) + else: + self.shared.binary_image = pil_to_png_bytes(self.shared.last_image) + async def _async_update_shared_data(self, destinations: Destinations | None = None): """Update the shared data with the latest information.""" if hasattr(self, "get_rooms_attributes") and ( self.shared.map_rooms is None and destinations is not None ): + # pylint: disable=no-member self.shared.map_rooms = await self.get_rooms_attributes(destinations) if self.shared.map_rooms: LOGGER.debug("%s: Rand256 attributes rooms updated", self.file_name) - if hasattr(self, "async_get_rooms_attributes") and ( self.shared.map_rooms is None ): if self.shared.map_rooms is None: + # pylint: disable=no-member self.shared.map_rooms = await self.async_get_rooms_attributes() if self.shared.map_rooms: LOGGER.debug("%s: Hyper attributes rooms updated", self.file_name) @@ -217,6 +241,7 @@ async def _async_update_shared_data(self, destinations: Destinations | None = No hasattr(self, "get_calibration_data") and self.shared.attr_calibration_points is None ): + # pylint: disable=no-member self.shared.attr_calibration_points = self.get_calibration_data( self.shared.image_rotate ) @@ -472,7 +497,8 @@ async def calculate_array_hash( return hashlib.sha256(data_json.encode()).hexdigest() return None - async def async_copy_array(self, original_array: NumpyArray) -> NumpyArray: + @staticmethod + async def async_copy_array(original_array: NumpyArray) -> NumpyArray: """Copy the array using AsyncNumPy to yield control to the event loop.""" return await AsyncNumPy.async_copy(original_array) diff --git 
a/SCR/valetudo_map_parser/const.py b/SCR/valetudo_map_parser/const.py index 10d3173..7062718 100644 --- a/SCR/valetudo_map_parser/const.py +++ b/SCR/valetudo_map_parser/const.py @@ -1,3 +1,4 @@ +"""Constants for the Valetudo Map Parser library.""" CAMERA_STORAGE = "valetudo_camera" ATTR_IMAGE_LAST_UPDATED = "image_last_updated" diff --git a/SCR/valetudo_map_parser/hypfer_draw.py b/SCR/valetudo_map_parser/hypfer_draw.py index fb74262..183b60a 100755 --- a/SCR/valetudo_map_parser/hypfer_draw.py +++ b/SCR/valetudo_map_parser/hypfer_draw.py @@ -376,170 +376,141 @@ def _check_active_zone_and_set_zooming(self) -> None: else: self.img_h.zooming = False + def _create_robot_position_dict( + self, robot_x: int, robot_y: int, angle: float, room_name: str | None + ) -> RobotPosition: + """Create a robot position dictionary.""" + return { + "x": robot_x, + "y": robot_y, + "angle": angle, + "in_room": room_name, + } + + def _check_cached_room_outline( + self, robot_x: int, robot_y: int, angle: float + ) -> RobotPosition | None: + """Check if robot is still in cached room using outline.""" + if "outline" in self.img_h.robot_in_room: + outline = self.img_h.robot_in_room["outline"] + if point_in_polygon(int(robot_x), int(robot_y), outline): + self._check_active_zone_and_set_zooming() + return self._create_robot_position_dict( + robot_x, robot_y, angle, self.img_h.robot_in_room["room"] + ) + return None + + def _check_cached_room_bbox( + self, robot_x: int, robot_y: int, angle: float + ) -> RobotPosition | None: + """Check if robot is still in cached room using bounding box.""" + if all(k in self.img_h.robot_in_room for k in ["left", "right", "up", "down"]): + if ( + (self.img_h.robot_in_room["right"] >= int(robot_x)) + and (self.img_h.robot_in_room["left"] <= int(robot_x)) + ) and ( + (self.img_h.robot_in_room["down"] >= int(robot_y)) + and (self.img_h.robot_in_room["up"] <= int(robot_y)) + ): + self._check_active_zone_and_set_zooming() + return self._create_robot_position_dict( + 
robot_x, robot_y, angle, self.img_h.robot_in_room["room"] + ) + return None + + def _check_room_with_outline( + self, room: dict, room_count: int, robot_x: int, robot_y: int, angle: float + ) -> RobotPosition | None: + """Check if robot is in room using outline polygon.""" + outline = room["outline"] + if point_in_polygon(int(robot_x), int(robot_y), outline): + self.img_h.robot_in_room = { + "id": room.get("id", room_count), + "room": str(room["name"]), + "outline": outline, + } + self._check_active_zone_and_set_zooming() + return self._create_robot_position_dict( + robot_x, robot_y, angle, self.img_h.robot_in_room["room"] + ) + return None + + def _check_room_with_corners( + self, room: dict, room_count: int, robot_x: int, robot_y: int, angle: float + ) -> RobotPosition | None: + """Check if robot is in room using corner bounding box.""" + corners = room["corners"] + self.img_h.robot_in_room = { + "id": room.get("id", room_count), + "left": int(corners[0][0]), + "right": int(corners[2][0]), + "up": int(corners[0][1]), + "down": int(corners[2][1]), + "room": str(room["name"]), + } + if ( + (self.img_h.robot_in_room["right"] >= int(robot_x)) + and (self.img_h.robot_in_room["left"] <= int(robot_x)) + ) and ( + (self.img_h.robot_in_room["down"] >= int(robot_y)) + and (self.img_h.robot_in_room["up"] <= int(robot_y)) + ): + self._check_active_zone_and_set_zooming() + return self._create_robot_position_dict( + robot_x, robot_y, angle, self.img_h.robot_in_room["room"] + ) + return None + async def async_get_robot_in_room( self, robot_y: int = 0, robot_x: int = 0, angle: float = 0.0 ) -> RobotPosition: """Get the robot position and return in what room is.""" - # First check if we already have a cached room and if the robot is still in it - if self.img_h.robot_in_room: - # If we have outline data, use point_in_polygon for accurate detection - if "outline" in self.img_h.robot_in_room: - outline = self.img_h.robot_in_room["outline"] - if point_in_polygon(int(robot_x), 
int(robot_y), outline): - temp = { - "x": robot_x, - "y": robot_y, - "angle": angle, - "in_room": self.img_h.robot_in_room["room"], - } - # Handle active zones - self._check_active_zone_and_set_zooming() - return temp - # Fallback to bounding box check if no outline data - elif all( - k in self.img_h.robot_in_room for k in ["left", "right", "up", "down"] - ): - if ( - (self.img_h.robot_in_room["right"] >= int(robot_x)) - and (self.img_h.robot_in_room["left"] <= int(robot_x)) - ) and ( - (self.img_h.robot_in_room["down"] >= int(robot_y)) - and (self.img_h.robot_in_room["up"] <= int(robot_y)) - ): - temp = { - "x": robot_x, - "y": robot_y, - "angle": angle, - "in_room": self.img_h.robot_in_room["room"], - } - # Handle active zones - self._check_active_zone_and_set_zooming() - return temp - - # If we don't have a cached room or the robot is not in it, search all rooms - last_room = None - room_count = 0 + # Check cached room first if self.img_h.robot_in_room: - last_room = self.img_h.robot_in_room - - # Check if the robot is far outside the normal map boundaries - # This helps prevent false positives for points very far from any room - map_boundary = 20000 # Typical map size is around 5000-10000 units - if abs(robot_x) > map_boundary or abs(robot_y) > map_boundary: - self.img_h.robot_in_room = last_room - self.img_h.zooming = False - temp = { - "x": robot_x, - "y": robot_y, - "angle": angle, - "in_room": last_room["room"] if last_room else None, - } - return temp - - # Search through all rooms to find which one contains the robot - if self.img_h.rooms_pos is None: + result = self._check_cached_room_outline(robot_x, robot_y, angle) + if result: + return result + result = self._check_cached_room_bbox(robot_x, robot_y, angle) + if result: + return result + + # Prepare for room search + last_room = self.img_h.robot_in_room + map_boundary = 20000 + + # Check boundary conditions or missing room data + if ( + abs(robot_x) > map_boundary + or abs(robot_y) > map_boundary + or 
self.img_h.rooms_pos is None + ): self.img_h.robot_in_room = last_room self.img_h.zooming = False - temp = { - "x": robot_x, - "y": robot_y, - "angle": angle, - "in_room": last_room["room"] if last_room else None, - } - return temp + return self._create_robot_position_dict( + robot_x, robot_y, angle, last_room["room"] if last_room else None + ) - for room in self.img_h.rooms_pos: - # Check if the room has an outline (polygon points) + # Search through all rooms + for room_count, room in enumerate(self.img_h.rooms_pos): if "outline" in room: - outline = room["outline"] - # Use point_in_polygon for accurate detection with complex shapes - if point_in_polygon(int(robot_x), int(robot_y), outline): - # Robot is in this room - self.img_h.robot_in_room = { - "id": room.get( - "id", room_count - ), # Use actual segment ID if available - "room": str(room["name"]), - "outline": outline, - } - temp = { - "x": robot_x, - "y": robot_y, - "angle": angle, - "in_room": self.img_h.robot_in_room["room"], - } - - # Handle active zones - Map segment ID to active_zones position - if self.img_h.active_zones: - segment_id = str(self.img_h.robot_in_room["id"]) - room_store = RoomStore(self.file_name) - room_keys = list(room_store.get_rooms().keys()) - - if segment_id in room_keys: - position = room_keys.index(segment_id) - if position < len(self.img_h.active_zones): - self.img_h.zooming = bool( - self.img_h.active_zones[position] - ) - else: - self.img_h.zooming = False - else: - _LOGGER.warning( - "%s: Segment ID %s not found in room_keys %s", - self.file_name, - segment_id, - room_keys, - ) - self.img_h.zooming = False - else: - self.img_h.zooming = False - - return temp - # Fallback to bounding box if no outline is available + result = self._check_room_with_outline( + room, room_count, robot_x, robot_y, angle + ) + if result: + return result elif "corners" in room: - corners = room["corners"] - # Create a bounding box from the corners - self.img_h.robot_in_room = { - "id": room.get( - 
"id", room_count - ), # Use actual segment ID if available - "left": int(corners[0][0]), - "right": int(corners[2][0]), - "up": int(corners[0][1]), - "down": int(corners[2][1]), - "room": str(room["name"]), - } - # Check if the robot is inside the bounding box - if ( - (self.img_h.robot_in_room["right"] >= int(robot_x)) - and (self.img_h.robot_in_room["left"] <= int(robot_x)) - ) and ( - (self.img_h.robot_in_room["down"] >= int(robot_y)) - and (self.img_h.robot_in_room["up"] <= int(robot_y)) - ): - temp = { - "x": robot_x, - "y": robot_y, - "angle": angle, - "in_room": self.img_h.robot_in_room["room"], - } - - # Handle active zones - self._check_active_zone_and_set_zooming() - - return temp - room_count += 1 + result = self._check_room_with_corners( + room, room_count, robot_x, robot_y, angle + ) + if result: + return result # Robot not found in any room self.img_h.robot_in_room = last_room self.img_h.zooming = False - temp = { - "x": robot_x, - "y": robot_y, - "angle": angle, - "in_room": last_room["room"] if last_room else None, - } - return temp + return self._create_robot_position_dict( + robot_x, robot_y, angle, last_room["room"] if last_room else None + ) async def async_get_robot_position(self, entity_dict: dict) -> tuple | None: """Get the robot position from the entity data.""" diff --git a/SCR/valetudo_map_parser/hypfer_handler.py b/SCR/valetudo_map_parser/hypfer_handler.py index 9fd5167..7073ee4 100644 --- a/SCR/valetudo_map_parser/hypfer_handler.py +++ b/SCR/valetudo_map_parser/hypfer_handler.py @@ -88,6 +88,189 @@ async def async_extract_room_properties(self, json_data) -> RoomsProperties: self.rooms_pos = None return room_properties + def _identify_disabled_rooms(self) -> set: + """Identify which rooms are disabled in the drawing configuration.""" + disabled_rooms = set() + room_id = 0 + for layer_type, _ in self.json_data.layers.items(): + if layer_type == "segment": + current_room_id = room_id + 1 + if 1 <= current_room_id <= 15: + room_element = 
getattr( + DrawableElement, f"ROOM_{current_room_id}", None + ) + if room_element and not self.drawing_config.is_enabled( + room_element + ): + disabled_rooms.add(room_id) + room_id = (room_id + 1) % 16 + return disabled_rooms + + async def _draw_layer_if_enabled( + self, + img_np_array, + layer_type, + compressed_pixels_list, + colors, + pixel_size, + disabled_rooms, + room_id, + ): + """Draw a layer if it's enabled in the drawing configuration.""" + is_room_layer = layer_type == "segment" + + if is_room_layer: + current_room_id = room_id + 1 + if 1 <= current_room_id <= 15: + room_element = getattr(DrawableElement, f"ROOM_{current_room_id}", None) + if not self.drawing_config.is_enabled(room_element): + return room_id + 1, img_np_array # Skip this room + + is_wall_layer = layer_type == "wall" + if is_wall_layer and not self.drawing_config.is_enabled(DrawableElement.WALL): + return room_id, img_np_array # Skip walls + + # Draw the layer + room_id, img_np_array = await self.imd.async_draw_base_layer( + img_np_array, + compressed_pixels_list, + layer_type, + colors["wall"], + colors["zone_clean"], + pixel_size, + disabled_rooms if layer_type == "wall" else None, + ) + return room_id, img_np_array + + async def _draw_base_layers(self, img_np_array, colors, pixel_size): + """Draw all base layers (rooms, walls, floors).""" + disabled_rooms = self._identify_disabled_rooms() + room_id = 0 + + for layer_type, compressed_pixels_list in self.json_data.layers.items(): + room_id, img_np_array = await self._draw_layer_if_enabled( + img_np_array, + layer_type, + compressed_pixels_list, + colors, + pixel_size, + disabled_rooms, + room_id, + ) + + return img_np_array, room_id + + async def _draw_additional_elements( + self, img_np_array, m_json, entity_dict, colors + ): + """Draw additional elements like walls, charger, and obstacles.""" + if self.drawing_config.is_enabled(DrawableElement.VIRTUAL_WALL): + img_np_array = await self.imd.async_draw_virtual_walls( + m_json, 
img_np_array, colors["no_go"] + ) + + if self.drawing_config.is_enabled(DrawableElement.CHARGER): + img_np_array = await self.imd.async_draw_charger( + img_np_array, entity_dict, colors["charger"] + ) + + if self.drawing_config.is_enabled(DrawableElement.OBSTACLE): + self.shared.obstacles_pos = self.data.get_obstacles(entity_dict) + if self.shared.obstacles_pos: + img_np_array = await self.imd.async_draw_obstacle( + img_np_array, self.shared.obstacles_pos, colors["no_go"] + ) + + return img_np_array + + async def _setup_room_and_robot_data( + self, room_id, robot_position, robot_position_angle + ): + """Setup room properties and robot position data.""" + if (room_id > 0) and not self.room_propriety: + self.room_propriety = await self.async_extract_room_properties( + self.json_data.json_data + ) + + if not self.rooms_pos and not self.room_propriety: + self.room_propriety = await self.async_extract_room_properties( + self.json_data.json_data + ) + + if self.rooms_pos and robot_position and robot_position_angle: + self.robot_pos = await self.imd.async_get_robot_in_room( + robot_x=(robot_position[0]), + robot_y=(robot_position[1]), + angle=robot_position_angle, + ) + + async def _prepare_data_tasks(self, m_json, entity_dict): + """Prepare and execute data extraction tasks in parallel.""" + data_tasks = [] + + if self.drawing_config.is_enabled(DrawableElement.RESTRICTED_AREA): + data_tasks.append(self._prepare_zone_data(m_json)) + + if self.drawing_config.is_enabled(DrawableElement.GO_TO_TARGET): + data_tasks.append(self._prepare_goto_data(entity_dict)) + + path_enabled = self.drawing_config.is_enabled(DrawableElement.PATH) + LOGGER.info("%s: PATH element enabled: %s", self.file_name, path_enabled) + if path_enabled: + LOGGER.info("%s: Drawing path", self.file_name) + data_tasks.append(self._prepare_path_data(m_json)) + + if data_tasks: + await asyncio.gather(*data_tasks) + + return path_enabled + + async def _draw_dynamic_elements( + self, img_np_array, m_json, 
entity_dict, colors, path_enabled + ): + """Draw dynamic elements like zones, paths, and go-to targets.""" + if self.drawing_config.is_enabled(DrawableElement.RESTRICTED_AREA): + img_np_array = await self.imd.async_draw_zones( + m_json, img_np_array, colors["zone_clean"], colors["no_go"] + ) + + if self.drawing_config.is_enabled(DrawableElement.GO_TO_TARGET): + img_np_array = await self.imd.draw_go_to_flag( + img_np_array, entity_dict, colors["go_to"] + ) + + if path_enabled: + img_np_array = await self.imd.async_draw_paths( + img_np_array, m_json, colors["move"], self.color_grey + ) + else: + LOGGER.info("%s: Skipping path drawing", self.file_name) + + return img_np_array + + async def _draw_robot_if_enabled( + self, img_np_array, robot_pos, robot_position, robot_position_angle, colors + ): + """Draw the robot on the map if enabled.""" + if self.shared.vacuum_state == "docked": + robot_position_angle -= 180 + + if robot_pos and self.drawing_config.is_enabled(DrawableElement.ROBOT): + robot_color = self.drawing_config.get_property( + DrawableElement.ROBOT, "color", colors["robot"] + ) + img_np_array = await self.draw.robot( + layers=img_np_array, + x=robot_position[0], + y=robot_position[1], + angle=robot_position_angle, + fill=robot_color, + radius=self.shared.robot_size, + robot_state=self.shared.vacuum_state, + ) + + return img_np_array + # noinspection PyUnresolvedReferences,PyUnboundLocalVariable async def async_get_image_from_json( self, @@ -132,126 +315,21 @@ async def async_get_image_from_json( img_np_array = await self.draw.create_empty_image( self.img_size["x"], self.img_size["y"], colors["background"] ) - # Draw layers and segments if enabled room_id = 0 - # Keep track of disabled rooms to skip their walls later - disabled_rooms = set() if self.drawing_config.is_enabled(DrawableElement.FLOOR): - # First pass: identify disabled rooms - for ( - layer_type, - compressed_pixels_list, - ) in self.json_data.layers.items(): - # Check if this is a room layer - 
if layer_type == "segment": - # The room_id is the current room being processed (0-based index) - # We need to check if ROOM_{room_id+1} is enabled (1-based in DrawableElement) - current_room_id = room_id + 1 - if 1 <= current_room_id <= 15: - room_element = getattr( - DrawableElement, f"ROOM_{current_room_id}", None - ) - if ( - room_element - and not self.drawing_config.is_enabled( - room_element - ) - ): - # Add this room to the disabled rooms set - disabled_rooms.add(room_id) - room_id = ( - room_id + 1 - ) % 16 # Cycle room_id back to 0 after 15 - - # Reset room_id for the actual drawing pass - room_id = 0 - - # Second pass: draw enabled rooms and walls - for ( - layer_type, - compressed_pixels_list, - ) in self.json_data.layers.items(): - # Check if this is a room layer - is_room_layer = layer_type == "segment" - - # If it's a room layer, check if the specific room is enabled - if is_room_layer: - # The room_id is the current room being processed (0-based index) - # We need to check if ROOM_{room_id+1} is enabled (1-based in DrawableElement) - current_room_id = room_id + 1 - if 1 <= current_room_id <= 15: - room_element = getattr( - DrawableElement, f"ROOM_{current_room_id}", None - ) - - # Skip this room if it's disabled - if not self.drawing_config.is_enabled(room_element): - room_id = ( - room_id + 1 - ) % 16 # Increment room_id even if we skip - continue - - # Draw the layer ONLY if enabled - is_wall_layer = layer_type == "wall" - if is_wall_layer: - # Skip walls entirely if disabled - if not self.drawing_config.is_enabled( - DrawableElement.WALL - ): - continue - # Draw the layer - ( - room_id, - img_np_array, - ) = await self.imd.async_draw_base_layer( - img_np_array, - compressed_pixels_list, - layer_type, - colors["wall"], - colors["zone_clean"], - pixel_size, - disabled_rooms if layer_type == "wall" else None, - ) - - # Draw the virtual walls if enabled - if self.drawing_config.is_enabled(DrawableElement.VIRTUAL_WALL): - img_np_array = await 
self.imd.async_draw_virtual_walls( - m_json, img_np_array, colors["no_go"] + img_np_array, room_id = await self._draw_base_layers( + img_np_array, colors, pixel_size ) - # Draw charger if enabled - if self.drawing_config.is_enabled(DrawableElement.CHARGER): - img_np_array = await self.imd.async_draw_charger( - img_np_array, entity_dict, colors["charger"] - ) - - # Draw obstacles if enabled - if self.drawing_config.is_enabled(DrawableElement.OBSTACLE): - self.shared.obstacles_pos = self.data.get_obstacles(entity_dict) - if self.shared.obstacles_pos: - img_np_array = await self.imd.async_draw_obstacle( - img_np_array, self.shared.obstacles_pos, colors["no_go"] - ) - # Robot and rooms position - if (room_id > 0) and not self.room_propriety: - self.room_propriety = await self.async_extract_room_properties( - self.json_data.json_data - ) + img_np_array = await self._draw_additional_elements( + img_np_array, m_json, entity_dict, colors + ) - # Ensure room data is available for robot room detection (even if not extracted above) - if not self.rooms_pos and not self.room_propriety: - self.room_propriety = await self.async_extract_room_properties( - self.json_data.json_data - ) + await self._setup_room_and_robot_data( + room_id, robot_position, robot_position_angle + ) - # Always check robot position for zooming (moved outside the condition) - if self.rooms_pos and robot_position and robot_position_angle: - self.robot_pos = await self.imd.async_get_robot_in_room( - robot_x=(robot_position[0]), - robot_y=(robot_position[1]), - angle=robot_position_angle, - ) LOGGER.info("%s: Completed base Layers", self.file_name) # Copy the new array in base layer. 
# Delete old base layer before creating new one to free memory @@ -282,73 +360,22 @@ async def async_get_image_from_json( np.copyto(self.img_work_layer, self.img_base_layer) img_np_array = self.img_work_layer - # Prepare parallel data extraction tasks - data_tasks = [] - - # Prepare zone data extraction - if self.drawing_config.is_enabled(DrawableElement.RESTRICTED_AREA): - data_tasks.append(self._prepare_zone_data(m_json)) + # Prepare and execute data extraction tasks + path_enabled = await self._prepare_data_tasks(m_json, entity_dict) - # Prepare go_to flag data extraction - if self.drawing_config.is_enabled(DrawableElement.GO_TO_TARGET): - data_tasks.append(self._prepare_goto_data(entity_dict)) - - # Prepare path data extraction - path_enabled = self.drawing_config.is_enabled(DrawableElement.PATH) - LOGGER.info( - "%s: PATH element enabled: %s", self.file_name, path_enabled + # Draw dynamic elements + img_np_array = await self._draw_dynamic_elements( + img_np_array, m_json, entity_dict, colors, path_enabled ) - if path_enabled: - LOGGER.info("%s: Drawing path", self.file_name) - data_tasks.append(self._prepare_path_data(m_json)) - - # Await all data preparation tasks if any were created - if data_tasks: - await asyncio.gather(*data_tasks) - - # Process drawing operations sequentially (since they modify the same array) - # Draw zones if enabled - if self.drawing_config.is_enabled(DrawableElement.RESTRICTED_AREA): - img_np_array = await self.imd.async_draw_zones( - m_json, img_np_array, colors["zone_clean"], colors["no_go"] - ) - - # Draw the go_to target flag if enabled - if self.drawing_config.is_enabled(DrawableElement.GO_TO_TARGET): - img_np_array = await self.imd.draw_go_to_flag( - img_np_array, entity_dict, colors["go_to"] - ) - # Draw paths if enabled - if path_enabled: - img_np_array = await self.imd.async_draw_paths( - img_np_array, m_json, colors["move"], self.color_grey - ) - else: - LOGGER.info("%s: Skipping path drawing", self.file_name) - - # Check 
if the robot is docked. - if self.shared.vacuum_state == "docked": - # Adjust the robot angle. - robot_position_angle -= 180 - - # Draw the robot if enabled - if robot_pos and self.drawing_config.is_enabled(DrawableElement.ROBOT): - # Get robot color (allows for customization) - robot_color = self.drawing_config.get_property( - DrawableElement.ROBOT, "color", colors["robot"] - ) - - # Draw the robot - img_np_array = await self.draw.robot( - layers=img_np_array, - x=robot_position[0], - y=robot_position[1], - angle=robot_position_angle, - fill=robot_color, - radius=self.shared.robot_size, - robot_state=self.shared.vacuum_state, - ) + # Draw robot + img_np_array = await self._draw_robot_if_enabled( + img_np_array, + robot_pos, + robot_position, + robot_position_angle, + colors, + ) # Synchronize zooming state from ImageDraw to handler before auto-crop self.zooming = self.imd.img_h.zooming @@ -376,11 +403,11 @@ async def async_get_image_from_json( # Return PIL Image return resized_image - else: - # Return PIL Image (convert from NumPy) - pil_img = await AsyncPIL.async_fromarray(img_np_array, mode="RGBA") - del img_np_array - return pil_img + + # Return PIL Image (convert from NumPy) + pil_img = await AsyncPIL.async_fromarray(img_np_array, mode="RGBA") + del img_np_array + return pil_img except (RuntimeError, RuntimeWarning) as e: LOGGER.warning( "%s: Error %s during image creation.", diff --git a/SCR/valetudo_map_parser/map_data.py b/SCR/valetudo_map_parser/map_data.py index 2be9a9e..3c74d58 100755 --- a/SCR/valetudo_map_parser/map_data.py +++ b/SCR/valetudo_map_parser/map_data.py @@ -30,6 +30,8 @@ class RangeStats(TypedDict): + """Statistics for a range of values (min, max, mid, avg).""" + min: int max: int mid: int @@ -37,6 +39,8 @@ class RangeStats(TypedDict): class Dimensions(TypedDict): + """Dimensions with x/y range statistics and pixel count.""" + x: RangeStats y: RangeStats pixelCount: int @@ -46,10 +50,14 @@ class Dimensions(TypedDict): class 
FloorWallMeta(TypedDict, total=False): + """Metadata for floor and wall layers.""" + area: int class SegmentMeta(TypedDict, total=False): + """Metadata for segment layers including segment ID and active state.""" + segmentId: str active: bool source: str @@ -57,6 +65,8 @@ class SegmentMeta(TypedDict, total=False): class MapLayerBase(TypedDict): + """Base structure for map layers with pixels and dimensions.""" + __class__: Literal["MapLayer"] type: str pixels: list[int] @@ -65,11 +75,15 @@ class MapLayerBase(TypedDict): class FloorWallLayer(MapLayerBase): + """Map layer representing floor or wall areas.""" + metaData: FloorWallMeta type: Literal["floor", "wall"] class SegmentLayer(MapLayerBase): + """Map layer representing a room segment.""" + metaData: SegmentMeta type: Literal["segment"] @@ -78,12 +92,16 @@ class SegmentLayer(MapLayerBase): class PointMeta(TypedDict, total=False): + """Metadata for point entities including angle, label, and ID.""" + angle: float label: str id: str class PointMapEntity(TypedDict): + """Point-based map entity (robot, charger, obstacle, etc.).""" + __class__: Literal["PointMapEntity"] type: str points: list[int] @@ -91,6 +109,8 @@ class PointMapEntity(TypedDict): class PathMapEntity(TypedDict): + """Path-based map entity representing robot movement paths.""" + __class__: Literal["PathMapEntity"] type: str points: list[int] @@ -103,16 +123,22 @@ class PathMapEntity(TypedDict): class MapMeta(TypedDict, total=False): + """Metadata for the Valetudo map including version and total area.""" + version: int totalLayerArea: int class Size(TypedDict): + """Map size with x and y dimensions.""" + x: int y: int class ValetudoMap(TypedDict): + """Complete Valetudo map structure with layers and entities.""" + __class__: Literal["ValetudoMap"] metaData: MapMeta size: Size diff --git a/SCR/valetudo_map_parser/rand256_handler.py b/SCR/valetudo_map_parser/rand256_handler.py index 75a83d9..4888500 100644 --- a/SCR/valetudo_map_parser/rand256_handler.py 
+++ b/SCR/valetudo_map_parser/rand256_handler.py @@ -48,6 +48,8 @@ def __init__(self, shared_data): AutoCrop.__init__(self, self) self.auto_crop = None # Auto crop flag self.segment_data = None # Segment data + self.element_map = None # Element map for tracking drawable elements + self.robot_position = None # Robot position for zoom functionality self.outlines = None # Outlines data self.calibration_data = None # Calibration data self.data = RandImageData # Image Data @@ -84,7 +86,6 @@ async def extract_room_properties( json_data, size_x, size_y, top, left, True ) - dest_json = destinations if destinations else {} zones_data = dest_json.get("zones", []) points_data = dest_json.get("spots", []) @@ -96,7 +97,7 @@ async def extract_room_properties( # Update self.rooms_pos from room_properties for compatibility with other methods self.rooms_pos = [] - for room_id, room_data in room_properties.items(): + for _, room_data in room_properties.items(): self.rooms_pos.append( {"name": room_data["name"], "outline": room_data["outline"]} ) @@ -195,6 +196,94 @@ async def get_image_from_rrm( # If we reach here without returning, return None return None + async def _initialize_base_layer( + self, + m_json, + size_x, + size_y, + colors, + destinations, + robot_position, + robot_position_angle, + ): + """Initialize the base layer on first frame.""" + self.element_map = np.zeros((size_y, size_x), dtype=np.int32) + self.element_map[:] = DrawableElement.FLOOR + + if self.drawing_config.is_enabled(DrawableElement.FLOOR): + room_id, img_np_array = await self.imd.async_draw_base_layer( + m_json, + size_x, + size_y, + colors["wall"], + colors["zone_clean"], + colors["background"], + DEFAULT_PIXEL_SIZE, + ) + LOGGER.info("%s: Completed base Layers", self.file_name) + + if room_id > 0 and not self.room_propriety: + self.room_propriety = await self.get_rooms_attributes(destinations) + + if not self.rooms_pos and not self.room_propriety: + self.room_propriety = await 
self.get_rooms_attributes(destinations) + + if ( + self.rooms_pos + and robot_position + and (self.robot_pos is None or "in_room" not in self.robot_pos) + ): + self.robot_pos = await self.async_get_robot_in_room( + (robot_position[0] * 10), + (robot_position[1] * 10), + robot_position_angle, + ) + else: + background_color = self.drawing_config.get_property( + DrawableElement.FLOOR, "color", colors["background"] + ) + img_np_array = await self.draw.create_empty_image( + size_x, size_y, background_color + ) + + if self.img_base_layer is not None: + del self.img_base_layer + self.img_base_layer = await self.async_copy_array(img_np_array) + del img_np_array + + async def _check_zoom_conditions(self, m_json, robot_position, destinations): + """Check and set zoom conditions based on active zones.""" + if not ( + self.shared.image_auto_zoom + and self.shared.vacuum_state == "cleaning" + and robot_position + and destinations + ): + return + + try: + temp_room_properties = ( + await self.rooms_handler.async_extract_room_properties( + m_json, destinations + ) + ) + if temp_room_properties: + temp_rooms_pos = [] + for _, room_data in temp_room_properties.items(): + temp_rooms_pos.append( + {"name": room_data["name"], "outline": room_data["outline"]} + ) + original_rooms_pos = self.rooms_pos + self.rooms_pos = temp_rooms_pos + self.rooms_pos = original_rooms_pos + except (ValueError, KeyError, TypeError): + if ( + self.shared.image_auto_zoom + and self.shared.vacuum_state == "cleaning" + and robot_position + ): + self.zooming = True + async def _setup_robot_and_image( self, m_json, size_x, size_y, colors, destinations ): @@ -206,100 +295,17 @@ async def _setup_robot_and_image( ) = await self.imd.async_get_robot_position(m_json) if self.frame_number == 0: - # Create element map for tracking what's drawn where - self.element_map = np.zeros((size_y, size_x), dtype=np.int32) - self.element_map[:] = DrawableElement.FLOOR - - # Draw base layer if floor is enabled - if 
self.drawing_config.is_enabled(DrawableElement.FLOOR): - room_id, img_np_array = await self.imd.async_draw_base_layer( - m_json, - size_x, - size_y, - colors["wall"], - colors["zone_clean"], - colors["background"], - DEFAULT_PIXEL_SIZE, - ) - LOGGER.info("%s: Completed base Layers", self.file_name) - - if room_id > 0 and not self.room_propriety: - self.room_propriety = await self.get_rooms_attributes(destinations) + await self._initialize_base_layer( + m_json, + size_x, + size_y, + colors, + destinations, + robot_position, + robot_position_angle, + ) - # Ensure room data is available for robot room detection (even if not extracted above) - if not self.rooms_pos and not self.room_propriety: - self.room_propriety = await self.get_rooms_attributes(destinations) - - # Always check robot position for zooming (update if room info is missing) - if ( - self.rooms_pos - and robot_position - and (self.robot_pos is None or "in_room" not in self.robot_pos) - ): - self.robot_pos = await self.async_get_robot_in_room( - (robot_position[0] * 10), - (robot_position[1] * 10), - robot_position_angle, - ) - # Delete old base layer before creating new one to free memory - if self.img_base_layer is not None: - del self.img_base_layer - self.img_base_layer = await self.async_copy_array(img_np_array) - # Delete source array after copying to free memory - del img_np_array - else: - # If floor is disabled, create an empty image - background_color = self.drawing_config.get_property( - DrawableElement.FLOOR, "color", colors["background"] - ) - img_np_array = await self.draw.create_empty_image( - size_x, size_y, background_color - ) - # Delete old base layer before creating new one to free memory - if self.img_base_layer is not None: - del self.img_base_layer - self.img_base_layer = await self.async_copy_array(img_np_array) - # Delete source array after copying to free memory - del img_np_array - - # Check active zones BEFORE auto-crop to enable proper zoom functionality - # This needs to run 
on every frame, not just frame 0 - if ( - self.shared.image_auto_zoom - and self.shared.vacuum_state == "cleaning" - and robot_position - and destinations # Check if we have destinations data for room extraction - ): - # Extract room data early if we have destinations - try: - temp_room_properties = ( - await self.rooms_handler.async_extract_room_properties( - m_json, destinations - ) - ) - if temp_room_properties: - # Create temporary rooms_pos for robot room detection - temp_rooms_pos = [] - for room_id, room_data in temp_room_properties.items(): - temp_rooms_pos.append( - {"name": room_data["name"], "outline": room_data["outline"]} - ) - - # Store original rooms_pos and temporarily use the new one - original_rooms_pos = self.rooms_pos - self.rooms_pos = temp_rooms_pos - - # Restore original rooms_pos - self.rooms_pos = original_rooms_pos - - except (ValueError, KeyError, TypeError): - # Fallback to robot-position-based zoom if room extraction fails - if ( - self.shared.image_auto_zoom - and self.shared.vacuum_state == "cleaning" - and robot_position - ): - self.zooming = True + await self._check_zoom_conditions(m_json, robot_position, destinations) return self.img_base_layer, robot_position, robot_position_angle @@ -401,139 +407,124 @@ async def get_rooms_attributes( ) return self.room_propriety + def _create_robot_position_dict( + self, robot_x: int, robot_y: int, angle: float, room_name: str + ) -> RobotPosition: + """Create a robot position dictionary.""" + return { + "x": robot_x, + "y": robot_y, + "angle": angle, + "in_room": room_name, + } + + def _set_zooming_from_active_zones(self) -> None: + """Set zooming based on active zones.""" + self.active_zones = self.shared.rand256_active_zone + self.zooming = False + if self.active_zones and ( + self.robot_in_room["id"] in range(len(self.active_zones)) + ): + self.zooming = bool(self.active_zones[self.robot_in_room["id"]]) + + def _check_cached_room_outline_rand( + self, robot_x: int, robot_y: int, angle: float 
+ ) -> RobotPosition | None: + """Check if robot is still in cached room using outline.""" + if "outline" in self.robot_in_room: + outline = self.robot_in_room["outline"] + if point_in_polygon(int(robot_x), int(robot_y), outline): + self._set_zooming_from_active_zones() + LOGGER.debug( + "%s: Robot is in %s room (polygon detection). %s", + self.file_name, + self.robot_in_room["room"], + self.active_zones, + ) + return self._create_robot_position_dict( + robot_x, robot_y, angle, self.robot_in_room["room"] + ) + return None + + def _check_cached_room_bbox_rand( + self, robot_x: int, robot_y: int, angle: float + ) -> RobotPosition | None: + """Check if robot is still in cached room using bounding box.""" + if all(k in self.robot_in_room for k in ["left", "right", "up", "down"]): + if ( + self.robot_in_room["right"] + <= int(robot_x) + <= self.robot_in_room["left"] + ) and ( + self.robot_in_room["up"] <= int(robot_y) <= self.robot_in_room["down"] + ): + self._set_zooming_from_active_zones() + return self._create_robot_position_dict( + robot_x, robot_y, angle, self.robot_in_room["room"] + ) + return None + + def _check_room_with_outline_rand( + self, room: dict, room_count: int, robot_x: int, robot_y: int, angle: float + ) -> RobotPosition | None: + """Check if robot is in room using outline polygon.""" + outline = room["outline"] + if point_in_polygon(int(robot_x), int(robot_y), outline): + self.robot_in_room = { + "id": room_count, + "room": str(room["name"]), + "outline": outline, + } + self._set_zooming_from_active_zones() + return self._create_robot_position_dict( + robot_x, robot_y, angle, self.robot_in_room["room"] + ) + return None + async def async_get_robot_in_room( self, robot_x: int, robot_y: int, angle: float ) -> RobotPosition: """Get the robot position and return in what room is.""" - # First check if we already have a cached room and if the robot is still in it - if self.robot_in_room: - # If we have outline data, use point_in_polygon for accurate 
detection - if "outline" in self.robot_in_room: - outline = self.robot_in_room["outline"] - if point_in_polygon(int(robot_x), int(robot_y), outline): - temp = { - "x": robot_x, - "y": robot_y, - "angle": angle, - "in_room": self.robot_in_room["room"], - } - # Handle active zones - self.active_zones = self.shared.rand256_active_zone - LOGGER.debug( - "%s: Robot is in %s room (polygon detection). %s", - self.file_name, - self.robot_in_room["room"], - self.active_zones, - ) - self.zooming = False - if self.active_zones and ( - self.robot_in_room["id"] in range(len(self.active_zones)) - ): - self.zooming = bool(self.active_zones[self.robot_in_room["id"]]) - else: - self.zooming = False - return temp - # Fallback to bounding box check if no outline data - elif all(k in self.robot_in_room for k in ["left", "right", "up", "down"]): - if ( - self.robot_in_room["right"] - <= int(robot_x) - <= self.robot_in_room["left"] - ) and ( - self.robot_in_room["up"] - <= int(robot_y) - <= self.robot_in_room["down"] - ): - temp = { - "x": robot_x, - "y": robot_y, - "angle": angle, - "in_room": self.robot_in_room["room"], - } - # Handle active zones - self.active_zones = self.shared.rand256_active_zone - self.zooming = False - if self.active_zones and ( - self.robot_in_room["id"] in range(len(self.active_zones)) - ): - self.zooming = bool(self.active_zones[self.robot_in_room["id"]]) - else: - self.zooming = False - return temp - - # If we don't have a cached room or the robot is not in it, search all rooms - last_room = None - room_count = 0 + # Check cached room first if self.robot_in_room: - last_room = self.robot_in_room - - # Check if the robot is far outside the normal map boundaries - # This helps prevent false positives for points very far from any room - map_boundary = 50000 # Typical map size is around 25000-30000 units for Rand25 - if abs(robot_x) > map_boundary or abs(robot_y) > map_boundary: - self.robot_in_room = last_room - self.zooming = False - temp = { - "x": robot_x, - 
"y": robot_y, - "angle": angle, - "in_room": last_room["room"] if last_room else "unknown", - } - return temp - - # Search through all rooms to find which one contains the robot - if not self.rooms_pos: + result = self._check_cached_room_outline_rand(robot_x, robot_y, angle) + if result: + return result + result = self._check_cached_room_bbox_rand(robot_x, robot_y, angle) + if result: + return result + + # Prepare for room search + last_room = self.robot_in_room + map_boundary = 50000 + + # Check boundary conditions or missing room data + if ( + abs(robot_x) > map_boundary + or abs(robot_y) > map_boundary + or not self.rooms_pos + ): self.robot_in_room = last_room self.zooming = False - temp = { - "x": robot_x, - "y": robot_y, - "angle": angle, - "in_room": last_room["room"] if last_room else "unknown", - } - return temp + return self._create_robot_position_dict( + robot_x, robot_y, angle, last_room["room"] if last_room else "unknown" + ) - for room in self.rooms_pos: - # Check if the room has an outline (polygon points) + # Search through all rooms + for room_count, room in enumerate(self.rooms_pos): if "outline" in room: - outline = room["outline"] - # Use point_in_polygon for accurate detection with complex shapes - if point_in_polygon(int(robot_x), int(robot_y), outline): - # Robot is in this room - self.robot_in_room = { - "id": room_count, - "room": str(room["name"]), - "outline": outline, - } - temp = { - "x": robot_x, - "y": robot_y, - "angle": angle, - "in_room": self.robot_in_room["room"], - } - - # Handle active zones - Set zooming based on active zones - self.active_zones = self.shared.rand256_active_zone - if self.active_zones and ( - self.robot_in_room["id"] in range(len(self.active_zones)) - ): - self.zooming = bool(self.active_zones[self.robot_in_room["id"]]) - else: - self.zooming = False - - return temp - room_count += 1 + result = self._check_room_with_outline_rand( + room, room_count, robot_x, robot_y, angle + ) + if result: + return result # 
Robot not found in any room self.robot_in_room = last_room self.zooming = False - temp = { - "x": robot_x, - "y": robot_y, - "angle": angle, - "in_room": last_room["room"] if last_room else "unknown", - } - return temp + return self._create_robot_position_dict( + robot_x, robot_y, angle, last_room["room"] if last_room else "unknown" + ) def get_calibration_data(self, rotation_angle: int = 0) -> Any: """Return the map calibration data.""" diff --git a/SCR/valetudo_map_parser/rooms_handler.py b/SCR/valetudo_map_parser/rooms_handler.py index 8affc6b..56a55ae 100644 --- a/SCR/valetudo_map_parser/rooms_handler.py +++ b/SCR/valetudo_map_parser/rooms_handler.py @@ -11,7 +11,7 @@ import numpy as np from scipy.ndimage import binary_dilation, binary_erosion -from scipy.spatial import ConvexHull +from scipy.spatial import ConvexHull # pylint: disable=no-name-in-module from .config.drawable_elements import DrawableElement, DrawingConfig from .config.types import LOGGER, RoomsProperties @@ -83,7 +83,7 @@ async def _process_room_layer( """ meta_data = layer.get("metaData", {}) segment_id = meta_data.get("segmentId") - name = meta_data.get("name", "Room {}".format(segment_id)) + name = meta_data.get("name", f"Room {segment_id}") compressed_pixels = layer.get("compressedPixels", []) pixels = self.sublist(compressed_pixels, 3) @@ -296,8 +296,8 @@ def convex_hull_outline(points: List[Tuple[int, int]]) -> List[Tuple[int, int]]: return hull_points - except Exception as e: - LOGGER.warning(f"Error calculating convex hull: {e}") + except (ValueError, RuntimeError) as e: + LOGGER.warning("Error calculating convex hull: %s", e) # Fallback to bounding box if convex hull fails x_min, y_min = np.min(points_array, axis=0) @@ -342,7 +342,6 @@ async def _process_segment_data( except (ValueError, TypeError): # If segment_id is not a valid integer, we can't map it to a room element # In this case, we'll include the room (fail open) - pass LOGGER.debug( "Could not convert segment_id %s to room 
element", segment_id ) diff --git a/pyproject.toml b/pyproject.toml index 5487ce7..fb0a65c 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,6 +1,6 @@ [tool.poetry] name = "valetudo-map-parser" -version = "0.1.11" +version = "0.1.12b0" description = "A Python library to parse Valetudo map data returning a PIL Image object." authors = ["Sandro Cantarella "] license = "Apache-2.0" From d186556569976d173bdb4e2390c4b66c491445b9 Mon Sep 17 00:00:00 2001 From: SCA075 <82227818+sca075@users.noreply.github.com> Date: Tue, 4 Nov 2025 18:06:54 +0100 Subject: [PATCH 25/25] last files for 12 isort / ruff and lint Signed-off-by: Sandro Cantarella --- SCR/valetudo_map_parser/__init__.py | 8 ++--- SCR/valetudo_map_parser/config/colors.py | 1 + SCR/valetudo_map_parser/config/shared.py | 7 ++-- .../config/status_text/__init__.py | 1 + .../config/status_text/status_text.py | 4 ++- SCR/valetudo_map_parser/config/types.py | 33 +++++++++++++++++++ SCR/valetudo_map_parser/config/utils.py | 5 +++ SCR/valetudo_map_parser/hypfer_handler.py | 2 +- SCR/valetudo_map_parser/rand256_handler.py | 7 +--- pyproject.toml | 2 +- 10 files changed, 55 insertions(+), 15 deletions(-) diff --git a/SCR/valetudo_map_parser/__init__.py b/SCR/valetudo_map_parser/__init__.py index 3b672a2..b3b0bfd 100644 --- a/SCR/valetudo_map_parser/__init__.py +++ b/SCR/valetudo_map_parser/__init__.py @@ -54,14 +54,14 @@ CONF_OFFSET_TOP, CONF_SNAPSHOTS_ENABLE, CONF_TRIMS_SAVE, - CONF_VACUUM_CONFIG_ENTRY_ID, - CONF_VACUUM_CONNECTION_STRING, - CONF_VACUUM_ENTITY_ID, - CONF_VACUUM_IDENTIFIERS, CONF_VAC_STAT, CONF_VAC_STAT_FONT, CONF_VAC_STAT_POS, CONF_VAC_STAT_SIZE, + CONF_VACUUM_CONFIG_ENTRY_ID, + CONF_VACUUM_CONNECTION_STRING, + CONF_VACUUM_ENTITY_ID, + CONF_VACUUM_IDENTIFIERS, CONF_ZOOM_LOCK_RATIO, DECODED_TOPICS, DEFAULT_IMAGE_SIZE, diff --git a/SCR/valetudo_map_parser/config/colors.py b/SCR/valetudo_map_parser/config/colors.py index 3f9b0d3..6640336 100644 --- a/SCR/valetudo_map_parser/config/colors.py +++ 
b/SCR/valetudo_map_parser/config/colors.py @@ -61,6 +61,7 @@ ) from .types import LOGGER, Color + color_transparent = (0, 0, 0, 0) color_charger = (0, 128, 0, 255) color_move = (238, 247, 255, 255) diff --git a/SCR/valetudo_map_parser/config/shared.py b/SCR/valetudo_map_parser/config/shared.py index 377dcfd..b811c64 100755 --- a/SCR/valetudo_map_parser/config/shared.py +++ b/SCR/valetudo_map_parser/config/shared.py @@ -10,7 +10,6 @@ from PIL import Image -from .utils import pil_size_rotation from ..const import ( ATTR_CALIBRATION_POINTS, ATTR_CAMERA_MODE, @@ -39,15 +38,17 @@ CONF_VAC_STAT_POS, CONF_VAC_STAT_SIZE, CONF_ZOOM_LOCK_RATIO, - NOT_STREAMING_STATES, DEFAULT_VALUES, + NOT_STREAMING_STATES, ) from .types import ( CameraModes, Colors, + FloorData, PilPNG, TrimsData, ) +from .utils import pil_size_rotation _LOGGER = logging.getLogger(__name__) @@ -120,6 +121,8 @@ def __init__(self, file_name): self.user_language = None self.trim_crop_data = None self.trims = TrimsData.from_dict(DEFAULT_VALUES["trims_data"]) + self.floors_trims: FloorData = {} + self.current_floor: str = "floor_0" self.skip_room_ids: List[str] = [] self.device_info = None self._battery_state = None diff --git a/SCR/valetudo_map_parser/config/status_text/__init__.py b/SCR/valetudo_map_parser/config/status_text/__init__.py index d689de0..f6b85ea 100644 --- a/SCR/valetudo_map_parser/config/status_text/__init__.py +++ b/SCR/valetudo_map_parser/config/status_text/__init__.py @@ -3,4 +3,5 @@ from .status_text import StatusText from .translations import translations + __all__ = ["StatusText", "translations"] diff --git a/SCR/valetudo_map_parser/config/status_text/status_text.py b/SCR/valetudo_map_parser/config/status_text/status_text.py index 81accd6..8a48c4e 100644 --- a/SCR/valetudo_map_parser/config/status_text/status_text.py +++ b/SCR/valetudo_map_parser/config/status_text/status_text.py @@ -5,12 +5,14 @@ """ from __future__ import annotations + from typing import Callable -from ...const import 
text_size_coverage, charge_level, charging, dot +from ...const import charge_level, charging, dot, text_size_coverage from ..types import LOGGER, PilPNG from .translations import translations + LOGGER.propagate = True diff --git a/SCR/valetudo_map_parser/config/types.py b/SCR/valetudo_map_parser/config/types.py index 3f17d31..8e703ff 100644 --- a/SCR/valetudo_map_parser/config/types.py +++ b/SCR/valetudo_map_parser/config/types.py @@ -311,6 +311,19 @@ def to_dict(self) -> dict: """Convert TrimData to a dictionary.""" return asdict(self) + @classmethod + def from_list(cls, crop_area: List[int], floor: Optional[str] = None): + """ + Initialize TrimsData from a list [trim_up, trim_left, trim_down, trim_right] + """ + return cls( + trim_up=crop_area[0], + trim_left=crop_area[1], + trim_down=crop_area[2], + trim_right=crop_area[3], + floor=floor, + ) + def clear(self) -> dict: """Clear all the trims.""" self.floor = "" @@ -321,6 +334,26 @@ def clear(self) -> dict: return asdict(self) +@dataclass +class FloorData: + """Dataclass to store floor configuration.""" + + trims: TrimsData + map_name: str = "" + + @classmethod + def from_dict(cls, data: dict): + """Initialize FloorData from a dictionary.""" + return cls( + trims=TrimsData.from_dict(data.get("trims", {})), + map_name=data.get("map_name", ""), + ) + + def to_dict(self) -> dict: + """Convert FloorData to a dictionary.""" + return {"trims": self.trims.to_dict(), "map_name": self.map_name} + + Color = Union[Tuple[int, int, int], Tuple[int, int, int, int]] Colors = Dict[str, Color] CalibrationPoints = list[dict[str, Any]] diff --git a/SCR/valetudo_map_parser/config/utils.py b/SCR/valetudo_map_parser/config/utils.py index 4a2780f..c03facd 100644 --- a/SCR/valetudo_map_parser/config/utils.py +++ b/SCR/valetudo_map_parser/config/utils.py @@ -24,6 +24,7 @@ PilPNG, RobotPosition, Size, + TrimsData, ) @@ -272,6 +273,10 @@ def prepare_resize_params( is_rand=rand, ) + def update_trims(self) -> None: + """Update the trims.""" 
+ self.shared.trims = TrimsData.from_list(self.crop_area) + def get_charger_position(self) -> ChargerPosition | None: """Return the charger position.""" return self.charger_pos diff --git a/SCR/valetudo_map_parser/hypfer_handler.py b/SCR/valetudo_map_parser/hypfer_handler.py index 7073ee4..03935d5 100644 --- a/SCR/valetudo_map_parser/hypfer_handler.py +++ b/SCR/valetudo_map_parser/hypfer_handler.py @@ -16,7 +16,6 @@ from .config.async_utils import AsyncPIL from .config.drawable_elements import DrawableElement from .config.shared import CameraShared -from .const import COLORS from .config.types import ( LOGGER, CalibrationPoints, @@ -29,6 +28,7 @@ BaseHandler, initialize_drawing_config, ) +from .const import COLORS from .hypfer_draw import ImageDraw as ImDraw from .map_data import ImageData from .rooms_handler import RoomsHandler diff --git a/SCR/valetudo_map_parser/rand256_handler.py b/SCR/valetudo_map_parser/rand256_handler.py index 4888500..82ce597 100644 --- a/SCR/valetudo_map_parser/rand256_handler.py +++ b/SCR/valetudo_map_parser/rand256_handler.py @@ -15,7 +15,6 @@ from .config.async_utils import AsyncPIL from .config.drawable_elements import DrawableElement -from .const import COLORS, DEFAULT_IMAGE_SIZE, DEFAULT_PIXEL_SIZE from .config.types import ( LOGGER, Colors, @@ -31,6 +30,7 @@ initialize_drawing_config, point_in_polygon, ) +from .const import COLORS, DEFAULT_IMAGE_SIZE, DEFAULT_PIXEL_SIZE from .map_data import RandImageData from .reimg_draw import ImageDraw from .rooms_handler import RandRoomsHandler @@ -390,11 +390,6 @@ async def _finalize_image(self, pil_img): if self.check_zoom_and_aspect_ratio(): resize_params = self.prepare_resize_params(pil_img, True) pil_img = await self.async_resize_images(resize_params) - else: - LOGGER.warning( - "%s: Invalid image dimensions. 
Returning original image.", - self.file_name, - ) return pil_img async def get_rooms_attributes( diff --git a/pyproject.toml b/pyproject.toml index fb0a65c..5c93bb1 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,6 +1,6 @@ [tool.poetry] name = "valetudo-map-parser" -version = "0.1.12b0" +version = "0.1.12" description = "A Python library to parse Valetudo map data returning a PIL Image object." authors = ["Sandro Cantarella "] license = "Apache-2.0"