diff --git a/drivers/SmartThings/matter-switch/fingerprints.yml b/drivers/SmartThings/matter-switch/fingerprints.yml index fa7c02b79f..c67eec7bf2 100644 --- a/drivers/SmartThings/matter-switch/fingerprints.yml +++ b/drivers/SmartThings/matter-switch/fingerprints.yml @@ -3068,6 +3068,11 @@ matterGeneric: - id: 0x0101 # Dimmable Light - id: 0x0107 # Occupancy Sensor deviceProfileName: light-level-motion + - id: "matter/camera" + deviceLabel: Matter Camera + deviceTypes: + - id: 0x0142 # Camera + deviceProfileName: camera matterThing: - id: SmartThings/MatterThing diff --git a/drivers/SmartThings/matter-switch/profiles/camera.yml b/drivers/SmartThings/matter-switch/profiles/camera.yml new file mode 100644 index 0000000000..2835810ae3 --- /dev/null +++ b/drivers/SmartThings/matter-switch/profiles/camera.yml @@ -0,0 +1,128 @@ +name: camera +components: + - id: main + capabilities: + - id: webrtc + version: 1 + optional: true + - id: videoCapture2 + version: 1 + optional: true + - id: videoStreamSettings + version: 1 + optional: true + - id: imageCapture + version: 1 + optional: true + - id: mechanicalPanTiltZoom + version: 1 + optional: true + - id: hdr + version: 1 + optional: true + - id: nightVision + version: 1 + optional: true + - id: imageControl + version: 1 + optional: true + - id: audioRecording + version: 1 + optional: true + - id: sounds + version: 1 + optional: true + - id: cameraPrivacyMode + version: 1 + optional: true + - id: zoneManagement + version: 1 + optional: true + - id: localMediaStorage + version: 1 + optional: true + - id: cameraViewportSettings + version: 1 + optional: true + - id: motionSensor + version: 1 + optional: true + - id: firmwareUpdate + version: 1 + - id: refresh + version: 1 + categories: + - name: Camera + - id: statusLed + optional: true + capabilities: + - id: switch + version: 1 + optional: true + - id: mode + version: 1 + optional: true + - id: speaker + optional: true + capabilities: + - id: audioMute + version: 1 + optional: true + - id: audioVolume + version: 1 + optional: true + - id: microphone + optional: true + capabilities: + - id: audioMute + version: 1 + optional: true + - id: audioVolume + version: 1 + optional: true + - id: doorbell + optional: true + capabilities: + - id: button + version: 1 + optional: true +deviceConfig: + dashboard: + states: + - component: main + capability: imageCapture + version: 1 + values: + - label: "{{___PO_CODE_SAMSUNGELECTRONICS.IM_DEFAULT_IMAGE_CAPTURE}}" + visibleCondition: + component: main + capability: imageCapture + version: 1 + value: captureTime.value + valueType: string + operator: CONTAINS + operand: T + isOffline: false + basicPlus: + - displayType: camera + camera: + image: + component: main + capability: imageCapture + version: 1 + value: image.value + detailView: + - component: main + capability: webrtc + version: 1 + - component: main + capability: mechanicalPanTiltZoom + version: 1 + dpInfo: + - os: ios + dpUri: "storyboard://HMVSController/HMVSViewController" + - os: android + dpUri: "plugin://com.samsung.android.plugin.camera" +metadata: + mnmn: SmartThingsEdge + vid: matter-camera diff --git a/drivers/SmartThings/matter-switch/src/init.lua b/drivers/SmartThings/matter-switch/src/init.lua index 2302ffc4dc..403a14a991 100644 --- a/drivers/SmartThings/matter-switch/src/init.lua +++ b/drivers/SmartThings/matter-switch/src/init.lua @@ -296,6 +296,7 @@ local matter_driver_template = { supported_capabilities = fields.supported_capabilities, sub_drivers = { require("sub_drivers.aqara_cube"), + 
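-- Camera support is assumed to be loaded lazily here (via switch_utils.lazy_load_if_possible), so the camera modules are only required when this sub_driver is actually needed +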
switch_utils.lazy_load_if_possible("sub_drivers.camera"), require("sub_drivers.eve_energy"), require("sub_drivers.third_reality_mk1") } diff --git a/drivers/SmartThings/matter-switch/src/sub_drivers/camera/camera_handlers/attribute_handlers.lua b/drivers/SmartThings/matter-switch/src/sub_drivers/camera/camera_handlers/attribute_handlers.lua new file mode 100644 index 0000000000..0e3fe5f843 --- /dev/null +++ b/drivers/SmartThings/matter-switch/src/sub_drivers/camera/camera_handlers/attribute_handlers.lua @@ -0,0 +1,422 @@ +-- Copyright © 2025 SmartThings, Inc. +-- Licensed under the Apache License, Version 2.0 + +local camera_fields = require "sub_drivers.camera.camera_utils.fields" +local camera_utils = require "sub_drivers.camera.camera_utils.utils" +local capabilities = require "st.capabilities" +local clusters = require "st.matter.clusters" +local camera_cfg = require "sub_drivers.camera.camera_utils.device_configuration" +local fields = require "switch_utils.fields" +local utils = require "st.utils" + +local CameraAttributeHandlers = {} + +CameraAttributeHandlers.enabled_state_factory = function(attribute) + return function(driver, device, ib, response) + device:emit_event_for_endpoint(ib, attribute(ib.data.value and "enabled" or "disabled")) + if attribute == capabilities.imageControl.imageFlipHorizontal then + camera_utils.update_supported_attributes(device, ib, capabilities.imageControl, "imageFlipHorizontal") + elseif attribute == capabilities.imageControl.imageFlipVertical then + camera_utils.update_supported_attributes(device, ib, capabilities.imageControl, "imageFlipVertical") + elseif attribute == capabilities.cameraPrivacyMode.hardPrivacyMode then + camera_utils.update_supported_attributes(device, ib, capabilities.cameraPrivacyMode, "hardPrivacyMode") + end + end +end + +CameraAttributeHandlers.night_vision_factory = function(attribute) + return function(driver, device, ib, response) + if camera_fields.tri_state_map[ib.data.value] then + device:emit_event_for_endpoint(ib, attribute(camera_fields.tri_state_map[ib.data.value])) + if attribute == capabilities.nightVision.illumination then + local _ = device:get_latest_state(camera_fields.profile_components.main, capabilities.nightVision.ID, capabilities.nightVision.supportedAttributes.NAME) or + device:emit_event_for_endpoint(ib, capabilities.nightVision.supportedAttributes({"illumination"})) + end + end + end +end + +function CameraAttributeHandlers.image_rotation_handler(driver, device, ib, response) + local degrees = utils.clamp_value(ib.data.value, 0, 359) + device:emit_event_for_endpoint(ib, capabilities.imageControl.imageRotation(degrees)) + camera_utils.update_supported_attributes(device, ib, capabilities.imageControl, "imageRotation") +end + +function CameraAttributeHandlers.two_way_talk_support_handler(driver, device, ib, response) + local two_way_talk_supported = ib.data.value == clusters.CameraAvStreamManagement.types.TwoWayTalkSupportTypeEnum.HALF_DUPLEX or + ib.data.value == clusters.CameraAvStreamManagement.types.TwoWayTalkSupportTypeEnum.FULL_DUPLEX + device:emit_event_for_endpoint(ib, capabilities.webrtc.talkback(two_way_talk_supported)) + if two_way_talk_supported then + device:emit_event_for_endpoint(ib, capabilities.webrtc.talkbackDuplex( + ib.data.value == clusters.CameraAvStreamManagement.types.TwoWayTalkSupportTypeEnum.HALF_DUPLEX and "halfDuplex" or "fullDuplex" + )) + end +end + +function CameraAttributeHandlers.muted_handler(driver, device, ib, response) + device:emit_event_for_endpoint(ib, 
capabilities.audioMute.mute(ib.data.value and "muted" or "unmuted")) +end + +function CameraAttributeHandlers.volume_level_handler(driver, device, ib, response) + local component = device:endpoint_to_component(ib) + local max_volume = device:get_field(camera_fields.MAX_VOLUME_LEVEL .. "_" .. component) or camera_fields.ABS_VOL_MAX + local min_volume = device:get_field(camera_fields.MIN_VOLUME_LEVEL .. "_" .. component) or camera_fields.ABS_VOL_MIN + -- Convert from [min_volume, max_volume] to [0, 100] before emitting capability + local limited_range = max_volume - min_volume + local normalized_volume = utils.round((ib.data.value - min_volume) * 100.0 / limited_range) + device:emit_event_for_endpoint(ib, capabilities.audioVolume.volume(normalized_volume)) +end + +function CameraAttributeHandlers.max_volume_level_handler(driver, device, ib, response) + local component = device:endpoint_to_component(ib) + local max_volume = ib.data.value + local min_volume = device:get_field(camera_fields.MIN_VOLUME_LEVEL .. "_" .. component) + if max_volume > camera_fields.ABS_VOL_MAX or (min_volume and max_volume <= min_volume) then + device.log.warn(string.format("Device reported invalid maximum (%d) %s volume level range value", ib.data.value, component)) + max_volume = camera_fields.ABS_VOL_MAX + end + device:set_field(camera_fields.MAX_VOLUME_LEVEL .. "_" .. component, max_volume) +end + +function CameraAttributeHandlers.min_volume_level_handler(driver, device, ib, response) + local component = device:endpoint_to_component(ib) + local min_volume = ib.data.value + local max_volume = device:get_field(camera_fields.MAX_VOLUME_LEVEL .. "_" .. component) + if min_volume < camera_fields.ABS_VOL_MIN or (max_volume and min_volume >= max_volume) then + device.log.warn(string.format("Device reported invalid minimum (%d) %s volume level range value", ib.data.value, component)) + min_volume = camera_fields.ABS_VOL_MIN + end + device:set_field(camera_fields.MIN_VOLUME_LEVEL .. "_" .. 
component, min_volume) +end + +function CameraAttributeHandlers.status_light_enabled_handler(driver, device, ib, response) + device:emit_event_for_endpoint(ib, ib.data.value and capabilities.switch.switch.on() or capabilities.switch.switch.off()) +end + +function CameraAttributeHandlers.status_light_brightness_handler(driver, device, ib, response) + local component = device:endpoint_to_component(ib) + local _ = device:get_latest_state(component, capabilities.mode.ID, capabilities.mode.supportedModes.NAME) or + device:emit_event_for_endpoint(ib, capabilities.mode.supportedModes({"low", "medium", "high", "auto"}, {visibility = {displayed = false}})) + local _ = device:get_latest_state(component, capabilities.mode.ID, capabilities.mode.supportedArguments.NAME) or + device:emit_event_for_endpoint(ib, capabilities.mode.supportedArguments({"low", "medium", "high", "auto"}, {visibility = {displayed = false}})) + local mode = "auto" + if ib.data.value == clusters.Global.types.ThreeLevelAutoEnum.LOW then + mode = "low" + elseif ib.data.value == clusters.Global.types.ThreeLevelAutoEnum.MEDIUM then + mode = "medium" + elseif ib.data.value == clusters.Global.types.ThreeLevelAutoEnum.HIGH then + mode = "high" + end + device:emit_event_for_endpoint(ib, capabilities.mode.mode(mode)) +end + +function CameraAttributeHandlers.rate_distortion_trade_off_points_handler(driver, device, ib, response) + if not ib.data.elements then return end + local resolutions = {} + local max_encoded_pixel_rate = device:get_field(camera_fields.MAX_ENCODED_PIXEL_RATE) + local max_fps = device:get_field(camera_fields.MAX_FRAMES_PER_SECOND) + local emit_capability = max_encoded_pixel_rate ~= nil and max_fps ~= nil + for _, v in ipairs(ib.data.elements) do + local rate_distortion_trade_off_points = v.elements + local width = rate_distortion_trade_off_points.resolution.elements.width.value + local height = rate_distortion_trade_off_points.resolution.elements.height.value + table.insert(resolutions, { + width = width, + height = height + }) + if emit_capability then + local fps = camera_utils.compute_fps(max_encoded_pixel_rate, width, height, max_fps) + if fps > 0 then + resolutions[#resolutions].fps = fps + end + end + end + if emit_capability then + device:emit_event_for_endpoint(ib, capabilities.videoStreamSettings.supportedResolutions(resolutions)) + end + device:set_field(camera_fields.SUPPORTED_RESOLUTIONS, resolutions) +end + +function CameraAttributeHandlers.max_encoded_pixel_rate_handler(driver, device, ib, response) + local resolutions = device:get_field(camera_fields.SUPPORTED_RESOLUTIONS) + local max_fps = device:get_field(camera_fields.MAX_FRAMES_PER_SECOND) + local emit_capability = resolutions ~= nil and max_fps ~= nil + if emit_capability then + for _, v in pairs(resolutions or {}) do + local fps = camera_utils.compute_fps(ib.data.value, v.width, v.height, max_fps) + if fps > 0 then + v.fps = fps + end + end + device:emit_event_for_endpoint(ib, capabilities.videoStreamSettings.supportedResolutions(resolutions)) + end + device:set_field(camera_fields.MAX_ENCODED_PIXEL_RATE, ib.data.value) +end + +function CameraAttributeHandlers.video_sensor_parameters_handler(driver, device, ib, response) + if not ib.data.elements then return end + local resolutions = device:get_field(camera_fields.SUPPORTED_RESOLUTIONS) + local max_encoded_pixel_rate = device:get_field(camera_fields.MAX_ENCODED_PIXEL_RATE) + local emit_capability = resolutions ~= nil and max_encoded_pixel_rate ~= nil + local sensor_width, sensor_height, max_fps + 
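-- Field IDs are assumed to follow the VideoSensorParamsStruct ordering: 0 = SensorWidth, 1 = SensorHeight, 2 = MaxFPS +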
for _, v in pairs(ib.data.elements) do + if v.field_id == 0 then + sensor_width = v.value + elseif v.field_id == 1 then + sensor_height = v.value + elseif v.field_id == 2 then + max_fps = v.value + end + end + + if max_fps then + if sensor_width and sensor_height then + device:emit_event_for_endpoint(ib, capabilities.cameraViewportSettings.videoSensorParameters({ + width = sensor_width, + height = sensor_height, + maxFPS = max_fps + })) + end + if emit_capability then + for _, v in pairs(resolutions or {}) do + local fps = camera_utils.compute_fps(max_encoded_pixel_rate, v.width, v.height, max_fps) + if fps > 0 then + v.fps = fps + end + end + device:emit_event_for_endpoint(ib, capabilities.videoStreamSettings.supportedResolutions(resolutions)) + end + device:set_field(camera_fields.MAX_FRAMES_PER_SECOND, max_fps) + end +end + +function CameraAttributeHandlers.min_viewport_handler(driver, device, ib, response) + device:emit_event_for_endpoint(ib, capabilities.cameraViewportSettings.minViewportResolution({ + width = ib.data.elements.width.value, + height = ib.data.elements.height.value + })) +end + +function CameraAttributeHandlers.allocated_video_streams_handler(driver, device, ib, response) + if not ib.data.elements then return end + local streams = {} + for i, v in ipairs(ib.data.elements) do + local stream = v.elements + local video_stream = { + streamId = stream.video_stream_id.value, + data = { + label = "Stream " .. i, + type = stream.stream_usage.value == clusters.Global.types.StreamUsageEnum.LIVE_VIEW and "liveStream" or "clipRecording", + resolution = { + width = stream.min_resolution.elements.width.value, + height = stream.min_resolution.elements.height.value, + fps = stream.min_frame_rate.value + } + } + } + local viewport = device:get_field(camera_fields.VIEWPORT) + if viewport then + video_stream.data.viewport = viewport + end + if camera_utils.feature_supported(device, clusters.CameraAvStreamManagement.ID, clusters.CameraAvStreamManagement.types.Feature.WATERMARK) then + video_stream.data.watermark = stream.watermark_enabled.value and "enabled" or "disabled" + end + if camera_utils.feature_supported(device, clusters.CameraAvStreamManagement.ID, clusters.CameraAvStreamManagement.types.Feature.ON_SCREEN_DISPLAY) then + video_stream.data.onScreenDisplay = stream.osd_enabled.value and "enabled" or "disabled" + end + table.insert(streams, video_stream) + end + if #streams > 0 then + device:emit_event_for_endpoint(ib, capabilities.videoStreamSettings.videoStreams(streams)) + end +end + +function CameraAttributeHandlers.viewport_handler(driver, device, ib, response) + device:emit_event_for_endpoint(ib, capabilities.cameraViewportSettings.defaultViewport({ + upperLeftVertex = { x = ib.data.elements.x1.value, y = ib.data.elements.y1.value }, + lowerRightVertex = { x = ib.data.elements.x2.value, y = ib.data.elements.y2.value }, + })) +end + +function CameraAttributeHandlers.ptz_position_handler(driver, device, ib, response) + local ptz_map = camera_utils.get_ptz_map(device) + local emit_event = function(idx, value) + if value ~= ptz_map[idx].current then + device:emit_event_for_endpoint(ib, ptz_map[idx].attribute( + utils.clamp_value(value, ptz_map[idx].range.minimum, ptz_map[idx].range.maximum) + )) + end + end + if camera_utils.feature_supported(device, clusters.CameraAvSettingsUserLevelManagement.ID, clusters.CameraAvSettingsUserLevelManagement.types.Feature.MPAN) then + emit_event(camera_fields.PAN_IDX, ib.data.elements.pan.value) + end + if camera_utils.feature_supported(device, 
clusters.CameraAvSettingsUserLevelManagement.ID, clusters.CameraAvSettingsUserLevelManagement.types.Feature.MTILT) then + emit_event(camera_fields.TILT_IDX, ib.data.elements.tilt.value) + end + if camera_utils.feature_supported(device, clusters.CameraAvSettingsUserLevelManagement.ID, clusters.CameraAvSettingsUserLevelManagement.types.Feature.MZOOM) then + emit_event(camera_fields.ZOOM_IDX, ib.data.elements.zoom.value) + end +end + +function CameraAttributeHandlers.ptz_presets_handler(driver, device, ib, response) + if not ib.data.elements then return end + local presets = {} + for _, v in ipairs(ib.data.elements) do + local preset = v.elements + local pan, tilt, zoom = 0, 0, 1 + if camera_utils.feature_supported(device, clusters.CameraAvSettingsUserLevelManagement.ID, clusters.CameraAvSettingsUserLevelManagement.types.Feature.MPAN) then + pan = preset.settings.elements.pan.value + end + if camera_utils.feature_supported(device, clusters.CameraAvSettingsUserLevelManagement.ID, clusters.CameraAvSettingsUserLevelManagement.types.Feature.MTILT) then + tilt = preset.settings.elements.tilt.value + end + if camera_utils.feature_supported(device, clusters.CameraAvSettingsUserLevelManagement.ID, clusters.CameraAvSettingsUserLevelManagement.types.Feature.MZOOM) then + zoom = preset.settings.elements.zoom.value + end + table.insert(presets, { id = preset.preset_id.value, label = preset.name.value, pan = pan, tilt = tilt, zoom = zoom }) + end + device:emit_event_for_endpoint(ib, capabilities.mechanicalPanTiltZoom.presets(presets)) +end + +function CameraAttributeHandlers.max_presets_handler(driver, device, ib, response) + device:emit_event_for_endpoint(ib, capabilities.mechanicalPanTiltZoom.maxPresets(ib.data.value)) +end + +function CameraAttributeHandlers.zoom_max_handler(driver, device, ib, response) + if ib.data.value <= camera_fields.ABS_ZOOM_MAX then + device:emit_event_for_endpoint(ib, capabilities.mechanicalPanTiltZoom.zoomRange({ value = { minimum = 1, maximum = ib.data.value } })) + else + device.log.warn(string.format("Device reported invalid maximum zoom (%d)", ib.data.value)) + end +end + +CameraAttributeHandlers.pt_range_handler_factory = function(attribute, limit_field) + return function(driver, device, ib, response) + device:set_field(limit_field, ib.data.value) + local field = string.find(limit_field, "PAN") and "PAN" or "TILT" + local min = device:get_field(camera_fields.pt_range_fields[field].min) + local max = device:get_field(camera_fields.pt_range_fields[field].max) + if min ~= nil and max ~= nil then + local abs_min = field == "PAN" and camera_fields.ABS_PAN_MIN or camera_fields.ABS_TILT_MIN + local abs_max = field == "PAN" and camera_fields.ABS_PAN_MAX or camera_fields.ABS_TILT_MAX + if min < max and min >= abs_min and max <= abs_max then + device:emit_event_for_endpoint(ib, attribute({ value = { minimum = min, maximum = max } })) + device:set_field(camera_fields.pt_range_fields[field].min, nil) + device:set_field(camera_fields.pt_range_fields[field].max, nil) + else + device.log.warn(string.format("Device reported invalid minimum (%d) and maximum (%d) %s " .. 
+ "range values (should be between %d and %d)", min, max, string.lower(field), abs_min, abs_max)) + end + end + end +end + +function CameraAttributeHandlers.max_zones_handler(driver, device, ib, response) + device:emit_event_for_endpoint(ib, capabilities.zoneManagement.maxZones(ib.data.value)) +end + +function CameraAttributeHandlers.zones_handler(driver, device, ib, response) + if not ib.data.elements then return end + local zones = {} + for _, v in ipairs(ib.data.elements) do + local zone = v.elements + local zone_id = zone.zone_id.value + local zone_type = zone.zone_type.value + local zone_source = zone.zone_source.value + local zone_vertices = {} + if camera_utils.feature_supported(device, clusters.ZoneManagement.ID, clusters.ZoneManagement.types.Feature.TWO_DIMENSIONAL_CARTESIAN_ZONE) and + zone_type == clusters.ZoneManagement.types.ZoneTypeEnum.TWODCART_ZONE then + local zone_name = zone.two_d_cartesian_zone.elements.name.value + local zone_use = zone.two_d_cartesian_zone.elements.use.value + for _, vertex in pairs(zone.two_d_cartesian_zone.elements.vertices.elements or {}) do + table.insert(zone_vertices, {vertex = {x = vertex.elements.x.value, y = vertex.elements.y.value}}) + end + local zone_uses = { + [clusters.ZoneManagement.types.ZoneUseEnum.MOTION] = "motion", + [clusters.ZoneManagement.types.ZoneUseEnum.FOCUS] = "focus", + [clusters.ZoneManagement.types.ZoneUseEnum.PRIVACY] = "privacy" + } + local zone_color = zone.two_d_cartesian_zone.elements.color and zone.two_d_cartesian_zone.elements.color.value or nil + table.insert(zones, { + id = zone_id, + name = zone_name, + type = "2DCartesian", + polygonVertices = zone_vertices, + source = zone_source == clusters.ZoneManagement.types.ZoneSourceEnum.MFG and "manufacturer" or "user", + use = zone_uses[zone_use], + color = zone_color + }) + else + device.log.warn(string.format("Zone type not currently supported: (%s)", zone_type)) + end + end + device:emit_event_for_endpoint(ib, capabilities.zoneManagement.zones({value = zones})) +end + +function CameraAttributeHandlers.triggers_handler(driver, device, ib, response) + if not ib.data.elements then return end + local triggers = {} + for _, v in ipairs(ib.data.elements) do + local trigger = v.elements + table.insert(triggers, { + zoneId = trigger.zone_id.value, + initialDuration = trigger.initial_duration.value, + augmentationDuration = trigger.augmentation_duration.value, + maxDuration = trigger.max_duration.value, + blindDuration = trigger.blind_duration.value, + sensitivity = camera_utils.feature_supported(device, clusters.ZoneManagement.ID, clusters.ZoneManagement.types.Feature.PER_ZONE_SENSITIVITY) and trigger.sensitivity.value + }) + end + device:emit_event_for_endpoint(ib, capabilities.zoneManagement.triggers(triggers)) +end + +function CameraAttributeHandlers.sensitivity_max_handler(driver, device, ib, response) + device:emit_event_for_endpoint(ib, capabilities.zoneManagement.sensitivityRange({minimum = 1, maximum = ib.data.value}, + {visibility = {displayed = false}})) +end + +function CameraAttributeHandlers.sensitivity_handler(driver, device, ib, response) + device:emit_event_for_endpoint(ib, capabilities.zoneManagement.sensitivity(ib.data.value, {visibility = {displayed = false}})) +end + +function CameraAttributeHandlers.installed_chime_sounds_handler(driver, device, ib, response) + if not ib.data.elements then return end + local installed_chimes = {} + for _, v in ipairs(ib.data.elements) do + local chime = v.elements + table.insert(installed_chimes, {id = 
chime.chime_id.value, label = chime.name.value}) + end + device:emit_event_for_endpoint(ib, capabilities.sounds.supportedSounds(installed_chimes, {visibility = {displayed = false}})) +end + +function CameraAttributeHandlers.selected_chime_handler(driver, device, ib, response) + device:emit_event_for_endpoint(ib, capabilities.sounds.selectedSound(ib.data.value)) +end + +function CameraAttributeHandlers.camera_av_stream_management_attribute_list_handler(driver, device, ib, response) + if not ib.data.elements then return end + local status_light_enabled_present, status_light_brightness_present = false, false + local attribute_ids, capability_ids = {}, {} + for _, attr in ipairs(ib.data.elements) do + if attr.value == clusters.CameraAvStreamManagement.attributes.StatusLightEnabled.ID then + status_light_enabled_present = true + table.insert(capability_ids, capabilities.switch.ID) + table.insert(attribute_ids, clusters.CameraAvStreamManagement.attributes.StatusLightEnabled.ID) + elseif attr.value == clusters.CameraAvStreamManagement.attributes.StatusLightBrightness.ID then + status_light_brightness_present = true + table.insert(capability_ids, capabilities.mode.ID) + table.insert(attribute_ids, clusters.CameraAvStreamManagement.attributes.StatusLightBrightness.ID) + end + end + local component_map = device:get_field(fields.COMPONENT_TO_ENDPOINT_MAP) or {} + component_map.statusLed = { + endpoint_id = ib.endpoint_id, + cluster_id = ib.cluster_id, + attribute_ids = attribute_ids, + capability_ids = capability_ids + } + device:set_field(fields.COMPONENT_TO_ENDPOINT_MAP, component_map, {persist=true}) + camera_cfg.match_profile(device, status_light_enabled_present, status_light_brightness_present) +end + +return CameraAttributeHandlers \ No newline at end of file diff --git a/drivers/SmartThings/matter-switch/src/sub_drivers/camera/camera_handlers/capability_handlers.lua b/drivers/SmartThings/matter-switch/src/sub_drivers/camera/camera_handlers/capability_handlers.lua new file mode 100644 index 0000000000..fb85eb863f --- /dev/null +++ b/drivers/SmartThings/matter-switch/src/sub_drivers/camera/camera_handlers/capability_handlers.lua @@ -0,0 +1,356 @@ +-- Copyright © 2025 SmartThings, Inc. 
+-- Licensed under the Apache License, Version 2.0 + +local camera_fields = require "sub_drivers.camera.camera_utils.fields" +local camera_utils = require "sub_drivers.camera.camera_utils.utils" +local capabilities = require "st.capabilities" +local clusters = require "st.matter.clusters" +local utils = require "st.utils" + +local CameraCapabilityHandlers = {} + +CameraCapabilityHandlers.set_enabled_factory = function(attribute) + return function(driver, device, cmd) + local endpoint_id = device:component_to_endpoint(cmd.component) + device:send(attribute:write(device, endpoint_id, cmd.args.state == "enabled")) + end +end + +CameraCapabilityHandlers.set_night_vision_factory = function(attribute) + return function(driver, device, cmd) + local endpoint_id = device:component_to_endpoint(cmd.component) + for i, v in pairs(camera_fields.tri_state_map) do + if v == cmd.args.mode then + device:send(attribute:write(device, endpoint_id, i)) + return + end + end + device.log.warn(string.format("Capability command sent with unknown value: (%s)", cmd.args.mode)) + end +end + +function CameraCapabilityHandlers.handle_set_image_rotation(driver, device, cmd) + local endpoint_id = device:component_to_endpoint(cmd.component) + local degrees = utils.clamp_value(cmd.args.rotation, 0, 359) + device:send(clusters.CameraAvStreamManagement.attributes.ImageRotation:write(device, endpoint_id, degrees)) +end + +CameraCapabilityHandlers.handle_mute_commands_factory = function(command) + return function(driver, device, cmd) + local attr + if cmd.component == camera_fields.profile_components.speaker then + attr = clusters.CameraAvStreamManagement.attributes.SpeakerMuted + elseif cmd.component == camera_fields.profile_components.microphone then + attr = clusters.CameraAvStreamManagement.attributes.MicrophoneMuted + else + device.log.warn(string.format("Capability command sent from unknown component: (%s)", cmd.component)) + return + end + local endpoint_id = device:component_to_endpoint(cmd.component) + local mute_state = false + if command == capabilities.audioMute.commands.setMute.NAME then + mute_state = cmd.args.state == "muted" + elseif command == capabilities.audioMute.commands.mute.NAME then + mute_state = true + end + device:send(attr:write(device, endpoint_id, mute_state)) + end +end + +function CameraCapabilityHandlers.handle_set_volume(driver, device, cmd) + local endpoint_id = device:component_to_endpoint(cmd.component) + local max_volume = device:get_field(camera_fields.MAX_VOLUME_LEVEL .. "_" .. cmd.component) or camera_fields.ABS_VOL_MAX + local min_volume = device:get_field(camera_fields.MIN_VOLUME_LEVEL .. "_" .. 
cmd.component) or camera_fields.ABS_VOL_MIN + -- Convert from [0, 100] to [min_volume, max_volume] before writing attribute + local volume_range = max_volume - min_volume + local volume = utils.round(cmd.args.volume * volume_range / 100.0 + min_volume) + if cmd.component == camera_fields.profile_components.speaker then + device:send(clusters.CameraAvStreamManagement.attributes.SpeakerVolumeLevel:write(device, endpoint_id, volume)) + elseif cmd.component == camera_fields.profile_components.microphone then + device:send(clusters.CameraAvStreamManagement.attributes.MicrophoneVolumeLevel:write(device, endpoint_id, volume)) + else + device.log.warn(string.format("Capability command sent from unknown component: (%s)", cmd.component)) + end +end + +function CameraCapabilityHandlers.handle_volume_up(driver, device, cmd) + local endpoint_id = device:component_to_endpoint(cmd.component) + local max_volume = device:get_field(camera_fields.MAX_VOLUME_LEVEL .. "_" .. cmd.component) or camera_fields.ABS_VOL_MAX + local min_volume = device:get_field(camera_fields.MIN_VOLUME_LEVEL .. "_" .. cmd.component) or camera_fields.ABS_VOL_MIN + local volume = device:get_latest_state(cmd.component, capabilities.audioVolume.ID, capabilities.audioVolume.volume.NAME) + if not volume or volume >= max_volume then return end + -- Convert from [0, 100] to [min_volume, max_volume] before writing attribute + local volume_range = max_volume - min_volume + local converted_volume = utils.round((volume + 1) * volume_range / 100.0 + min_volume) + if cmd.component == camera_fields.profile_components.speaker then + device:send(clusters.CameraAvStreamManagement.attributes.SpeakerVolumeLevel:write(device, endpoint_id, converted_volume)) + elseif cmd.component == camera_fields.profile_components.microphone then + device:send(clusters.CameraAvStreamManagement.attributes.MicrophoneVolumeLevel:write(device, endpoint_id, converted_volume)) + end +end + +function CameraCapabilityHandlers.handle_volume_down(driver, device, cmd) + local endpoint_id = device:component_to_endpoint(cmd.component) + local max_volume = device:get_field(camera_fields.MAX_VOLUME_LEVEL .. "_" .. cmd.component) or camera_fields.ABS_VOL_MAX + local min_volume = device:get_field(camera_fields.MIN_VOLUME_LEVEL .. "_" .. 
cmd.component) or camera_fields.ABS_VOL_MIN + local volume = device:get_latest_state(cmd.component, capabilities.audioVolume.ID, capabilities.audioVolume.volume.NAME) + if not volume or volume <= min_volume then return end + -- Convert from [0, 100] to [min_volume, max_volume] before writing attribute + local volume_range = max_volume - min_volume + local converted_volume = utils.round((volume - 1) * volume_range / 100.0 + min_volume) + if cmd.component == camera_fields.profile_components.speaker then + device:send(clusters.CameraAvStreamManagement.attributes.SpeakerVolumeLevel:write(device, endpoint_id, converted_volume)) + elseif cmd.component == camera_fields.profile_components.microphone then + device:send(clusters.CameraAvStreamManagement.attributes.MicrophoneVolumeLevel:write(device, endpoint_id, converted_volume)) + end +end + +function CameraCapabilityHandlers.handle_set_status_light_mode(driver, device, cmd) + local endpoint_id = device:component_to_endpoint(cmd.component) + local level_auto_value + if cmd.args.mode == "low" then level_auto_value = "LOW" + elseif cmd.args.mode == "medium" then level_auto_value = "MEDIUM" + elseif cmd.args.mode == "high" then level_auto_value = "HIGH" + elseif cmd.args.mode == "auto" then level_auto_value = "AUTO" end + if not level_auto_value then + device.log.warn(string.format("Invalid mode received from setMode command: %s", cmd.args.mode)) + return + end + device:send(clusters.CameraAvStreamManagement.attributes.StatusLightBrightness:write(device, endpoint_id, + clusters.Global.types.ThreeLevelAutoEnum[level_auto_value])) +end + +function CameraCapabilityHandlers.handle_status_led_on(driver, device, cmd) + local endpoint_id = device:component_to_endpoint(cmd.component) + device:send(clusters.CameraAvStreamManagement.attributes.StatusLightEnabled:write(device, endpoint_id, true)) +end + +function CameraCapabilityHandlers.handle_status_led_off(driver, device, cmd) + local endpoint_id = device:component_to_endpoint(cmd.component) + device:send(clusters.CameraAvStreamManagement.attributes.StatusLightEnabled:write(device, endpoint_id, false)) +end + +function CameraCapabilityHandlers.handle_audio_recording(driver, device, cmd) + -- TODO: Allocate audio stream if it doesn't exist + local component = device.profile.components[cmd.component] + device:emit_component_event(component, capabilities.audioRecording.audioRecording(cmd.args.state)) +end + +CameraCapabilityHandlers.ptz_relative_move_factory = function(index) + return function (driver, device, cmd) + local endpoint_id = device:component_to_endpoint(cmd.component) + local pan_delta = index == camera_fields.PAN_IDX and cmd.args.delta or 0 + local tilt_delta = index == camera_fields.TILT_IDX and cmd.args.delta or 0 + local zoom_delta = index == camera_fields.ZOOM_IDX and cmd.args.delta or 0 + device:send(clusters.CameraAvSettingsUserLevelManagement.server.commands.MPTZRelativeMove( + device, endpoint_id, pan_delta, tilt_delta, zoom_delta + )) + end +end + +CameraCapabilityHandlers.ptz_set_position_factory = function(command) + return function (driver, device, cmd) + local ptz_map = camera_utils.get_ptz_map(device) + if command == capabilities.mechanicalPanTiltZoom.commands.setPanTiltZoom then + ptz_map[camera_fields.PAN_IDX].current = cmd.args.pan + ptz_map[camera_fields.TILT_IDX].current = cmd.args.tilt + ptz_map[camera_fields.ZOOM_IDX].current = cmd.args.zoom + elseif command == capabilities.mechanicalPanTiltZoom.commands.setPan then + ptz_map[camera_fields.PAN_IDX].current = cmd.args.pan + 
elseif command == capabilities.mechanicalPanTiltZoom.commands.setTilt then + ptz_map[camera_fields.TILT_IDX].current = cmd.args.tilt + else + ptz_map[camera_fields.ZOOM_IDX].current = cmd.args.zoom + end + for _, v in pairs(ptz_map) do + v.current = utils.clamp_value(v.current, v.range.minimum, v.range.maximum) + end + local endpoint_id = device:component_to_endpoint(cmd.component) + device:send(clusters.CameraAvSettingsUserLevelManagement.server.commands.MPTZSetPosition(device, endpoint_id, + ptz_map[camera_fields.PAN_IDX].current, ptz_map[camera_fields.TILT_IDX].current, ptz_map[camera_fields.ZOOM_IDX].current + )) + end +end + +function CameraCapabilityHandlers.handle_save_preset(driver, device, cmd) + local endpoint_id = device:component_to_endpoint(cmd.component) + device:send(clusters.CameraAvSettingsUserLevelManagement.server.commands.MPTZSavePreset( + device, endpoint_id, cmd.args.id, cmd.args.label + )) +end + +function CameraCapabilityHandlers.handle_remove_preset(driver, device, cmd) + local endpoint_id = device:component_to_endpoint(cmd.component) + device:send(clusters.CameraAvSettingsUserLevelManagement.server.commands.MPTZRemovePreset(device, endpoint_id, cmd.args.id)) +end + +function CameraCapabilityHandlers.handle_move_to_preset(driver, device, cmd) + local endpoint_id = device:component_to_endpoint(cmd.component) + device:send(clusters.CameraAvSettingsUserLevelManagement.server.commands.MPTZMoveToPreset(device, endpoint_id, cmd.args.id)) +end + +function CameraCapabilityHandlers.handle_new_zone(driver, device, cmd) + local zone_uses = { + ["motion"] = clusters.ZoneManagement.types.ZoneUseEnum.MOTION, + ["focus"] = camera_utils.feature_supported(device, clusters.ZoneManagement.ID, clusters.ZoneManagement.types.Feature.FOCUSZONES) and + clusters.ZoneManagement.types.ZoneUseEnum.FOCUS or clusters.ZoneManagement.types.ZoneUseEnum.PRIVACY, + ["privacy"] = clusters.ZoneManagement.types.ZoneUseEnum.PRIVACY + } + local vertices = {} + for _, v in pairs(cmd.args.polygonVertices or {}) do + table.insert(vertices, clusters.ZoneManagement.types.TwoDCartesianVertexStruct({x = v.value.x, y = v.value.y})) + end + local endpoint_id = device:component_to_endpoint(cmd.component) + device:send(clusters.ZoneManagement.server.commands.CreateTwoDCartesianZone( + device, endpoint_id, clusters.ZoneManagement.types.TwoDCartesianZoneStruct( + { + name = cmd.args.name, + use = zone_uses[cmd.args.use], + vertices = vertices, + color = cmd.args.color + } + ) + )) +end + +function CameraCapabilityHandlers.handle_update_zone(driver, device, cmd) + local zone_uses = { + ["motion"] = clusters.ZoneManagement.types.ZoneUseEnum.MOTION, + ["focus"] = camera_utils.feature_supported(device, clusters.ZoneManagement.ID, clusters.ZoneManagement.types.Feature.FOCUSZONES) and + clusters.ZoneManagement.types.ZoneUseEnum.FOCUS or clusters.ZoneManagement.types.ZoneUseEnum.PRIVACY, + ["privacy"] = clusters.ZoneManagement.types.ZoneUseEnum.PRIVACY + } + if not cmd.args.name or not cmd.args.polygonVertices or not cmd.args.use or not cmd.args.color then + local zones = device:get_latest_state( + camera_fields.profile_components.main, capabilities.zoneManagement.ID, capabilities.zoneManagement.zones.NAME + ) or {} + local found_zone = false + for _, v in pairs(zones) do + if v.id == cmd.args.zoneId then + if not cmd.args.name then cmd.args.name = v.name end + if not cmd.args.polygonVertices then cmd.args.polygonVertices = v.polygonVertices end + if not cmd.args.use then cmd.args.use = v.use end + if not cmd.args.color 
then cmd.args.color = v.color end -- color may be nil, but it is optional in TwoDCartesianZoneStruct + found_zone = true + break + end + end + if not found_zone then + device.log.warn_with({hub_logs = true}, string.format("Zone does not exist, cannot update the zone.")) + return + end + end + local vertices = {} + for _, v in pairs(cmd.args.polygonVertices or {}) do + table.insert(vertices, clusters.ZoneManagement.types.TwoDCartesianVertexStruct({x = v.value.x, y = v.value.y})) + end + local endpoint_id = device:component_to_endpoint(cmd.component) + device:send(clusters.ZoneManagement.server.commands.UpdateTwoDCartesianZone( + device, endpoint_id, cmd.args.zoneId, clusters.ZoneManagement.types.TwoDCartesianZoneStruct( + { + name = cmd.args.name, + use = zone_uses[cmd.args.use], + vertices = vertices, + color = cmd.args.color + } + ) + )) +end + +function CameraCapabilityHandlers.handle_remove_zone(driver, device, cmd) + local endpoint_id = device:component_to_endpoint(cmd.component) + device:send(clusters.ZoneManagement.server.commands.RemoveZone(device, endpoint_id, cmd.args.zoneId)) +end + +function CameraCapabilityHandlers.handle_create_or_update_trigger(driver, device, cmd) + if not cmd.args.augmentationDuration or not cmd.args.maxDuration or not cmd.args.blindDuration or + (camera_utils.feature_supported(device, clusters.ZoneManagement.ID, clusters.ZoneManagement.types.Feature.PER_ZONE_SENSITIVITY) and + not cmd.args.sensitivity) then + local triggers = device:get_latest_state( + camera_fields.profile_components.main, capabilities.zoneManagement.ID, capabilities.zoneManagement.triggers.NAME + ) or {} + local found_trigger = false + for _, v in pairs(triggers) do + if v.zoneId == cmd.args.zoneId then + if not cmd.args.augmentationDuration then cmd.args.augmentationDuration = v.augmentationDuration end + if not cmd.args.maxDuration then cmd.args.maxDuration = v.maxDuration end + if not cmd.args.blindDuration then cmd.args.blindDuration = v.blindDuration end + if camera_utils.feature_supported(device, clusters.ZoneManagement.ID, clusters.ZoneManagement.types.Feature.PER_ZONE_SENSITIVITY) and + not cmd.args.sensitivity then + cmd.args.sensitivity = v.sensitivity + end + found_trigger = true + break + end + end + if not found_trigger then + device.log.warn_with({hub_logs = true}, string.format("Missing fields needed to create trigger.")) + return + end + end + local endpoint_id = device:component_to_endpoint(cmd.component) + device:send(clusters.ZoneManagement.server.commands.CreateOrUpdateTrigger( + device, endpoint_id, clusters.ZoneManagement.types.ZoneTriggerControlStruct( + { + zone_id = cmd.args.zoneId, + initial_duration = cmd.args.initialDuration, + augmentation_duration = cmd.args.augmentationDuration, + max_duration = cmd.args.maxDuration, + blind_duration = cmd.args.blindDuration, + sensitivity = cmd.args.sensitivity + } + ) + )) +end + +function CameraCapabilityHandlers.handle_remove_trigger(driver, device, cmd) + local endpoint_id = device:component_to_endpoint(cmd.component) + device:send(clusters.ZoneManagement.server.commands.RemoveTrigger(device, endpoint_id, cmd.args.zoneId)) +end + +function CameraCapabilityHandlers.handle_set_sensitivity(driver, device, cmd) + local endpoint_id = device:component_to_endpoint(cmd.component) + if not camera_utils.feature_supported(device, clusters.ZoneManagement.ID, clusters.ZoneManagement.types.Feature.PER_ZONE_SENSITIVITY) then + device:send(clusters.ZoneManagement.attributes.Sensitivity:write(device, endpoint_id, cmd.args.id)) + else + 
device.log.warn(string.format("Can't set global zone sensitivity setting, per zone sensitivity enabled.")) + end +end + +function CameraCapabilityHandlers.handle_play_sound(driver, device, cmd) + local endpoint_id = device:component_to_endpoint(cmd.component) + device:send(clusters.Chime.server.commands.PlayChimeSound(device, endpoint_id)) +end + +function CameraCapabilityHandlers.handle_set_selected_sound(driver, device, cmd) + local endpoint_id = device:component_to_endpoint(cmd.component) + device:send(clusters.Chime.attributes.SelectedChime:write(device, endpoint_id, cmd.args.id)) +end + +function CameraCapabilityHandlers.handle_set_stream(driver, device, cmd) + local endpoint_id = device:component_to_endpoint(cmd.component) + local watermark_enabled, on_screen_display_enabled + if camera_utils.feature_supported(device, clusters.CameraAvStreamManagement.ID, clusters.CameraAvStreamManagement.types.Feature.WATERMARK) then + watermark_enabled = cmd.args.watermark == "enabled" + end + if camera_utils.feature_supported(device, clusters.CameraAvStreamManagement.ID, clusters.CameraAvStreamManagement.types.Feature.ON_SCREEN_DISPLAY) then + on_screen_display_enabled = cmd.args.onScreenDisplay == "enabled" + end + device:send(clusters.CameraAvStreamManagement.server.commands.VideoStreamModify(device, endpoint_id, + cmd.args.streamId, watermark_enabled, on_screen_display_enabled + )) +end + +function CameraCapabilityHandlers.handle_set_default_viewport(driver, device, cmd) + local endpoint_id = device:component_to_endpoint(cmd.component) + device:send(clusters.CameraAvStreamManagement.attributes.Viewport:write( + device, endpoint_id, clusters.Global.types.ViewportStruct({ + x1 = cmd.args.upperLeftVertex.x, + x2 = cmd.args.lowerRightVertex.x, + y1 = cmd.args.upperLeftVertex.y, + y2 = cmd.args.lowerRightVertex.y + }) + )) +end + +return CameraCapabilityHandlers diff --git a/drivers/SmartThings/matter-switch/src/sub_drivers/camera/camera_handlers/event_handlers.lua b/drivers/SmartThings/matter-switch/src/sub_drivers/camera/camera_handlers/event_handlers.lua new file mode 100644 index 0000000000..02b63bb37f --- /dev/null +++ b/drivers/SmartThings/matter-switch/src/sub_drivers/camera/camera_handlers/event_handlers.lua @@ -0,0 +1,30 @@ +-- Copyright © 2025 SmartThings, Inc. 
+-- Licensed under the Apache License, Version 2.0 + +local camera_fields = require "sub_drivers.camera.camera_utils.fields" +local capabilities = require "st.capabilities" +local switch_utils = require "switch_utils.utils" + +local CameraEventHandlers = {} + +function CameraEventHandlers.zone_triggered_handler(driver, device, ib, response) + local triggered_zones = device:get_field(camera_fields.TRIGGERED_ZONES) or {} + if not switch_utils.tbl_contains(triggered_zones, ib.data.elements.zone.value) then + table.insert(triggered_zones, {zoneId = ib.data.elements.zone.value}) + device:set_field(camera_fields.TRIGGERED_ZONES, triggered_zones) + device:emit_event_for_endpoint(ib, capabilities.zoneManagement.triggeredZones(triggered_zones)) + end +end + +function CameraEventHandlers.zone_stopped_handler(driver, device, ib, response) + local triggered_zones = device:get_field(camera_fields.TRIGGERED_ZONES) or {} + for i, v in pairs(triggered_zones) do + if v.zoneId == ib.data.elements.zone.value then + table.remove(triggered_zones, i) + device:set_field(camera_fields.TRIGGERED_ZONES, triggered_zones) + device:emit_event_for_endpoint(ib, capabilities.zoneManagement.triggeredZones(triggered_zones)) + end + end +end + +return CameraEventHandlers diff --git a/drivers/SmartThings/matter-switch/src/sub_drivers/camera/camera_utils/device_configuration.lua b/drivers/SmartThings/matter-switch/src/sub_drivers/camera/camera_utils/device_configuration.lua new file mode 100644 index 0000000000..80ac3be711 --- /dev/null +++ b/drivers/SmartThings/matter-switch/src/sub_drivers/camera/camera_utils/device_configuration.lua @@ -0,0 +1,275 @@ +-- Copyright © 2025 SmartThings, Inc. +-- Licensed under the Apache License, Version 2.0 + +local button_cfg = require("switch_utils.device_configuration").ButtonCfg +local camera_fields = require "sub_drivers.camera.camera_utils.fields" +local camera_utils = require "sub_drivers.camera.camera_utils.utils" +local capabilities = require "st.capabilities" +local clusters = require "st.matter.clusters" +local device_cfg = require "switch_utils.device_configuration" +local fields = require "switch_utils.fields" +local switch_utils = require "switch_utils.utils" + +local CameraDeviceConfiguration = {} + +function CameraDeviceConfiguration.create_child_devices(driver, device) + local num_floodlight_eps = 0 + local parent_child_device = false + for _, ep in ipairs(device.endpoints or {}) do + if device:supports_server_cluster(clusters.OnOff.ID, ep.endpoint_id) then + local child_profile = device_cfg.SwitchCfg.assign_profile_for_onoff_ep(device, ep.endpoint_id) + if child_profile then + num_floodlight_eps = num_floodlight_eps + 1 + local name = string.format("%s %d", "Floodlight", num_floodlight_eps) + driver:try_create_device( + { + type = "EDGE_CHILD", + label = name, + profile = child_profile, + parent_device_id = device.id, + parent_assigned_child_key = string.format("%d", ep.endpoint_id), + vendor_provided_label = name + } + ) + parent_child_device = true + end + end + end + if parent_child_device then + device:set_field(fields.IS_PARENT_CHILD_DEVICE, true, {persist = true}) + device:set_find_child(switch_utils.find_child) + end +end + +function CameraDeviceConfiguration.match_profile(device, status_light_enabled_present, status_light_brightness_present) + local optional_supported_component_capabilities = {} + local main_component_capabilities = {} + local status_led_component_capabilities = {} + local speaker_component_capabilities = {} + local microphone_component_capabilities 
= {} + local doorbell_component_capabilities = {} + + local camera_endpoints = switch_utils.get_endpoints_by_device_type(device, fields.DEVICE_TYPE_ID.CAMERA) + if #camera_endpoints > 0 then + local camera_ep = switch_utils.get_endpoint_info(device, camera_endpoints[1]) + for _, ep_cluster in pairs(camera_ep.clusters or {}) do + if ep_cluster.cluster_id == clusters.CameraAvStreamManagement.ID then + local clus_has_feature = function(feature_bitmap) + return clusters.CameraAvStreamManagement.are_features_supported(feature_bitmap, ep_cluster.feature_map) + end + if clus_has_feature(clusters.CameraAvStreamManagement.types.Feature.VIDEO) then + table.insert(main_component_capabilities, capabilities.videoCapture2.ID) + table.insert(main_component_capabilities, capabilities.cameraViewportSettings.ID) + end + if clus_has_feature(clusters.CameraAvStreamManagement.types.Feature.LOCAL_STORAGE) then + table.insert(main_component_capabilities, capabilities.localMediaStorage.ID) + end + if clus_has_feature(clusters.CameraAvStreamManagement.types.Feature.AUDIO) then + table.insert(main_component_capabilities, capabilities.audioRecording.ID) + table.insert(microphone_component_capabilities, capabilities.audioMute.ID) + table.insert(microphone_component_capabilities, capabilities.audioVolume.ID) + end + if clus_has_feature(clusters.CameraAvStreamManagement.types.Feature.SNAPSHOT) then + table.insert(main_component_capabilities, capabilities.imageCapture.ID) + end + if clus_has_feature(clusters.CameraAvStreamManagement.types.Feature.PRIVACY) then + table.insert(main_component_capabilities, capabilities.cameraPrivacyMode.ID) + end + if clus_has_feature(clusters.CameraAvStreamManagement.types.Feature.SPEAKER) then + table.insert(speaker_component_capabilities, capabilities.audioMute.ID) + table.insert(speaker_component_capabilities, capabilities.audioVolume.ID) + end + if clus_has_feature(clusters.CameraAvStreamManagement.types.Feature.IMAGE_CONTROL) then + table.insert(main_component_capabilities, capabilities.imageControl.ID) + end + if clus_has_feature(clusters.CameraAvStreamManagement.types.Feature.HIGH_DYNAMIC_RANGE) then + table.insert(main_component_capabilities, capabilities.hdr.ID) + end + if clus_has_feature(clusters.CameraAvStreamManagement.types.Feature.NIGHT_VISION) then + table.insert(main_component_capabilities, capabilities.nightVision.ID) + end + elseif ep_cluster.cluster_id == clusters.CameraAvSettingsUserLevelManagement.ID then + local clus_has_feature = function(feature_bitmap) + return clusters.CameraAvSettingsUserLevelManagement.are_features_supported(feature_bitmap, ep_cluster.feature_map) + end + if clus_has_feature(clusters.CameraAvSettingsUserLevelManagement.types.Feature.MECHANICAL_PAN) or + clus_has_feature(clusters.CameraAvSettingsUserLevelManagement.types.Feature.MECHANICAL_TILT) or + clus_has_feature(clusters.CameraAvSettingsUserLevelManagement.types.Feature.MECHANICAL_ZOOM) then + table.insert(main_component_capabilities, capabilities.mechanicalPanTiltZoom.ID) + end + table.insert(main_component_capabilities, capabilities.videoStreamSettings.ID) + elseif ep_cluster.cluster_id == clusters.ZoneManagement.ID then + table.insert(main_component_capabilities, capabilities.zoneManagement.ID) + elseif ep_cluster.cluster_id == clusters.OccupancySensing.ID then + table.insert(main_component_capabilities, capabilities.motionSensor.ID) + elseif ep_cluster.cluster_id == clusters.WebRTCTransportProvider.ID and + #device:get_endpoints(clusters.WebRTCTransportRequestor.ID, {cluster_type = 
"CLIENT"}) > 0 then + table.insert(main_component_capabilities, capabilities.webrtc.ID) + end + end + end + local chime_endpoints = switch_utils.get_endpoints_by_device_type(device, fields.DEVICE_TYPE_ID.CHIME) + if #chime_endpoints > 0 then + table.insert(main_component_capabilities, capabilities.sounds.ID) + end + local doorbell_endpoints = switch_utils.get_endpoints_by_device_type(device, fields.DEVICE_TYPE_ID.DOORBELL) + if #doorbell_endpoints > 0 then + table.insert(doorbell_component_capabilities, capabilities.button.ID) + CameraDeviceConfiguration.update_doorbell_component_map(device, doorbell_endpoints[1]) + button_cfg.configure_buttons(device) + end + if status_light_enabled_present then + table.insert(status_led_component_capabilities, capabilities.switch.ID) + end + if status_light_brightness_present then + table.insert(status_led_component_capabilities, capabilities.mode.ID) + end + + table.insert(optional_supported_component_capabilities, {camera_fields.profile_components.main, main_component_capabilities}) + if #status_led_component_capabilities > 0 then + table.insert(optional_supported_component_capabilities, {camera_fields.profile_components.statusLed, status_led_component_capabilities}) + end + if #speaker_component_capabilities > 0 then + table.insert(optional_supported_component_capabilities, {camera_fields.profile_components.speaker, speaker_component_capabilities}) + end + if #microphone_component_capabilities > 0 then + table.insert(optional_supported_component_capabilities, {camera_fields.profile_components.microphone, microphone_component_capabilities}) + end + if #doorbell_component_capabilities > 0 then + table.insert(optional_supported_component_capabilities, {camera_fields.profile_components.doorbell, doorbell_component_capabilities}) + end + + if camera_utils.optional_capabilities_list_changed(optional_supported_component_capabilities, device.profile.components) then + device:try_update_metadata({profile = "camera", optional_component_capabilities = optional_supported_component_capabilities}) + end +end + +local function init_webrtc(device) + if device:supports_capability(capabilities.webrtc) then + -- TODO: Check for individual audio/video and talkback features + local transport_provider_ep_ids = device:get_endpoints(clusters.WebRTCTransportProvider.ID) + device:emit_event_for_endpoint(transport_provider_ep_ids[1], capabilities.webrtc.supportedFeatures({ + value = { + bundle = true, + order = "audio/video", + audio = "sendrecv", + video = "recvonly", + turnSource = "player", + supportTrickleICE = true + } + })) + end +end + +local function init_ptz(device) + if device:supports_capability(capabilities.mechanicalPanTiltZoom) then + local supported_attributes = {} + if camera_utils.feature_supported(device, clusters.CameraAvSettingsUserLevelManagement.ID, clusters.CameraAvSettingsUserLevelManagement.types.Feature.MPAN) then + table.insert(supported_attributes, "pan") + table.insert(supported_attributes, "panRange") + end + if camera_utils.feature_supported(device, clusters.CameraAvSettingsUserLevelManagement.ID, clusters.CameraAvSettingsUserLevelManagement.types.Feature.MTILT) then + table.insert(supported_attributes, "tilt") + table.insert(supported_attributes, "tiltRange") + end + if camera_utils.feature_supported(device, clusters.CameraAvSettingsUserLevelManagement.ID, clusters.CameraAvSettingsUserLevelManagement.types.Feature.MZOOM) then + table.insert(supported_attributes, "zoom") + table.insert(supported_attributes, "zoomRange") + end + if 
camera_utils.feature_supported(device, clusters.CameraAvSettingsUserLevelManagement.ID, clusters.CameraAvSettingsUserLevelManagement.types.Feature.MPRESETS) then + table.insert(supported_attributes, "presets") + table.insert(supported_attributes, "maxPresets") + end + local av_settings_ep_ids = device:get_endpoints(clusters.CameraAvSettingsUserLevelManagement.ID) + device:emit_event_for_endpoint(av_settings_ep_ids[1], capabilities.mechanicalPanTiltZoom.supportedAttributes(supported_attributes)) + end +end + +local function init_zone_management(device) + if device:supports_capability(capabilities.zoneManagement) then + local supported_features = {} + table.insert(supported_features, "triggerAugmentation") + if camera_utils.feature_supported(device, clusters.ZoneManagement.ID, clusters.ZoneManagement.types.Feature.PER_ZONE_SENSITIVITY) then + table.insert(supported_features, "perZoneSensitivity") + end + local zone_management_ep_ids = device:get_endpoints(clusters.ZoneManagement.ID) + device:emit_event_for_endpoint(zone_management_ep_ids[1], capabilities.zoneManagement.supportedFeatures(supported_features)) + end +end + +local function init_local_media_storage(device) + if device:supports_capability(capabilities.localMediaStorage) then + local supported_attributes = {} + if camera_utils.feature_supported(device, clusters.CameraAvStreamManagement.ID, clusters.CameraAvStreamManagement.types.Feature.VIDEO) then + table.insert(supported_attributes, "localVideoRecording") + end + if camera_utils.feature_supported(device, clusters.CameraAvStreamManagement.ID, clusters.CameraAvStreamManagement.types.Feature.SNAPSHOT) then + table.insert(supported_attributes, "localSnapshotRecording") + end + local av_stream_management_ep_ids = device:get_endpoints(clusters.CameraAvStreamManagement.ID) + device:emit_event_for_endpoint(av_stream_management_ep_ids[1], capabilities.localMediaStorage.supportedAttributes(supported_attributes)) + end +end + +local function init_audio_recording(device) + if device:supports_capability(capabilities.audioRecording) then + local audio_enabled_state = device:get_latest_state( + camera_fields.profile_components.main, capabilities.audioRecording.ID, capabilities.audioRecording.audioRecording.NAME + ) + if audio_enabled_state == nil then + -- Initialize with enabled default if state is unset + local av_stream_management_ep_ids = device:get_endpoints(clusters.CameraAvStreamManagement.ID) + device:emit_event_for_endpoint(av_stream_management_ep_ids[1], capabilities.audioRecording.audioRecording("enabled")) + end + end +end + +local function init_video_stream_settings(device) + if device:supports_capability(capabilities.videoStreamSettings) then + local supported_features = {} + if camera_utils.feature_supported(device, clusters.CameraAvStreamManagement.ID, clusters.CameraAvStreamManagement.types.Feature.VIDEO) then + table.insert(supported_features, "liveStreaming") + table.insert(supported_features, "clipRecording") + table.insert(supported_features, "perStreamViewports") + end + if camera_utils.feature_supported(device, clusters.CameraAvStreamManagement.ID, clusters.CameraAvStreamManagement.types.Feature.WATERMARK) then + table.insert(supported_features, "watermark") + end + if camera_utils.feature_supported(device, clusters.CameraAvStreamManagement.ID, clusters.CameraAvStreamManagement.types.Feature.ON_SCREEN_DISPLAY) then + table.insert(supported_features, "onScreenDisplay") + end + local av_stream_management_ep_ids = device:get_endpoints(clusters.CameraAvStreamManagement.ID) + 
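-- Note: only the first CameraAvStreamManagement endpoint is used; a single camera AV endpoint per device is assumed +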
device:emit_event_for_endpoint(av_stream_management_ep_ids[1], capabilities.videoStreamSettings.supportedFeatures(supported_features)) + end +end + +local function init_camera_privacy_mode(device) + if device:supports_capability(capabilities.cameraPrivacyMode) then + local supported_attributes, supported_commands = {}, {} + table.insert(supported_attributes, "softRecordingPrivacyMode") + table.insert(supported_attributes, "softLivestreamPrivacyMode") + table.insert(supported_commands, "setSoftRecordingPrivacyMode") + table.insert(supported_commands, "setSoftLivestreamPrivacyMode") + local av_stream_management_ep_ids = device:get_endpoints(clusters.CameraAvStreamManagement.ID) + device:emit_event_for_endpoint(av_stream_management_ep_ids[1], capabilities.cameraPrivacyMode.supportedAttributes(supported_attributes)) + device:emit_event_for_endpoint(av_stream_management_ep_ids[1], capabilities.cameraPrivacyMode.supportedCommands(supported_commands)) + end +end + +function CameraDeviceConfiguration.initialize_camera_capabilities(device) + init_webrtc(device) + init_ptz(device) + init_zone_management(device) + init_local_media_storage(device) + init_audio_recording(device) + init_video_stream_settings(device) + init_camera_privacy_mode(device) +end + +function CameraDeviceConfiguration.update_doorbell_component_map(device, ep) + local component_map = device:get_field(fields.COMPONENT_TO_ENDPOINT_MAP) or {} + component_map.doorbell = ep + device:set_field(fields.COMPONENT_TO_ENDPOINT_MAP, component_map, {persist = true}) +end + +return CameraDeviceConfiguration diff --git a/drivers/SmartThings/matter-switch/src/sub_drivers/camera/camera_utils/fields.lua b/drivers/SmartThings/matter-switch/src/sub_drivers/camera/camera_utils/fields.lua new file mode 100644 index 0000000000..677e2d5dd6 --- /dev/null +++ b/drivers/SmartThings/matter-switch/src/sub_drivers/camera/camera_utils/fields.lua @@ -0,0 +1,48 @@ +-- Copyright © 2025 SmartThings, Inc. 
+-- Licensed under the Apache License, Version 2.0 + +local clusters = require "st.matter.clusters" + +local CameraFields = {} + +CameraFields.MAX_ENCODED_PIXEL_RATE = "__max_encoded_pixel_rate" +CameraFields.MAX_FRAMES_PER_SECOND = "__max_frames_per_second" +CameraFields.MAX_VOLUME_LEVEL = "__max_volume_level" +CameraFields.MIN_VOLUME_LEVEL = "__min_volume_level" +CameraFields.SUPPORTED_RESOLUTIONS = "__supported_resolutions" +CameraFields.TRIGGERED_ZONES = "__triggered_zones" +CameraFields.VIEWPORT = "__viewport" + +CameraFields.PAN_IDX = "PAN" +CameraFields.TILT_IDX = "TILT" +CameraFields.ZOOM_IDX = "ZOOM" + +CameraFields.pt_range_fields = { + [CameraFields.PAN_IDX] = { max = "__MAX_PAN" , min = "__MIN_PAN" }, + [CameraFields.TILT_IDX] = { max = "__MAX_TILT" , min = "__MIN_TILT" } +} + +CameraFields.profile_components = { + main = "main", + statusLed = "statusLed", + speaker = "speaker", + microphone = "microphone", + doorbell = "doorbell" +} + +CameraFields.tri_state_map = { + [clusters.CameraAvStreamManagement.types.TriStateAutoEnum.OFF] = "off", + [clusters.CameraAvStreamManagement.types.TriStateAutoEnum.ON] = "on", + [clusters.CameraAvStreamManagement.types.TriStateAutoEnum.AUTO] = "auto" +} + +CameraFields.ABS_PAN_MAX = 180 +CameraFields.ABS_PAN_MIN = -180 +CameraFields.ABS_TILT_MAX = 180 +CameraFields.ABS_TILT_MIN = -180 +CameraFields.ABS_ZOOM_MAX = 100 +CameraFields.ABS_ZOOM_MIN = 1 +CameraFields.ABS_VOL_MAX = 254.0 +CameraFields.ABS_VOL_MIN = 0.0 + +return CameraFields diff --git a/drivers/SmartThings/matter-switch/src/sub_drivers/camera/camera_utils/utils.lua b/drivers/SmartThings/matter-switch/src/sub_drivers/camera/camera_utils/utils.lua new file mode 100644 index 0000000000..4334d2a304 --- /dev/null +++ b/drivers/SmartThings/matter-switch/src/sub_drivers/camera/camera_utils/utils.lua @@ -0,0 +1,304 @@ +-- Copyright © 2025 SmartThings, Inc. 
+-- Licensed under the Apache License, Version 2.0 + +local camera_fields = require "sub_drivers.camera.camera_utils.fields" +local capabilities = require "st.capabilities" +local clusters = require "st.matter.clusters" +local fields = require "switch_utils.fields" +local switch_utils = require "switch_utils.utils" + +local CameraUtils = {} + +function CameraUtils.component_to_endpoint(device, component) + local camera_eps = device:get_endpoints(clusters.CameraAvStreamManagement.ID) + table.sort(camera_eps) + for _, ep in ipairs(camera_eps or {}) do + if ep ~= 0 then -- 0 is the matter RootNode endpoint + return ep + end + end + return nil +end + +function CameraUtils.update_camera_component_map(device) + local camera_av_ep_ids = device:get_endpoints(clusters.CameraAvStreamManagement.ID) + if #camera_av_ep_ids > 0 then + -- An assumption here: there is only 1 CameraAvStreamManagement cluster on the device (which is all our profile supports) + local component_map = {} + if CameraUtils.feature_supported(device, clusters.CameraAvStreamManagement.ID, clusters.CameraAvStreamManagement.types.Feature.AUDIO) then + component_map.microphone = { + endpoint_id = camera_av_ep_ids[1], + cluster_id = clusters.CameraAvStreamManagement.ID, + attribute_ids = { + clusters.CameraAvStreamManagement.attributes.MicrophoneMuted.ID, + clusters.CameraAvStreamManagement.attributes.MicrophoneVolumeLevel.ID, + clusters.CameraAvStreamManagement.attributes.MicrophoneMaxLevel.ID, + clusters.CameraAvStreamManagement.attributes.MicrophoneMinLevel.ID, + }, + capability_ids = { + capabilities.audioMute.ID, + capabilities.audioVolume.ID, + } + } + end + if CameraUtils.feature_supported(device, clusters.CameraAvStreamManagement.ID, clusters.CameraAvStreamManagement.types.Feature.VIDEO) then + component_map.speaker = { + endpoint_id = camera_av_ep_ids[1], + cluster_id = clusters.CameraAvStreamManagement.ID, + attribute_ids = { + clusters.CameraAvStreamManagement.attributes.SpeakerMuted.ID, + clusters.CameraAvStreamManagement.attributes.SpeakerVolumeLevel.ID, + clusters.CameraAvStreamManagement.attributes.SpeakerMaxLevel.ID, + clusters.CameraAvStreamManagement.attributes.SpeakerMinLevel.ID, + }, + capability_ids = { + capabilities.audioMute.ID, + capabilities.audioVolume.ID, + } + } + end + device:set_field(fields.COMPONENT_TO_ENDPOINT_MAP, component_map, {persist = true}) + end +end + +function CameraUtils.get_ptz_map(device) + local mechanicalPanTiltZoom = capabilities.mechanicalPanTiltZoom + local ptz_map = { + [camera_fields.PAN_IDX] = { + current = device:get_latest_state("main", mechanicalPanTiltZoom.ID, mechanicalPanTiltZoom.pan.NAME), + range = device:get_latest_state("main", mechanicalPanTiltZoom.ID, mechanicalPanTiltZoom.panRange.NAME) or + { minimum = camera_fields.ABS_PAN_MIN, maximum = camera_fields.ABS_PAN_MAX }, + attribute = mechanicalPanTiltZoom.pan + }, + [camera_fields.TILT_IDX] = { + current = device:get_latest_state("main", mechanicalPanTiltZoom.ID, mechanicalPanTiltZoom.tilt.NAME), + range = device:get_latest_state("main", mechanicalPanTiltZoom.ID, mechanicalPanTiltZoom.tiltRange.NAME) or + { minimum = camera_fields.ABS_TILT_MIN, maximum = camera_fields.ABS_TILT_MAX }, + attribute = mechanicalPanTiltZoom.tilt + }, + [camera_fields.ZOOM_IDX] = { + current = device:get_latest_state("main", mechanicalPanTiltZoom.ID, mechanicalPanTiltZoom.zoom.NAME), + range = device:get_latest_state("main", mechanicalPanTiltZoom.ID, mechanicalPanTiltZoom.zoomRange.NAME) or + { minimum = camera_fields.ABS_ZOOM_MIN, maximum = 
camera_fields.ABS_ZOOM_MAX }, + attribute = mechanicalPanTiltZoom.zoom + } + } + return ptz_map +end + +function CameraUtils.feature_supported(device, cluster_id, feature_flag) + return #device:get_endpoints(cluster_id, { feature_bitmap = feature_flag }) > 0 +end + +function CameraUtils.update_supported_attributes(device, ib, capability, attribute) + local attribute_set = device:get_latest_state( + camera_fields.profile_components.main, capability.ID, capability.supportedAttributes.NAME + ) or {} + if not switch_utils.tbl_contains(attribute_set, attribute) then + local updated_attribute_set = {} + for _, v in ipairs(attribute_set) do + table.insert(updated_attribute_set, v) + end + table.insert(updated_attribute_set, attribute) + device:emit_event_for_endpoint(ib, capability.supportedAttributes(updated_attribute_set)) + end +end + +function CameraUtils.compute_fps(max_encoded_pixel_rate, width, height, max_fps) + local fps_step = 15.0 + local fps = math.min(max_encoded_pixel_rate / (width * height), max_fps) + return math.tointeger(math.floor(fps / fps_step) * fps_step) +end + +function CameraUtils.profile_changed(synced_components, prev_components) + if #synced_components ~= #prev_components then + return true + end + for _, component in pairs(synced_components or {}) do + if (prev_components[component.id] == nil) or + (#component.capabilities ~= #prev_components[component.id].capabilities) then + return true + end + for _, capability in pairs(component.capabilities or {}) do + if prev_components[component.id][capability.id] == nil then + return true + end + end + end + return false +end + +function CameraUtils.optional_capabilities_list_changed(optional_capabilities, prev_component_list) + local prev_optional_capabilities = {} + for idx, comp in pairs(prev_component_list or {}) do + local cap_list = {} + for _, capability in pairs(comp.capabilities or {}) do + table.insert(cap_list, capability.id) + end + table.insert(prev_optional_capabilities, {idx, cap_list}) + end + if #optional_capabilities ~= #prev_optional_capabilities then + return true + end + for _, capability in pairs(optional_capabilities or {}) do + if not switch_utils.tbl_contains(prev_optional_capabilities, capability) then + return true + end + end + for _, capability in pairs(prev_optional_capabilities or {}) do + if not switch_utils.tbl_contains(optional_capabilities, capability) then + return true + end + end + return false +end + +function CameraUtils.subscribe(device) + local camera_subscribed_attributes = { + [capabilities.hdr.ID] = { + clusters.CameraAvStreamManagement.attributes.HDRModeEnabled, + clusters.CameraAvStreamManagement.attributes.ImageRotation + }, + [capabilities.nightVision.ID] = { + clusters.CameraAvStreamManagement.attributes.NightVision, + clusters.CameraAvStreamManagement.attributes.NightVisionIllum + }, + [capabilities.imageControl.ID] = { + clusters.CameraAvStreamManagement.attributes.ImageFlipHorizontal, + clusters.CameraAvStreamManagement.attributes.ImageFlipVertical + }, + [capabilities.cameraPrivacyMode.ID] = { + clusters.CameraAvStreamManagement.attributes.SoftRecordingPrivacyModeEnabled, + clusters.CameraAvStreamManagement.attributes.SoftLivestreamPrivacyModeEnabled, + clusters.CameraAvStreamManagement.attributes.HardPrivacyModeOn + }, + [capabilities.webrtc.ID] = { + clusters.CameraAvStreamManagement.attributes.TwoWayTalkSupport + }, + [capabilities.mechanicalPanTiltZoom.ID] = { + clusters.CameraAvSettingsUserLevelManagement.attributes.MPTZPosition, + 
clusters.CameraAvSettingsUserLevelManagement.attributes.MPTZPresets, + clusters.CameraAvSettingsUserLevelManagement.attributes.MaxPresets, + clusters.CameraAvSettingsUserLevelManagement.attributes.ZoomMax, + clusters.CameraAvSettingsUserLevelManagement.attributes.PanMax, + clusters.CameraAvSettingsUserLevelManagement.attributes.PanMin, + clusters.CameraAvSettingsUserLevelManagement.attributes.TiltMax, + clusters.CameraAvSettingsUserLevelManagement.attributes.TiltMin + }, + [capabilities.audioMute.ID] = { + clusters.CameraAvStreamManagement.attributes.SpeakerMuted, + clusters.CameraAvStreamManagement.attributes.MicrophoneMuted + }, + [capabilities.audioVolume.ID] = { + clusters.CameraAvStreamManagement.attributes.SpeakerVolumeLevel, + clusters.CameraAvStreamManagement.attributes.SpeakerMaxLevel, + clusters.CameraAvStreamManagement.attributes.SpeakerMinLevel, + clusters.CameraAvStreamManagement.attributes.MicrophoneVolumeLevel, + clusters.CameraAvStreamManagement.attributes.MicrophoneMaxLevel, + clusters.CameraAvStreamManagement.attributes.MicrophoneMinLevel + }, + [capabilities.mode.ID] = { + clusters.CameraAvStreamManagement.attributes.StatusLightBrightness + }, + [capabilities.switch.ID] = { + clusters.CameraAvStreamManagement.attributes.StatusLightEnabled + }, + [capabilities.videoStreamSettings.ID] = { + clusters.CameraAvStreamManagement.attributes.RateDistortionTradeOffPoints, + clusters.CameraAvStreamManagement.attributes.MaxEncodedPixelRate, + clusters.CameraAvStreamManagement.attributes.VideoSensorParams, + clusters.CameraAvStreamManagement.attributes.AllocatedVideoStreams + }, + [capabilities.zoneManagement.ID] = { + clusters.ZoneManagement.attributes.MaxZones, + clusters.ZoneManagement.attributes.Zones, + clusters.ZoneManagement.attributes.Triggers, + clusters.ZoneManagement.attributes.SensitivityMax, + clusters.ZoneManagement.attributes.Sensitivity + }, + [capabilities.sounds.ID] = { + clusters.Chime.attributes.InstalledChimeSounds, + clusters.Chime.attributes.SelectedChime + }, + [capabilities.localMediaStorage.ID] = { + clusters.CameraAvStreamManagement.attributes.LocalSnapshotRecordingEnabled, + clusters.CameraAvStreamManagement.attributes.LocalVideoRecordingEnabled + }, + [capabilities.cameraViewportSettings.ID] = { + clusters.CameraAvStreamManagement.attributes.MinViewportResolution, + clusters.CameraAvStreamManagement.attributes.VideoSensorParams, + clusters.CameraAvStreamManagement.attributes.Viewport + }, + [capabilities.motionSensor.ID] = { + clusters.OccupancySensing.attributes.Occupancy + } + } + local camera_subscribed_events = { + [capabilities.zoneManagement.ID] = { + clusters.ZoneManagement.events.ZoneTriggered, + clusters.ZoneManagement.events.ZoneStopped + }, + [capabilities.button.ID] = { + clusters.Switch.events.InitialPress, + clusters.Switch.events.LongPress, + clusters.Switch.events.ShortRelease, + clusters.Switch.events.MultiPressComplete + } + } + + for capability, attr_list in pairs(camera_subscribed_attributes) do + if device:supports_capability_by_id(capability) then + for _, attr in pairs(attr_list) do + device:add_subscribed_attribute(attr) + end + end + end + for capability, event_list in pairs(camera_subscribed_events) do + if device:supports_capability_by_id(capability) then + for _, event in pairs(event_list) do + device:add_subscribed_event(event) + end + end + end + + -- match_profile is called from the CameraAvStreamManagement AttributeList handler, + -- so the subscription needs to be added here first + if 
#device:get_endpoints(clusters.CameraAvStreamManagement.ID) > 0 then + device:add_subscribed_attribute(clusters.CameraAvStreamManagement.attributes.AttributeList) + end + + -- Add subscription for attributes specific to child devices + if device:get_field(fields.IS_PARENT_CHILD_DEVICE) then + for _, ep in ipairs(device.endpoints or {}) do + local id = 0 + for _, dt in ipairs(ep.device_types or {}) do + if dt.device_type_id ~= fields.DEVICE_TYPE_ID.GENERIC_SWITCH then + id = math.max(id, dt.device_type_id) + end + end + for _, attr in pairs(fields.device_type_attribute_map[id] or {}) do + device:add_subscribed_attribute(attr) + end + end + end + + local im = require "st.matter.interaction_model" + local subscribed_attributes = device:get_field("__subscribed_attributes") or {} + local subscribed_events = device:get_field("__subscribed_events") or {} + local subscribe_request = im.InteractionRequest(im.InteractionRequest.RequestType.SUBSCRIBE, {}) + for _, attributes in pairs(subscribed_attributes) do + for _, ib in pairs(attributes) do + subscribe_request:with_info_block(ib) + end + end + for _, events in pairs(subscribed_events) do + for _, ib in pairs(events) do + subscribe_request:with_info_block(ib) + end + end + if #subscribe_request.info_blocks > 0 then + device:send(subscribe_request) + end +end + +return CameraUtils diff --git a/drivers/SmartThings/matter-switch/src/sub_drivers/camera/can_handle.lua b/drivers/SmartThings/matter-switch/src/sub_drivers/camera/can_handle.lua new file mode 100644 index 0000000000..25a441d641 --- /dev/null +++ b/drivers/SmartThings/matter-switch/src/sub_drivers/camera/can_handle.lua @@ -0,0 +1,16 @@ +-- Copyright © 2025 SmartThings, Inc. +-- Licensed under the Apache License, Version 2.0 + +return function(opts, driver, device, ...) + local device_lib = require "st.device" + local fields = require "switch_utils.fields" + local switch_utils = require "switch_utils.utils" + if device.network_type == device_lib.NETWORK_TYPE_MATTER then + local version = require "version" + if version.rpc >= 10 and version.api >= 16 and + #switch_utils.get_endpoints_by_device_type(device, fields.DEVICE_TYPE_ID.CAMERA) > 0 then + return true, require("sub_drivers.camera") + end + end + return false +end diff --git a/drivers/SmartThings/matter-switch/src/sub_drivers/camera/init.lua b/drivers/SmartThings/matter-switch/src/sub_drivers/camera/init.lua new file mode 100644 index 0000000000..8244f4fd62 --- /dev/null +++ b/drivers/SmartThings/matter-switch/src/sub_drivers/camera/init.lua @@ -0,0 +1,200 @@ +-- Copyright © 2025 SmartThings, Inc. 
+-- Licensed under the Apache License, Version 2.0 + +------------------------------------------------------------------------------------- +-- Matter Camera Sub Driver +------------------------------------------------------------------------------------- + +local attribute_handlers = require "sub_drivers.camera.camera_handlers.attribute_handlers" +local button_cfg = require("switch_utils.device_configuration").ButtonCfg +local camera_cfg = require "sub_drivers.camera.camera_utils.device_configuration" +local camera_fields = require "sub_drivers.camera.camera_utils.fields" +local camera_utils = require "sub_drivers.camera.camera_utils.utils" +local capabilities = require "st.capabilities" +local capability_handlers = require "sub_drivers.camera.camera_handlers.capability_handlers" +local clusters = require "st.matter.clusters" +local event_handlers = require "sub_drivers.camera.camera_handlers.event_handlers" +local fields = require "switch_utils.fields" +local switch_utils = require "switch_utils.utils" + +local CameraLifecycleHandlers = {} + +function CameraLifecycleHandlers.device_init(driver, device) + device:set_component_to_endpoint_fn(camera_utils.component_to_endpoint) + device:set_endpoint_to_component_fn(switch_utils.endpoint_to_component) + device:extend_device("emit_event_for_endpoint", switch_utils.emit_event_for_endpoint) + if device:get_field(fields.IS_PARENT_CHILD_DEVICE) then + device:set_find_child(switch_utils.find_child) + end + device:extend_device("subscribe", camera_utils.subscribe) + device:subscribe() +end + +function CameraLifecycleHandlers.do_configure(driver, device) + camera_utils.update_camera_component_map(device) + if #device:get_endpoints(clusters.CameraAvStreamManagement.ID) == 0 then + camera_cfg.match_profile(device, false, false) + end + camera_cfg.create_child_devices(driver, device) + camera_cfg.initialize_camera_capabilities(device) +end + +function CameraLifecycleHandlers.info_changed(driver, device, event, args) + if camera_utils.profile_changed(device.profile.components, args.old_st_store.profile.components) then + camera_cfg.initialize_camera_capabilities(device) + if #switch_utils.get_endpoints_by_device_type(device, fields.DEVICE_TYPE_ID.DOORBELL) > 0 then + button_cfg.configure_buttons(device) + end + device:subscribe() + end +end + +function CameraLifecycleHandlers.added() end + +local camera_handler = { + NAME = "Camera Handler", + lifecycle_handlers = { + init = CameraLifecycleHandlers.device_init, + infoChanged = CameraLifecycleHandlers.info_changed, + doConfigure = CameraLifecycleHandlers.do_configure, + driverSwitched = CameraLifecycleHandlers.do_configure, + added = CameraLifecycleHandlers.added + }, + matter_handlers = { + attr = { + [clusters.CameraAvStreamManagement.ID] = { + [clusters.CameraAvStreamManagement.attributes.HDRModeEnabled.ID] = attribute_handlers.enabled_state_factory(capabilities.hdr.hdr), + [clusters.CameraAvStreamManagement.attributes.NightVision.ID] = attribute_handlers.night_vision_factory(capabilities.nightVision.nightVision), + [clusters.CameraAvStreamManagement.attributes.NightVisionIllum.ID] = attribute_handlers.night_vision_factory(capabilities.nightVision.illumination), + [clusters.CameraAvStreamManagement.attributes.ImageFlipHorizontal.ID] = attribute_handlers.enabled_state_factory(capabilities.imageControl.imageFlipHorizontal), + [clusters.CameraAvStreamManagement.attributes.ImageFlipVertical.ID] = attribute_handlers.enabled_state_factory(capabilities.imageControl.imageFlipVertical), + 
[clusters.CameraAvStreamManagement.attributes.ImageRotation.ID] = attribute_handlers.image_rotation_handler, + [clusters.CameraAvStreamManagement.attributes.SoftRecordingPrivacyModeEnabled.ID] = attribute_handlers.enabled_state_factory(capabilities.cameraPrivacyMode.softRecordingPrivacyMode), + [clusters.CameraAvStreamManagement.attributes.SoftLivestreamPrivacyModeEnabled.ID] = attribute_handlers.enabled_state_factory(capabilities.cameraPrivacyMode.softLivestreamPrivacyMode), + [clusters.CameraAvStreamManagement.attributes.HardPrivacyModeOn.ID] = attribute_handlers.enabled_state_factory(capabilities.cameraPrivacyMode.hardPrivacyMode), + [clusters.CameraAvStreamManagement.attributes.TwoWayTalkSupport.ID] = attribute_handlers.two_way_talk_support_handler, + [clusters.CameraAvStreamManagement.attributes.SpeakerMuted.ID] = attribute_handlers.muted_handler, + [clusters.CameraAvStreamManagement.attributes.SpeakerVolumeLevel.ID] = attribute_handlers.volume_level_handler, + [clusters.CameraAvStreamManagement.attributes.SpeakerMaxLevel.ID] = attribute_handlers.max_volume_level_handler, + [clusters.CameraAvStreamManagement.attributes.SpeakerMinLevel.ID] = attribute_handlers.min_volume_level_handler, + [clusters.CameraAvStreamManagement.attributes.MicrophoneMuted.ID] = attribute_handlers.muted_handler, + [clusters.CameraAvStreamManagement.attributes.MicrophoneVolumeLevel.ID] = attribute_handlers.volume_level_handler, + [clusters.CameraAvStreamManagement.attributes.MicrophoneMaxLevel.ID] = attribute_handlers.max_volume_level_handler, + [clusters.CameraAvStreamManagement.attributes.MicrophoneMinLevel.ID] = attribute_handlers.min_volume_level_handler, + [clusters.CameraAvStreamManagement.attributes.StatusLightEnabled.ID] = attribute_handlers.status_light_enabled_handler, + [clusters.CameraAvStreamManagement.attributes.StatusLightBrightness.ID] = attribute_handlers.status_light_brightness_handler, + [clusters.CameraAvStreamManagement.attributes.RateDistortionTradeOffPoints.ID] = attribute_handlers.rate_distortion_trade_off_points_handler, + [clusters.CameraAvStreamManagement.attributes.MaxEncodedPixelRate.ID] = attribute_handlers.max_encoded_pixel_rate_handler, + [clusters.CameraAvStreamManagement.attributes.VideoSensorParams.ID] = attribute_handlers.video_sensor_parameters_handler, + [clusters.CameraAvStreamManagement.attributes.MinViewportResolution.ID] = attribute_handlers.min_viewport_handler, + [clusters.CameraAvStreamManagement.attributes.AllocatedVideoStreams.ID] = attribute_handlers.allocated_video_streams_handler, + [clusters.CameraAvStreamManagement.attributes.Viewport.ID] = attribute_handlers.viewport_handler, + [clusters.CameraAvStreamManagement.attributes.LocalSnapshotRecordingEnabled.ID] = attribute_handlers.enabled_state_factory(capabilities.localMediaStorage.localSnapshotRecording), + [clusters.CameraAvStreamManagement.attributes.LocalVideoRecordingEnabled.ID] = attribute_handlers.enabled_state_factory(capabilities.localMediaStorage.localVideoRecording), + [clusters.CameraAvStreamManagement.attributes.AttributeList.ID] = attribute_handlers.camera_av_stream_management_attribute_list_handler + }, + [clusters.CameraAvSettingsUserLevelManagement.ID] = { + [clusters.CameraAvSettingsUserLevelManagement.attributes.MPTZPosition.ID] = attribute_handlers.ptz_position_handler, + [clusters.CameraAvSettingsUserLevelManagement.attributes.MPTZPresets.ID] = attribute_handlers.ptz_presets_handler, + [clusters.CameraAvSettingsUserLevelManagement.attributes.MaxPresets.ID] = 
attribute_handlers.max_presets_handler, + [clusters.CameraAvSettingsUserLevelManagement.attributes.ZoomMax.ID] = attribute_handlers.zoom_max_handler, + [clusters.CameraAvSettingsUserLevelManagement.attributes.PanMax.ID] = attribute_handlers.pt_range_handler_factory(capabilities.mechanicalPanTiltZoom.panRange, camera_fields.pt_range_fields[camera_fields.PAN_IDX].max), + [clusters.CameraAvSettingsUserLevelManagement.attributes.PanMin.ID] = attribute_handlers.pt_range_handler_factory(capabilities.mechanicalPanTiltZoom.panRange, camera_fields.pt_range_fields[camera_fields.PAN_IDX].min), + [clusters.CameraAvSettingsUserLevelManagement.attributes.TiltMax.ID] = attribute_handlers.pt_range_handler_factory(capabilities.mechanicalPanTiltZoom.tiltRange, camera_fields.pt_range_fields[camera_fields.TILT_IDX].max), + [clusters.CameraAvSettingsUserLevelManagement.attributes.TiltMin.ID] = attribute_handlers.pt_range_handler_factory(capabilities.mechanicalPanTiltZoom.tiltRange, camera_fields.pt_range_fields[camera_fields.TILT_IDX].min) + }, + [clusters.ZoneManagement.ID] = { + [clusters.ZoneManagement.attributes.MaxZones.ID] = attribute_handlers.max_zones_handler, + [clusters.ZoneManagement.attributes.Zones.ID] = attribute_handlers.zones_handler, + [clusters.ZoneManagement.attributes.Triggers.ID] = attribute_handlers.triggers_handler, + [clusters.ZoneManagement.attributes.SensitivityMax.ID] = attribute_handlers.sensitivity_max_handler, + [clusters.ZoneManagement.attributes.Sensitivity.ID] = attribute_handlers.sensitivity_handler, + }, + [clusters.Chime.ID] = { + [clusters.Chime.attributes.InstalledChimeSounds.ID] = attribute_handlers.installed_chime_sounds_handler, + [clusters.Chime.attributes.SelectedChime.ID] = attribute_handlers.selected_chime_handler + } + }, + event = { + [clusters.ZoneManagement.ID] = { + [clusters.ZoneManagement.events.ZoneTriggered.ID] = event_handlers.zone_triggered_handler, + [clusters.ZoneManagement.events.ZoneStopped.ID] = event_handlers.zone_stopped_handler + } + } + }, + capability_handlers = { + [capabilities.hdr.ID] = { + [capabilities.hdr.commands.setHdr.NAME] = capability_handlers.set_enabled_factory(clusters.CameraAvStreamManagement.attributes.HDRModeEnabled) + }, + [capabilities.nightVision.ID] = { + [capabilities.nightVision.commands.setNightVision.NAME] = capability_handlers.set_night_vision_factory(clusters.CameraAvStreamManagement.attributes.NightVision), + [capabilities.nightVision.commands.setIllumination.NAME] = capability_handlers.set_night_vision_factory(clusters.CameraAvStreamManagement.attributes.NightVisionIllum) + }, + [capabilities.imageControl.ID] = { + [capabilities.imageControl.commands.setImageFlipHorizontal.NAME] = capability_handlers.set_enabled_factory(clusters.CameraAvStreamManagement.attributes.ImageFlipHorizontal), + [capabilities.imageControl.commands.setImageFlipVertical.NAME] = capability_handlers.set_enabled_factory(clusters.CameraAvStreamManagement.attributes.ImageFlipVertical), + [capabilities.imageControl.commands.setImageRotation.NAME] = capability_handlers.handle_set_image_rotation + }, + [capabilities.cameraPrivacyMode.ID] = { + [capabilities.cameraPrivacyMode.commands.setSoftLivestreamPrivacyMode.NAME] = capability_handlers.set_enabled_factory(clusters.CameraAvStreamManagement.attributes.SoftLivestreamPrivacyModeEnabled), + [capabilities.cameraPrivacyMode.commands.setSoftRecordingPrivacyMode.NAME] = capability_handlers.set_enabled_factory(clusters.CameraAvStreamManagement.attributes.SoftRecordingPrivacyModeEnabled) + }, + 
[capabilities.audioMute.ID] = { + [capabilities.audioMute.commands.setMute.NAME] = capability_handlers.handle_mute_commands_factory(capabilities.audioMute.commands.setMute.NAME), + [capabilities.audioMute.commands.mute.NAME] = capability_handlers.handle_mute_commands_factory(capabilities.audioMute.commands.mute.NAME), + [capabilities.audioMute.commands.unmute.NAME] = capability_handlers.handle_mute_commands_factory(capabilities.audioMute.commands.unmute.NAME) + }, + [capabilities.audioVolume.ID] = { + [capabilities.audioVolume.commands.setVolume.NAME] = capability_handlers.handle_set_volume, + [capabilities.audioVolume.commands.volumeUp.NAME] = capability_handlers.handle_volume_up, + [capabilities.audioVolume.commands.volumeDown.NAME] = capability_handlers.handle_volume_down + }, + [capabilities.mode.ID] = { + [capabilities.mode.commands.setMode.NAME] = capability_handlers.handle_set_status_light_mode + }, + [capabilities.switch.ID] = { + [capabilities.switch.commands.on.NAME] = capability_handlers.handle_status_led_on, + [capabilities.switch.commands.off.NAME] = capability_handlers.handle_status_led_off + }, + [capabilities.audioRecording.ID] = { + [capabilities.audioRecording.commands.setAudioRecording.NAME] = capability_handlers.handle_audio_recording + }, + [capabilities.mechanicalPanTiltZoom.ID] = { + [capabilities.mechanicalPanTiltZoom.commands.panRelative.NAME] = capability_handlers.ptz_relative_move_factory(camera_fields.PAN_IDX), + [capabilities.mechanicalPanTiltZoom.commands.tiltRelative.NAME] = capability_handlers.ptz_relative_move_factory(camera_fields.TILT_IDX), + [capabilities.mechanicalPanTiltZoom.commands.zoomRelative.NAME] = capability_handlers.ptz_relative_move_factory(camera_fields.ZOOM_IDX), + [capabilities.mechanicalPanTiltZoom.commands.setPan.NAME] = capability_handlers.ptz_set_position_factory(capabilities.mechanicalPanTiltZoom.commands.setPan), + [capabilities.mechanicalPanTiltZoom.commands.setTilt.NAME] = capability_handlers.ptz_set_position_factory(capabilities.mechanicalPanTiltZoom.commands.setTilt), + [capabilities.mechanicalPanTiltZoom.commands.setZoom.NAME] = capability_handlers.ptz_set_position_factory(capabilities.mechanicalPanTiltZoom.commands.setZoom), + [capabilities.mechanicalPanTiltZoom.commands.setPanTiltZoom.NAME] = capability_handlers.ptz_set_position_factory(capabilities.mechanicalPanTiltZoom.commands.setPanTiltZoom), + [capabilities.mechanicalPanTiltZoom.commands.savePreset.NAME] = capability_handlers.handle_save_preset, + [capabilities.mechanicalPanTiltZoom.commands.removePreset.NAME] = capability_handlers.handle_remove_preset, + [capabilities.mechanicalPanTiltZoom.commands.moveToPreset.NAME] = capability_handlers.handle_move_to_preset + }, + [capabilities.zoneManagement.ID] = { + [capabilities.zoneManagement.commands.newZone.NAME] = capability_handlers.handle_new_zone, + [capabilities.zoneManagement.commands.updateZone.NAME] = capability_handlers.handle_update_zone, + [capabilities.zoneManagement.commands.removeZone.NAME] = capability_handlers.handle_remove_zone, + [capabilities.zoneManagement.commands.createOrUpdateTrigger.NAME] = capability_handlers.handle_create_or_update_trigger, + [capabilities.zoneManagement.commands.removeTrigger.NAME] = capability_handlers.handle_remove_trigger, + [capabilities.zoneManagement.commands.setSensitivity.NAME] = capability_handlers.handle_set_sensitivity + }, + [capabilities.sounds.ID] = { + [capabilities.sounds.commands.playSound.NAME] = capability_handlers.handle_play_sound, + 
[capabilities.sounds.commands.setSelectedSound.NAME] = capability_handlers.handle_set_selected_sound + }, + [capabilities.videoStreamSettings.ID] = { + [capabilities.videoStreamSettings.commands.setStream.NAME] = capability_handlers.handle_set_stream + }, + [capabilities.cameraViewportSettings.ID] = { + [capabilities.cameraViewportSettings.commands.setDefaultViewport.NAME] = capability_handlers.handle_set_default_viewport + }, + [capabilities.localMediaStorage.ID] = { + [capabilities.localMediaStorage.commands.setLocalSnapshotRecording.NAME] = capability_handlers.set_enabled_factory(clusters.CameraAvStreamManagement.attributes.LocalSnapshotRecordingEnabled), + [capabilities.localMediaStorage.commands.setLocalVideoRecording.NAME] = capability_handlers.set_enabled_factory(clusters.CameraAvStreamManagement.attributes.LocalVideoRecordingEnabled) + } + }, + can_handle = require("sub_drivers.camera.can_handle") +} + +return camera_handler diff --git a/drivers/SmartThings/matter-switch/src/switch_utils/fields.lua b/drivers/SmartThings/matter-switch/src/switch_utils/fields.lua index f66773d77f..c350a7adaf 100644 --- a/drivers/SmartThings/matter-switch/src/switch_utils/fields.lua +++ b/drivers/SmartThings/matter-switch/src/switch_utils/fields.lua @@ -36,7 +36,10 @@ SwitchFields.CURRENT_HUESAT_ATTR_MAX = 254 SwitchFields.DEVICE_TYPE_ID = { AGGREGATOR = 0x000E, + CAMERA = 0x0142, + CHIME = 0x0146, DIMMABLE_PLUG_IN_UNIT = 0x010B, + DOORBELL = 0x0143, ELECTRICAL_SENSOR = 0x0510, GENERIC_SWITCH = 0x000F, MOUNTED_ON_OFF_CONTROL = 0x010F, @@ -177,24 +180,38 @@ SwitchFields.OPTIONS_OVERRIDE = 0x01 SwitchFields.supported_capabilities = { + capabilities.audioMute, + capabilities.audioRecording, + capabilities.audioVolume, capabilities.battery, capabilities.batteryLevel, capabilities.button, + capabilities.cameraPrivacyMode, + capabilities.cameraViewportSettings, capabilities.colorControl, capabilities.colorTemperature, capabilities.energyMeter, capabilities.fanMode, capabilities.fanSpeedPercent, + capabilities.hdr, capabilities.illuminanceMeasurement, + capabilities.imageControl, capabilities.level, + capabilities.localMediaStorage, + capabilities.mechanicalPanTiltZoom, capabilities.motionSensor, + capabilities.nightVision, capabilities.powerMeter, capabilities.powerConsumptionReport, capabilities.relativeHumidityMeasurement, + capabilities.sounds, capabilities.switch, capabilities.switchLevel, capabilities.temperatureMeasurement, capabilities.valve, + capabilities.videoStreamSettings, + capabilities.webrtc, + capabilities.zoneManagement } SwitchFields.device_type_attribute_map = { diff --git a/drivers/SmartThings/matter-switch/src/switch_utils/utils.lua b/drivers/SmartThings/matter-switch/src/switch_utils/utils.lua index 0f7ac435cb..46b82678a4 100644 --- a/drivers/SmartThings/matter-switch/src/switch_utils/utils.lua +++ b/drivers/SmartThings/matter-switch/src/switch_utils/utils.lua @@ -1,11 +1,13 @@ -- Copyright © 2025 SmartThings, Inc. 
-- Licensed under the Apache License, Version 2.0 +local MatterDriver = require "st.matter.driver" local fields = require "switch_utils.fields" local st_utils = require "st.utils" local clusters = require "st.matter.clusters" local capabilities = require "st.capabilities" local log = require "log" +local version = require "version" local utils = {} @@ -166,16 +168,74 @@ function utils.component_to_endpoint(device, component) return utils.find_default_endpoint(device) end -function utils.endpoint_to_component(device, ep) - local map = device:get_field(fields.COMPONENT_TO_ENDPOINT_MAP) or {} - for component, endpoint in pairs(map) do - if endpoint == ep then +--- An extension of the library function endpoint_to_component, to support a mapping scheme +--- that includes cluster and attribute id's so that we can use multiple components for a +--- single endpoint. +--- +--- @param device any a Matter device object +--- @param opts number|table either is an ep_id or a table { endpoint_id, capability_id } +--- @return string component +function utils.endpoint_to_component(device, opts) + local ep_info = {} + if type(opts) == "number" then + ep_info.endpoint_id = opts + elseif type(opts) == "table" then + if opts.endpoint_info then + ep_info = opts.endpoint_info + else + ep_info = { + endpoint_id = opts.endpoint_id, + cluster_id = opts.cluster_id, + attribute_id = opts.attribute_id + } + end + end + for component, map_info in pairs(device:get_field(fields.COMPONENT_TO_ENDPOINT_MAP) or {}) do + if type(map_info) == "number" and map_info == ep_info.endpoint_id then return component + elseif type(map_info) == "table" and map_info.endpoint_id == ep_info.endpoint_id then + if (not map_info.cluster_id or (map_info.cluster_id == ep_info.cluster_id + and utils.tbl_contains(map_info.attribute_ids, ep_info.attribute_id))) + and (not opts.capability_id or utils.tbl_contains(map_info.capability_ids, opts.capability_id)) then + return component + end end end return "main" end +--- An extension of the library function emit_event_for_endpoint, to support devices with +--- multiple components defined for the same endpoint, since they can't be easily +--- differentiated based on a simple endpoint id to component mapping, but we can extend +--- this mapping to include the cluster and attribute id's so that we know which component +--- to route events to. 
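+--- For example, camera_utils.update_camera_component_map builds a map in which the speaker and
+--- microphone components both point at the same CameraAvStreamManagement endpoint but are
+--- distinguished by their muted/volume attribute IDs and the audioMute/audioVolume capability IDs.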
+--- +--- @param device any a Matter device object +--- @param ep_info number|table endpoint_id or ib (includes endpoint_id, cluster_id, attribute_id) +--- @param event any a capability event object +function utils.emit_event_for_endpoint(device, ep_info, event) + if type(ep_info) == "number" then + ep_info = { endpoint_id = ep_info } + elseif type(ep_info) == "table" then + ep_info = { + endpoint_id = ep_info.endpoint_id, + cluster_id = ep_info.cluster_id, + attribute_id = ep_info.attribute_id + } + end + if device:get_field(fields.IS_PARENT_CHILD_DEVICE) then + local child = utils.find_child(device, ep_info.endpoint_id) + if child ~= nil then + child:emit_event(event) + return + end + end + local opts = { endpoint_info = ep_info, capability_id = event.capability.ID } + local comp_id = utils.endpoint_to_component(device, opts) + local comp = device.profile.components[comp_id] + device:emit_component_event(comp, event) +end + function utils.find_child(parent, ep_id) return parent:get_child_by_parent_assigned_key(string.format("%d", ep_id)) end @@ -268,4 +328,14 @@ function utils.report_power_consumption_to_st_energy(device, latest_total_import end end +function utils.lazy_load_if_possible(sub_driver_name) + if version.api >= 16 then + return MatterDriver.lazy_load_sub_driver_v2(sub_driver_name) + elseif version.api >= 9 then + return MatterDriver.lazy_load_sub_driver(require(sub_driver_name)) + else + return require(sub_driver_name) + end +end + return utils diff --git a/drivers/SmartThings/matter-switch/src/test/test_matter_camera.lua b/drivers/SmartThings/matter-switch/src/test/test_matter_camera.lua new file mode 100644 index 0000000000..b9804a2a76 --- /dev/null +++ b/drivers/SmartThings/matter-switch/src/test/test_matter_camera.lua @@ -0,0 +1,1814 @@ +-- Copyright © 2025 SmartThings, Inc. 
+-- Licensed under the Apache License, Version 2.0 + +local capabilities = require "st.capabilities" +local clusters = require "st.matter.clusters" +local t_utils = require "integration_test.utils" +local test = require "integration_test" + +test.disable_startup_messages() + +local CAMERA_EP, FLOODLIGHT_EP, CHIME_EP, DOORBELL_EP = 1, 2, 3, 4 + +local mock_device = test.mock_device.build_test_matter_device({ + profile = t_utils.get_profile_definition("camera.yml"), + manufacturer_info = {vendor_id = 0x0000, product_id = 0x0000}, + endpoints = { + { + endpoint_id = 0, + clusters = { + { cluster_id = clusters.Basic.ID, cluster_type = "SERVER" } + }, + device_types = { + { device_type_id = 0x0016, device_type_revision = 1 } -- RootNode + } + }, + { + endpoint_id = CAMERA_EP, + clusters = { + { + cluster_id = clusters.CameraAvStreamManagement.ID, + feature_map = clusters.CameraAvStreamManagement.types.Feature.VIDEO | + clusters.CameraAvStreamManagement.types.Feature.PRIVACY | + clusters.CameraAvStreamManagement.types.Feature.AUDIO | + clusters.CameraAvStreamManagement.types.Feature.LOCAL_STORAGE | + clusters.CameraAvStreamManagement.types.Feature.PRIVACY | + clusters.CameraAvStreamManagement.types.Feature.SPEAKER | + clusters.CameraAvStreamManagement.types.Feature.IMAGE_CONTROL | + clusters.CameraAvStreamManagement.types.Feature.SPEAKER | + clusters.CameraAvStreamManagement.types.Feature.HIGH_DYNAMIC_RANGE | + clusters.CameraAvStreamManagement.types.Feature.NIGHT_VISION | + clusters.CameraAvStreamManagement.types.Feature.WATERMARK | + clusters.CameraAvStreamManagement.types.Feature.ON_SCREEN_DISPLAY, + cluster_type = "SERVER" + }, + { + cluster_id = clusters.CameraAvSettingsUserLevelManagement.ID, + feature_map = clusters.CameraAvSettingsUserLevelManagement.types.Feature.MECHANICAL_PAN | + clusters.CameraAvSettingsUserLevelManagement.types.Feature.MECHANICAL_TILT | + clusters.CameraAvSettingsUserLevelManagement.types.Feature.MECHANICAL_ZOOM | + clusters.CameraAvSettingsUserLevelManagement.types.Feature.MECHANICAL_PRESETS, + cluster_type = "SERVER" + }, + { + cluster_id = clusters.ZoneManagement.ID, + feature_map = clusters.ZoneManagement.types.Feature.TWO_DIMENSIONAL_CARTESIAN_ZONE | + clusters.ZoneManagement.types.Feature.PER_ZONE_SENSITIVITY, + cluster_type = "SERVER" + }, + { + cluster_id = clusters.WebRTCTransportProvider.ID, + cluster_type = "SERVER" + }, + { + cluster_id = clusters.WebRTCTransportRequestor.ID, + cluster_type = "CLIENT" + }, + { + cluster_id = clusters.OccupancySensing.ID, + cluster_type = "SERVER" + } + }, + device_types = { + {device_type_id = 0x0142, device_type_revision = 1} -- Camera + } + }, + { + endpoint_id = FLOODLIGHT_EP, + clusters = { + {cluster_id = clusters.OnOff.ID, cluster_type = "SERVER"}, + {cluster_id = clusters.LevelControl.ID, cluster_type = "SERVER", feature_map = 2}, + {cluster_id = clusters.ColorControl.ID, cluster_type = "BOTH", feature_map = 30} + }, + device_types = { + {device_type_id = 0x010D, device_type_revision = 2} -- Extended Color Light + } + }, + { + endpoint_id = CHIME_EP, + clusters = { + { + cluster_id = clusters.Chime.ID, + cluster_type = "SERVER" + }, + }, + device_types = { + {device_type_id = 0x0146, device_type_revision = 1} -- Chime + } + }, + { + endpoint_id = DOORBELL_EP, + clusters = { + { + cluster_id = clusters.Switch.ID, + feature_map = clusters.Switch.types.SwitchFeature.MOMENTARY_SWITCH | + clusters.Switch.types.SwitchFeature.MOMENTARY_SWITCH_MULTI_PRESS | + 
clusters.Switch.types.SwitchFeature.MOMENTARY_SWITCH_LONG_PRESS, + cluster_type = "SERVER", + } + }, + device_types = { + {device_type_id = 0x0143, device_type_revision = 1} -- Doorbell + } + } + } +}) + +local subscribe_request +local subscribed_attributes = { + clusters.CameraAvStreamManagement.attributes.AttributeList, +} + +local function test_init() + test.mock_device.add_test_device(mock_device) + test.socket.device_lifecycle:__queue_receive({ mock_device.id, "added" }) + test.socket.device_lifecycle:__queue_receive({ mock_device.id, "init" }) + local floodlight_child_device_data = { + profile = t_utils.get_profile_definition("light-color-level.yml"), + device_network_id = string.format("%s:%d", mock_device.id, FLOODLIGHT_EP), + parent_device_id = mock_device.id, + parent_assigned_child_key = string.format("%d", FLOODLIGHT_EP) + } + test.mock_device.add_test_device(test.mock_device.build_test_child_device(floodlight_child_device_data)) + mock_device:expect_device_create({ + type = "EDGE_CHILD", + label = "Floodlight 1", + profile = "light-color-level", + parent_device_id = mock_device.id, + parent_assigned_child_key = string.format("%d", FLOODLIGHT_EP) + }) + subscribe_request = subscribed_attributes[1]:subscribe(mock_device) + for i, attr in ipairs(subscribed_attributes) do + if i > 1 then subscribe_request:merge(attr:subscribe(mock_device)) end + end + test.socket.matter:__expect_send({mock_device.id, subscribe_request}) + test.socket.device_lifecycle:__queue_receive({ mock_device.id, "doConfigure" }) + mock_device:expect_metadata_update({ provisioning_state = "PROVISIONED" }) +end + +test.set_test_init_function(test_init) + +local function update_device_profile() + test.socket.matter:__set_channel_ordering("relaxed") + local uint32 = require "st.matter.data_types.Uint32" + local expected_metadata = { + optional_component_capabilities = { + { + "main", + { + "videoCapture2", + "cameraViewportSettings", + "localMediaStorage", + "audioRecording", + "cameraPrivacyMode", + "imageControl", + "hdr", + "nightVision", + "mechanicalPanTiltZoom", + "videoStreamSettings", + "zoneManagement", + "webrtc", + "motionSensor", + "sounds", + } + }, + { + "statusLed", + { + "switch", + "mode" + } + }, + { + "speaker", + { + "audioMute", + "audioVolume" + } + }, + { + "microphone", + { + "audioMute", + "audioVolume" + } + }, + { + "doorbell", + { + "button" + } + } + }, + profile = "camera" + } + test.socket.matter:__queue_receive({ + mock_device.id, + clusters.CameraAvStreamManagement.attributes.AttributeList:build_test_report_data(mock_device, CAMERA_EP, { + uint32(clusters.CameraAvStreamManagement.attributes.StatusLightEnabled.ID), + uint32(clusters.CameraAvStreamManagement.attributes.StatusLightBrightness.ID) + }) + }) + test.socket.matter:__expect_send({mock_device.id, clusters.Switch.attributes.MultiPressMax:read(mock_device, DOORBELL_EP)}) + mock_device:expect_metadata_update(expected_metadata) + local updated_device_profile = t_utils.get_profile_definition( + "camera.yml", {enabled_optional_capabilities = expected_metadata.optional_component_capabilities} + ) + test.socket.device_lifecycle:__queue_receive(mock_device:generate_info_changed({ profile = updated_device_profile })) + test.socket.capability:__expect_send( + mock_device:generate_test_message("main", capabilities.webrtc.supportedFeatures( + {audio="sendrecv", bundle=true, order="audio/video", supportTrickleICE=true, turnSource="player", video="recvonly"} + )) + ) + test.socket.capability:__expect_send( + 
mock_device:generate_test_message("main", capabilities.mechanicalPanTiltZoom.supportedAttributes( + {"pan", "panRange", "tilt", "tiltRange", "zoom", "zoomRange", "presets", "maxPresets"} + )) + ) + test.socket.capability:__expect_send( + mock_device:generate_test_message("main", capabilities.zoneManagement.supportedFeatures( + {"triggerAugmentation", "perZoneSensitivity"} + )) + ) + test.socket.capability:__expect_send( + mock_device:generate_test_message("main", capabilities.localMediaStorage.supportedAttributes( + {"localVideoRecording"} + )) + ) + test.socket.capability:__expect_send( + mock_device:generate_test_message("main", capabilities.audioRecording.audioRecording("enabled")) + ) + test.socket.capability:__expect_send( + mock_device:generate_test_message("main", capabilities.videoStreamSettings.supportedFeatures( + {"liveStreaming", "clipRecording", "perStreamViewports", "watermark", "onScreenDisplay"} + )) + ) + test.socket.capability:__expect_send( + mock_device:generate_test_message("main", capabilities.cameraPrivacyMode.supportedAttributes( + {"softRecordingPrivacyMode", "softLivestreamPrivacyMode"} + )) + ) + test.socket.capability:__expect_send( + mock_device:generate_test_message("main", capabilities.cameraPrivacyMode.supportedCommands( + {"setSoftRecordingPrivacyMode", "setSoftLivestreamPrivacyMode"} + )) + ) + local additional_subscribed_attributes = { + clusters.CameraAvStreamManagement.attributes.HDRModeEnabled, + clusters.CameraAvStreamManagement.attributes.ImageRotation, + clusters.CameraAvStreamManagement.attributes.NightVision, + clusters.CameraAvStreamManagement.attributes.NightVisionIllum, + clusters.CameraAvStreamManagement.attributes.ImageFlipHorizontal, + clusters.CameraAvStreamManagement.attributes.ImageFlipVertical, + clusters.CameraAvStreamManagement.attributes.SoftRecordingPrivacyModeEnabled, + clusters.CameraAvStreamManagement.attributes.SoftLivestreamPrivacyModeEnabled, + clusters.CameraAvStreamManagement.attributes.HardPrivacyModeOn, + clusters.CameraAvStreamManagement.attributes.TwoWayTalkSupport, + clusters.CameraAvStreamManagement.attributes.SpeakerMuted, + clusters.CameraAvStreamManagement.attributes.MicrophoneMuted, + clusters.CameraAvStreamManagement.attributes.SpeakerVolumeLevel, + clusters.CameraAvStreamManagement.attributes.SpeakerMaxLevel, + clusters.CameraAvStreamManagement.attributes.SpeakerMinLevel, + clusters.CameraAvStreamManagement.attributes.MicrophoneVolumeLevel, + clusters.CameraAvStreamManagement.attributes.MicrophoneMaxLevel, + clusters.CameraAvStreamManagement.attributes.MicrophoneMinLevel, + clusters.CameraAvStreamManagement.attributes.StatusLightBrightness, + clusters.CameraAvStreamManagement.attributes.StatusLightEnabled, + clusters.CameraAvStreamManagement.attributes.RateDistortionTradeOffPoints, + clusters.CameraAvStreamManagement.attributes.LocalSnapshotRecordingEnabled, + clusters.CameraAvStreamManagement.attributes.LocalVideoRecordingEnabled, + clusters.CameraAvStreamManagement.attributes.MaxEncodedPixelRate, + clusters.CameraAvStreamManagement.attributes.VideoSensorParams, + clusters.CameraAvStreamManagement.attributes.AllocatedVideoStreams, + clusters.CameraAvStreamManagement.attributes.Viewport, + clusters.CameraAvStreamManagement.attributes.MinViewportResolution, + clusters.CameraAvStreamManagement.attributes.AttributeList, + clusters.CameraAvSettingsUserLevelManagement.attributes.MPTZPosition, + clusters.CameraAvSettingsUserLevelManagement.attributes.MPTZPresets, + 
clusters.CameraAvSettingsUserLevelManagement.attributes.MaxPresets, + clusters.CameraAvSettingsUserLevelManagement.attributes.ZoomMax, + clusters.CameraAvSettingsUserLevelManagement.attributes.PanMax, + clusters.CameraAvSettingsUserLevelManagement.attributes.PanMin, + clusters.CameraAvSettingsUserLevelManagement.attributes.TiltMax, + clusters.CameraAvSettingsUserLevelManagement.attributes.TiltMin, + clusters.Chime.attributes.InstalledChimeSounds, + clusters.Chime.attributes.SelectedChime, + clusters.ZoneManagement.attributes.MaxZones, + clusters.ZoneManagement.attributes.Zones, + clusters.ZoneManagement.attributes.Triggers, + clusters.ZoneManagement.attributes.SensitivityMax, + clusters.ZoneManagement.attributes.Sensitivity, + clusters.ZoneManagement.events.ZoneTriggered, + clusters.ZoneManagement.events.ZoneStopped, + clusters.OnOff.attributes.OnOff, + clusters.LevelControl.attributes.CurrentLevel, + clusters.LevelControl.attributes.MaxLevel, + clusters.LevelControl.attributes.MinLevel, + clusters.ColorControl.attributes.ColorTemperatureMireds, + clusters.ColorControl.attributes.ColorTempPhysicalMaxMireds, + clusters.ColorControl.attributes.ColorTempPhysicalMinMireds, + clusters.ColorControl.attributes.CurrentHue, + clusters.ColorControl.attributes.CurrentSaturation, + clusters.ColorControl.attributes.CurrentX, + clusters.ColorControl.attributes.CurrentY, + clusters.OccupancySensing.attributes.Occupancy, + clusters.Switch.server.events.InitialPress, + clusters.Switch.server.events.LongPress, + clusters.Switch.server.events.ShortRelease, + clusters.Switch.server.events.MultiPressComplete + } + for _, attr in ipairs(additional_subscribed_attributes) do + subscribe_request:merge(attr:subscribe(mock_device)) + end + test.socket.matter:__expect_send({mock_device.id, clusters.Switch.attributes.MultiPressMax:read(mock_device, DOORBELL_EP)}) + test.socket.capability:__expect_send(mock_device:generate_test_message("doorbell", capabilities.button.button.pushed({state_change = false}))) + test.socket.matter:__expect_send({mock_device.id, subscribe_request}) +end + +-- Matter Handler UTs + +test.register_coroutine_test( + "Reports mapping to EnabledState capability data type should generate appropriate events", + function() + update_device_profile() + test.wait_for_events() + local cluster_to_capability_map = { + {cluster = clusters.CameraAvStreamManagement.server.attributes.HDRModeEnabled, capability = capabilities.hdr.hdr}, + {cluster = clusters.CameraAvStreamManagement.server.attributes.ImageFlipHorizontal, capability = capabilities.imageControl.imageFlipHorizontal}, + {cluster = clusters.CameraAvStreamManagement.server.attributes.ImageFlipVertical, capability = capabilities.imageControl.imageFlipVertical}, + {cluster = clusters.CameraAvStreamManagement.server.attributes.SoftRecordingPrivacyModeEnabled, capability = capabilities.cameraPrivacyMode.softRecordingPrivacyMode}, + {cluster = clusters.CameraAvStreamManagement.server.attributes.SoftLivestreamPrivacyModeEnabled, capability = capabilities.cameraPrivacyMode.softLivestreamPrivacyMode}, + {cluster = clusters.CameraAvStreamManagement.server.attributes.HardPrivacyModeOn, capability = capabilities.cameraPrivacyMode.hardPrivacyMode}, + {cluster = clusters.CameraAvStreamManagement.server.attributes.LocalSnapshotRecordingEnabled, capability = capabilities.localMediaStorage.localSnapshotRecording}, + {cluster = clusters.CameraAvStreamManagement.server.attributes.LocalVideoRecordingEnabled, capability = 
capabilities.localMediaStorage.localVideoRecording} + } + for _, v in ipairs(cluster_to_capability_map) do + test.socket.matter:__queue_receive({ + mock_device.id, + v.cluster:build_test_report_data(mock_device, CAMERA_EP, true) + }) + test.socket.capability:__expect_send( + mock_device:generate_test_message("main", v.capability("enabled")) + ) + if v.capability == capabilities.imageControl.imageFlipHorizontal then + test.socket.capability:__expect_send( + mock_device:generate_test_message("main", capabilities.imageControl.supportedAttributes({"imageFlipHorizontal"})) + ) + elseif v.capability == capabilities.imageControl.imageFlipVertical then + test.socket.capability:__expect_send( + mock_device:generate_test_message("main", capabilities.imageControl.supportedAttributes({"imageFlipHorizontal", "imageFlipVertical"})) + ) + elseif v.capability == capabilities.cameraPrivacyMode.hardPrivacyMode then + test.socket.capability:__expect_send( + mock_device:generate_test_message("main", capabilities.cameraPrivacyMode.supportedAttributes({"softRecordingPrivacyMode", "softLivestreamPrivacyMode", "hardPrivacyMode"})) + ) + end + test.socket.matter:__queue_receive({ + mock_device.id, + v.cluster:build_test_report_data(mock_device, CAMERA_EP, false) + }) + test.socket.capability:__expect_send( + mock_device:generate_test_message("main", v.capability("disabled")) + ) + end + end +) + +test.register_coroutine_test( + "Night Vision reports should generate appropriate events", + function() + update_device_profile() + test.wait_for_events() + local cluster_to_capability_map = { + {cluster = clusters.CameraAvStreamManagement.server.attributes.NightVision, capability = capabilities.nightVision.nightVision}, + {cluster = clusters.CameraAvStreamManagement.server.attributes.NightVisionIllum, capability = capabilities.nightVision.illumination} + } + for _, v in ipairs(cluster_to_capability_map) do + test.socket.matter:__queue_receive({ + mock_device.id, + v.cluster:build_test_report_data(mock_device, CAMERA_EP, clusters.CameraAvStreamManagement.types.TriStateAutoEnum.OFF) + }) + test.socket.capability:__expect_send( + mock_device:generate_test_message("main", v.capability("off")) + ) + if v.capability == capabilities.nightVision.illumination then + test.socket.capability:__expect_send( + mock_device:generate_test_message("main", capabilities.nightVision.supportedAttributes({"illumination"})) + ) + end + test.socket.matter:__queue_receive({ + mock_device.id, + v.cluster:build_test_report_data(mock_device, CAMERA_EP, clusters.CameraAvStreamManagement.types.TriStateAutoEnum.ON) + }) + test.socket.capability:__expect_send( + mock_device:generate_test_message("main", v.capability("on")) + ) + test.socket.matter:__queue_receive({ + mock_device.id, + v.cluster:build_test_report_data(mock_device, CAMERA_EP, clusters.CameraAvStreamManagement.types.TriStateAutoEnum.AUTO) + }) + test.socket.capability:__expect_send( + mock_device:generate_test_message("main", v.capability("auto")) + ) + end + end +) + +test.register_coroutine_test( + "Image Rotation reports should generate appropriate events", + function() + local utils = require "st.utils" + update_device_profile() + test.wait_for_events() + local first_value = true + for angle = 0, 400, 50 do + test.socket.matter:__queue_receive({ + mock_device.id, + clusters.CameraAvStreamManagement.server.attributes.ImageRotation:build_test_report_data(mock_device, CAMERA_EP, angle) + }) + local clamped_angle = utils.clamp_value(angle, 0, 359) + test.socket.capability:__expect_send( + 
mock_device:generate_test_message("main", capabilities.imageControl.imageRotation(clamped_angle)) + ) + if first_value then + test.socket.capability:__expect_send( + mock_device:generate_test_message("main", capabilities.imageControl.supportedAttributes({"imageRotation"})) + ) + first_value = false + end + end + end +) + +test.register_coroutine_test( + "Two Way Talk Support reports should generate appropriate events", + function() + update_device_profile() + test.wait_for_events() + test.socket.matter:__queue_receive({ + mock_device.id, + clusters.CameraAvStreamManagement.server.attributes.TwoWayTalkSupport:build_test_report_data( + mock_device, CAMERA_EP, clusters.CameraAvStreamManagement.types.TwoWayTalkSupportTypeEnum.HALF_DUPLEX + ) + }) + test.socket.capability:__expect_send( + mock_device:generate_test_message("main", capabilities.webrtc.talkback(true)) + ) + test.socket.capability:__expect_send( + mock_device:generate_test_message("main", capabilities.webrtc.talkbackDuplex("halfDuplex")) + ) + test.socket.matter:__queue_receive({ + mock_device.id, + clusters.CameraAvStreamManagement.server.attributes.TwoWayTalkSupport:build_test_report_data( + mock_device, CAMERA_EP, clusters.CameraAvStreamManagement.types.TwoWayTalkSupportTypeEnum.FULL_DUPLEX + ) + }) + test.socket.capability:__expect_send( + mock_device:generate_test_message("main", capabilities.webrtc.talkback(true)) + ) + test.socket.capability:__expect_send( + mock_device:generate_test_message("main", capabilities.webrtc.talkbackDuplex("fullDuplex")) + ) + test.socket.matter:__queue_receive({ + mock_device.id, + clusters.CameraAvStreamManagement.server.attributes.TwoWayTalkSupport:build_test_report_data( + mock_device, CAMERA_EP, clusters.CameraAvStreamManagement.types.TwoWayTalkSupportTypeEnum.NOT_SUPPORTED + ) + }) + test.socket.capability:__expect_send( + mock_device:generate_test_message("main", capabilities.webrtc.talkback(false)) + ) + end +) + +test.register_coroutine_test( + "Muted reports should generate appropriate events", + function() + update_device_profile() + test.wait_for_events() + local cluster_to_component_map = { + {cluster = clusters.CameraAvStreamManagement.server.attributes.SpeakerMuted, component = "speaker"}, + {cluster = clusters.CameraAvStreamManagement.server.attributes.MicrophoneMuted, component = "microphone"} + } + for _, v in ipairs(cluster_to_component_map) do + test.socket.matter:__queue_receive({ + mock_device.id, + v.cluster:build_test_report_data(mock_device, CAMERA_EP, true) + }) + test.socket.capability:__expect_send( + mock_device:generate_test_message(v.component, capabilities.audioMute.mute("muted")) + ) + test.socket.matter:__queue_receive({ + mock_device.id, + v.cluster:build_test_report_data(mock_device, CAMERA_EP, false) + }) + test.socket.capability:__expect_send( + mock_device:generate_test_message(v.component, capabilities.audioMute.mute("unmuted")) + ) + end + end +) + +test.register_coroutine_test( + "Volume Level reports should generate appropriate events", + function() + update_device_profile() + test.wait_for_events() + local max_vol = 200 + local min_vol = 0 + test.socket.matter:__queue_receive({ + mock_device.id, + clusters.CameraAvStreamManagement.server.attributes.SpeakerMaxLevel:build_test_report_data(mock_device, CAMERA_EP, max_vol) + }) + test.socket.matter:__queue_receive({ + mock_device.id, + clusters.CameraAvStreamManagement.server.attributes.SpeakerMinLevel:build_test_report_data(mock_device, CAMERA_EP, min_vol) + }) + test.socket.matter:__queue_receive({ + 
mock_device.id, + clusters.CameraAvStreamManagement.server.attributes.MicrophoneMaxLevel:build_test_report_data(mock_device, CAMERA_EP, max_vol) + }) + test.socket.matter:__queue_receive({ + mock_device.id, + clusters.CameraAvStreamManagement.server.attributes.MicrophoneMinLevel:build_test_report_data(mock_device, CAMERA_EP, min_vol) + }) + test.wait_for_events() + local cluster_to_component_map = { + { cluster = clusters.CameraAvStreamManagement.server.attributes.SpeakerVolumeLevel, component = "speaker"}, + { cluster = clusters.CameraAvStreamManagement.server.attributes.MicrophoneVolumeLevel, component = "microphone"} + } + for _, v in ipairs(cluster_to_component_map) do + test.socket.matter:__queue_receive({ + mock_device.id, + v.cluster:build_test_report_data(mock_device, CAMERA_EP, 130) + }) + test.socket.capability:__expect_send( + mock_device:generate_test_message(v.component, capabilities.audioVolume.volume(65)) + ) + test.socket.matter:__queue_receive({ + mock_device.id, + v.cluster:build_test_report_data(mock_device, CAMERA_EP, 64) + }) + test.socket.capability:__expect_send( + mock_device:generate_test_message(v.component, capabilities.audioVolume.volume(32)) + ) + end + end +) + +test.register_coroutine_test( + "Status Light Enabled reports should generate appropriate events", + function() + update_device_profile() + test.wait_for_events() + test.socket.matter:__queue_receive({ + mock_device.id, + clusters.CameraAvStreamManagement.attributes.StatusLightEnabled:build_test_report_data(mock_device, CAMERA_EP, true) + }) + test.socket.capability:__expect_send( + mock_device:generate_test_message("statusLed", capabilities.switch.switch.on()) + ) + test.socket.matter:__queue_receive({ + mock_device.id, + clusters.CameraAvStreamManagement.attributes.StatusLightEnabled:build_test_report_data(mock_device, CAMERA_EP, false) + }) + test.socket.capability:__expect_send( + mock_device:generate_test_message("statusLed", capabilities.switch.switch.off()) + ) + end +) + +test.register_coroutine_test( + "Status Light Brightness reports should generate appropriate events", + function() + update_device_profile() + test.wait_for_events() + test.socket.matter:__queue_receive({ + mock_device.id, + clusters.CameraAvStreamManagement.attributes.StatusLightBrightness:build_test_report_data( + mock_device, CAMERA_EP, clusters.Global.types.ThreeLevelAutoEnum.LOW) + }) + test.socket.capability:__expect_send( + mock_device:generate_test_message("statusLed", capabilities.mode.supportedModes( + {"low", "medium", "high", "auto"}, {visibility = {displayed = false}}) + ) + ) + test.socket.capability:__expect_send( + mock_device:generate_test_message("statusLed", capabilities.mode.supportedArguments( + {"low", "medium", "high", "auto"}, {visibility = {displayed = false}}) + ) + ) + test.socket.capability:__expect_send( + mock_device:generate_test_message("statusLed", capabilities.mode.mode("low")) + ) + test.socket.matter:__queue_receive({ + mock_device.id, + clusters.CameraAvStreamManagement.attributes.StatusLightBrightness:build_test_report_data( + mock_device, CAMERA_EP, clusters.Global.types.ThreeLevelAutoEnum.MEDIUM) + }) + test.socket.capability:__expect_send( + mock_device:generate_test_message("statusLed", capabilities.mode.mode("medium")) + ) + test.socket.matter:__queue_receive({ + mock_device.id, + clusters.CameraAvStreamManagement.attributes.StatusLightBrightness:build_test_report_data( + mock_device, CAMERA_EP, clusters.Global.types.ThreeLevelAutoEnum.HIGH) + }) + 
test.socket.capability:__expect_send( + mock_device:generate_test_message("statusLed", capabilities.mode.mode("high")) + ) + test.socket.matter:__queue_receive({ + mock_device.id, + clusters.CameraAvStreamManagement.attributes.StatusLightBrightness:build_test_report_data( + mock_device, CAMERA_EP, clusters.Global.types.ThreeLevelAutoEnum.AUTO) + }) + test.socket.capability:__expect_send( + mock_device:generate_test_message("statusLed", capabilities.mode.mode("auto")) + ) + end +) + +local function receive_rate_distortion_trade_off_points() + test.socket.matter:__queue_receive({ + mock_device.id, + clusters.CameraAvStreamManagement.attributes.RateDistortionTradeOffPoints:build_test_report_data( + mock_device, CAMERA_EP, { + clusters.CameraAvStreamManagement.types.RateDistortionTradeOffPointsStruct({ + codec = clusters.CameraAvStreamManagement.types.VideoCodecEnum.H264, + resolution = clusters.CameraAvStreamManagement.types.VideoResolutionStruct({ + width = 1920, + height = 1080 + }), + min_bit_rate = 5000000 + }), + clusters.CameraAvStreamManagement.types.RateDistortionTradeOffPointsStruct({ + codec = clusters.CameraAvStreamManagement.types.VideoCodecEnum.HEVC, + resolution = clusters.CameraAvStreamManagement.types.VideoResolutionStruct({ + width = 3840, + height = 2160 + }), + min_bit_rate = 20000000 + }) + } + ) + }) +end + +local function receive_max_encoded_pixel_rate() + test.socket.matter:__queue_receive({ + mock_device.id, + clusters.CameraAvStreamManagement.attributes.MaxEncodedPixelRate:build_test_report_data( + mock_device, CAMERA_EP, 124416000) -- 1080p @ 60 fps or 4K @ 15 fps + }) +end + +local function receive_video_sensor_params() + test.socket.matter:__queue_receive({ + mock_device.id, + clusters.CameraAvStreamManagement.attributes.VideoSensorParams:build_test_report_data( + mock_device, CAMERA_EP, clusters.CameraAvStreamManagement.types.VideoSensorParamsStruct({ + sensor_width = 7360, + sensor_height = 4912, + max_fps = 60, + max_hdrfps = 30 + }) + ) + }) +end + +local function emit_video_sensor_parameters() + test.socket.capability:__expect_send( + mock_device:generate_test_message("main", capabilities.cameraViewportSettings.videoSensorParameters({ + width = 7360, + height = 4912, + maxFPS = 60 + })) + ) +end + +local function emit_supported_resolutions() + test.socket.capability:__expect_send( + mock_device:generate_test_message("main", capabilities.videoStreamSettings.supportedResolutions({ + { + width = 1920, + height = 1080, + fps = 60 + }, + { + width = 3840, + height = 2160, + fps = 15 + } + })) + ) +end + +-- Test receiving RateDistortionTradeOffPoints, MaxEncodedPixelRate, and VideoSensorParams in various orders +-- to ensure that cameraViewportSettings and videoStreamSettings capabilities are updated as expected. Note that +-- cameraViewportSettings.videoSensorParameters is set in the VideoSensorParams handler and +-- videoStreamSettings.supportedResolutions is emitted after all three attributes are received. 
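+-- For reference, the expected fps values in emit_supported_resolutions appear to be consistent with dividing
+-- MaxEncodedPixelRate by each trade-off point's pixel count, capped at VideoSensorParams max_fps:
+-- 124416000 / (1920*1080) = 60 fps and 124416000 / (3840*2160) = 15 fps (max_fps is 60, so neither value is capped).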
+ +test.register_coroutine_test( + "Rate Distortion Trade Off Points, MaxEncodedPixelRate, VideoSensorParams reports should generate appropriate events", + function() + update_device_profile() + test.wait_for_events() + receive_rate_distortion_trade_off_points() + receive_max_encoded_pixel_rate() + receive_video_sensor_params() + emit_video_sensor_parameters() + emit_supported_resolutions() + end +) + +test.register_coroutine_test( + "Rate Distortion Trade Off Points, VideoSensorParams, MaxEncodedPixelRate reports should generate appropriate events", + function() + update_device_profile() + test.wait_for_events() + receive_rate_distortion_trade_off_points() + receive_video_sensor_params() + emit_video_sensor_parameters() + receive_max_encoded_pixel_rate() + emit_supported_resolutions() + end +) + +test.register_coroutine_test( + "MaxEncodedPixelRate, VideoSensorParams, Rate Distortion Trade Off Points reports should generate appropriate events", + function() + update_device_profile() + test.wait_for_events() + receive_max_encoded_pixel_rate() + receive_video_sensor_params() + emit_video_sensor_parameters() + receive_rate_distortion_trade_off_points() + emit_supported_resolutions() + end +) + +test.register_coroutine_test( + "PTZ Position reports should generate appropriate events", + function() + update_device_profile() + test.wait_for_events() + test.socket.matter:__queue_receive({ + mock_device.id, + clusters.CameraAvSettingsUserLevelManagement.attributes.PanMax:build_test_report_data(mock_device, CAMERA_EP, 150) + }) + test.socket.matter:__queue_receive({ + mock_device.id, + clusters.CameraAvSettingsUserLevelManagement.attributes.PanMin:build_test_report_data(mock_device, CAMERA_EP, -150) + }) + test.socket.capability:__expect_send( + mock_device:generate_test_message("main", capabilities.mechanicalPanTiltZoom.panRange({value = {minimum = -150, maximum = 150}})) + ) + test.socket.matter:__queue_receive({ + mock_device.id, + clusters.CameraAvSettingsUserLevelManagement.attributes.TiltMax:build_test_report_data(mock_device, CAMERA_EP, 80) + }) + test.socket.matter:__queue_receive({ + mock_device.id, + clusters.CameraAvSettingsUserLevelManagement.attributes.TiltMin:build_test_report_data(mock_device, CAMERA_EP, -80) + }) + test.socket.capability:__expect_send( + mock_device:generate_test_message("main", capabilities.mechanicalPanTiltZoom.tiltRange({value = {minimum = -80, maximum = 80}})) + ) + test.socket.matter:__queue_receive({ + mock_device.id, + clusters.CameraAvSettingsUserLevelManagement.attributes.ZoomMax:build_test_report_data(mock_device, CAMERA_EP, 70) + }) + test.socket.capability:__expect_send( + mock_device:generate_test_message("main", capabilities.mechanicalPanTiltZoom.zoomRange({value = {minimum = 1, maximum = 70}})) + ) + test.socket.matter:__queue_receive({ + mock_device.id, + clusters.CameraAvSettingsUserLevelManagement.attributes.MPTZPosition:build_test_report_data( + mock_device, CAMERA_EP, {pan = 10, tilt = 20, zoom = 30}) + }) + test.socket.capability:__set_channel_ordering("relaxed") + test.socket.capability:__expect_send( + mock_device:generate_test_message("main", capabilities.mechanicalPanTiltZoom.pan(10)) + ) + test.socket.capability:__expect_send( + mock_device:generate_test_message("main", capabilities.mechanicalPanTiltZoom.tilt(20)) + ) + test.socket.capability:__expect_send( + mock_device:generate_test_message("main", capabilities.mechanicalPanTiltZoom.zoom(30)) + ) + end +) + +test.register_coroutine_test( + "PTZ Presets reports should generate appropriate 
events", + function() + update_device_profile() + test.wait_for_events() + test.socket.matter:__queue_receive({ + mock_device.id, + clusters.CameraAvSettingsUserLevelManagement.attributes.MPTZPresets:build_test_report_data( + mock_device, CAMERA_EP, {{preset_id = 1, name = "Preset 1", settings = {pan = 10, tilt = 20, zoom = 30}}, + {preset_id = 2, name = "Preset 2", settings = {pan = -55, tilt = 80, zoom = 60}}} + ) + }) + test.socket.capability:__expect_send( + mock_device:generate_test_message("main", capabilities.mechanicalPanTiltZoom.presets({ + { id = 1, label = "Preset 1", pan = 10, tilt = 20, zoom = 30}, + { id = 2, label = "Preset 2", pan = -55, tilt = 80, zoom = 60} + })) + ) + end +) + +test.register_coroutine_test( + "Max Presets reports should generate appropriate events", + function() + update_device_profile() + test.wait_for_events() + test.socket.matter:__queue_receive({ + mock_device.id, + clusters.CameraAvSettingsUserLevelManagement.attributes.MaxPresets:build_test_report_data(mock_device, CAMERA_EP, 10) + }) + test.socket.capability:__expect_send( + mock_device:generate_test_message("main", capabilities.mechanicalPanTiltZoom.maxPresets(10)) + ) + end +) + +test.register_coroutine_test( + "Max Zones reports should generate appropriate events", + function() + update_device_profile() + test.wait_for_events() + test.socket.matter:__queue_receive({ + mock_device.id, + clusters.ZoneManagement.attributes.MaxZones:build_test_report_data(mock_device, CAMERA_EP, 10) + }) + test.socket.capability:__expect_send( + mock_device:generate_test_message("main", capabilities.zoneManagement.maxZones(10)) + ) + end +) + +test.register_coroutine_test( + "Zones reports should generate appropriate events", + function() + update_device_profile() + test.wait_for_events() + test.socket.matter:__queue_receive({ + mock_device.id, + clusters.ZoneManagement.attributes.Zones:build_test_report_data( + mock_device, CAMERA_EP, { + clusters.ZoneManagement.types.ZoneInformationStruct({ + zone_id = 1, + zone_type = clusters.ZoneManagement.types.ZoneTypeEnum.TWODCART_ZONE, + zone_source = clusters.ZoneManagement.types.ZoneSourceEnum.MFG, + two_d_cartesian_zone = clusters.ZoneManagement.types.TwoDCartesianZoneStruct({ + name = "Zone 1", + use = clusters.ZoneManagement.types.ZoneUseEnum.MOTION, + vertices = { + clusters.ZoneManagement.types.TwoDCartesianVertexStruct({ x = 0, y = 0 }), + clusters.ZoneManagement.types.TwoDCartesianVertexStruct({ x = 1920, y = 1080 }) + }, + color = "#FFFFFF" + }) + }) + } + ) + }) + test.socket.capability:__expect_send( + mock_device:generate_test_message("main", capabilities.zoneManagement.zones({ + { + id = 1, + name = "Zone 1", + type = "2DCartesian", + polygonVertices = { + {vertex = {x = 0, y = 0}}, + {vertex = {x = 1920, y = 1080}} + }, + source = "manufacturer", + use = "motion", + color = "#FFFFFF" + } + })) + ) + end +) + +test.register_coroutine_test( + "Triggers reports should generate appropriate events", + function() + update_device_profile() + test.wait_for_events() + test.socket.matter:__queue_receive({ + mock_device.id, + clusters.ZoneManagement.attributes.Triggers:build_test_report_data( + mock_device, CAMERA_EP, { + clusters.ZoneManagement.types.ZoneTriggerControlStruct({ + zone_id = 1, + initial_duration = 8, + augmentation_duration = 4, + max_duration = 20, + blind_duration = 3, + sensitivity = 4 + }) + } + ) + }) + test.socket.capability:__expect_send( + mock_device:generate_test_message("main", capabilities.zoneManagement.triggers({ + { + zoneId = 1, + 
initialDuration = 8, + augmentationDuration = 4, + maxDuration = 20, + blindDuration = 3, + sensitivity = 4 + } + })) + ) + end +) + +test.register_coroutine_test( + "Sensitivity reports should generate appropriate events", + function() + update_device_profile() + test.wait_for_events() + test.socket.matter:__queue_receive({ + mock_device.id, + clusters.ZoneManagement.attributes.SensitivityMax:build_test_report_data(mock_device, CAMERA_EP, 7) + }) + test.socket.capability:__expect_send( + mock_device:generate_test_message("main", capabilities.zoneManagement.sensitivityRange({ minimum = 1, maximum = 7}, + {visibility = {displayed = false}})) + ) + test.socket.matter:__queue_receive({ + mock_device.id, + clusters.ZoneManagement.attributes.Sensitivity:build_test_report_data(mock_device, CAMERA_EP, 5) + }) + test.socket.capability:__expect_send( + mock_device:generate_test_message("main", capabilities.zoneManagement.sensitivity(5, {visibility = {displayed = false}})) + ) + end +) + +test.register_coroutine_test( + "Chime reports should generate appropriate events", + function() + update_device_profile() + test.wait_for_events() + test.socket.matter:__queue_receive({ + mock_device.id, + clusters.Chime.attributes.InstalledChimeSounds:build_test_report_data(mock_device, CAMERA_EP, { + clusters.Chime.types.ChimeSoundStruct({chime_id = 1, name = "Sound 1"}), + clusters.Chime.types.ChimeSoundStruct({chime_id = 2, name = "Sound 2"}) + }) + }) + test.socket.capability:__expect_send( + mock_device:generate_test_message("main", capabilities.sounds.supportedSounds({ + {id = 1, label = "Sound 1"}, + {id = 2, label = "Sound 2"}, + }, {visibility = {displayed = false}})) + ) + test.socket.matter:__queue_receive({ + mock_device.id, + clusters.Chime.attributes.SelectedChime:build_test_report_data(mock_device, CAMERA_EP, 2) + }) + test.socket.capability:__expect_send(mock_device:generate_test_message("main", capabilities.sounds.selectedSound(2))) + end +) + +-- Event Handler UTs + +test.register_coroutine_test( + "Zone events should generate appropriate events", + function() + update_device_profile() + test.wait_for_events() + test.socket.matter:__queue_receive({ + mock_device.id, + clusters.ZoneManagement.events.ZoneTriggered:build_test_event_report(mock_device, CAMERA_EP, { + zone = 2, + reason = clusters.ZoneManagement.types.ZoneEventTriggeredReasonEnum.MOTION + }) + }) + test.socket.capability:__expect_send( + mock_device:generate_test_message("main", capabilities.zoneManagement.triggeredZones({{zoneId = 2}})) + ) + test.socket.matter:__queue_receive({ + mock_device.id, + clusters.ZoneManagement.events.ZoneTriggered:build_test_event_report(mock_device, CAMERA_EP, { + zone = 3, + reason = clusters.ZoneManagement.types.ZoneEventTriggeredReasonEnum.MOTION + }) + }) + test.socket.capability:__expect_send( + mock_device:generate_test_message("main", capabilities.zoneManagement.triggeredZones({{zoneId = 2}, {zoneId = 3}})) + ) + test.socket.matter:__queue_receive({ + mock_device.id, + clusters.ZoneManagement.events.ZoneStopped:build_test_event_report(mock_device, CAMERA_EP, { + zone = 2, + reason = clusters.ZoneManagement.types.ZoneEventStoppedReasonEnum.ACTION_STOPPED + }) + }) + test.socket.capability:__expect_send( + mock_device:generate_test_message("main", capabilities.zoneManagement.triggeredZones({{zoneId = 3}})) + ) + end +) + +test.register_coroutine_test( + "Button events should generate appropriate events", + function() + update_device_profile() + test.wait_for_events() + 
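+    -- An InitialPress event alone is not expected to emit a capability event here; the subsequent
+    -- MultiPressComplete event with total_number_of_presses_counted = 2 maps to button.double.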
test.socket.matter:__queue_receive({ + mock_device.id, + clusters.Switch.server.events.InitialPress:build_test_event_report(mock_device, DOORBELL_EP, {new_position = 1}) + }) + test.socket.matter:__queue_receive({ + mock_device.id, + clusters.Switch.server.events.MultiPressComplete:build_test_event_report(mock_device, DOORBELL_EP, { + new_position = 1, + total_number_of_presses_counted = 2, + previous_position = 0 + }) + }) + test.socket.capability:__expect_send( + mock_device:generate_test_message("doorbell", capabilities.button.button.double({state_change = true})) + ) + end +) + +-- Capability Handler UTs + +test.register_coroutine_test( + "Set night vision commands should send the appropriate commands", + function() + update_device_profile() + test.wait_for_events() + local command_to_attribute_map = { + ["setNightVision"] = clusters.CameraAvStreamManagement.attributes.NightVision, + ["setIllumination"] = clusters.CameraAvStreamManagement.attributes.NightVisionIllum + } + for cmd, attr in pairs(command_to_attribute_map) do + test.socket.capability:__queue_receive({ + mock_device.id, + { capability = "nightVision", component = "main", command = cmd, args = { "off" } }, + }) + test.socket.matter:__expect_send({ + mock_device.id, attr:write(mock_device, CAMERA_EP, clusters.CameraAvStreamManagement.types.TriStateAutoEnum.OFF) + }) + test.socket.capability:__queue_receive({ + mock_device.id, + { capability = "nightVision", component = "main", command = cmd, args = { "on" } }, + }) + test.socket.matter:__expect_send({ + mock_device.id, attr:write(mock_device, CAMERA_EP, clusters.CameraAvStreamManagement.types.TriStateAutoEnum.ON) + }) + test.socket.capability:__queue_receive({ + mock_device.id, + { capability = "nightVision", component = "main", command = cmd, args = { "auto" } }, + }) + test.socket.matter:__expect_send({ + mock_device.id, attr:write(mock_device, CAMERA_EP, clusters.CameraAvStreamManagement.types.TriStateAutoEnum.AUTO) + }) + end + end +) + +test.register_coroutine_test( + "Set enabled commands should send the appropriate commands", + function() + update_device_profile() + test.wait_for_events() + local command_to_attribute_map = { + ["setHdr"] = { capability = "hdr", attr = clusters.CameraAvStreamManagement.attributes.HDRModeEnabled}, + ["setImageFlipHorizontal"] = { capability = "imageControl", attr = clusters.CameraAvStreamManagement.attributes.ImageFlipHorizontal}, + ["setImageFlipVertical"] = { capability = "imageControl", attr = clusters.CameraAvStreamManagement.attributes.ImageFlipVertical}, + ["setSoftLivestreamPrivacyMode"] = { capability = "cameraPrivacyMode", attr = clusters.CameraAvStreamManagement.attributes.SoftLivestreamPrivacyModeEnabled}, + ["setSoftRecordingPrivacyMode"] = { capability = "cameraPrivacyMode", attr = clusters.CameraAvStreamManagement.attributes.SoftRecordingPrivacyModeEnabled}, + ["setLocalSnapshotRecording"] = { capability = "localMediaStorage", attr = clusters.CameraAvStreamManagement.attributes.LocalSnapshotRecordingEnabled}, + ["setLocalVideoRecording"] = { capability = "localMediaStorage", attr = clusters.CameraAvStreamManagement.attributes.LocalVideoRecordingEnabled} + } + for i, v in pairs(command_to_attribute_map) do + test.socket.capability:__queue_receive({ + mock_device.id, + { capability = v.capability, component = "main", command = i, args = { "enabled" } }, + }) + test.socket.matter:__expect_send({ + mock_device.id, v.attr:write(mock_device, CAMERA_EP, true) + }) + test.socket.capability:__queue_receive({ + mock_device.id, + { 
capability = v.capability, component = "main", command = i, args = { "disabled" } }, + }) + test.socket.matter:__expect_send({ + mock_device.id, v.attr:write(mock_device, CAMERA_EP, false) + }) + end + end +) + +test.register_coroutine_test( + "Set image rotation command should send the appropriate commands", + function() + update_device_profile() + test.wait_for_events() + test.socket.capability:__queue_receive({ + mock_device.id, + { capability = "imageControl", component = "main", command = "setImageRotation", args = { 10 } }, + }) + test.socket.matter:__expect_send({ + mock_device.id, clusters.CameraAvStreamManagement.attributes.ImageRotation:write(mock_device, CAMERA_EP, 10) + }) + test.socket.capability:__queue_receive({ + mock_device.id, + { capability = "imageControl", component = "main", command = "setImageRotation", args = { 257 } }, + }) + test.socket.matter:__expect_send({ + mock_device.id, clusters.CameraAvStreamManagement.attributes.ImageRotation:write(mock_device, CAMERA_EP, 257) + }) + end +) + +test.register_coroutine_test( + "Set mute commands should send the appropriate commands", + function() + update_device_profile() + test.wait_for_events() + test.socket.capability:__queue_receive({ + mock_device.id, + { capability = "audioMute", component = "speaker", command = "setMute", args = { "muted" } }, + }) + test.socket.matter:__expect_send({ + mock_device.id, clusters.CameraAvStreamManagement.attributes.SpeakerMuted:write(mock_device, CAMERA_EP, true) + }) + test.socket.capability:__queue_receive({ + mock_device.id, + { capability = "audioMute", component = "speaker", command = "setMute", args = { "unmuted" } }, + }) + test.socket.matter:__expect_send({ + mock_device.id, clusters.CameraAvStreamManagement.attributes.SpeakerMuted:write(mock_device, CAMERA_EP, false) + }) + test.socket.capability:__queue_receive({ + mock_device.id, + { capability = "audioMute", component = "speaker", command = "mute", args = { } }, + }) + test.socket.matter:__expect_send({ + mock_device.id, clusters.CameraAvStreamManagement.attributes.SpeakerMuted:write(mock_device, CAMERA_EP, true) + }) + test.socket.capability:__queue_receive({ + mock_device.id, + { capability = "audioMute", component = "speaker", command = "unmute", args = { } }, + }) + test.socket.matter:__expect_send({ + mock_device.id, clusters.CameraAvStreamManagement.attributes.SpeakerMuted:write(mock_device, CAMERA_EP, false) + }) + test.socket.capability:__queue_receive({ + mock_device.id, + { capability = "audioMute", component = "microphone", command = "setMute", args = { "muted" } }, + }) + test.socket.matter:__expect_send({ + mock_device.id, clusters.CameraAvStreamManagement.attributes.MicrophoneMuted:write(mock_device, CAMERA_EP, true) + }) + test.socket.capability:__queue_receive({ + mock_device.id, + { capability = "audioMute", component = "microphone", command = "setMute", args = { "unmuted" } }, + }) + test.socket.matter:__expect_send({ + mock_device.id, clusters.CameraAvStreamManagement.attributes.MicrophoneMuted:write(mock_device, CAMERA_EP, false) + }) + test.socket.capability:__queue_receive({ + mock_device.id, + { capability = "audioMute", component = "microphone", command = "mute", args = { } }, + }) + test.socket.matter:__expect_send({ + mock_device.id, clusters.CameraAvStreamManagement.attributes.MicrophoneMuted:write(mock_device, CAMERA_EP, true) + }) + test.socket.capability:__queue_receive({ + mock_device.id, + { capability = "audioMute", component = "microphone", command = "unmute", args = { } }, + }) + 
test.socket.matter:__expect_send({ + mock_device.id, clusters.CameraAvStreamManagement.attributes.MicrophoneMuted:write(mock_device, CAMERA_EP, false) + }) + end +) + +test.register_coroutine_test( + "Set Volume command should send the appropriate commands", + function() + update_device_profile() + test.wait_for_events() + local max_vol = 200 + local min_vol = 5 + test.socket.matter:__queue_receive({ + mock_device.id, + clusters.CameraAvStreamManagement.server.attributes.SpeakerMaxLevel:build_test_report_data(mock_device, CAMERA_EP, max_vol) + }) + test.socket.matter:__queue_receive({ + mock_device.id, + clusters.CameraAvStreamManagement.server.attributes.SpeakerMinLevel:build_test_report_data(mock_device, CAMERA_EP, min_vol) + }) + test.socket.matter:__queue_receive({ + mock_device.id, + clusters.CameraAvStreamManagement.server.attributes.MicrophoneMaxLevel:build_test_report_data(mock_device, CAMERA_EP, max_vol) + }) + test.socket.matter:__queue_receive({ + mock_device.id, + clusters.CameraAvStreamManagement.server.attributes.MicrophoneMinLevel:build_test_report_data(mock_device, CAMERA_EP, min_vol) + }) + test.wait_for_events() + test.socket.capability:__queue_receive({ + mock_device.id, + { capability = "audioVolume", component = "speaker", command = "setVolume", args = { 0 } }, + }) + test.socket.matter:__expect_send({ + mock_device.id, clusters.CameraAvStreamManagement.attributes.SpeakerVolumeLevel:write(mock_device, CAMERA_EP, 5) + }) + test.socket.capability:__queue_receive({ + mock_device.id, + { capability = "audioVolume", component = "speaker", command = "setVolume", args = { 35 } }, + }) + test.socket.matter:__expect_send({ + mock_device.id, clusters.CameraAvStreamManagement.attributes.SpeakerVolumeLevel:write(mock_device, CAMERA_EP, 73) + }) + test.socket.capability:__queue_receive({ + mock_device.id, + { capability = "audioVolume", component = "microphone", command = "setVolume", args = { 77 } }, + }) + test.socket.matter:__expect_send({ + mock_device.id, clusters.CameraAvStreamManagement.attributes.MicrophoneVolumeLevel:write(mock_device, CAMERA_EP, 155) + }) + test.socket.capability:__queue_receive({ + mock_device.id, + { capability = "audioVolume", component = "microphone", command = "setVolume", args = { 100 } }, + }) + test.socket.matter:__expect_send({ + mock_device.id, clusters.CameraAvStreamManagement.attributes.MicrophoneVolumeLevel:write(mock_device, CAMERA_EP, 200) + }) + + ---- test volumeUp command + test.socket.matter:__queue_receive({ + mock_device.id, + clusters.CameraAvStreamManagement.attributes.SpeakerVolumeLevel:build_test_report_data(mock_device, CAMERA_EP, 103) + }) + test.socket.capability:__expect_send( + mock_device:generate_test_message("speaker", capabilities.audioVolume.volume(50)) + ) + test.wait_for_events() + test.socket.capability:__queue_receive({ + mock_device.id, + { capability = "audioVolume", component = "speaker", command = "volumeUp", args = { } }, + }) + test.socket.matter:__expect_send({ + mock_device.id, clusters.CameraAvStreamManagement.attributes.SpeakerVolumeLevel:write(mock_device, CAMERA_EP, 104) + }) + test.wait_for_events() + test.socket.matter:__queue_receive({ + mock_device.id, + clusters.CameraAvStreamManagement.attributes.SpeakerVolumeLevel:build_test_report_data(mock_device, CAMERA_EP, 104) + }) + test.socket.capability:__expect_send( + mock_device:generate_test_message("speaker", capabilities.audioVolume.volume(51)) + ) + + -- test volumeDown command + test.socket.matter:__queue_receive({ + mock_device.id, + 
clusters.CameraAvStreamManagement.attributes.MicrophoneVolumeLevel:build_test_report_data(mock_device, CAMERA_EP, 200)
+ })
+ test.socket.capability:__expect_send(
+ mock_device:generate_test_message("microphone", capabilities.audioVolume.volume(100))
+ )
+ test.wait_for_events()
+ test.socket.capability:__queue_receive({
+ mock_device.id,
+ { capability = "audioVolume", component = "microphone", command = "volumeDown", args = { } },
+ })
+ test.socket.matter:__expect_send({
+ mock_device.id, clusters.CameraAvStreamManagement.attributes.MicrophoneVolumeLevel:write(mock_device, CAMERA_EP, 198)
+ })
+ test.wait_for_events()
+ test.socket.matter:__queue_receive({
+ mock_device.id,
+ clusters.CameraAvStreamManagement.attributes.MicrophoneVolumeLevel:build_test_report_data(mock_device, CAMERA_EP, 198)
+ })
+ test.socket.capability:__expect_send(
+ mock_device:generate_test_message("microphone", capabilities.audioVolume.volume(99))
+ )
+ end
+)
+
+test.register_coroutine_test(
+ "Set Mode command should send the appropriate commands",
+ function()
+ update_device_profile()
+ test.wait_for_events()
+ local mode_to_enum_map = {
+ ["low"] = clusters.Global.types.ThreeLevelAutoEnum.LOW,
+ ["medium"] = clusters.Global.types.ThreeLevelAutoEnum.MEDIUM,
+ ["high"] = clusters.Global.types.ThreeLevelAutoEnum.HIGH,
+ ["auto"] = clusters.Global.types.ThreeLevelAutoEnum.AUTO
+ }
+ for i, v in pairs(mode_to_enum_map) do
+ test.socket.capability:__queue_receive({
+ mock_device.id,
+ { capability = "mode", component = "statusLed", command = "setMode", args = { i } },
+ })
+ test.socket.matter:__expect_send({
+ mock_device.id, clusters.CameraAvStreamManagement.attributes.StatusLightBrightness:write(mock_device, CAMERA_EP, v)
+ })
+ end
+ end
+)
+
+test.register_coroutine_test(
+ "Set Status LED commands should send the appropriate commands",
+ function()
+ update_device_profile()
+ test.wait_for_events()
+ test.socket.capability:__queue_receive({
+ mock_device.id,
+ { capability = "switch", component = "statusLed", command = "on", args = { } },
+ })
+ test.socket.matter:__expect_send({
+ mock_device.id, clusters.CameraAvStreamManagement.attributes.StatusLightEnabled:write(mock_device, CAMERA_EP, true)
+ })
+ test.socket.capability:__queue_receive({
+ mock_device.id,
+ { capability = "switch", component = "statusLed", command = "off", args = { } },
+ })
+ test.socket.matter:__expect_send({
+ mock_device.id, clusters.CameraAvStreamManagement.attributes.StatusLightEnabled:write(mock_device, CAMERA_EP, false)
+ })
+ end
+)
+
+test.register_coroutine_test(
+ "Set Relative PTZ commands should send the appropriate commands",
+ function()
+ update_device_profile()
+ test.wait_for_events()
+ test.socket.capability:__queue_receive({
+ mock_device.id,
+ { capability = "mechanicalPanTiltZoom", component = "main", command = "panRelative", args = { 10 } },
+ })
+ test.socket.matter:__expect_send({
+ mock_device.id, clusters.CameraAvSettingsUserLevelManagement.server.commands.MPTZRelativeMove(mock_device, CAMERA_EP, 10, 0, 0)
+ })
+ test.socket.capability:__queue_receive({
+ mock_device.id,
+ { capability = "mechanicalPanTiltZoom", component = "main", command = "tiltRelative", args = { -35 } },
+ })
+ test.socket.matter:__expect_send({
+ mock_device.id, clusters.CameraAvSettingsUserLevelManagement.server.commands.MPTZRelativeMove(mock_device, CAMERA_EP, 0, -35, 0)
+ })
+ test.socket.capability:__queue_receive({
+ mock_device.id,
+ { capability = "mechanicalPanTiltZoom", component = "main", command = "zoomRelative", args = { 80 }
}, + }) + test.socket.matter:__expect_send({ + mock_device.id, clusters.CameraAvSettingsUserLevelManagement.server.commands.MPTZRelativeMove(mock_device, CAMERA_EP, 0, 0, 80) + }) + end +) + +test.register_coroutine_test( + "Set PTZ commands should send the appropriate commands", + function() + update_device_profile() + test.wait_for_events() + test.socket.capability:__queue_receive({ + mock_device.id, + { capability = "mechanicalPanTiltZoom", component = "main", command = "setPanTiltZoom", args = { 10, 20, 30 } }, + }) + test.socket.matter:__expect_send({ + mock_device.id, clusters.CameraAvSettingsUserLevelManagement.server.commands.MPTZSetPosition(mock_device, CAMERA_EP, 10, 20, 30) + }) + test.socket.matter:__queue_receive({ + mock_device.id, + clusters.CameraAvSettingsUserLevelManagement.attributes.MPTZPosition:build_test_report_data( + mock_device, CAMERA_EP, {pan = 10, tilt = 20, zoom = 30}) + }) + test.socket.capability:__set_channel_ordering("relaxed") + test.socket.capability:__expect_send( + mock_device:generate_test_message("main", capabilities.mechanicalPanTiltZoom.pan(10)) + ) + test.socket.capability:__expect_send( + mock_device:generate_test_message("main", capabilities.mechanicalPanTiltZoom.tilt(20)) + ) + test.socket.capability:__expect_send( + mock_device:generate_test_message("main", capabilities.mechanicalPanTiltZoom.zoom(30)) + ) + test.wait_for_events() + test.socket.capability:__queue_receive({ + mock_device.id, + { capability = "mechanicalPanTiltZoom", component = "main", command = "setPan", args = { 50 } }, + }) + test.socket.matter:__expect_send({ + mock_device.id, clusters.CameraAvSettingsUserLevelManagement.server.commands.MPTZSetPosition(mock_device, CAMERA_EP, 50, 20, 30) + }) + test.socket.matter:__queue_receive({ + mock_device.id, + clusters.CameraAvSettingsUserLevelManagement.attributes.MPTZPosition:build_test_report_data( + mock_device, CAMERA_EP, {pan = 50, tilt = 20, zoom = 30}) + }) + test.socket.capability:__expect_send( + mock_device:generate_test_message("main", capabilities.mechanicalPanTiltZoom.pan(50)) + ) + test.wait_for_events() + test.socket.capability:__queue_receive({ + mock_device.id, + { capability = "mechanicalPanTiltZoom", component = "main", command = "setTilt", args = { -44 } }, + }) + test.socket.matter:__expect_send({ + mock_device.id, clusters.CameraAvSettingsUserLevelManagement.server.commands.MPTZSetPosition(mock_device, CAMERA_EP, 50, -44, 30) + }) + test.socket.matter:__queue_receive({ + mock_device.id, + clusters.CameraAvSettingsUserLevelManagement.attributes.MPTZPosition:build_test_report_data( + mock_device, CAMERA_EP, {pan = 50, tilt = -44, zoom = 30}) + }) + test.socket.capability:__expect_send( + mock_device:generate_test_message("main", capabilities.mechanicalPanTiltZoom.tilt(-44)) + ) + test.wait_for_events() + test.socket.capability:__queue_receive({ + mock_device.id, + { capability = "mechanicalPanTiltZoom", component = "main", command = "setZoom", args = { 5 } }, + }) + test.socket.matter:__expect_send({ + mock_device.id, clusters.CameraAvSettingsUserLevelManagement.server.commands.MPTZSetPosition(mock_device, CAMERA_EP, 50, -44, 5) + }) + test.socket.matter:__queue_receive({ + mock_device.id, + clusters.CameraAvSettingsUserLevelManagement.attributes.MPTZPosition:build_test_report_data( + mock_device, CAMERA_EP, {pan = 50, tilt = -44, zoom = 5}) + }) + test.socket.capability:__expect_send( + mock_device:generate_test_message("main", capabilities.mechanicalPanTiltZoom.zoom(5)) + ) + end +) + 
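+-- The preset capability commands exercised below (savePreset, removePreset, moveToPreset) map one-to-one to the
+-- Matter MPTZSavePreset, MPTZRemovePreset, and MPTZMoveToPreset commands on CameraAvSettingsUserLevelManagement.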
+test.register_coroutine_test( + "Preset commands should send the appropriate commands", + function() + update_device_profile() + test.wait_for_events() + test.socket.capability:__queue_receive({ + mock_device.id, + { capability = "mechanicalPanTiltZoom", component = "main", command = "savePreset", args = { 1, "Preset 1" } }, + }) + test.socket.matter:__expect_send({ + mock_device.id, clusters.CameraAvSettingsUserLevelManagement.server.commands.MPTZSavePreset(mock_device, CAMERA_EP, 1, "Preset 1") + }) + test.socket.capability:__queue_receive({ + mock_device.id, + { capability = "mechanicalPanTiltZoom", component = "main", command = "removePreset", args = { 1 } }, + }) + test.socket.matter:__expect_send({ + mock_device.id, clusters.CameraAvSettingsUserLevelManagement.server.commands.MPTZRemovePreset(mock_device, CAMERA_EP, 1) + }) + test.socket.capability:__queue_receive({ + mock_device.id, + { capability = "mechanicalPanTiltZoom", component = "main", command = "moveToPreset", args = { 2 } }, + }) + test.socket.matter:__expect_send({ + mock_device.id, clusters.CameraAvSettingsUserLevelManagement.server.commands.MPTZMoveToPreset(mock_device, CAMERA_EP, 2) + }) + end +) + +test.register_coroutine_test( + "Sound commands should send the appropriate commands", + function() + update_device_profile() + test.wait_for_events() + test.socket.capability:__queue_receive({ + mock_device.id, + { capability = "sounds", component = "main", command = "setSelectedSound", args = { 1 } }, + }) + test.socket.matter:__expect_send({ + mock_device.id, clusters.Chime.attributes.SelectedChime:write(mock_device, CAMERA_EP, 1) + }) + test.socket.capability:__queue_receive({ + mock_device.id, + { capability = "sounds", component = "main", command = "playSound", args = {} }, + }) + test.socket.matter:__expect_send({ + mock_device.id, clusters.Chime.server.commands.PlayChimeSound(mock_device, CAMERA_EP) + }) + end +) + +test.register_coroutine_test( + "Zone Management zone commands should send the appropriate commands", + function() + update_device_profile() + test.wait_for_events() + local use_map = { + ["motion"] = clusters.ZoneManagement.types.ZoneUseEnum.MOTION, + ["focus"] = clusters.ZoneManagement.types.ZoneUseEnum.FOCUS, + ["privacy"] = clusters.ZoneManagement.types.ZoneUseEnum.PRIVACY + } + for i, v in pairs(use_map) do + test.socket.capability:__queue_receive({ + mock_device.id, + { capability = "zoneManagement", component = "main", command = "newZone", args = { + i .. " zone", {{value = {x = 0, y = 0}}, {value = {x = 1920, y = 1080}} }, i, "blue" + }} + }) + test.socket.matter:__expect_send({ + mock_device.id, clusters.ZoneManagement.server.commands.CreateTwoDCartesianZone(mock_device, CAMERA_EP, + clusters.ZoneManagement.types.TwoDCartesianZoneStruct( + { + name = i .. " zone", + use = v, + vertices = { + clusters.ZoneManagement.types.TwoDCartesianVertexStruct({x = 0, y = 0}), + clusters.ZoneManagement.types.TwoDCartesianVertexStruct({x = 1920, y = 1080}) + }, + color = "blue" + } + ) + ) + }) + end + local zone_id = 1 + for i, v in pairs(use_map) do + test.socket.capability:__queue_receive({ + mock_device.id, + { capability = "zoneManagement", component = "main", command = "updateZone", args = { + zone_id, "updated " .. i .. 
" zone", {{value = {x = 50, y = 50}}, {value = {x = 1000, y = 1000}} }, i, "red" + }} + }) + test.socket.matter:__expect_send({ + mock_device.id, clusters.ZoneManagement.server.commands.UpdateTwoDCartesianZone(mock_device, CAMERA_EP, + zone_id, + clusters.ZoneManagement.types.TwoDCartesianZoneStruct( + { + name = "updated " .. i .. " zone", + use = v, + vertices = { + clusters.ZoneManagement.types.TwoDCartesianVertexStruct({ x = 50, y = 50 }), + clusters.ZoneManagement.types.TwoDCartesianVertexStruct({ x = 1000, y = 1000 }) + }, + color = "red" + } + ) + ) + }) + zone_id = zone_id + 1 + end + for i = 1, 3 do + test.socket.capability:__queue_receive({ + mock_device.id, + { capability = "zoneManagement", component = "main", command = "removeZone", args = { i } } + }) + test.socket.matter:__expect_send({ + mock_device.id, clusters.ZoneManagement.server.commands.RemoveZone(mock_device, CAMERA_EP, i) + }) + end + end +) + +test.register_coroutine_test( + "Zone Management zone commands should send the appropriate commands - missing optional color argument", + function() + update_device_profile() + test.wait_for_events() + local use_map = { + ["motion"] = clusters.ZoneManagement.types.ZoneUseEnum.MOTION, + ["focus"] = clusters.ZoneManagement.types.ZoneUseEnum.FOCUS, + ["privacy"] = clusters.ZoneManagement.types.ZoneUseEnum.PRIVACY + } + for i, v in pairs(use_map) do + test.socket.capability:__queue_receive({ + mock_device.id, + { capability = "zoneManagement", component = "main", command = "newZone", args = { + i .. " zone", {{value = {x = 0, y = 0}}, {value = {x = 1920, y = 1080}} }, i + }} + }) + test.socket.matter:__expect_send({ + mock_device.id, clusters.ZoneManagement.server.commands.CreateTwoDCartesianZone(mock_device, CAMERA_EP, + clusters.ZoneManagement.types.TwoDCartesianZoneStruct( + { + name = i .. " zone", + use = v, + vertices = { + clusters.ZoneManagement.types.TwoDCartesianVertexStruct({x = 0, y = 0}), + clusters.ZoneManagement.types.TwoDCartesianVertexStruct({x = 1920, y = 1080}) + }, + } + ) + ) + }) + end + local zone_id = 1 + for i, v in pairs(use_map) do + test.socket.capability:__queue_receive({ + mock_device.id, + { capability = "zoneManagement", component = "main", command = "updateZone", args = { + zone_id, "updated " .. i .. " zone", {{value = {x = 50, y = 50}}, {value = {x = 1000, y = 1000}} }, i, "red" + }} + }) + test.socket.matter:__expect_send({ + mock_device.id, clusters.ZoneManagement.server.commands.UpdateTwoDCartesianZone(mock_device, CAMERA_EP, + zone_id, + clusters.ZoneManagement.types.TwoDCartesianZoneStruct( + { + name = "updated " .. i .. 
" zone", + use = v, + vertices = { + clusters.ZoneManagement.types.TwoDCartesianVertexStruct({ x = 50, y = 50 }), + clusters.ZoneManagement.types.TwoDCartesianVertexStruct({ x = 1000, y = 1000 }) + }, + color = "red" + } + ) + ) + }) + zone_id = zone_id + 1 + end + for i = 1, 3 do + test.socket.capability:__queue_receive({ + mock_device.id, + { capability = "zoneManagement", component = "main", command = "removeZone", args = { i } } + }) + test.socket.matter:__expect_send({ + mock_device.id, clusters.ZoneManagement.server.commands.RemoveZone(mock_device, CAMERA_EP, i) + }) + end + end +) + +test.register_coroutine_test( + "Zone Management trigger commands should send the appropriate commands", + function() + update_device_profile() + test.wait_for_events() + + -- Create the trigger + test.socket.capability:__queue_receive({ + mock_device.id, + { capability = "zoneManagement", component = "main", command = "createOrUpdateTrigger", args = { + 1, 10, 3, 15, 3, 5 + }} + }) + test.socket.matter:__expect_send({ + mock_device.id, clusters.ZoneManagement.server.commands.CreateOrUpdateTrigger(mock_device, CAMERA_EP, { + zone_id = 1, + initial_duration = 10, + augmentation_duration = 3, + max_duration = 15, + blind_duration = 3, + sensitivity = 5 + }) + }) + + -- The device reports the Triggers attribute with the newly created trigger and the capability is updated + test.socket.matter:__queue_receive({ + mock_device.id, + clusters.ZoneManagement.attributes.Triggers:build_test_report_data( + mock_device, CAMERA_EP, { + clusters.ZoneManagement.types.ZoneTriggerControlStruct({ + zone_id = 1, initial_duration = 10, augmentation_duration = 3, max_duration = 15, blind_duration = 3, sensitivity = 5 + }) + } + ) + }) + test.socket.capability:__expect_send( + mock_device:generate_test_message("main", capabilities.zoneManagement.triggers({{ + zoneId = 1, initialDuration = 10, augmentationDuration = 3, maxDuration = 15, blindDuration = 3, sensitivity = 5 + }})) + ) + test.wait_for_events() + + -- Update trigger, note that some arguments are optional. In this case, + -- blindDuration is not specified in the capability command. 
+ + test.socket.capability:__queue_receive({ + mock_device.id, + { capability = "zoneManagement", component = "main", command = "createOrUpdateTrigger", args = { + 1, 8, 7, 25, 3, 1 + }} + }) + test.socket.matter:__expect_send({ + mock_device.id, clusters.ZoneManagement.server.commands.CreateOrUpdateTrigger(mock_device, CAMERA_EP, { + zone_id = 1, + initial_duration = 8, + augmentation_duration = 7, + max_duration = 25, + blind_duration = 3, + sensitivity = 1 + }) + }) + + -- Remove the trigger + test.socket.capability:__queue_receive({ + mock_device.id, + { capability = "zoneManagement", component = "main", command = "removeTrigger", args = { 1 } } + }) + test.socket.matter:__expect_send({ + mock_device.id, clusters.ZoneManagement.server.commands.RemoveTrigger(mock_device, CAMERA_EP, 1) + }) + end +) + +test.register_coroutine_test( + "Stream management commands should send the appropriate commands", + function() + update_device_profile() + test.wait_for_events() + test.socket.capability:__queue_receive({ + mock_device.id, + { capability = "videoStreamSettings", component = "main", command = "setStream", args = { + 3, + "liveStream", + "Stream 3", + { width = 1920, height = 1080, fps = 30 }, + { upperLeftVertex = {x = 0, y = 0}, lowerRightVertex = {x = 1920, y = 1080} }, + "enabled", + "disabled" + }} + }) + test.socket.matter:__expect_send({ + mock_device.id, clusters.CameraAvStreamManagement.server.commands.VideoStreamModify(mock_device, CAMERA_EP, + 3, true, false + ) + }) + end +) + +test.register_coroutine_test( + "Stream management setStream command should modify an existing stream", + function() + update_device_profile() + test.wait_for_events() + test.socket.matter:__queue_receive({ + mock_device.id, + clusters.CameraAvStreamManagement.attributes.AllocatedVideoStreams:build_test_report_data( + mock_device, CAMERA_EP, { + clusters.CameraAvStreamManagement.types.VideoStreamStruct({ + video_stream_id = 1, + stream_usage = clusters.Global.types.StreamUsageEnum.LIVE_VIEW, + video_codec = clusters.CameraAvStreamManagement.types.VideoCodecEnum.H264, + min_frame_rate = 30, + max_frame_rate = 60, + min_resolution = clusters.CameraAvStreamManagement.types.VideoResolutionStruct({width = 640, height = 360}), + max_resolution = clusters.CameraAvStreamManagement.types.VideoResolutionStruct({width = 640, height = 360}), + min_bit_rate = 10000, + max_bit_rate = 10000, + key_frame_interval = 4000, + watermark_enabled = true, + osd_enabled = false, + reference_count = 0 + }) + } + ) + }) + test.socket.capability:__expect_send( + mock_device:generate_test_message("main", capabilities.videoStreamSettings.videoStreams({ + { + streamId = 1, + data = { + label = "Stream 1", + type = "liveStream", + resolution = { + width = 640, + height = 360, + fps = 30 + }, + watermark = "enabled", + onScreenDisplay = "disabled" + } + } + })) + ) + test.socket.capability:__queue_receive({ + mock_device.id, + { capability = "videoStreamSettings", component = "main", command = "setStream", args = { + 1, + "liveStream", + "Stream 1", + { width = 640, height = 360, fps = 30 }, + { upperLeftVertex = {x = 0, y = 0}, lowerRightVertex = {x = 640, y = 360} }, + "disabled", + "enabled" + }} + }) + test.socket.matter:__expect_send({ + mock_device.id, clusters.CameraAvStreamManagement.server.commands.VideoStreamModify(mock_device, CAMERA_EP, + 1, false, true + ) + }) + end +) + +-- run the tests +test.run_registered_tests()