From 615fbcc6f2b2462d64f66c1eded222a53741d322 Mon Sep 17 00:00:00 2001 From: puddly <32534428+puddly@users.noreply.github.com> Date: Sat, 16 Apr 2022 09:14:19 -0400 Subject: [PATCH 01/15] Persist attribute writes in the database (fixes #958) (#962) --- tests/test_zcl.py | 14 ++++++++++++++ zigpy/zcl/__init__.py | 4 ++-- 2 files changed, 16 insertions(+), 2 deletions(-) diff --git a/tests/test_zcl.py b/tests/test_zcl.py index 5aaa206eb..00321663e 100644 --- a/tests/test_zcl.py +++ b/tests/test_zcl.py @@ -468,6 +468,9 @@ async def test_write_attributes_cache_default_response(cluster, status): ), ) async def test_write_attributes_cache_success(cluster, attributes, result): + listener = MagicMock() + cluster.add_listener(listener) + rsp_type = t.List[foundation.WriteAttributesStatusRecord] write_mock = AsyncMock(return_value=[rsp_type.deserialize(result)[0]]) with patch.object(cluster, "_write_attributes", write_mock): @@ -475,6 +478,7 @@ async def test_write_attributes_cache_success(cluster, attributes, result): assert cluster._write_attributes.call_count == 1 for attr_id in attributes: assert cluster._attr_cache[attr_id] == attributes[attr_id] + listener.attribute_updated.assert_any_call(attr_id, attributes[attr_id]) @pytest.mark.parametrize( @@ -501,6 +505,9 @@ async def test_write_attributes_cache_success(cluster, attributes, result): ), ) async def test_write_attributes_cache_failure(cluster, attributes, result, failed): + listener = MagicMock() + cluster.add_listener(listener) + rsp_type = foundation.WriteAttributesResponse write_mock = AsyncMock(return_value=[rsp_type.deserialize(result)[0]]) @@ -510,8 +517,15 @@ async def test_write_attributes_cache_failure(cluster, attributes, result, faile for attr_id in attributes: if attr_id in failed: assert attr_id not in cluster._attr_cache + + # Failed writes do not propagate + with pytest.raises(AssertionError): + listener.attribute_updated.assert_any_call( + attr_id, attributes[attr_id] + ) else: assert cluster._attr_cache[attr_id] == attributes[attr_id] + listener.attribute_updated.assert_any_call(attr_id, attributes[attr_id]) def test_read_attributes_response(cluster): diff --git a/zigpy/zcl/__init__.py b/zigpy/zcl/__init__.py index 1ee1bafd8..c90d29b73 100644 --- a/zigpy/zcl/__init__.py +++ b/zigpy/zcl/__init__.py @@ -546,12 +546,12 @@ async def write_attributes_raw( records = result[0] if len(records) == 1 and records[0].status == foundation.Status.SUCCESS: for attr_rec in attrs: - self._attr_cache[attr_rec.attrid] = attr_rec.value.value + self._update_attribute(attr_rec.attrid, attr_rec.value.value) else: failed = [rec.attrid for rec in records] for attr_rec in attrs: if attr_rec.attrid not in failed: - self._attr_cache[attr_rec.attrid] = attr_rec.value.value + self._update_attribute(attr_rec.attrid, attr_rec.value.value) return result From 09f29cf1a10e9bfabaecf15244ce655c2b8b93cd Mon Sep 17 00:00:00 2001 From: Hedda Date: Tue, 19 Apr 2022 10:28:36 +0200 Subject: [PATCH 02/15] Update OTA_URLs.md Philips Hue URLs Update OTA_URLs.md Philips Hue URLs --- zigpy/ota/OTA_URLs.md | 14 +++++++++++++- 1 file changed, 13 insertions(+), 1 deletion(-) diff --git a/zigpy/ota/OTA_URLs.md b/zigpy/ota/OTA_URLs.md index d4887cb93..44a9879b0 100644 --- a/zigpy/ota/OTA_URLs.md +++ b/zigpy/ota/OTA_URLs.md @@ -76,7 +76,19 @@ https://github.com/dresden-elektronik/deconz-rest-plugin/wiki/OTA-Image-Types--- ### Philips Hue (Signify) -Philips Hue (Signify) Zigbee OTA firmware images are made publicly available by Dresden Elektronik (third-party) at the following 
URL: +Philips Hue OTA firmware images for different Hue devices are available from several official sources, which do not all use the same APIs: + +https://firmware.meethue.com/v1/checkUpdate + +https://firmware.meethue.com/storage/ + +http://fds.dc1.philips.com/firmware/ + +Direct URLs to Philips Hue (Signify) Zigbee OTA firmware images are available from the Koenkk zigbee-OTA repository (third-party) at the following URL: + +https://raw.githubusercontent.com/Koenkk/zigbee-OTA/master/index.json + +Philips Hue (Signify) Zigbee OTA firmware images are also made publicly available by Dresden Elektronik (third-party) at the following URL: + https://github.com/dresden-elektronik/deconz-rest-plugin/wiki/OTA-Image-Types---Firmware-versions#philips-hue From 7a749011aa12e03f91eaed7e241737baff14e95b Mon Sep 17 00:00:00 2001 From: puddly <32534428+puddly@users.noreply.github.com> Date: Wed, 20 Apr 2022 17:13:07 -0400 Subject: [PATCH 03/15] Do not return the radio library status code when `expect_reply` is false --- tests/test_device.py | 15 +++++++++++++++ zigpy/device.py | 11 ++++++----- 2 files changed, 21 insertions(+), 5 deletions(-) diff --git a/tests/test_device.py b/tests/test_device.py index 797eec0f5..4896d009b 100644 --- a/tests/test_device.py +++ b/tests/test_device.py @@ -111,6 +111,21 @@ async def mock_req(*args, **kwargs): assert dev.last_seen is not None +async def test_request_without_reply(dev): + seq = sentinel.tsn + + async def mock_req(*args, **kwargs): + dev._pending[seq].result.set_result(sentinel.result) + return 0, sentinel.radio_status + + dev.application.request.side_effect = mock_req + assert dev.last_seen is None + r = await dev.request(1, 2, 3, 3, seq, b"", expect_reply=False) + assert r is None + assert dev._application.request.call_count == 1 + assert dev.last_seen is not None + + async def test_failed_request(dev): assert dev.last_seen is None dev._application.request = AsyncMock(return_value=(1, "error")) diff --git a/zigpy/device.py b/zigpy/device.py index 8da253a7d..a79546beb 100644 --- a/zigpy/device.py +++ b/zigpy/device.py @@ -271,7 +271,7 @@ async def request( self.debug("Extending timeout for 0x%02x request", sequence) timeout = APS_REPLY_TIMEOUT_EXTENDED with self._pending.new(sequence) as req: - result, msg = await self._application.request( + radio_result, msg = await self._application.request( self, profile, cluster, @@ -282,7 +282,7 @@ async def request( expect_reply=expect_reply, use_ieee=use_ieee, ) - if result != foundation.Status.SUCCESS: + if radio_result != foundation.Status.SUCCESS: self.debug( ( "Delivery error for seq # 0x%02x, on endpoint id %s " @@ -301,10 +301,11 @@ async def request( ) # If application.request raises an exception, we won't get here, so # won't update last_seen, as expected self.last_seen = time.time() - if expect_reply: - result = await asyncio.wait_for(req.result, timeout) - return result + if not expect_reply: + return None + + return await asyncio.wait_for(req.result, timeout) def deserialize(self, endpoint_id, cluster_id, data): return self.endpoints[endpoint_id].deserialize(cluster_id, data) From 981598104e417d4879a3b1f8d801d3cc683ead7f Mon Sep 17 00:00:00 2001 From: puddly <32534428+puddly@users.noreply.github.com> Date: Fri, 11 Mar 2022 16:08:07 -0500 Subject: [PATCH 04/15] Replace PyCryptodome with Cryptography --- setup.py | 2 +- tests/test_zigbee_util.py | 169 +++----------------------------------- zigpy/types/named.py | 14 +++- zigpy/types/struct.py | 2 +- zigpy/util.py | 54 ++++++------ 5 files changed, 58 insertions(+), 183 deletions(-)
diff --git a/setup.py b/setup.py index 03f2a8289..750c01638 100644 --- a/setup.py +++ b/setup.py @@ -6,7 +6,7 @@ import zigpy -REQUIRES = ["aiohttp", "aiosqlite>=0.16.0", "crccheck", "pycryptodome", "voluptuous"] +REQUIRES = ["aiohttp", "aiosqlite>=0.16.0", "crccheck", "cryptography", "voluptuous"] setup( name="zigpy", diff --git a/tests/test_zigbee_util.py b/tests/test_zigbee_util.py index 6c8339e93..548a6b4ae 100644 --- a/tests/test_zigbee_util.py +++ b/tests/test_zigbee_util.py @@ -6,6 +6,7 @@ from zigpy import util from zigpy.exceptions import ControllerException +from zigpy.types.named import KeyData from .async_mock import AsyncMock, MagicMock, call, patch, sentinel @@ -176,173 +177,29 @@ async def test_retryable_once(): def test_zigbee_security_hash(): - message = bytes([0x11, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77, 0x88, 0x4A, 0xF7]) + message = bytes.fromhex("11223344556677884AF7") key = util.aes_mmo_hash(message) - assert key == [ - 0x41, - 0x61, - 0x8F, - 0xC0, - 0xC8, - 0x3B, - 0x0E, - 0x14, - 0xA5, - 0x89, - 0x95, - 0x4B, - 0x16, - 0xE3, - 0x14, - 0x66, - ] - - message = bytes( - [ - 0x7A, - 0x93, - 0x97, - 0x23, - 0xA5, - 0xC6, - 0x39, - 0xB2, - 0x69, - 0x16, - 0x18, - 0x02, - 0x81, - 0x9B, - ] - ) + assert key == KeyData.convert("41618FC0C83B0E14A589954B16E31466") + + message = bytes.fromhex("7A939723A5C639B269161802819B") key = util.aes_mmo_hash(message) - assert key == [ - 0xF9, - 0x39, - 0x03, - 0x72, - 0x16, - 0x85, - 0xFD, - 0x32, - 0x9D, - 0x26, - 0x84, - 0x9B, - 0x90, - 0xF2, - 0x95, - 0x9A, - ] - - message = bytes( - [ - 0x83, - 0xFE, - 0xD3, - 0x40, - 0x7A, - 0x93, - 0x97, - 0x23, - 0xA5, - 0xC6, - 0x39, - 0xB2, - 0x69, - 0x16, - 0x18, - 0x02, - 0xAE, - 0xBB, - ] - ) + assert key == KeyData.convert("F93903721685FD329D26849B90F2959A") + + message = bytes.fromhex("83FED3407A939723A5C639B269161802AEBB") key = util.aes_mmo_hash(message) - assert key == [ - 0x33, - 0x3C, - 0x23, - 0x68, - 0x60, - 0x79, - 0x46, - 0x8E, - 0xB2, - 0x7B, - 0xA2, - 0x4B, - 0xD9, - 0xC7, - 0xE5, - 0x64, - ] + assert key == KeyData.convert("333C23686079468EB27BA24BD9C7E564") @pytest.mark.parametrize( "message, expected_key", [ ( - bytes([0x11, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77, 0x88, 0x4A, 0xF7]), - [ - 0x41, - 0x61, - 0x8F, - 0xC0, - 0xC8, - 0x3B, - 0x0E, - 0x14, - 0xA5, - 0x89, - 0x95, - 0x4B, - 0x16, - 0xE3, - 0x14, - 0x66, - ], + bytes.fromhex("11223344556677884AF7"), + KeyData.convert("41618FC0C83B0E14A589954B16E31466"), ), ( - bytes( - [ - 0x83, - 0xFE, - 0xD3, - 0x40, - 0x7A, - 0x93, - 0x97, - 0x23, - 0xA5, - 0xC6, - 0x39, - 0xB2, - 0x69, - 0x16, - 0xD5, - 0x05, - 0xC3, - 0xB5, - ] - ), - [ - 0x66, - 0xB6, - 0x90, - 0x09, - 0x81, - 0xE1, - 0xEE, - 0x3C, - 0xA4, - 0x20, - 0x6B, - 0x6B, - 0x86, - 0x1C, - 0x02, - 0xBB, - ], + bytes.fromhex("83FED3407A939723A5C639B26916D505C3B5"), + KeyData.convert("66B6900981E1EE3CA4206B6B861C02BB"), ), ], ) diff --git a/zigpy/types/named.py b/zigpy/types/named.py index 2d4b80824..37d25ad05 100644 --- a/zigpy/types/named.py +++ b/zigpy/types/named.py @@ -6,6 +6,16 @@ from .struct import Struct +def _hex_string_to_bytes(hex_string: str) -> bytes: + """ + Parses a hex string with optional colon delimiters and whitespace into bytes. 
+ """ + + # Strips out whitespace and colons + cleaned = "".join(hex_string.replace(":", "").split()).upper() + return bytes.fromhex(cleaned) + + class BroadcastAddress(basic.enum16): ALL_DEVICES = 0xFFFF RESERVED_FFFE = 0xFFFE @@ -29,7 +39,7 @@ def __hash__(self): def convert(cls, ieee: str): if ieee is None: return None - ieee = [basic.uint8_t(p, base=16) for p in ieee.split(":")[::-1]] + ieee = [basic.uint8_t(p) for p in _hex_string_to_bytes(ieee)[::-1]] assert len(ieee) == cls._length return cls(ieee) @@ -40,7 +50,7 @@ def __repr__(self): @classmethod def convert(cls, key: str) -> KeyData: - key = [basic.uint8_t(p, base=16) for p in key.split(":")] + key = [basic.uint8_t(p) for p in _hex_string_to_bytes(key)] assert len(key) == cls._length return cls(key) diff --git a/zigpy/types/struct.py b/zigpy/types/struct.py index 01f457f11..9522c6358 100644 --- a/zigpy/types/struct.py +++ b/zigpy/types/struct.py @@ -296,7 +296,7 @@ def deserialize(cls: type[_STRUCT], data: bytes) -> tuple[_STRUCT, bytes]: return instance, data # TODO: improve? def replace(self: typing.Type[_STRUCT], **kwargs) -> _STRUCT: - def replace(self, **kwargs) -> Struct: + def replace(self, **kwargs: dict[str, typing.Any]) -> Struct: d = self.as_dict().copy() d.update(kwargs) diff --git a/zigpy/util.py b/zigpy/util.py index 4430533d5..ae08b9c68 100644 --- a/zigpy/util.py +++ b/zigpy/util.py @@ -9,8 +9,10 @@ import traceback from typing import Any, Coroutine -from Crypto.Cipher import AES from crccheck.crc import CrcX25 +from cryptography.hazmat.primitives.ciphers import Cipher +from cryptography.hazmat.primitives.ciphers.algorithms import AES +from cryptography.hazmat.primitives.ciphers.modes import ECB from zigpy.exceptions import ControllerException, ZigbeeException import zigpy.types as t @@ -147,36 +149,42 @@ def wrapper(*args, tries=tries, delay=delay, **kwargs): retryable_request = retryable((ZigbeeException, asyncio.TimeoutError)) -def aes_mmo_hash_update(length, result, data): - while len(data) >= AES.block_size: +def aes_mmo_hash_update(length: int, result: bytes, data: bytes) -> tuple[int, bytes]: + block_size = AES.block_size // 8 + + while len(data) >= block_size: + block = bytes(data[:block_size]) + # Encrypt - aes = AES.new(bytes(result), AES.MODE_ECB) - result = bytearray(aes.encrypt(bytes(data[: AES.block_size]))) + aes = Cipher(AES(bytes(result)), ECB()).encryptor() + result = bytearray(aes.update(block) + aes.finalize()) - # XOR - for i in range(AES.block_size): - result[i] ^= bytes(data[: AES.block_size])[i] + # XOR plaintext into ciphertext + for i in range(block_size): + result[i] ^= block[i] - data = data[AES.block_size :] - length += AES.block_size + data = data[block_size:] + length += block_size return (length, result) -def aes_mmo_hash(data): +def aes_mmo_hash(data: bytes) -> t.KeyData: + block_size = AES.block_size // 8 + result_len = 0 remaining_length = 0 length = len(data) - result = bytearray([0] * AES.block_size) - temp = bytearray([0] * AES.block_size) + result = bytearray([0] * block_size) + temp = bytearray([0] * block_size) if data and length > 0: - remaining_length = length & (AES.block_size - 1) - if length >= AES.block_size: + remaining_length = length & (block_size - 1) + if length >= block_size: # Mask out the lower byte since hash update will hash # everything except the last piece, if the last piece # is less than 16 bytes. 
- hashed_length = length & ~(AES.block_size - 1) + hashed_length = length & ~(block_size - 1) (result_len, result) = aes_mmo_hash_update(result_len, result, data) data = data[hashed_length:] @@ -190,25 +198,25 @@ def aes_mmo_hash(data): # If appending the bit string will push us beyond the 16-byte boundary # we must hash that block and append another 16-byte block. - if (AES.block_size - remaining_length) < 3: + if (block_size - remaining_length) < 3: (result_len, result) = aes_mmo_hash_update(result_len, result, temp) # Since this extra data is due to the concatenation, # we remove that length. We want the length of data only # and not the padding. - result_len -= AES.block_size - temp = bytearray([0] * AES.block_size) + result_len -= block_size + temp = bytearray([0] * block_size) bit_size = result_len * 8 - temp[AES.block_size - 2] = (bit_size >> 8) & 0xFF - temp[AES.block_size - 1] = (bit_size) & 0xFF + temp[block_size - 2] = (bit_size >> 8) & 0xFF + temp[block_size - 1] = (bit_size) & 0xFF (result_len, result) = aes_mmo_hash_update(result_len, result, temp) - return t.KeyData([t.uint8_t(c) for c in result]) + return t.KeyData(result) -def convert_install_code(code): +def convert_install_code(code: bytes) -> t.KeyData: if len(code) not in (8, 10, 14, 18): return None From 6483624e275a0746f724add8b34eb595032fe905 Mon Sep 17 00:00:00 2001 From: puddly <32534428+puddly@users.noreply.github.com> Date: Fri, 22 Apr 2022 10:41:45 -0400 Subject: [PATCH 05/15] Hash `setup.py` as well when caching the CI environment --- .github/workflows/ci.yml | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 87231a328..2ba9143b9 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -35,7 +35,7 @@ jobs: key: >- ${{ env.CACHE_VERSION}}-${{ runner.os }}-base-venv-${{ steps.python.outputs.python-version }}-${{ - hashFiles('requirements_test.txt') }} + hashFiles('setup.py', 'requirements_test.txt') }} restore-keys: | ${{ env.CACHE_VERSION}}-${{ runner.os }}-base-venv-${{ steps.python.outputs.python-version }}- - name: Create Python virtual environment @@ -67,7 +67,7 @@ jobs: key: >- ${{ env.CACHE_VERSION}}-${{ runner.os }}-base-venv-${{ steps.python.outputs.python-version }}-${{ - hashFiles('requirements_test.txt') }} + hashFiles('setup.py', 'requirements_test.txt') }} - name: Fail job if Python cache restore failed if: steps.cache-venv.outputs.cache-hit != 'true' run: | @@ -108,7 +108,7 @@ jobs: key: >- ${{ env.CACHE_VERSION}}-${{ runner.os }}-base-venv-${{ steps.python.outputs.python-version }}-${{ - hashFiles('requirements_test.txt') }} + hashFiles('setup.py', 'requirements_test.txt') }} - name: Fail job if Python cache restore failed if: steps.cache-venv.outputs.cache-hit != 'true' run: | @@ -151,7 +151,7 @@ jobs: key: >- ${{ env.CACHE_VERSION}}-${{ runner.os }}-base-venv-${{ steps.python.outputs.python-version }}-${{ - hashFiles('requirements_test.txt') }} + hashFiles('setup.py', 'requirements_test.txt') }} - name: Fail job if Python cache restore failed if: steps.cache-venv.outputs.cache-hit != 'true' run: | @@ -197,7 +197,7 @@ jobs: key: >- ${{ env.CACHE_VERSION}}-${{ runner.os }}-base-venv-${{ steps.python.outputs.python-version }}-${{ - hashFiles('requirements_test.txt') }} + hashFiles('setup.py', 'requirements_test.txt') }} - name: Fail job if Python cache restore failed if: steps.cache-venv.outputs.cache-hit != 'true' run: | @@ -240,7 +240,7 @@ jobs: key: >- ${{ env.CACHE_VERSION}}-${{ runner.os 
}}-base-venv-${{ steps.python.outputs.python-version }}-${{ - hashFiles('requirements_test.txt') }} + hashFiles('setup.py', 'requirements_test.txt') }} - name: Fail job if Python cache restore failed if: steps.cache-venv.outputs.cache-hit != 'true' run: | @@ -286,7 +286,7 @@ jobs: key: >- ${{ env.CACHE_VERSION}}-${{ runner.os }}-base-venv-${{ steps.python.outputs.python-version }}-${{ - hashFiles('requirements_test.txt') }} + hashFiles('setup.py', 'requirements_test.txt') }} - name: Fail job if Python cache restore failed if: steps.cache-venv.outputs.cache-hit != 'true' run: | @@ -334,7 +334,7 @@ jobs: key: >- ${{ env.CACHE_VERSION}}-${{ runner.os }}-base-venv-${{ steps.python.outputs.python-version }}-${{ - hashFiles('requirements_test.txt') }} + hashFiles('setup.py', 'requirements_test.txt') }} - name: Fail job if Python cache restore failed if: steps.cache-venv.outputs.cache-hit != 'true' run: | @@ -396,7 +396,7 @@ jobs: key: >- ${{ env.CACHE_VERSION}}-${{ runner.os }}-base-venv-${{ steps.python.outputs.python-version }}-${{ - hashFiles('requirements_test.txt') }} + hashFiles('setup.py', 'requirements_test.txt') }} - name: Fail job if Python cache restore failed if: steps.cache-venv.outputs.cache-hit != 'true' run: | From 72c8b5f9da472f8a1374fc7397f319ec0bc5f468 Mon Sep 17 00:00:00 2001 From: puddly <32534428+puddly@users.noreply.github.com> Date: Thu, 21 Apr 2022 12:17:32 -0400 Subject: [PATCH 06/15] Persist `last_seen` in the application database --- tests/test_appdb.py | 9 ++ zigpy/appdb.py | 70 ++++++++++- zigpy/appdb_schemas/schema_v8.sql | 201 ++++++++++++++++++++++++++++++ zigpy/application.py | 4 + zigpy/device.py | 27 +++- 5 files changed, 302 insertions(+), 9 deletions(-) create mode 100644 zigpy/appdb_schemas/schema_v8.sql diff --git a/tests/test_appdb.py b/tests/test_appdb.py index 923a49579..635852463 100644 --- a/tests/test_appdb.py +++ b/tests/test_appdb.py @@ -1,4 +1,5 @@ import asyncio +from datetime import datetime import os import sqlite3 import sys @@ -172,6 +173,8 @@ async def test_database(tmpdir): dev.model = "Model" assert dev.get_signature()[SIG_MANUFACTURER] == "Custom" assert dev.get_signature()[SIG_MODEL] == "Model" + dev_last_seen = dev.last_seen + assert isinstance(dev_last_seen, datetime) # Test a CustomDevice custom_ieee = make_ieee(1) @@ -193,6 +196,9 @@ async def test_database(tmpdir): dev.endpoints[99].level._update_attribute(0x0011, 17) assert dev.endpoints[1].in_clusters[0x0008]._attr_cache[0x0011] == 17 assert dev.endpoints[99].in_clusters[0x0008]._attr_cache[0x0011] == 17 + custom_dev_last_seen = dev.last_seen + assert isinstance(custom_dev_last_seen, datetime) + await app.pre_shutdown() # Everything should've been saved - check that it re-loads @@ -209,6 +215,8 @@ async def test_database(tmpdir): assert dev.endpoints[2].out_clusters[1].cluster_id == 1 assert dev.endpoints[3].device_type == profiles.zll.DeviceType.COLOR_LIGHT assert dev.relays == relays_1 + # The timestamp won't be restored exactly but it is more than close enough + assert abs((dev.last_seen - dev_last_seen).total_seconds()) < 0.01 dev = app2.get_device(custom_ieee) # This virtual attribute is added by the quirk, there is no corresponding cluster @@ -216,6 +224,7 @@ async def test_database(tmpdir): assert dev.endpoints[1].in_clusters[0x0008]._attr_cache[0x0011] == 17 assert dev.endpoints[99].in_clusters[0x0008]._attr_cache[0x0011] == 17 assert dev.relays == relays_2 + assert abs((dev.last_seen - custom_dev_last_seen).total_seconds()) < 0.01 dev.relays = None app.handle_leave(99, 
ieee) diff --git a/zigpy/appdb.py b/zigpy/appdb.py index 4d4f47c5b..7601c7b15 100644 --- a/zigpy/appdb.py +++ b/zigpy/appdb.py @@ -1,6 +1,7 @@ from __future__ import annotations import asyncio +from datetime import datetime, timezone import logging import types from typing import Any @@ -22,10 +23,12 @@ LOGGER = logging.getLogger(__name__) -DB_VERSION = 7 +DB_VERSION = 8 DB_V = f"_v{DB_VERSION}" MIN_SQLITE_VERSION = (3, 24, 0) +UNIX_EPOCH = datetime.fromtimestamp(0, tz=timezone.utc) + def _import_compatible_sqlite3(min_version: tuple[int, int, int]) -> types.ModuleType: """ @@ -72,6 +75,16 @@ def convert_ieee(s): sqlite3.register_converter("ieee", convert_ieee) + def adapt_datetime(dt): + return int(dt.timestamp() * 1000) + + sqlite3.register_adapter(datetime, adapt_datetime) + + def convert_timestamp(ts): + return datetime.fromtimestamp(int(ts.decode("ascii"), 10) / 1000, timezone.utc) + + sqlite3.register_converter("unix_timestamp", convert_timestamp) + def aiosqlite_connect( database: str, iter_chunk_size: int = 64, **kwargs @@ -205,6 +218,18 @@ def device_initialized(self, device: zigpy.typing.DeviceType) -> None: def device_left(self, device: zigpy.typing.DeviceType) -> None: pass + def device_last_seen_updated( + self, device: zigpy.typing.DeviceType, last_seen: datetime + ) -> None: + """Device last_seen time is updated.""" + self.enqueue("_save_device_last_seen", device.ieee, last_seen) + + async def _save_device_last_seen(self, ieee: t.EUI64, last_seen: datetime) -> None: + await self.execute( + f"UPDATE devices{DB_V} SET last_seen=? WHERE ieee=?", (ieee, last_seen) + ) + await self._db.commit() + def device_relays_updated( self, device: zigpy.typing.DeviceType, relays: t.Relays | None ) -> None: @@ -337,10 +362,16 @@ def raw_device_initialized(self, device: zigpy.typing.DeviceType) -> None: self.enqueue("_save_device", device) async def _save_device(self, device: zigpy.typing.DeviceType) -> None: - q = f"""INSERT INTO devices{DB_V} (ieee, nwk, status) VALUES (?, ?, ?) + q = f"""INSERT INTO devices{DB_V} (ieee, nwk, status, last_seen) + VALUES (?, ?, ?, ?) 
ON CONFLICT (ieee) - DO UPDATE SET nwk=excluded.nwk, status=excluded.status""" - await self.execute(q, (device.ieee, device.nwk, device.status)) + DO UPDATE SET + nwk=excluded.nwk, + status=excluded.status, + last_seen=excluded.last_seen""" + await self.execute( + q, (device.ieee, device.nwk, device.status, device.last_seen) + ) if device.node_desc is not None: await self._save_node_descriptor(device) @@ -529,9 +560,10 @@ async def _load_unsupported_attributes(self) -> None: async def _load_devices(self) -> None: async with self.execute(f"SELECT * FROM devices{DB_V}") as cursor: - async for (ieee, nwk, status) in cursor: + async for (ieee, nwk, status, last_seen) in cursor: dev = self._application.add_device(ieee, nwk) dev.status = zigpy.device.Status(status) + dev._last_seen = last_seen async def _load_node_descriptors(self) -> None: async with self.execute(f"SELECT * FROM node_descriptors{DB_V}") as cursor: @@ -640,6 +672,7 @@ async def _run_migrations(self): (self._migrate_to_v5, 5), (self._migrate_to_v6, 6), (self._migrate_to_v7, 7), + (self._migrate_to_v8, 8), ]: if db_version >= min(to_db_version, DB_VERSION): continue @@ -799,3 +832,30 @@ async def _migrate_to_v7(self): "node_descriptors_v6": "node_descriptors_v7", } ) + + async def _migrate_to_v8(self): + """Schema v8 added the `devices_v8.last_seen` column.""" + + async with self.execute("SELECT * FROM devices_v7") as cursor: + async for (ieee, nwk, status) in cursor: + # Set the default `last_seen` to the unix epoch + await self.execute( + "INSERT INTO devices_v8 VALUES (?, ?, ?, ?)", + (ieee, nwk, status, UNIX_EPOCH), + ) + + # Copy the devices table first, it should have no conflicts + await self._migrate_tables( + { + "endpoints_v7": "endpoints_v8", + "in_clusters_v7": "in_clusters_v8", + "out_clusters_v7": "out_clusters_v8", + "groups_v7": "groups_v8", + "group_members_v7": "group_members_v8", + "relays_v7": "relays_v8", + "attributes_cache_v7": "attributes_cache_v8", + "neighbors_v7": "neighbors_v8", + "node_descriptors_v7": "node_descriptors_v8", + "unsupported_attributes_v7": "unsupported_attributes_v8", + } + ) diff --git a/zigpy/appdb_schemas/schema_v8.sql b/zigpy/appdb_schemas/schema_v8.sql new file mode 100644 index 000000000..64ea64b51 --- /dev/null +++ b/zigpy/appdb_schemas/schema_v8.sql @@ -0,0 +1,201 @@ +PRAGMA user_version = 8; + +-- devices +DROP TABLE IF EXISTS devices_v8; +CREATE TABLE devices_v8 ( + ieee ieee NOT NULL, + nwk INTEGER NOT NULL, + status INTEGER NOT NULL, + last_seen unix_timestamp NOT NULL +); + +CREATE UNIQUE INDEX devices_idx_v8 + ON devices_v8(ieee); + + +-- endpoints +DROP TABLE IF EXISTS endpoints_v8; +CREATE TABLE endpoints_v8 ( + ieee ieee NOT NULL, + endpoint_id INTEGER NOT NULL, + profile_id INTEGER NOT NULL, + device_type INTEGER NOT NULL, + status INTEGER NOT NULL, + + FOREIGN KEY(ieee) + REFERENCES devices_v8(ieee) + ON DELETE CASCADE +); + +CREATE UNIQUE INDEX endpoint_idx_v8 + ON endpoints_v8(ieee, endpoint_id); + + +-- clusters +DROP TABLE IF EXISTS in_clusters_v8; +CREATE TABLE in_clusters_v8 ( + ieee ieee NOT NULL, + endpoint_id INTEGER NOT NULL, + cluster INTEGER NOT NULL, + + FOREIGN KEY(ieee, endpoint_id) + REFERENCES endpoints_v8(ieee, endpoint_id) + ON DELETE CASCADE +); + +CREATE UNIQUE INDEX in_clusters_idx_v8 + ON in_clusters_v8(ieee, endpoint_id, cluster); + + +-- neighbors +DROP TABLE IF EXISTS neighbors_v8; +CREATE TABLE neighbors_v8 ( + device_ieee ieee NOT NULL, + extended_pan_id ieee NOT NULL, + ieee ieee NOT NULL, + nwk INTEGER NOT NULL, + device_type INTEGER NOT 
NULL, + rx_on_when_idle INTEGER NOT NULL, + relationship INTEGER NOT NULL, + reserved1 INTEGER NOT NULL, + permit_joining INTEGER NOT NULL, + reserved2 INTEGER NOT NULL, + depth INTEGER NOT NULL, + lqi INTEGER NOT NULL, + + FOREIGN KEY(device_ieee) + REFERENCES devices_v8(ieee) + ON DELETE CASCADE +); + +CREATE INDEX neighbors_idx_v8 + ON neighbors_v8(device_ieee); + + +-- node descriptors +DROP TABLE IF EXISTS node_descriptors_v8; +CREATE TABLE node_descriptors_v8 ( + ieee ieee NOT NULL, + + logical_type INTEGER NOT NULL, + complex_descriptor_available INTEGER NOT NULL, + user_descriptor_available INTEGER NOT NULL, + reserved INTEGER NOT NULL, + aps_flags INTEGER NOT NULL, + frequency_band INTEGER NOT NULL, + mac_capability_flags INTEGER NOT NULL, + manufacturer_code INTEGER NOT NULL, + maximum_buffer_size INTEGER NOT NULL, + maximum_incoming_transfer_size INTEGER NOT NULL, + server_mask INTEGER NOT NULL, + maximum_outgoing_transfer_size INTEGER NOT NULL, + descriptor_capability_field INTEGER NOT NULL, + + FOREIGN KEY(ieee) + REFERENCES devices_v8(ieee) + ON DELETE CASCADE +); + +CREATE UNIQUE INDEX node_descriptors_idx_v8 + ON node_descriptors_v8(ieee); + + +-- output clusters +DROP TABLE IF EXISTS out_clusters_v8; +CREATE TABLE out_clusters_v8 ( + ieee ieee NOT NULL, + endpoint_id INTEGER NOT NULL, + cluster INTEGER NOT NULL, + + FOREIGN KEY(ieee, endpoint_id) + REFERENCES endpoints_v8(ieee, endpoint_id) + ON DELETE CASCADE +); + +CREATE UNIQUE INDEX out_clusters_idx_v8 + ON out_clusters_v8(ieee, endpoint_id, cluster); + + +-- attributes +DROP TABLE IF EXISTS attributes_cache_v8; +CREATE TABLE attributes_cache_v8 ( + ieee ieee NOT NULL, + endpoint_id INTEGER NOT NULL, + cluster INTEGER NOT NULL, + attrid INTEGER NOT NULL, + value BLOB NOT NULL, + + -- Quirks can create "virtual" clusters and endpoints that won't be present in the + -- DB but whose values still need to be cached + FOREIGN KEY(ieee) + REFERENCES devices_v8(ieee) + ON DELETE CASCADE +); + +CREATE UNIQUE INDEX attributes_idx_v8 + ON attributes_cache_v8(ieee, endpoint_id, cluster, attrid); + + +-- groups +DROP TABLE IF EXISTS groups_v8; +CREATE TABLE groups_v8 ( + group_id INTEGER NOT NULL, + name TEXT NOT NULL +); + +CREATE UNIQUE INDEX groups_idx_v8 + ON groups_v8(group_id); + + +-- group members +DROP TABLE IF EXISTS group_members_v8; +CREATE TABLE group_members_v8 ( + group_id INTEGER NOT NULL, + ieee ieee NOT NULL, + endpoint_id INTEGER NOT NULL, + + FOREIGN KEY(group_id) + REFERENCES groups_v8(group_id) + ON DELETE CASCADE, + FOREIGN KEY(ieee, endpoint_id) + REFERENCES endpoints_v8(ieee, endpoint_id) + ON DELETE CASCADE +); + +CREATE UNIQUE INDEX group_members_idx_v8 + ON group_members_v8(group_id, ieee, endpoint_id); + + +-- relays +DROP TABLE IF EXISTS relays_v8; +CREATE TABLE relays_v8 ( + ieee ieee NOT NULL, + relays BLOB NOT NULL, + + FOREIGN KEY(ieee) + REFERENCES devices_v8(ieee) + ON DELETE CASCADE +); + +CREATE UNIQUE INDEX relays_idx_v8 + ON relays_v8(ieee); + + +-- unsupported attributes +DROP TABLE IF EXISTS unsupported_attributes_v8; +CREATE TABLE unsupported_attributes_v8 ( + ieee ieee NOT NULL, + endpoint_id INTEGER NOT NULL, + cluster INTEGER NOT NULL, + attrid INTEGER NOT NULL, + + FOREIGN KEY(ieee) + REFERENCES devices_v8(ieee) + ON DELETE CASCADE, + FOREIGN KEY(ieee, endpoint_id, cluster) + REFERENCES in_clusters_v8(ieee, endpoint_id, cluster) + ON DELETE CASCADE +); + +CREATE UNIQUE INDEX unsupported_attributes_idx_v8 + ON unsupported_attributes_v8(ieee, endpoint_id, cluster, attrid); diff --git 
a/zigpy/application.py b/zigpy/application.py index d8acb4fba..4ba05d21a 100644 --- a/zigpy/application.py +++ b/zigpy/application.py @@ -299,6 +299,10 @@ def handle_join(self, nwk: t.NWK, ieee: t.EUI64, parent_nwk: t.NWK) -> None: LOGGER.debug("Device %s changed id (0x%04x => 0x%04x)", ieee, dev.nwk, nwk) new_join = True + # Not all stacks send a ZDO command when a device joins so the last_seen should + # be updated + dev.update_last_seen() + if new_join: self.listener_event("device_joined", dev) dev.schedule_initialize() diff --git a/zigpy/device.py b/zigpy/device.py index a79546beb..3067cc81f 100644 --- a/zigpy/device.py +++ b/zigpy/device.py @@ -2,9 +2,9 @@ import asyncio import binascii +from datetime import datetime, timezone import enum import logging -import time from typing import TYPE_CHECKING, Any from zigpy.const import ( @@ -58,7 +58,7 @@ def __init__(self, application, ieee, nwk): self.endpoints: dict[int, zdo.ZDO | zigpy.endpoint.Endpoint] = {0: self.zdo} self.lqi: int | None = None self.rssi: int | None = None - self.last_seen: float | None = None + self._last_seen: datetime | None = None self._initialize_task: asyncio.Task | None = None self._group_scan_task: asyncio.Task | None = None self._listeners = {} @@ -77,6 +77,25 @@ def __init__(self, application, ieee, nwk): def name(self) -> str: return f"0x{self.nwk:04X}" + def update_last_seen(self) -> None: + """ + Update the `last_seen` attribute to the current time and emit an event. + """ + + self.last_seen = datetime.now(timezone.utc) + + @property + def last_seen(self) -> datetime | None: + return self._last_seen + + @last_seen.setter + def last_seen(self, value: datetime | int | float): + if isinstance(value, (int, float)): + value = datetime.fromtimestamp(value, timezone.utc) + + self._last_seen = value + self.listener_event("device_last_seen_updated", self._last_seen) + @property def non_zdo_endpoints(self) -> list[zigpy.endpoint.Endpoint]: return [ @@ -300,7 +319,7 @@ async def request( ) # If application.request raises an exception, we won't get here, so # won't update last_seen, as expected - self.last_seen = time.time() + self.update_last_seen() if not expect_reply: return None @@ -321,7 +340,7 @@ def handle_message( dst_addressing: None | (Addressing.Group | Addressing.IEEE | Addressing.NWK) = None, ): - self.last_seen = time.time() + self.update_last_seen() try: hdr, args = self.deserialize(src_ep, cluster, message) From 1344485cbe266de908e5e5e04b2ca88a4a42fe50 Mon Sep 17 00:00:00 2001 From: puddly <32534428+puddly@users.noreply.github.com> Date: Thu, 21 Apr 2022 13:05:36 -0400 Subject: [PATCH 07/15] Ensure all tables are explicitly handled in every migration --- tests/test_appdb_migration.py | 33 ++++++++++++++++++- zigpy/appdb.py | 60 +++++++++++++++++++++++++++++++---- 2 files changed, 86 insertions(+), 7 deletions(-) diff --git a/tests/test_appdb_migration.py b/tests/test_appdb_migration.py index 7ff26ca7f..0dd79bde8 100644 --- a/tests/test_appdb_migration.py +++ b/tests/test_appdb_migration.py @@ -9,7 +9,7 @@ import zigpy.types as t from zigpy.zdo import types as zdo_t -from tests.async_mock import patch +from tests.async_mock import AsyncMock, MagicMock, patch from tests.test_appdb import auto_kill_aiosqlite, make_app # noqa: F401 @@ -457,3 +457,34 @@ async def test_v5_to_v7_migration(test_db): app = await make_app(test_db_v5) await app.pre_shutdown() + + +async def test_migration_missing_tables(): + conn = AsyncMock() + app = MagicMock() + appdb = zigpy.appdb.PersistingListener(conn, app) + + 
appdb._get_table_versions = AsyncMock( + return_value={"table1_v1": "1", "table1": "", "table2_v1": "1"} + ) + + with patch.object(appdb, "execute") as execute: + execute.return_value = AsyncMock() + execute.return_value.__aenter__.return_value = AsyncMock(return_value=[]) + + # Migrations must explicitly specify all old tables, even if they will be untouched + with pytest.raises(RuntimeError): + await appdb._migrate_tables( + { + "table1_v1": "table1_v2", + # "table2_v1": "table2_v2", + } + ) + + # The untouched table will never be queried + await appdb._migrate_tables({"table1_v1": "table1_v2", "table2_v1": None}) + + appdb.execute.assert_called_once_with("SELECT * FROM table1_v1") + + with pytest.raises(AssertionError): + appdb.execute.assert_called_once_with("SELECT * FROM table2_v1") diff --git a/zigpy/appdb.py b/zigpy/appdb.py index 7601c7b15..22f6c3f00 100644 --- a/zigpy/appdb.py +++ b/zigpy/appdb.py @@ -3,6 +3,7 @@ import asyncio from datetime import datetime, timezone import logging +import re import types from typing import Any @@ -28,6 +29,7 @@ MIN_SQLITE_VERSION = (3, 24, 0) UNIX_EPOCH = datetime.fromtimestamp(0, tz=timezone.utc) +DB_V_REGEX = re.compile(r"(?:_v\d+)?$") def _import_compatible_sqlite3(min_version: tuple[int, int, int]) -> types.ModuleType: @@ -631,14 +633,23 @@ async def _register_device_listeners(self) -> None: dev.add_context_listener(self) dev.neighbors.add_context_listener(self) - async def _table_exists(self, name: str) -> bool: + async def _get_table_versions(self) -> dict[str, str]: + tables = {} + async with self.execute( - "SELECT count(*) FROM sqlite_master WHERE type='table' AND name=?", - [name], + "SELECT name FROM sqlite_master WHERE type='table'" ) as cursor: - (count,) = await cursor.fetchone() + async for (name,) in cursor: + # The regex will always return a match + match = DB_V_REGEX.search(name) + assert match is not None + + tables[name] = match.group(0) - return bool(count) + return tables + + async def _table_exists(self, name: str) -> bool: + return name in (await self._get_table_versions()) async def _run_migrations(self): """Migrates the database to the newest schema.""" @@ -695,9 +706,27 @@ async def _migrate_tables( ): """Copy rows from one set of tables into another.""" + # Extract the "old" table version suffix + tables = await self._get_table_versions() + old_table_name = list(table_map.keys())[0] + old_version = tables[old_table_name] + + # Check which tables would not be migrated + old_tables = [t for t, v in tables.items() if v == old_version] + unmigrated_old_tables = [t for t in old_tables if t not in table_map] + + if unmigrated_old_tables: + raise RuntimeError( + f"The following tables were not migrated: {unmigrated_old_tables}" + ) + # Insertion order matters for foreign key constraints but any rows that fail # to insert due to constraint violations can be discarded for old_table, new_table in table_map.items(): + # Ignore tables without a migration + if new_table is None: + continue + async with self.execute(f"SELECT * FROM {old_table}") as cursor: async for row in cursor: placeholders = ",".join("?" 
* len(row)) @@ -773,6 +802,10 @@ async def _migrate_to_v5(self): # These were migrated in v4 "neighbors_v4": "neighbors_v5", "node_descriptors_v4": "node_descriptors_v5", + # Explicitly specify which tables will not be migrated + "devices": None, + "neighbors": None, + "node_descriptors": None, }, errors="warn", ) @@ -793,6 +826,7 @@ async def _migrate_to_v6(self): "attributes_cache_v5": "attributes_cache_v6", "neighbors_v5": "neighbors_v6", "node_descriptors_v5": "node_descriptors_v6", + "devices_v5": None, } ) @@ -811,7 +845,19 @@ async def _migrate_to_v6(self): ) await self._migrate_tables( - {"attributes": "attributes_cache_v6"}, errors="ignore" + { + "attributes": "attributes_cache_v6", + "devices": None, + "endpoints": None, + "clusters": None, + "neighbors": None, + "node_descriptors": None, + "output_clusters": None, + "groups": None, + "group_members": None, + "relays": None, + }, + errors="ignore", ) async def _migrate_to_v7(self): @@ -830,6 +876,7 @@ async def _migrate_to_v7(self): "attributes_cache_v6": "attributes_cache_v7", "neighbors_v6": "neighbors_v7", "node_descriptors_v6": "node_descriptors_v7", + "devices_v6": None, } ) @@ -857,5 +904,6 @@ async def _migrate_to_v8(self): "neighbors_v7": "neighbors_v8", "node_descriptors_v7": "node_descriptors_v8", "unsupported_attributes_v7": "unsupported_attributes_v8", + "devices_v7": None, } ) From aabf7c57324c3aee723b79f98f80009509e5c26d Mon Sep 17 00:00:00 2001 From: puddly <32534428+puddly@users.noreply.github.com> Date: Thu, 21 Apr 2022 15:14:21 -0400 Subject: [PATCH 08/15] Make unit tests compatible with all Python versions --- tests/async_mock.py | 12 ++++++++++ tests/test_appdb_migration.py | 43 ++++++++++++++++++++--------------- 2 files changed, 37 insertions(+), 18 deletions(-) diff --git a/tests/async_mock.py b/tests/async_mock.py index 8257ddd3b..c0bf8fb45 100644 --- a/tests/async_mock.py +++ b/tests/async_mock.py @@ -3,7 +3,19 @@ if sys.version_info[:2] < (3, 8): from asynctest.mock import * # noqa + from asynctest.mock import MagicMock as _MagicMock AsyncMock = CoroutineMock # noqa: F405 + + class MagicMock(_MagicMock): + async def __aenter__(self): + return self.aenter + + async def __aexit__(self, *args): + pass + + async def __aiter__(self): + return self.aiter + else: from unittest.mock import * # noqa diff --git a/tests/test_appdb_migration.py b/tests/test_appdb_migration.py index 0dd79bde8..b2ebfe398 100644 --- a/tests/test_appdb_migration.py +++ b/tests/test_appdb_migration.py @@ -460,31 +460,38 @@ async def test_v5_to_v7_migration(test_db): async def test_migration_missing_tables(): - conn = AsyncMock() app = MagicMock() + conn = MagicMock() + conn.close = AsyncMock() + appdb = zigpy.appdb.PersistingListener(conn, app) appdb._get_table_versions = AsyncMock( return_value={"table1_v1": "1", "table1": "", "table2_v1": "1"} ) - with patch.object(appdb, "execute") as execute: - execute.return_value = AsyncMock() - execute.return_value.__aenter__.return_value = AsyncMock(return_value=[]) - - # Migrations must explicitly specify all old tables, even if they will be untouched - with pytest.raises(RuntimeError): - await appdb._migrate_tables( - { - "table1_v1": "table1_v2", - # "table2_v1": "table2_v2", - } - ) + results = MagicMock() + results.__aiter__.return_value = results + results.__anext__.side_effect = StopIteration + + appdb.execute = MagicMock() + appdb.execute.return_value.__aenter__.return_value = results + + # Migrations must explicitly specify all old tables, even if they will be untouched + with 
pytest.raises(RuntimeError): + await appdb._migrate_tables( + { + "table1_v1": "table1_v2", + # "table2_v1": "table2_v2", + } + ) + + # The untouched table will never be queried + await appdb._migrate_tables({"table1_v1": "table1_v2", "table2_v1": None}) - # The untouched table will never be queried - await appdb._migrate_tables({"table1_v1": "table1_v2", "table2_v1": None}) + appdb.execute.assert_called_once_with("SELECT * FROM table1_v1") - appdb.execute.assert_called_once_with("SELECT * FROM table1_v1") + with pytest.raises(AssertionError): + appdb.execute.assert_called_once_with("SELECT * FROM table2_v1") - with pytest.raises(AssertionError): - appdb.execute.assert_called_once_with("SELECT * FROM table2_v1") + await appdb.shutdown() From 08fecee9724555bad3dfedcaa0f7e570f5f3ac21 Mon Sep 17 00:00:00 2001 From: puddly <32534428+puddly@users.noreply.github.com> Date: Thu, 21 Apr 2022 15:32:15 -0400 Subject: [PATCH 09/15] Add more unit tests --- tests/test_appdb.py | 7 ++++++- tests/test_device.py | 22 +++++++++++++++++++++- 2 files changed, 27 insertions(+), 2 deletions(-) diff --git a/tests/test_appdb.py b/tests/test_appdb.py index 635852463..060e1a3c2 100644 --- a/tests/test_appdb.py +++ b/tests/test_appdb.py @@ -4,6 +4,7 @@ import sqlite3 import sys import threading +import time import aiosqlite import pytest @@ -173,8 +174,12 @@ async def test_database(tmpdir): dev.model = "Model" assert dev.get_signature()[SIG_MANUFACTURER] == "Custom" assert dev.get_signature()[SIG_MODEL] == "Model" + + ts = time.time() + dev.last_seen = ts dev_last_seen = dev.last_seen - assert isinstance(dev_last_seen, datetime) + assert isinstance(dev.last_seen, datetime) + assert abs(dev.last_seen.timestamp() - ts) < 0.1 # Test a CustomDevice custom_ieee = make_ieee(1) diff --git a/tests/test_device.py b/tests/test_device.py index 4896d009b..be65adb9d 100644 --- a/tests/test_device.py +++ b/tests/test_device.py @@ -1,4 +1,5 @@ import asyncio +from datetime import datetime, timezone import logging import pytest @@ -11,7 +12,7 @@ import zigpy.types as t from zigpy.zdo import types as zdo_t -from .async_mock import AsyncMock, MagicMock, patch, sentinel +from .async_mock import ANY, AsyncMock, MagicMock, patch, sentinel @pytest.fixture @@ -429,3 +430,22 @@ def test_device_name(dev): assert dev.nwk == 0xFFFF assert dev.name == "0xFFFF" + + +def test_device_last_seen(dev, monkeypatch): + """Test the device last_seen property handles updates and broadcasts events.""" + + monkeypatch.setattr(dev, "listener_event", MagicMock()) + assert dev.last_seen is None + + dev.last_seen = 0 + epoch = datetime(1970, 1, 1, 0, 0, 0, 0, tzinfo=timezone.utc) + assert dev.last_seen == epoch + + dev.listener_event.assert_called_once_with("device_last_seen_updated", epoch) + dev.listener_event.reset_mock() + + dev.update_last_seen() + dev.listener_event.assert_called_once_with("device_last_seen_updated", ANY) + event_time = dev.listener_event.mock_calls[0][1][1] + assert (event_time - datetime.now(timezone.utc)).total_seconds() < 0.1 From bf6cd388cfc5c68cf8236213ae0740ea907199e8 Mon Sep 17 00:00:00 2001 From: puddly <32534428+puddly@users.noreply.github.com> Date: Tue, 26 Apr 2022 17:43:06 -0400 Subject: [PATCH 10/15] 0.46.0.dev0 version bump --- zigpy/__init__.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/zigpy/__init__.py b/zigpy/__init__.py index 2fa1947b5..532c3ca6e 100644 --- a/zigpy/__init__.py +++ b/zigpy/__init__.py @@ -1,5 +1,5 @@ MAJOR_VERSION = 0 -MINOR_VERSION = 45 +MINOR_VERSION = 46 PATCH_VERSION = 
"0.dev0" __short_version__ = f"{MAJOR_VERSION}.{MINOR_VERSION}" __version__ = f"{__short_version__}.{PATCH_VERSION}" From 8a9caec8be9ef386b89931ab820413e6f9c67b9a Mon Sep 17 00:00:00 2001 From: puddly <32534428+puddly@users.noreply.github.com> Date: Tue, 26 Apr 2022 22:41:23 -0400 Subject: [PATCH 11/15] Make sure `last_seen` is still a float --- tests/test_appdb.py | 11 +++++------ tests/test_device.py | 2 +- zigpy/appdb.py | 2 +- zigpy/device.py | 4 ++-- 4 files changed, 9 insertions(+), 10 deletions(-) diff --git a/tests/test_appdb.py b/tests/test_appdb.py index 060e1a3c2..6a59ea214 100644 --- a/tests/test_appdb.py +++ b/tests/test_appdb.py @@ -1,5 +1,4 @@ import asyncio -from datetime import datetime import os import sqlite3 import sys @@ -178,8 +177,8 @@ async def test_database(tmpdir): ts = time.time() dev.last_seen = ts dev_last_seen = dev.last_seen - assert isinstance(dev.last_seen, datetime) - assert abs(dev.last_seen.timestamp() - ts) < 0.1 + assert isinstance(dev.last_seen, float) + assert abs(dev.last_seen - ts) < 0.01 # Test a CustomDevice custom_ieee = make_ieee(1) @@ -202,7 +201,7 @@ async def test_database(tmpdir): assert dev.endpoints[1].in_clusters[0x0008]._attr_cache[0x0011] == 17 assert dev.endpoints[99].in_clusters[0x0008]._attr_cache[0x0011] == 17 custom_dev_last_seen = dev.last_seen - assert isinstance(custom_dev_last_seen, datetime) + assert isinstance(custom_dev_last_seen, float) await app.pre_shutdown() @@ -221,7 +220,7 @@ async def test_database(tmpdir): assert dev.endpoints[3].device_type == profiles.zll.DeviceType.COLOR_LIGHT assert dev.relays == relays_1 # The timestamp won't be restored exactly but it is more than close enough - assert abs((dev.last_seen - dev_last_seen).total_seconds()) < 0.01 + assert abs(dev.last_seen - dev_last_seen) < 0.01 dev = app2.get_device(custom_ieee) # This virtual attribute is added by the quirk, there is no corresponding cluster @@ -229,7 +228,7 @@ async def test_database(tmpdir): assert dev.endpoints[1].in_clusters[0x0008]._attr_cache[0x0011] == 17 assert dev.endpoints[99].in_clusters[0x0008]._attr_cache[0x0011] == 17 assert dev.relays == relays_2 - assert abs((dev.last_seen - custom_dev_last_seen).total_seconds()) < 0.01 + assert abs(dev.last_seen - custom_dev_last_seen) < 0.01 dev.relays = None app.handle_leave(99, ieee) diff --git a/tests/test_device.py b/tests/test_device.py index be65adb9d..419f719a3 100644 --- a/tests/test_device.py +++ b/tests/test_device.py @@ -440,7 +440,7 @@ def test_device_last_seen(dev, monkeypatch): dev.last_seen = 0 epoch = datetime(1970, 1, 1, 0, 0, 0, 0, tzinfo=timezone.utc) - assert dev.last_seen == epoch + assert dev.last_seen == epoch.timestamp() dev.listener_event.assert_called_once_with("device_last_seen_updated", epoch) dev.listener_event.reset_mock() diff --git a/zigpy/appdb.py b/zigpy/appdb.py index 22f6c3f00..0dcb33b9a 100644 --- a/zigpy/appdb.py +++ b/zigpy/appdb.py @@ -372,7 +372,7 @@ async def _save_device(self, device: zigpy.typing.DeviceType) -> None: status=excluded.status, last_seen=excluded.last_seen""" await self.execute( - q, (device.ieee, device.nwk, device.status, device.last_seen) + q, (device.ieee, device.nwk, device.status, device._last_seen) ) if device.node_desc is not None: diff --git a/zigpy/device.py b/zigpy/device.py index 3067cc81f..6218de836 100644 --- a/zigpy/device.py +++ b/zigpy/device.py @@ -85,8 +85,8 @@ def update_last_seen(self) -> None: self.last_seen = datetime.now(timezone.utc) @property - def last_seen(self) -> datetime | None: - return self._last_seen + 
def last_seen(self) -> float | None: + return self._last_seen.timestamp() if self._last_seen is not None else None @last_seen.setter def last_seen(self, value: datetime | int | float): From 6a68054682eb30d071ca4921136927fcee34d9d0 Mon Sep 17 00:00:00 2001 From: puddly <32534428+puddly@users.noreply.github.com> Date: Tue, 26 Apr 2022 23:40:43 -0400 Subject: [PATCH 12/15] Set `last_seen = None` instead of `0` --- zigpy/appdb.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/zigpy/appdb.py b/zigpy/appdb.py index 0dcb33b9a..627b0b969 100644 --- a/zigpy/appdb.py +++ b/zigpy/appdb.py @@ -565,7 +565,9 @@ async def _load_devices(self) -> None: async for (ieee, nwk, status, last_seen) in cursor: dev = self._application.add_device(ieee, nwk) dev.status = zigpy.device.Status(status) - dev._last_seen = last_seen + + if last_seen > UNIX_EPOCH: + dev._last_seen = last_seen async def _load_node_descriptors(self) -> None: async with self.execute(f"SELECT * FROM node_descriptors{DB_V}") as cursor: From 79990a111f495f6cabfeefe29fbc29717d52468b Mon Sep 17 00:00:00 2001 From: puddly <32534428+puddly@users.noreply.github.com> Date: Tue, 26 Apr 2022 23:41:19 -0400 Subject: [PATCH 13/15] Fix silent SQL error and add more thorough unit tests --- tests/test_appdb.py | 44 +++++++++++++++++++++++++++++++++++ tests/test_appdb_migration.py | 25 ++++++++++++++------ zigpy/appdb.py | 4 ++-- 3 files changed, 64 insertions(+), 9 deletions(-) diff --git a/tests/test_appdb.py b/tests/test_appdb.py index 6a59ea214..eef700ab6 100644 --- a/tests/test_appdb.py +++ b/tests/test_appdb.py @@ -773,6 +773,50 @@ async def test_load_unsupp_attr_wrong_cluster(tmpdir): await app.pre_shutdown() +async def test_last_seen(tmpdir): + db = os.path.join(str(tmpdir), "test.db") + app = await make_app(db) + + ieee = make_ieee() + app.handle_join(99, ieee, 0) + + dev = app.get_device(ieee=ieee) + ep = dev.add_endpoint(3) + ep.status = zigpy.endpoint.Status.ZDO_INIT + ep.profile_id = 260 + ep.device_type = profiles.zha.DeviceType.PUMP + clus = ep.add_input_cluster(0) + ep.add_output_cluster(1) + clus._update_attribute(4, "Custom") + clus._update_attribute(5, "Model") + app.device_initialized(dev) + + old_last_seen = dev.last_seen + await app.pre_shutdown() + + # The `last_seen` of a joined device persists + app = await make_app(db) + dev = app.get_device(ieee=ieee) + await app.pre_shutdown() + + next_last_seen = dev.last_seen + assert abs(next_last_seen - old_last_seen) < 0.01 + + await asyncio.sleep(0.1) + + # Now the last_seen will update + app = await make_app(db) + dev = app.get_device(ieee=ieee) + dev.update_last_seen() + await app.pre_shutdown() + + # And it will be updated when the database next loads + app = await make_app(db) + dev = app.get_device(ieee=ieee) + assert dev.last_seen > next_last_seen + 0.1 + await app.pre_shutdown() + + @pytest.mark.parametrize( "stdlib_version,use_sqlite", [ diff --git a/tests/test_appdb_migration.py b/tests/test_appdb_migration.py index b2ebfe398..b880ae271 100644 --- a/tests/test_appdb_migration.py +++ b/tests/test_appdb_migration.py @@ -405,13 +405,6 @@ async def test_v4_to_v5_migration_bad_neighbors(test_db, with_bad_neighbor): assert num_new_neighbors == num_v4_neighbors -async def test_v5_to_v6_migration(test_db): - test_db_v5 = test_db("simple_v5.sql") - - app = await make_app(test_db_v5) - await app.pre_shutdown() - - @pytest.mark.parametrize("with_quirk_attribute", [False, True]) async def test_v4_to_v6_migration_missing_endpoints(test_db, with_quirk_attribute): """V5's 
schema was too rigid and failed to migrate endpoints created by quirks""" @@ -495,3 +488,21 @@ async def test_migration_missing_tables(): appdb.execute.assert_called_once_with("SELECT * FROM table2_v1") await appdb.shutdown() + + +async def test_last_seen_migration(test_db): + test_db_v5 = test_db("simple_v5.sql") + + # To preserve the old behavior, `0` will not be exposed to ZHA, only `None` + app = await make_app(test_db_v5) + dev = app.get_device(nwk=0xBD4D) + + assert dev.last_seen is None + dev.update_last_seen() + assert isinstance(dev.last_seen, float) + await app.pre_shutdown() + + # But the device's `last_seen` will still update properly when it's actually set + app = await make_app(test_db_v5) + assert isinstance(app.get_device(nwk=0xBD4D).last_seen, float) + await app.pre_shutdown() diff --git a/zigpy/appdb.py b/zigpy/appdb.py index 627b0b969..65e13389e 100644 --- a/zigpy/appdb.py +++ b/zigpy/appdb.py @@ -228,7 +228,7 @@ def device_last_seen_updated( async def _save_device_last_seen(self, ieee: t.EUI64, last_seen: datetime) -> None: await self.execute( - f"UPDATE devices{DB_V} SET last_seen=? WHERE ieee=?", (ieee, last_seen) + f"UPDATE devices{DB_V} SET last_seen=? WHERE ieee=?", (last_seen, ieee) ) await self._db.commit() @@ -372,7 +372,7 @@ async def _save_device(self, device: zigpy.typing.DeviceType) -> None: status=excluded.status, last_seen=excluded.last_seen""" await self.execute( - q, (device.ieee, device.nwk, device.status, device._last_seen) + q, (device.ieee, device.nwk, device.status, device._last_seen or UNIX_EPOCH) ) if device.node_desc is not None: From a22df5d24d62b631bcdcd32262b202b2fa8f4044 Mon Sep 17 00:00:00 2001 From: puddly <32534428+puddly@users.noreply.github.com> Date: Wed, 27 Apr 2022 13:11:38 -0400 Subject: [PATCH 14/15] Log the entire listener exception traceback when an error occurs --- zigpy/util.py | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/zigpy/util.py b/zigpy/util.py index ae08b9c68..c98121f1b 100644 --- a/zigpy/util.py +++ b/zigpy/util.py @@ -50,7 +50,8 @@ def listener_event(self, method_name, *args): else: result.append(method(*args)) except Exception as e: - LOGGER.warning("Error calling listener.%s: %s", method_name, e) + LOGGER.warning("Error calling listener %r%r: %r", method, args, e) + LOGGER.debug("Error calling listener %r%r", method, args, exc_info=e) return result async def async_event(self, method_name, *args): @@ -69,7 +70,10 @@ async def async_event(self, method_name, *args): results = [] for result in await asyncio.gather(*tasks, return_exceptions=True): if isinstance(result, Exception): - LOGGER.warning("Error calling listener: %s", result) + LOGGER.warning("Error calling listener %r%r: %r", method, args, result) + LOGGER.debug( + "Error calling listener %r%r", method, args, exc_info=result + ) else: results.append(result) return results From 054a8e4ecdf13fc519140cc0cf46c3c0ddfd161f Mon Sep 17 00:00:00 2001 From: puddly <32534428+puddly@users.noreply.github.com> Date: Wed, 27 Apr 2022 16:29:31 -0400 Subject: [PATCH 15/15] Make `args` more distinct in logging message --- zigpy/util.py | 17 +++++++++++++---- 1 file changed, 13 insertions(+), 4 deletions(-) diff --git a/zigpy/util.py b/zigpy/util.py index c98121f1b..759818afb 100644 --- a/zigpy/util.py +++ b/zigpy/util.py @@ -50,8 +50,12 @@ def listener_event(self, method_name, *args): else: result.append(method(*args)) except Exception as e: - LOGGER.warning("Error calling listener %r%r: %r", method, args, e) - LOGGER.debug("Error calling listener 
%r%r", method, args, exc_info=e) + LOGGER.warning( + "Error calling listener %r with args %r: %r", method, args, e + ) + LOGGER.debug( + "Error calling listener %r with args %r", method, args, exc_info=e + ) return result async def async_event(self, method_name, *args): @@ -70,9 +74,14 @@ async def async_event(self, method_name, *args): results = [] for result in await asyncio.gather(*tasks, return_exceptions=True): if isinstance(result, Exception): - LOGGER.warning("Error calling listener %r%r: %r", method, args, result) + LOGGER.warning( + "Error calling listener %r with args %r: %r", method, args, result + ) LOGGER.debug( - "Error calling listener %r%r", method, args, exc_info=result + "Error calling listener %r with args %r", + method, + args, + exc_info=result, ) else: results.append(result)