diff --git a/chia/_tests/core/data_layer/conftest.py b/chia/_tests/core/data_layer/conftest.py
index d8c814d1b465..32c218129254 100644
--- a/chia/_tests/core/data_layer/conftest.py
+++ b/chia/_tests/core/data_layer/conftest.py
@@ -52,9 +52,9 @@ def create_example_fixture(request: SubRequest) -> Callable[[DataStore, bytes32]
     return request.param  # type: ignore[no-any-return]
 
 
-@pytest.fixture(name="tree_id", scope="function")
-def tree_id_fixture() -> bytes32:
-    base = b"a tree id"
+@pytest.fixture(name="store_id", scope="function")
+def store_id_fixture() -> bytes32:
+    base = b"a store id"
     pad = b"." * (32 - len(base))
     return bytes32(pad + base)
 
@@ -66,8 +66,8 @@ async def raw_data_store_fixture(database_uri: str) -> AsyncIterable[DataStore]:
 
 
 @pytest.fixture(name="data_store", scope="function")
-async def data_store_fixture(raw_data_store: DataStore, tree_id: bytes32) -> AsyncIterable[DataStore]:
-    await raw_data_store.create_tree(tree_id=tree_id, status=Status.COMMITTED)
+async def data_store_fixture(raw_data_store: DataStore, store_id: bytes32) -> AsyncIterable[DataStore]:
+    await raw_data_store.create_tree(store_id=store_id, status=Status.COMMITTED)
 
     await raw_data_store.check()
     yield raw_data_store
@@ -82,14 +82,14 @@ def node_type_fixture(request: SubRequest) -> NodeType:
 @pytest.fixture(name="valid_node_values")
 async def valid_node_values_fixture(
     data_store: DataStore,
-    tree_id: bytes32,
+    store_id: bytes32,
     node_type: NodeType,
 ) -> Dict[str, Any]:
-    await add_01234567_example(data_store=data_store, tree_id=tree_id)
+    await add_01234567_example(data_store=data_store, store_id=store_id)
 
     if node_type == NodeType.INTERNAL:
-        node_a = await data_store.get_node_by_key(key=b"\x02", tree_id=tree_id)
-        node_b = await data_store.get_node_by_key(key=b"\x04", tree_id=tree_id)
+        node_a = await data_store.get_node_by_key(key=b"\x02", store_id=store_id)
+        node_b = await data_store.get_node_by_key(key=b"\x04", store_id=store_id)
         return create_valid_node_values(node_type=node_type, left_hash=node_a.hash, right_hash=node_b.hash)
     elif node_type == NodeType.TERMINAL:
         return create_valid_node_values(node_type=node_type)
diff --git a/chia/_tests/core/data_layer/test_data_cli.py b/chia/_tests/core/data_layer/test_data_cli.py
index 3ee4d458e4cf..778a795aeae1 100644
--- a/chia/_tests/core/data_layer/test_data_cli.py
+++ b/chia/_tests/core/data_layer/test_data_cli.py
@@ -29,29 +29,29 @@ def test_round_trip(chia_root: ChiaRoot, chia_daemon: None, chia_data: None) ->
     print(f"create_data_store: {create}")
     dic = json.loads(create.stdout)
     assert dic["success"]
-    tree_id = dic["id"]
+    store_id = dic["id"]
     key = "1a6f915513173902a7216e7d9e4a16bfd088e20683f45de3b432ce72e9cc7aa8"
     value = "ffff8353594d8083616263"
     changelist: List[Dict[str, str]] = [{"action": "insert", "key": key, "value": value}]
     print(json.dumps(changelist))
     update = chia_root.run(
-        args=["data", "update_data_store", "--id", tree_id, "--changelist", json.dumps(changelist)]
+        args=["data", "update_data_store", "--id", store_id, "--changelist", json.dumps(changelist)]
     )
     dic = json.loads(create.stdout)
     assert dic["success"]
     print(f"update_data_store: {update}")
-    completed_process = chia_root.run(args=["data", "get_value", "--id", tree_id, "--key", key])
+    completed_process = chia_root.run(args=["data", "get_value", "--id", store_id, "--key", key])
     parsed = json.loads(completed_process.stdout)
     expected = {"value": value, "success": True}
     assert parsed == expected
-    get_keys_values = chia_root.run(args=["data", "get_keys_values", "--id", tree_id])
+    get_keys_values = chia_root.run(args=["data", "get_keys_values", "--id", store_id])
     print(f"get_keys_values: {get_keys_values}")
     changelist = [{"action": "delete", "key": key}]
     update = chia_root.run(
-        args=["data", "update_data_store", "--id", tree_id, "--changelist", json.dumps(changelist)]
+        args=["data", "update_data_store", "--id", store_id, "--changelist", json.dumps(changelist)]
     )
     print(f"update_data_store: {update}")
-    completed_process = chia_root.run(args=["data", "get_value", "--id", tree_id, "--key", key])
+    completed_process = chia_root.run(args=["data", "get_value", "--id", store_id, "--key", key])
     parsed = json.loads(completed_process.stdout)
     expected = {"data": None, "success": True}
     assert parsed == expected
diff --git a/chia/_tests/core/data_layer/test_data_layer.py b/chia/_tests/core/data_layer/test_data_layer.py
index cf5919ce768e..04d5f1b6a11d 100644
--- a/chia/_tests/core/data_layer/test_data_layer.py
+++ b/chia/_tests/core/data_layer/test_data_layer.py
@@ -75,8 +75,8 @@ async def wallet_rpc_init() -> WalletRpcClient:
     )
 
     async with data_layer.manage():
-        await data_layer.get_downloader(tree_id=bytes32([0] * 32), url="")
-        await data_layer.get_uploaders(tree_id=bytes32([0] * 32))
+        await data_layer.get_downloader(store_id=bytes32([0] * 32), url="")
+        await data_layer.get_uploaders(store_id=bytes32([0] * 32))
         await data_layer.check_plugins()
 
     header_values = {request.headers.get(header_key) for request in recording_web_server.requests}
diff --git a/chia/_tests/core/data_layer/test_data_layer_util.py b/chia/_tests/core/data_layer/test_data_layer_util.py
index 32fd1a870da1..3a999f73da83 100644
--- a/chia/_tests/core/data_layer/test_data_layer_util.py
+++ b/chia/_tests/core/data_layer/test_data_layer_util.py
@@ -100,7 +100,7 @@ class RoundTripCase:
     RoundTripCase(
         id="Root",
         instance=Root(
-            tree_id=bytes32(b"\x00" * 32),
+            store_id=bytes32(b"\x00" * 32),
             node_hash=bytes32(b"\x01" * 32),
             generation=3,
             status=Status.PENDING,
@@ -115,7 +115,7 @@ class RoundTripCase:
         instance=ClearPendingRootsResponse(
             success=True,
             root=Root(
-                tree_id=bytes32(b"\x00" * 32),
+                store_id=bytes32(b"\x00" * 32),
                 node_hash=bytes32(b"\x01" * 32),
                 generation=3,
                 status=Status.PENDING,
diff --git a/chia/_tests/core/data_layer/test_data_rpc.py b/chia/_tests/core/data_layer/test_data_rpc.py
index 2940a58be2ab..ddf6aed08f3b 100644
--- a/chia/_tests/core/data_layer/test_data_rpc.py
+++ b/chia/_tests/core/data_layer/test_data_rpc.py
@@ -199,8 +199,8 @@ async def check_coin_state(wallet_node: WalletNode, coin_id: bytes32) -> bool:
     return False  # pragma: no cover
 
 
-async def check_singleton_confirmed(dl: DataLayer, tree_id: bytes32) -> bool:
-    return await dl.wallet_rpc.dl_latest_singleton(tree_id, True) is not None
+async def check_singleton_confirmed(dl: DataLayer, store_id: bytes32) -> bool:
+    return await dl.wallet_rpc.dl_latest_singleton(store_id, True) is not None
 
 
 async def process_block_and_check_offer_validity(offer: TradingOffer, offer_setup: OfferSetup) -> bool:
@@ -923,7 +923,7 @@ async def populate_offer_setup(offer_setup: OfferSetup, count: int) -> OfferSetu
         )
         for store_setup, value_prefix in setups:
             await store_setup.data_layer.batch_insert(
-                tree_id=store_setup.id,
+                store_id=store_setup.id,
                 changelist=[
                     {
                         "action": "insert",
@@ -1752,7 +1752,7 @@ async def test_make_offer_failure_rolls_back_db(offer_setup: OfferSetup) -> None
     with pytest.raises(Exception, match="store id not available"):
         await offer_setup.maker.api.make_offer(request=maker_request)
 
-    pending_root = await offer_setup.maker.data_layer.data_store.get_pending_root(tree_id=offer_setup.maker.id)
+    pending_root = await offer_setup.maker.data_layer.data_store.get_pending_root(store_id=offer_setup.maker.id)
     assert pending_root is None
 
 
@@ -2055,8 +2055,8 @@ async def test_clear_pending_roots(
 
     data_store = data_layer.data_store
 
-    tree_id = bytes32(range(32))
-    await data_store.create_tree(tree_id=tree_id, status=Status.COMMITTED)
+    store_id = bytes32(range(32))
+    await data_store.create_tree(store_id=store_id, status=Status.COMMITTED)
 
     key = b"\x01\x02"
     value = b"abc"
@@ -2064,20 +2064,20 @@ async def test_clear_pending_roots(
     await data_store.insert(
         key=key,
         value=value,
-        tree_id=tree_id,
+        store_id=store_id,
        reference_node_hash=None,
         side=None,
         status=Status.PENDING,
     )
 
-    pending_root = await data_store.get_pending_root(tree_id=tree_id)
+    pending_root = await data_store.get_pending_root(store_id=store_id)
     assert pending_root is not None
 
     if layer == InterfaceLayer.direct:
-        cleared_root = await data_rpc_api.clear_pending_roots({"store_id": tree_id.hex()})
+        cleared_root = await data_rpc_api.clear_pending_roots({"store_id": store_id.hex()})
     elif layer == InterfaceLayer.funcs:
         cleared_root = await clear_pending_roots(
-            store_id=tree_id,
+            store_id=store_id,
             rpc_port=rpc_port,
             root_path=bt.root_path,
         )
@@ -2089,7 +2089,7 @@ async def test_clear_pending_roots(
                 "data",
                 "clear_pending_roots",
                 "--id",
-                tree_id.hex(),
+                store_id.hex(),
                 "--data-rpc-port",
                 str(rpc_port),
                 "--yes",
@@ -2120,7 +2120,7 @@ async def test_clear_pending_roots(
             net_config=bt.config,
         )
         try:
-            cleared_root = await client.clear_pending_roots(store_id=tree_id)
+            cleared_root = await client.clear_pending_roots(store_id=store_id)
         finally:
             client.close()
             await client.await_closed()
@@ -2153,23 +2153,23 @@ async def test_issue_15955_deadlock(
     await full_node_api.wait_for_wallet_synced(wallet_node)
 
     # create a store
-    transaction_records, tree_id = await data_layer.create_store(fee=uint64(0))
+    transaction_records, store_id = await data_layer.create_store(fee=uint64(0))
     await full_node_api.process_transaction_records(records=transaction_records)
     await full_node_api.wait_for_wallet_synced(wallet_node)
-    assert await check_singleton_confirmed(dl=data_layer, tree_id=tree_id)
+    assert await check_singleton_confirmed(dl=data_layer, store_id=store_id)
 
     # insert a key and value
     key = b"\x00"
     value = b"\x01" * 10_000
     transaction_record = await data_layer.batch_update(
-        tree_id=tree_id,
+        store_id=store_id,
         changelist=[{"action": "insert", "key": key, "value": value}],
         fee=uint64(0),
    )
     assert transaction_record is not None
     await full_node_api.process_transaction_records(records=[transaction_record])
     await full_node_api.wait_for_wallet_synced(wallet_node)
-    assert await check_singleton_confirmed(dl=data_layer, tree_id=tree_id)
+    assert await check_singleton_confirmed(dl=data_layer, store_id=store_id)
 
     # get the value a bunch through several periodic data management cycles
     concurrent_requests = 10
@@ -2183,7 +2183,7 @@ async def test_issue_15955_deadlock(
     while time.monotonic() < end:
         with anyio.fail_after(adjusted_timeout(timeout)):
             await asyncio.gather(
-                *(asyncio.create_task(data_layer.get_value(store_id=tree_id, key=key)) for _ in range(10))
+                *(asyncio.create_task(data_layer.get_value(store_id=store_id, key=key)) for _ in range(10))
             )
 
 
@@ -3294,7 +3294,7 @@ async def test_unsubmitted_batch_update(
     )
     keys_values = await data_rpc_api.get_keys_values({"id": store_id.hex()})
     assert keys_values == {"keys_values": []}
-    pending_root = await data_layer.data_store.get_pending_root(tree_id=store_id)
+    pending_root = await data_layer.data_store.get_pending_root(store_id=store_id)
     assert pending_root is not None
     assert pending_root.status == Status.PENDING_BATCH
 
@@ -3313,7 +3313,7 @@ async def test_unsubmitted_batch_update(
     for key, value in to_insert:
         assert kv_dict["0x" + key.hex()] == "0x" + value.hex()
     prev_keys_values = keys_values
-    old_root = await data_layer.data_store.get_tree_root(tree_id=store_id)
+    old_root = await data_layer.data_store.get_tree_root(store_id=store_id)
 
     key = b"e"
     value = b"\x00\x05"
@@ -3326,7 +3326,7 @@ async def test_unsubmitted_batch_update(
     await full_node_api.farm_blocks_to_puzzlehash(
         count=NUM_BLOCKS_WITHOUT_SUBMIT, guarantee_transaction_blocks=True
     )
-    root = await data_layer.data_store.get_tree_root(tree_id=store_id)
+    root = await data_layer.data_store.get_tree_root(store_id=store_id)
     assert root == old_root
 
     key = b"f"
@@ -3342,9 +3342,9 @@ async def test_unsubmitted_batch_update(
     )
 
     await data_rpc_api.clear_pending_roots({"store_id": store_id.hex()})
-    pending_root = await data_layer.data_store.get_pending_root(tree_id=store_id)
+    pending_root = await data_layer.data_store.get_pending_root(store_id=store_id)
     assert pending_root is None
-    root = await data_layer.data_store.get_tree_root(tree_id=store_id)
+    root = await data_layer.data_store.get_tree_root(store_id=store_id)
     assert root == old_root
 
     key = b"g"
@@ -3363,7 +3363,7 @@ async def test_unsubmitted_batch_update(
     keys_values = await data_rpc_api.get_keys_values({"id": store_id.hex()})
     assert keys_values == prev_keys_values
 
-    pending_root = await data_layer.data_store.get_pending_root(tree_id=store_id)
+    pending_root = await data_layer.data_store.get_pending_root(store_id=store_id)
     assert pending_root is not None
     assert pending_root.status == Status.PENDING_BATCH
@@ -3427,7 +3427,7 @@ async def test_unsubmitted_batch_update(
     else:  # pragma: no cover
         assert False, "unhandled parametrization"
 
-    pending_root = await data_layer.data_store.get_pending_root(tree_id=store_id)
+    pending_root = await data_layer.data_store.get_pending_root(store_id=store_id)
     assert pending_root is not None
     assert pending_root.status == Status.PENDING
@@ -3606,7 +3606,7 @@ async def test_multistore_update(
     with pytest.raises(Exception, match="No pending roots found to submit"):
         await data_rpc_api.submit_all_pending_roots({})
     for store_id in store_ids:
-        pending_root = await data_store.get_pending_root(tree_id=store_id)
+        pending_root = await data_store.get_pending_root(store_id=store_id)
         assert pending_root is None
 
     store_updates = []
diff --git a/chia/_tests/core/data_layer/test_data_store.py b/chia/_tests/core/data_layer/test_data_store.py
index 318c351e679f..289efd6ff97f 100644
--- a/chia/_tests/core/data_layer/test_data_store.py
+++ b/chia/_tests/core/data_layer/test_data_store.py
@@ -101,80 +101,82 @@ async def test_create_creates_tables_and_columns(
 
 @pytest.mark.anyio
 async def test_create_tree_accepts_bytes32(raw_data_store: DataStore) -> None:
-    tree_id = bytes32(b"\0" * 32)
+    store_id = bytes32(b"\0" * 32)
 
-    await raw_data_store.create_tree(tree_id=tree_id)
+    await raw_data_store.create_tree(store_id=store_id)
 
 
 @pytest.mark.parametrize(argnames=["length"], argvalues=[[length] for length in [*range(0, 32), *range(33, 48)]])
 @pytest.mark.anyio
-async def test_create_tree_fails_for_not_bytes32(raw_data_store: DataStore, length: int) -> None:
-    bad_tree_id = b"\0" * length
+async def test_create_store_fails_for_not_bytes32(raw_data_store: DataStore, length: int) -> None:
+    bad_store_id = b"\0" * length
 
     # TODO: require a more specific exception
     with pytest.raises(Exception):
         # type ignore since we are trying to intentionally pass a bad argument
-        await raw_data_store.create_tree(tree_id=bad_tree_id)  # type: ignore[arg-type]
+        await raw_data_store.create_tree(store_id=bad_store_id)  # type: ignore[arg-type]
 
 
 @pytest.mark.anyio
 async def test_get_trees(raw_data_store: DataStore) -> None:
-    expected_tree_ids = set()
+    expected_store_ids = set()
 
     for n in range(10):
-        tree_id = bytes32(b"\0" * 31 + bytes([n]))
-        await raw_data_store.create_tree(tree_id=tree_id)
-        expected_tree_ids.add(tree_id)
+        store_id = bytes32(b"\0" * 31 + bytes([n]))
+        await raw_data_store.create_tree(store_id=store_id)
+        expected_store_ids.add(store_id)
 
-    tree_ids = await raw_data_store.get_tree_ids()
+    store_ids = await raw_data_store.get_store_ids()
 
-    assert tree_ids == expected_tree_ids
+    assert store_ids == expected_store_ids
 
 
 @pytest.mark.anyio
-async def test_table_is_empty(data_store: DataStore, tree_id: bytes32) -> None:
-    is_empty = await data_store.table_is_empty(tree_id=tree_id)
+async def test_table_is_empty(data_store: DataStore, store_id: bytes32) -> None:
+    is_empty = await data_store.table_is_empty(store_id=store_id)
     assert is_empty
 
 
 @pytest.mark.anyio
-async def test_table_is_not_empty(data_store: DataStore, tree_id: bytes32) -> None:
+async def test_table_is_not_empty(data_store: DataStore, store_id: bytes32) -> None:
     key = b"\x01\x02"
     value = b"abc"
     await data_store.insert(
         key=key,
         value=value,
-        tree_id=tree_id,
+        store_id=store_id,
         reference_node_hash=None,
         side=None,
         status=Status.COMMITTED,
     )
 
-    is_empty = await data_store.table_is_empty(tree_id=tree_id)
+    is_empty = await data_store.table_is_empty(store_id=store_id)
     assert not is_empty
 
 
 # @pytest.mark.anyio
-# async def test_create_root_provides_bytes32(raw_data_store: DataStore, tree_id: bytes32) -> None:
-#     await raw_data_store.create_tree(tree_id=tree_id)
+# async def test_create_root_provides_bytes32(raw_data_store: DataStore, store_id: bytes32) -> None:
+#     await raw_data_store.create_tree(store_id=store_id)
 #     # TODO: catchup with the node_hash=
-#     root_hash = await raw_data_store.create_root(tree_id=tree_id, node_hash=23)
+#     root_hash = await raw_data_store.create_root(store_id=store_id, node_hash=23)
 #
 #     assert isinstance(root_hash, bytes32)
 
 
 @pytest.mark.anyio
-async def test_insert_over_empty(data_store: DataStore, tree_id: bytes32) -> None:
+async def test_insert_over_empty(data_store: DataStore, store_id: bytes32) -> None:
     key = b"\x01\x02"
     value = b"abc"
 
-    insert_result = await data_store.insert(key=key, value=value, tree_id=tree_id, reference_node_hash=None, side=None)
+    insert_result = await data_store.insert(
+        key=key, value=value, store_id=store_id, reference_node_hash=None, side=None
+    )
     assert insert_result.node_hash == leaf_hash(key=key, value=value)
 
 
 @pytest.mark.anyio
-async def test_insert_increments_generation(data_store: DataStore, tree_id: bytes32) -> None:
+async def test_insert_increments_generation(data_store: DataStore, store_id: bytes32) -> None:
     keys = [b"a", b"b", b"c", b"d"]  # efghijklmnopqrstuvwxyz")
     value = b"\x01\x02\x03"
 
@@ -186,13 +188,13 @@ async def test_insert_increments_generation(data_store: DataStore, tree_id: byte
         insert_result = await data_store.insert(
             key=key,
             value=value,
-            tree_id=tree_id,
+            store_id=store_id,
             reference_node_hash=node_hash,
             side=None if node_hash is None else Side.LEFT,
             status=Status.COMMITTED,
         )
         node_hash = insert_result.node_hash
-        generation = await data_store.get_tree_generation(tree_id=tree_id)
+        generation = await data_store.get_tree_generation(store_id=store_id)
         generations.append(generation)
         expected.append(expected_generation)
 
@@ -202,18 +204,18 @@ async def test_insert_increments_generation(data_store: DataStore, tree_id: byte
 @pytest.mark.anyio
 async def test_get_tree_generation_returns_none_when_none_available(
     raw_data_store: DataStore,
-    tree_id: bytes32,
+    store_id: bytes32,
 ) -> None:
-    with pytest.raises(Exception, match=re.escape(f"No generations found for tree ID: {tree_id.hex()}")):
-        await raw_data_store.get_tree_generation(tree_id=tree_id)
+    with pytest.raises(Exception, match=re.escape(f"No generations found for store ID: {store_id.hex()}")):
+        await raw_data_store.get_tree_generation(store_id=store_id)
 
 
 @pytest.mark.anyio
-async def test_insert_internal_node_does_nothing_if_matching(data_store: DataStore, tree_id: bytes32) -> None:
-    await add_01234567_example(data_store=data_store, tree_id=tree_id)
+async def test_insert_internal_node_does_nothing_if_matching(data_store: DataStore, store_id: bytes32) -> None:
+    await add_01234567_example(data_store=data_store, store_id=store_id)
 
-    kv_node = await data_store.get_node_by_key(key=b"\x04", tree_id=tree_id)
-    ancestors = await data_store.get_ancestors(node_hash=kv_node.hash, tree_id=tree_id)
+    kv_node = await data_store.get_node_by_key(key=b"\x04", store_id=store_id)
+    ancestors = await data_store.get_ancestors(node_hash=kv_node.hash, store_id=store_id)
     parent = ancestors[0]
 
     async with data_store.db_wrapper.reader() as reader:
@@ -230,10 +232,10 @@ async def test_insert_internal_node_does_nothing_if_matching(data_store: DataSto
 
 
 @pytest.mark.anyio
-async def test_insert_terminal_node_does_nothing_if_matching(data_store: DataStore, tree_id: bytes32) -> None:
-    await add_01234567_example(data_store=data_store, tree_id=tree_id)
+async def test_insert_terminal_node_does_nothing_if_matching(data_store: DataStore, store_id: bytes32) -> None:
+    await add_01234567_example(data_store=data_store, store_id=store_id)
 
-    kv_node = await data_store.get_node_by_key(key=b"\x04", tree_id=tree_id)
+    kv_node = await data_store.get_node_by_key(key=b"\x04", store_id=store_id)
 
     async with data_store.db_wrapper.reader() as reader:
         cursor = await reader.execute("SELECT * FROM node")
@@ -251,37 +253,37 @@ async def test_insert_terminal_node_does_nothing_if_matching(data_store: DataSto
 @pytest.mark.anyio
 async def test_build_a_tree(
     data_store: DataStore,
-    tree_id: bytes32,
+    store_id: bytes32,
     create_example: Callable[[DataStore, bytes32], Awaitable[Example]],
 ) -> None:
-    example = await create_example(data_store, tree_id)
+    example = await create_example(data_store, store_id)
 
     await _debug_dump(db=data_store.db_wrapper, description="final")
-    actual = await data_store.get_tree_as_program(tree_id=tree_id)
+    actual = await data_store.get_tree_as_program(store_id=store_id)
     # print("actual  ", actual.as_python())
     # print("expected", example.expected.as_python())
 
     assert actual == example.expected
 
 
 @pytest.mark.anyio
-async def test_get_node_by_key(data_store: DataStore, tree_id: bytes32) -> None:
-    example = await add_0123_example(data_store=data_store, tree_id=tree_id)
+async def test_get_node_by_key(data_store: DataStore, store_id: bytes32) -> None:
+    example = await add_0123_example(data_store=data_store, store_id=store_id)
 
     key_node_hash = example.terminal_nodes[2]
 
     # TODO: make a nicer relationship between the hash and the key
-    actual = await data_store.get_node_by_key(key=b"\x02", tree_id=tree_id)
+    actual = await data_store.get_node_by_key(key=b"\x02", store_id=store_id)
     assert actual.hash == key_node_hash
 
 
 @pytest.mark.anyio
-async def test_get_ancestors(data_store: DataStore, tree_id: bytes32) -> None:
-    example = await add_0123_example(data_store=data_store, tree_id=tree_id)
+async def test_get_ancestors(data_store: DataStore, store_id: bytes32) -> None:
+    example = await add_0123_example(data_store=data_store, store_id=store_id)
 
     reference_node_hash = example.terminal_nodes[2]
 
-    ancestors = await data_store.get_ancestors(node_hash=reference_node_hash, tree_id=tree_id)
+    ancestors = await data_store.get_ancestors(node_hash=reference_node_hash, store_id=store_id)
     hashes = [node.hash.hex() for node in ancestors]
 
     # TODO: reverify these are correct
@@ -290,12 +292,12 @@ async def test_get_ancestors(data_store: DataStore, tree_id: bytes32) -> None:
         "c852ecd8fb61549a0a42f9eb9dde65e6c94a01934dbd9c1d35ab94e2a0ae58e2",
     ]
 
-    ancestors_2 = await data_store.get_ancestors_optimized(node_hash=reference_node_hash, tree_id=tree_id)
+    ancestors_2 = await data_store.get_ancestors_optimized(node_hash=reference_node_hash, store_id=store_id)
     assert ancestors == ancestors_2
 
 
 @pytest.mark.anyio
-async def test_get_ancestors_optimized(data_store: DataStore, tree_id: bytes32) -> None:
+async def test_get_ancestors_optimized(data_store: DataStore, store_id: bytes32) -> None:
     ancestors: List[Tuple[int, bytes32, List[InternalNode]]] = []
     random = Random()
     random.seed(100, version=2)
@@ -316,11 +318,11 @@ async def test_get_ancestors_optimized(data_store: DataStore, tree_id: bytes32)
             while node_count > 0:
                 node_count -= 1
                 seed = bytes32(b"0" * 32)
-                node_hash = await data_store.get_terminal_node_for_seed(tree_id, seed)
+                node_hash = await data_store.get_terminal_node_for_seed(store_id, seed)
                 assert node_hash is not None
                 node = await data_store.get_node(node_hash)
                 assert isinstance(node, TerminalNode)
-                await data_store.delete(key=node.key, tree_id=tree_id, status=Status.COMMITTED)
+                await data_store.delete(key=node.key, store_id=store_id, status=Status.COMMITTED)
                 deleted_all = True
             is_insert = True
         else:
@@ -332,7 +334,7 @@ async def test_get_ancestors_optimized(data_store: DataStore, tree_id: bytes32)
         key = (i % 200).to_bytes(4, byteorder="big")
         value = (i % 200).to_bytes(4, byteorder="big")
         seed = Program.to((key, value)).get_tree_hash()
-        node_hash = await data_store.get_terminal_node_for_seed(tree_id, seed)
+        node_hash = await data_store.get_terminal_node_for_seed(store_id, seed)
         if is_insert:
             node_count += 1
             side = None if node_hash is None else data_store.get_side_for_seed(seed)
@@ -340,7 +342,7 @@ async def test_get_ancestors_optimized(data_store: DataStore, tree_id: bytes32)
             insert_result = await data_store.insert(
                 key=key,
                 value=value,
-                tree_id=tree_id,
+                store_id=store_id,
                 reference_node_hash=node_hash,
                 side=side,
                 use_optimized=False,
@@ -348,19 +350,19 @@ async def test_get_ancestors_optimized(data_store: DataStore, tree_id: bytes32)
             )
             node_hash = insert_result.node_hash
             if node_hash is not None:
-                generation = await data_store.get_tree_generation(tree_id=tree_id)
-                current_ancestors = await data_store.get_ancestors(node_hash=node_hash, tree_id=tree_id)
+                generation = await data_store.get_tree_generation(store_id=store_id)
+                current_ancestors = await data_store.get_ancestors(node_hash=node_hash, store_id=store_id)
                 ancestors.append((generation, node_hash, current_ancestors))
         else:
             node_count -= 1
             assert node_hash is not None
             node = await data_store.get_node(node_hash)
            assert isinstance(node, TerminalNode)
-            await data_store.delete(key=node.key, tree_id=tree_id, use_optimized=False, status=Status.COMMITTED)
+            await data_store.delete(key=node.key, store_id=store_id, use_optimized=False, status=Status.COMMITTED)
 
     for generation, node_hash, expected_ancestors in ancestors:
         current_ancestors = await data_store.get_ancestors_optimized(
-            node_hash=node_hash, tree_id=tree_id, generation=generation
+            node_hash=node_hash, store_id=store_id, generation=generation
         )
         assert current_ancestors == expected_ancestors
 
@@ -376,7 +378,7 @@ async def test_get_ancestors_optimized(data_store: DataStore, tree_id: bytes32)
 )
 async def test_batch_update(
     data_store: DataStore,
-    tree_id: bytes32,
+    store_id: bytes32,
     use_optimized: bool,
     tmp_path: Path,
     num_batches: int,
@@ -387,7 +389,7 @@ async def test_batch_update(
     saved_kv: List[List[TerminalNode]] = []
     db_uri = generate_in_memory_db_uri()
     async with DataStore.managed(database=db_uri, uri=True) as single_op_data_store:
-        await single_op_data_store.create_tree(tree_id, status=Status.COMMITTED)
+        await single_op_data_store.create_tree(store_id, status=Status.COMMITTED)
         random = Random()
         random.seed(100, version=2)
 
@@ -408,7 +410,7 @@ async def test_batch_update(
                     await single_op_data_store.autoinsert(
                         key=key,
                         value=value,
-                        tree_id=tree_id,
+                        store_id=store_id,
                         use_optimized=use_optimized,
                         status=Status.COMMITTED,
                     )
@@ -416,7 +418,7 @@ async def test_batch_update(
                     await single_op_data_store.upsert(
                         key=key,
                         new_value=value,
-                        tree_id=tree_id,
+                        store_id=store_id,
                         use_optimized=use_optimized,
                         status=Status.COMMITTED,
                     )
@@ -428,7 +430,7 @@ async def test_batch_update(
                 del keys_values[key]
                 await single_op_data_store.delete(
                     key=key,
-                    tree_id=tree_id,
+                    store_id=store_id,
                     use_optimized=use_optimized,
                     status=Status.COMMITTED,
                 )
@@ -442,7 +444,7 @@ async def test_batch_update(
                 await single_op_data_store.upsert(
                     key=key,
                     new_value=new_value,
-                    tree_id=tree_id,
+                    store_id=store_id,
                     use_optimized=use_optimized,
                     status=Status.COMMITTED,
                 )
@@ -451,17 +453,17 @@ async def test_batch_update(
             if (operation + 1) % num_ops_per_batch == 0:
                 saved_batches.append(batch)
                 batch = []
-                current_kv = await single_op_data_store.get_keys_values(tree_id=tree_id)
+                current_kv = await single_op_data_store.get_keys_values(store_id=store_id)
                 assert {kv.key: kv.value for kv in current_kv} == keys_values
                 saved_kv.append(current_kv)
 
     for batch_number, batch in enumerate(saved_batches):
         assert len(batch) == num_ops_per_batch
-        await data_store.insert_batch(tree_id, batch, status=Status.COMMITTED)
-        root = await data_store.get_tree_root(tree_id)
+        await data_store.insert_batch(store_id, batch, status=Status.COMMITTED)
+        root = await data_store.get_tree_root(store_id)
         assert root.generation == batch_number + 1
         assert root.node_hash is not None
-        current_kv = await data_store.get_keys_values(tree_id=tree_id)
+        current_kv = await data_store.get_keys_values(store_id=store_id)
         # Get the same keys/values, but possibly stored in other order.
         assert {node.key: node.value for node in current_kv} == {
             node.key: node.value for node in saved_kv[batch_number]
@@ -475,7 +477,7 @@ async def test_batch_update(
                 while ancestor in ancestors:
                     ancestor = ancestors[ancestor]
                     expected_ancestors.append(ancestor)
-                result_ancestors = await data_store.get_ancestors_optimized(node_hash, tree_id)
+                result_ancestors = await data_store.get_ancestors_optimized(node_hash, store_id)
                 assert [node.hash for node in result_ancestors] == expected_ancestors
                 node = await data_store.get_node(node_hash)
                 if isinstance(node, InternalNode):
@@ -484,7 +486,7 @@ async def test_batch_update(
                     ancestors[node.left_hash] = node_hash
                     ancestors[node.right_hash] = node_hash
 
-    all_kv = await data_store.get_keys_values(tree_id)
+    all_kv = await data_store.get_keys_values(store_id)
     assert {node.key: node.value for node in all_kv} == keys_values
 
 
@@ -495,7 +497,7 @@ )
 async def test_upsert_ignores_existing_arguments(
     data_store: DataStore,
-    tree_id: bytes32,
+    store_id: bytes32,
     use_optimized: bool,
 ) -> None:
     key = b"key"
@@ -504,43 +506,43 @@ async def test_upsert_ignores_existing_arguments(
 
     await data_store.autoinsert(
         key=key,
         value=value,
-        tree_id=tree_id,
+        store_id=store_id,
         use_optimized=use_optimized,
         status=Status.COMMITTED,
     )
-    node = await data_store.get_node_by_key(key, tree_id)
+    node = await data_store.get_node_by_key(key, store_id)
     assert node.value == value
 
     new_value = b"value2"
     await data_store.upsert(
         key=key,
         new_value=new_value,
-        tree_id=tree_id,
+        store_id=store_id,
         use_optimized=use_optimized,
         status=Status.COMMITTED,
     )
-    node = await data_store.get_node_by_key(key, tree_id)
+    node = await data_store.get_node_by_key(key, store_id)
     assert node.value == new_value
 
     await data_store.upsert(
         key=key,
         new_value=new_value,
-        tree_id=tree_id,
+        store_id=store_id,
        use_optimized=use_optimized,
         status=Status.COMMITTED,
     )
-    node = await data_store.get_node_by_key(key, tree_id)
+    node = await data_store.get_node_by_key(key, store_id)
     assert node.value == new_value
 
     key2 = b"key2"
     await data_store.upsert(
         key=key2,
         new_value=value,
-        tree_id=tree_id,
+        store_id=store_id,
         use_optimized=use_optimized,
         status=Status.COMMITTED,
     )
-    node = await data_store.get_node_by_key(key2, tree_id)
+    node = await data_store.get_node_by_key(key2, store_id)
     assert node.value == value
 
@@ -548,18 +550,18 @@ async def test_upsert_ignores_existing_arguments(
 @pytest.mark.anyio
 async def test_insert_batch_reference_and_side(
     data_store: DataStore,
-    tree_id: bytes32,
+    store_id: bytes32,
     side: Side,
 ) -> None:
     insert_result = await data_store.autoinsert(
         key=b"key1",
         value=b"value1",
-        tree_id=tree_id,
+        store_id=store_id,
         status=Status.COMMITTED,
     )
 
     new_root_hash = await data_store.insert_batch(
-        tree_id=tree_id,
+        store_id=store_id,
         changelist=[
             {
                 "action": "insert",
@@ -585,33 +587,33 @@ async def test_insert_batch_reference_and_side(
 
 
 @pytest.mark.anyio
-async def test_ancestor_table_unique_inserts(data_store: DataStore, tree_id: bytes32) -> None:
-    await add_0123_example(data_store=data_store, tree_id=tree_id)
+async def test_ancestor_table_unique_inserts(data_store: DataStore, store_id: bytes32) -> None:
+    await add_0123_example(data_store=data_store, store_id=store_id)
     hash_1 = bytes32.from_hexstr("0763561814685fbf92f6ca71fbb1cb11821951450d996375c239979bd63e9535")
     hash_2 = bytes32.from_hexstr("924be8ff27e84cba17f5bc918097f8410fab9824713a4668a21c8e060a8cab40")
-    await data_store._insert_ancestor_table(hash_1, hash_2, tree_id, 2)
-    await data_store._insert_ancestor_table(hash_1, hash_2, tree_id, 2)
+    await data_store._insert_ancestor_table(hash_1, hash_2, store_id, 2)
+    await data_store._insert_ancestor_table(hash_1, hash_2, store_id, 2)
     with pytest.raises(Exception, match="^Requested insertion of ancestor"):
-        await data_store._insert_ancestor_table(hash_1, hash_1, tree_id, 2)
-    await data_store._insert_ancestor_table(hash_1, hash_2, tree_id, 2)
+        await data_store._insert_ancestor_table(hash_1, hash_1, store_id, 2)
+    await data_store._insert_ancestor_table(hash_1, hash_2, store_id, 2)
 
 
 @pytest.mark.anyio
 async def test_get_pairs(
     data_store: DataStore,
-    tree_id: bytes32,
+    store_id: bytes32,
     create_example: Callable[[DataStore, bytes32], Awaitable[Example]],
 ) -> None:
-    example = await create_example(data_store, tree_id)
+    example = await create_example(data_store, store_id)
 
-    pairs = await data_store.get_keys_values(tree_id=tree_id)
+    pairs = await data_store.get_keys_values(store_id=store_id)
 
     assert [node.hash for node in pairs] == example.terminal_nodes
 
 
 @pytest.mark.anyio
-async def test_get_pairs_when_empty(data_store: DataStore, tree_id: bytes32) -> None:
-    pairs = await data_store.get_keys_values(tree_id=tree_id)
+async def test_get_pairs_when_empty(data_store: DataStore, store_id: bytes32) -> None:
+    pairs = await data_store.get_keys_values(store_id=store_id)
 
     assert pairs == []
 
@@ -624,7 +626,7 @@ async def test_get_pairs_when_empty(data_store: DataStore, tree_id: bytes32) ->
 @pytest.mark.anyio()
 async def test_inserting_duplicate_key_fails(
     data_store: DataStore,
-    tree_id: bytes32,
+    store_id: bytes32,
     first_value: bytes,
     second_value: bytes,
 ) -> None:
@@ -633,7 +635,7 @@ async def test_inserting_duplicate_key_fails(
     insert_result = await data_store.insert(
         key=key,
         value=first_value,
-        tree_id=tree_id,
+        store_id=store_id,
         reference_node_hash=None,
         side=None,
     )
@@ -643,7 +645,7 @@ async def test_inserting_duplicate_key_fails(
         await data_store.insert(
             key=key,
             value=second_value,
-            tree_id=tree_id,
+            store_id=store_id,
             reference_node_hash=insert_result.node_hash,
             side=Side.RIGHT,
         )
@@ -653,7 +655,7 @@ async def test_inserting_duplicate_key_fails(
         await data_store.insert(
             key=key,
             value=second_value,
-            tree_id=tree_id,
+            store_id=store_id,
             reference_node_hash=insert_result.node_hash,
             side=Side.RIGHT,
         )
@@ -678,20 +680,20 @@ async def test_inserting_invalid_length_hash_raises_original_exception(
 @pytest.mark.anyio()
 async def test_inserting_invalid_length_ancestor_hash_raises_original_exception(
     data_store: DataStore,
-    tree_id: bytes32,
+    store_id: bytes32,
 ) -> None:
     with pytest.raises(aiosqlite.IntegrityError):
         # casting since we are testing an invalid case
         await data_store._insert_ancestor_table(
             left_hash=bytes32(b"\x01" * 32),
             right_hash=bytes32(b"\x02" * 32),
-            tree_id=tree_id,
+            store_id=store_id,
            generation=0,
         )
 
 
 @pytest.mark.anyio()
-async def test_autoinsert_balances_from_scratch(data_store: DataStore, tree_id: bytes32) -> None:
+async def test_autoinsert_balances_from_scratch(data_store: DataStore, store_id: bytes32) -> None:
     random = Random()
     random.seed(100, version=2)
     hashes = []
@@ -699,17 +701,17 @@ async def test_autoinsert_balances_from_scratch(data_store: DataStore, tree_id:
     for i in range(2000):
         key = (i + 100).to_bytes(4, byteorder="big")
         value = (i + 200).to_bytes(4, byteorder="big")
-        insert_result = await data_store.autoinsert(key, value, tree_id, status=Status.COMMITTED)
+        insert_result = await data_store.autoinsert(key, value, store_id, status=Status.COMMITTED)
        hashes.append(insert_result.node_hash)
 
-    heights = {node_hash: len(await data_store.get_ancestors_optimized(node_hash, tree_id)) for node_hash in hashes}
+    heights = {node_hash: len(await data_store.get_ancestors_optimized(node_hash, store_id)) for node_hash in hashes}
    too_tall = {hash: height for hash, height in heights.items() if height > 14}
     assert too_tall == {}
     assert 11 <= statistics.mean(heights.values()) <= 12
 
 
 @pytest.mark.anyio()
-async def test_autoinsert_balances_gaps(data_store: DataStore, tree_id: bytes32) -> None:
+async def test_autoinsert_balances_gaps(data_store: DataStore, store_id: bytes32) -> None:
     random = Random()
     random.seed(101, version=2)
     hashes = []
@@ -718,30 +720,30 @@ async def test_autoinsert_balances_gaps(data_store: DataStore, tree_id: bytes32)
         key = (i + 100).to_bytes(4, byteorder="big")
         value = (i + 200).to_bytes(4, byteorder="big")
         if i == 0 or i > 10:
-            insert_result = await data_store.autoinsert(key, value, tree_id, status=Status.COMMITTED)
+            insert_result = await data_store.autoinsert(key, value, store_id, status=Status.COMMITTED)
         else:
-            reference_node_hash = await data_store.get_terminal_node_for_seed(tree_id, bytes32([0] * 32))
+            reference_node_hash = await data_store.get_terminal_node_for_seed(store_id, bytes32([0] * 32))
             insert_result = await data_store.insert(
                 key=key,
                 value=value,
-                tree_id=tree_id,
+                store_id=store_id,
                 reference_node_hash=reference_node_hash,
                 side=Side.LEFT,
                 status=Status.COMMITTED,
             )
-            ancestors = await data_store.get_ancestors_optimized(insert_result.node_hash, tree_id)
+            ancestors = await data_store.get_ancestors_optimized(insert_result.node_hash, store_id)
             assert len(ancestors) == i
         hashes.append(insert_result.node_hash)
 
-    heights = {node_hash: len(await data_store.get_ancestors_optimized(node_hash, tree_id)) for node_hash in hashes}
+    heights = {node_hash: len(await data_store.get_ancestors_optimized(node_hash, store_id)) for node_hash in hashes}
    too_tall = {hash: height for hash, height in heights.items() if height > 14}
     assert too_tall == {}
     assert 11 <= statistics.mean(heights.values()) <= 12
 
 
 @pytest.mark.anyio()
-async def test_delete_from_left_both_terminal(data_store: DataStore, tree_id: bytes32) -> None:
-    await add_01234567_example(data_store=data_store, tree_id=tree_id)
+async def test_delete_from_left_both_terminal(data_store: DataStore, store_id: bytes32) -> None:
+    await add_01234567_example(data_store=data_store, store_id=store_id)
 
     expected = Program.to(
         (
@@ -765,15 +767,15 @@ async def test_delete_from_left_both_terminal(data_store: DataStore, tree_id: by
         ),
     )
 
-    await data_store.delete(key=b"\x04", tree_id=tree_id, status=Status.COMMITTED)
-    result = await data_store.get_tree_as_program(tree_id=tree_id)
+    await data_store.delete(key=b"\x04", store_id=store_id, status=Status.COMMITTED)
+    result = await data_store.get_tree_as_program(store_id=store_id)
 
     assert result == expected
 
 
 @pytest.mark.anyio()
-async def test_delete_from_left_other_not_terminal(data_store: DataStore, tree_id: bytes32) -> None:
-    await add_01234567_example(data_store=data_store, tree_id=tree_id)
+async def test_delete_from_left_other_not_terminal(data_store: DataStore, store_id: bytes32) -> None:
+    await add_01234567_example(data_store=data_store, store_id=store_id)
 
     expected = Program.to(
         (
@@ -794,16 +796,16 @@ async def test_delete_from_left_other_not_terminal(data_store: DataStore, tree_i
         ),
     )
 
-    await data_store.delete(key=b"\x04", tree_id=tree_id, status=Status.COMMITTED)
-    await data_store.delete(key=b"\x05", tree_id=tree_id, status=Status.COMMITTED)
-    result = await data_store.get_tree_as_program(tree_id=tree_id)
+    await data_store.delete(key=b"\x04", store_id=store_id, status=Status.COMMITTED)
+    await data_store.delete(key=b"\x05", store_id=store_id, status=Status.COMMITTED)
+    result = await data_store.get_tree_as_program(store_id=store_id)
 
     assert result == expected
 
 
 @pytest.mark.anyio()
-async def test_delete_from_right_both_terminal(data_store: DataStore, tree_id: bytes32) -> None:
-    await add_01234567_example(data_store=data_store, tree_id=tree_id)
+async def test_delete_from_right_both_terminal(data_store: DataStore, store_id: bytes32) -> None:
+    await add_01234567_example(data_store=data_store, store_id=store_id)
 
     expected = Program.to(
         (
@@ -827,15 +829,15 @@ async def test_delete_from_right_both_terminal(data_store: DataStore, tree_id: b
         ),
     )
 
-    await data_store.delete(key=b"\x03", tree_id=tree_id, status=Status.COMMITTED)
-    result = await data_store.get_tree_as_program(tree_id=tree_id)
+    await data_store.delete(key=b"\x03", store_id=store_id, status=Status.COMMITTED)
+    result = await data_store.get_tree_as_program(store_id=store_id)
 
     assert result == expected
 
 
 @pytest.mark.anyio()
-async def test_delete_from_right_other_not_terminal(data_store: DataStore, tree_id: bytes32) -> None:
-    await add_01234567_example(data_store=data_store, tree_id=tree_id)
+async def test_delete_from_right_other_not_terminal(data_store: DataStore, store_id: bytes32) -> None:
+    await add_01234567_example(data_store=data_store, store_id=store_id)
 
     expected = Program.to(
         (
@@ -856,24 +858,24 @@ async def test_delete_from_right_other_not_terminal(data_store: DataStore, tree_
         ),
     )
 
-    await data_store.delete(key=b"\x03", tree_id=tree_id, status=Status.COMMITTED)
-    await data_store.delete(key=b"\x02", tree_id=tree_id, status=Status.COMMITTED)
-    result = await data_store.get_tree_as_program(tree_id=tree_id)
+    await data_store.delete(key=b"\x03", store_id=store_id, status=Status.COMMITTED)
+    await data_store.delete(key=b"\x02", store_id=store_id, status=Status.COMMITTED)
+    result = await data_store.get_tree_as_program(store_id=store_id)
 
     assert result == expected
 
 
 @pytest.mark.anyio
-async def test_proof_of_inclusion_by_hash(data_store: DataStore, tree_id: bytes32) -> None:
+async def test_proof_of_inclusion_by_hash(data_store: DataStore, store_id: bytes32) -> None:
     """A proof of inclusion contains the expected sibling side, sibling hash, combined
     hash, key, value, and root hash values.
     """
-    await add_01234567_example(data_store=data_store, tree_id=tree_id)
-    root = await data_store.get_tree_root(tree_id=tree_id)
+    await add_01234567_example(data_store=data_store, store_id=store_id)
+    root = await data_store.get_tree_root(store_id=store_id)
     assert root.node_hash is not None
-    node = await data_store.get_node_by_key(key=b"\x04", tree_id=tree_id)
+    node = await data_store.get_node_by_key(key=b"\x04", store_id=store_id)
 
-    proof = await data_store.get_proof_of_inclusion_by_hash(node_hash=node.hash, tree_id=tree_id)
+    proof = await data_store.get_proof_of_inclusion_by_hash(node_hash=node.hash, store_id=store_id)
 
     print(node)
     await _debug_dump(db=data_store.db_wrapper)
@@ -900,26 +902,26 @@ async def test_proof_of_inclusion_by_hash(data_store: DataStore, tree_id: bytes3
 
 
 @pytest.mark.anyio
-async def test_proof_of_inclusion_by_hash_no_ancestors(data_store: DataStore, tree_id: bytes32) -> None:
+async def test_proof_of_inclusion_by_hash_no_ancestors(data_store: DataStore, store_id: bytes32) -> None:
     """Check proper proof of inclusion creation when the node being proved is the root."""
-    await data_store.autoinsert(key=b"\x04", value=b"\x03", tree_id=tree_id, status=Status.COMMITTED)
-    root = await data_store.get_tree_root(tree_id=tree_id)
+    await data_store.autoinsert(key=b"\x04", value=b"\x03", store_id=store_id, status=Status.COMMITTED)
+    root = await data_store.get_tree_root(store_id=store_id)
     assert root.node_hash is not None
-    node = await data_store.get_node_by_key(key=b"\x04", tree_id=tree_id)
+    node = await data_store.get_node_by_key(key=b"\x04", store_id=store_id)
 
-    proof = await data_store.get_proof_of_inclusion_by_hash(node_hash=node.hash, tree_id=tree_id)
+    proof = await data_store.get_proof_of_inclusion_by_hash(node_hash=node.hash, store_id=store_id)
 
     assert proof == ProofOfInclusion(node_hash=node.hash, layers=[])
 
 
 @pytest.mark.anyio
-async def test_proof_of_inclusion_by_hash_program(data_store: DataStore, tree_id: bytes32) -> None:
+async def test_proof_of_inclusion_by_hash_program(data_store: DataStore, store_id: bytes32) -> None:
     """The proof of inclusion program has the expected Python equivalence."""
 
-    await add_01234567_example(data_store=data_store, tree_id=tree_id)
-    node = await data_store.get_node_by_key(key=b"\x04", tree_id=tree_id)
+    await add_01234567_example(data_store=data_store, store_id=store_id)
+    node = await data_store.get_node_by_key(key=b"\x04", store_id=store_id)
 
-    proof = await data_store.get_proof_of_inclusion_by_hash(node_hash=node.hash, tree_id=tree_id)
+    proof = await data_store.get_proof_of_inclusion_by_hash(node_hash=node.hash, store_id=store_id)
 
     assert proof.as_program() == [
         b"\x04",
@@ -932,27 +934,27 @@ async def test_proof_of_inclusion_by_hash_program(data_store: DataStore, tree_id
 
 
 @pytest.mark.anyio
-async def test_proof_of_inclusion_by_hash_equals_by_key(data_store: DataStore, tree_id: bytes32) -> None:
+async def test_proof_of_inclusion_by_hash_equals_by_key(data_store: DataStore, store_id: bytes32) -> None:
     """The proof of inclusion is equal between hash and key requests."""
 
-    await add_01234567_example(data_store=data_store, tree_id=tree_id)
-    node = await data_store.get_node_by_key(key=b"\x04", tree_id=tree_id)
+    await add_01234567_example(data_store=data_store, store_id=store_id)
+    node = await data_store.get_node_by_key(key=b"\x04", store_id=store_id)
 
-    proof_by_hash = await data_store.get_proof_of_inclusion_by_hash(node_hash=node.hash, tree_id=tree_id)
-    proof_by_key = await data_store.get_proof_of_inclusion_by_key(key=b"\x04", tree_id=tree_id)
+    proof_by_hash = await data_store.get_proof_of_inclusion_by_hash(node_hash=node.hash, store_id=store_id)
+    proof_by_key = await data_store.get_proof_of_inclusion_by_key(key=b"\x04", store_id=store_id)
 
     assert proof_by_hash == proof_by_key
 
 
 @pytest.mark.anyio
-async def test_proof_of_inclusion_by_hash_bytes(data_store: DataStore, tree_id: bytes32) -> None:
+async def test_proof_of_inclusion_by_hash_bytes(data_store: DataStore, store_id: bytes32) -> None:
     """The proof of inclusion provided by the data store is able to be converted to a
     program and subsequently to bytes.
     """
-    await add_01234567_example(data_store=data_store, tree_id=tree_id)
-    node = await data_store.get_node_by_key(key=b"\x04", tree_id=tree_id)
+    await add_01234567_example(data_store=data_store, store_id=store_id)
+    node = await data_store.get_node_by_key(key=b"\x04", store_id=store_id)
 
-    proof = await data_store.get_proof_of_inclusion_by_hash(node_hash=node.hash, tree_id=tree_id)
+    proof = await data_store.get_proof_of_inclusion_by_hash(node_hash=node.hash, store_id=store_id)
 
     expected = (
         b"\xff\x04\xff\xff\xa0\xfbf\xfeS\x9b>\xb2\x02\r\xfb\xfa\xdf\xd6\x01\xfa1\x85!)"
@@ -966,11 +968,11 @@ async def test_proof_of_inclusion_by_hash_bytes(data_store: DataStore, tree_id:
 
 
 # @pytest.mark.anyio
-# async def test_create_first_pair(data_store: DataStore, tree_id: bytes) -> None:
+# async def test_create_first_pair(data_store: DataStore, store_id: bytes) -> None:
 #     key = SExp.to([1, 2])
 #     value = SExp.to(b'abc')
 #
-#     root_hash = await data_store.create_root(tree_id=tree_id)
+#     root_hash = await data_store.create_root(store_id=store_id)
 #
 #
 #     await data_store.create_pair(key=key, value=value)
@@ -991,7 +993,7 @@ def test_all_checks_collected() -> None:
 
 @pytest.mark.anyio
 async def test_check_roots_are_incrementing_missing_zero(raw_data_store: DataStore) -> None:
-    tree_id = hexstr_to_bytes("c954ab71ffaf5b0f129b04b35fdc7c84541f4375167e730e2646bfcfdb7cf2cd")
+    store_id = hexstr_to_bytes("c954ab71ffaf5b0f129b04b35fdc7c84541f4375167e730e2646bfcfdb7cf2cd")
 
     async with raw_data_store.db_wrapper.writer() as writer:
         for generation in range(1, 5):
@@ -1001,7 +1003,7 @@ async def test_check_roots_are_incrementing_missing_zero(raw_data_store: DataSto
                 VALUES(:tree_id, :generation, :node_hash, :status)
                 """,
                 {
-                    "tree_id": tree_id,
+                    "tree_id": store_id,
                    "generation": generation,
                     "node_hash": None,
                     "status": Status.COMMITTED.value,
@@ -1017,7 +1019,7 @@ async def test_check_roots_are_incrementing_missing_zero(raw_data_store: DataSto
 
 @pytest.mark.anyio
 async def test_check_roots_are_incrementing_gap(raw_data_store: DataStore) -> None:
-    tree_id = hexstr_to_bytes("c954ab71ffaf5b0f129b04b35fdc7c84541f4375167e730e2646bfcfdb7cf2cd")
+    store_id = hexstr_to_bytes("c954ab71ffaf5b0f129b04b35fdc7c84541f4375167e730e2646bfcfdb7cf2cd")
 
     async with raw_data_store.db_wrapper.writer() as writer:
         for generation in [*range(5), *range(6, 10)]:
@@ -1027,7 +1029,7 @@ async def test_check_roots_are_incrementing_gap(raw_data_store: DataStore) -> No
                 VALUES(:tree_id, :generation, :node_hash, :status)
                 """,
                 {
-                    "tree_id": tree_id,
+                    "tree_id": store_id,
                     "generation": generation,
                     "node_hash": None,
                     "status": Status.COMMITTED.value,
@@ -1082,51 +1084,51 @@ async def test_check_hashes_terminal(raw_data_store: DataStore) -> None:
 
 
 @pytest.mark.anyio
-async def test_root_state(data_store: DataStore, tree_id: bytes32) -> None:
+async def test_root_state(data_store: DataStore, store_id: bytes32) -> None:
     key = b"\x01\x02"
     value = b"abc"
     await data_store.insert(
-        key=key, value=value, tree_id=tree_id, reference_node_hash=None, side=None, status=Status.PENDING
+        key=key, value=value, store_id=store_id, reference_node_hash=None, side=None, status=Status.PENDING
     )
-    is_empty = await data_store.table_is_empty(tree_id=tree_id)
+    is_empty = await data_store.table_is_empty(store_id=store_id)
    assert is_empty
 
 
 @pytest.mark.anyio
-async def test_change_root_state(data_store: DataStore, tree_id: bytes32) -> None:
+async def test_change_root_state(data_store: DataStore, store_id: bytes32) -> None:
     key = b"\x01\x02"
     value = b"abc"
     await data_store.insert(
         key=key,
         value=value,
-        tree_id=tree_id,
+        store_id=store_id,
         reference_node_hash=None,
         side=None,
     )
-    root = await data_store.get_pending_root(tree_id)
+    root = await data_store.get_pending_root(store_id)
     assert root is not None
     assert root.status == Status.PENDING
-    is_empty = await data_store.table_is_empty(tree_id=tree_id)
+    is_empty = await data_store.table_is_empty(store_id=store_id)
     assert is_empty
     await data_store.change_root_status(root, Status.PENDING_BATCH)
-    root = await data_store.get_pending_root(tree_id)
+    root = await data_store.get_pending_root(store_id)
     assert root is not None
     assert root.status == Status.PENDING_BATCH
-    is_empty = await data_store.table_is_empty(tree_id=tree_id)
+    is_empty = await data_store.table_is_empty(store_id=store_id)
     assert is_empty
     await data_store.change_root_status(root, Status.COMMITTED)
-    root = await data_store.get_tree_root(tree_id)
-    is_empty = await data_store.table_is_empty(tree_id=tree_id)
+    root = await data_store.get_tree_root(store_id)
+    is_empty = await data_store.table_is_empty(store_id=store_id)
     assert not is_empty
     assert root.node_hash is not None
-    root = await data_store.get_pending_root(tree_id)
+    root = await data_store.get_pending_root(store_id)
     assert root is None
 
 
 @pytest.mark.anyio
-async def test_kv_diff(data_store: DataStore, tree_id: bytes32) -> None:
+async def test_kv_diff(data_store: DataStore, store_id: bytes32) -> None:
     random = Random()
     random.seed(100, version=2)
     insertions = 0
@@ -1135,7 +1137,7 @@ async def test_kv_diff(data_store: DataStore, tree_id: bytes32) -> None:
         key = (i + 100).to_bytes(4, byteorder="big")
         value = (i + 200).to_bytes(4, byteorder="big")
         seed = leaf_hash(key=key, value=value)
-        node_hash = await data_store.get_terminal_node_for_seed(tree_id, seed)
+        node_hash = await data_store.get_terminal_node_for_seed(store_id, seed)
         if random.randint(0, 4) > 0 or insertions < 10:
             insertions += 1
             side = None if node_hash is None else data_store.get_side_for_seed(seed)
@@ -1143,7 +1145,7 @@ async def test_kv_diff(data_store: DataStore, tree_id: bytes32) -> None:
             await data_store.insert(
                 key=key,
                 value=value,
-                tree_id=tree_id,
+                store_id=store_id,
                 reference_node_hash=node_hash,
                 side=side,
                 status=Status.COMMITTED,
@@ -1154,133 +1156,133 @@ async def test_kv_diff(data_store: DataStore, tree_id: bytes32) -> None:
             assert node_hash is not None
             node = await data_store.get_node(node_hash)
             assert isinstance(node, TerminalNode)
-            await data_store.delete(key=node.key, tree_id=tree_id, status=Status.COMMITTED)
+            await data_store.delete(key=node.key, store_id=store_id, status=Status.COMMITTED)
             if i > 200:
                 if DiffData(OperationType.INSERT, node.key, node.value) in expected_diff:
                     expected_diff.remove(DiffData(OperationType.INSERT, node.key, node.value))
                 else:
                     expected_diff.add(DiffData(OperationType.DELETE, node.key, node.value))
         if i == 200:
-            root_start = await data_store.get_tree_root(tree_id)
+            root_start = await data_store.get_tree_root(store_id)
 
-    root_end = await data_store.get_tree_root(tree_id)
+    root_end = await data_store.get_tree_root(store_id)
     assert root_start.node_hash is not None
     assert root_end.node_hash is not None
-    diffs = await data_store.get_kv_diff(tree_id, root_start.node_hash, root_end.node_hash)
+    diffs = await data_store.get_kv_diff(store_id, root_start.node_hash, root_end.node_hash)
     assert diffs == expected_diff
 
 
 @pytest.mark.anyio
-async def test_kv_diff_2(data_store: DataStore, tree_id: bytes32) -> None:
+async def test_kv_diff_2(data_store: DataStore, store_id: bytes32) -> None:
     insert_result = await data_store.insert(
         key=b"000",
         value=b"000",
-        tree_id=tree_id,
+        store_id=store_id,
         reference_node_hash=None,
         side=None,
     )
     empty_hash = bytes32([0] * 32)
     invalid_hash = bytes32([0] * 31 + [1])
-    diff_1 = await data_store.get_kv_diff(tree_id, empty_hash, insert_result.node_hash)
+    diff_1 = await data_store.get_kv_diff(store_id, empty_hash, insert_result.node_hash)
     assert diff_1 == {DiffData(OperationType.INSERT, b"000", b"000")}
-    diff_2 = await data_store.get_kv_diff(tree_id, insert_result.node_hash, empty_hash)
+    diff_2 = await data_store.get_kv_diff(store_id, insert_result.node_hash, empty_hash)
     assert diff_2 == {DiffData(OperationType.DELETE, b"000", b"000")}
-    diff_3 = await data_store.get_kv_diff(tree_id, invalid_hash, insert_result.node_hash)
+    diff_3 = await data_store.get_kv_diff(store_id, invalid_hash, insert_result.node_hash)
     assert diff_3 == set()
 
 
 @pytest.mark.anyio
-async def test_kv_diff_3(data_store: DataStore, tree_id: bytes32) -> None:
+async def test_kv_diff_3(data_store: DataStore, store_id: bytes32) -> None:
     insert_result = await data_store.autoinsert(
         key=b"000",
         value=b"000",
-        tree_id=tree_id,
+        store_id=store_id,
         status=Status.COMMITTED,
     )
-    await data_store.delete(tree_id=tree_id, key=b"000", status=Status.COMMITTED)
+    await data_store.delete(store_id=store_id, key=b"000", status=Status.COMMITTED)
     insert_result_2 = await data_store.autoinsert(
         key=b"000",
         value=b"001",
-        tree_id=tree_id,
+        store_id=store_id,
        status=Status.COMMITTED,
     )
-    diff_1 = await data_store.get_kv_diff(tree_id, insert_result.node_hash, insert_result_2.node_hash)
+    diff_1 = await data_store.get_kv_diff(store_id, insert_result.node_hash, insert_result_2.node_hash)
     assert diff_1 == {DiffData(OperationType.DELETE, b"000", b"000"), DiffData(OperationType.INSERT, b"000", b"001")}
     insert_result_3 = await data_store.upsert(
         key=b"000",
         new_value=b"002",
-        tree_id=tree_id,
+        store_id=store_id,
         status=Status.COMMITTED,
     )
-    diff_2 = await data_store.get_kv_diff(tree_id, insert_result_2.node_hash, insert_result_3.node_hash)
+    diff_2 = await data_store.get_kv_diff(store_id, insert_result_2.node_hash, insert_result_3.node_hash)
     assert diff_2 == {DiffData(OperationType.DELETE, b"000", b"001"), DiffData(OperationType.INSERT, b"000", b"002")}
 
 
 @pytest.mark.anyio
-async def test_rollback_to_generation(data_store: DataStore, tree_id: bytes32) -> None:
-    await add_0123_example(data_store, tree_id)
+async def test_rollback_to_generation(data_store: DataStore, store_id: bytes32) -> None:
+    await add_0123_example(data_store, store_id)
     expected_hashes = []
-    roots = await data_store.get_roots_between(tree_id, 1, 5)
+    roots = await data_store.get_roots_between(store_id, 1, 5)
     for generation, root in enumerate(roots):
         expected_hashes.append((generation + 1, root.node_hash))
     for generation, expected_hash in reversed(expected_hashes):
-        await data_store.rollback_to_generation(tree_id, generation)
-        root = await data_store.get_tree_root(tree_id)
+        await data_store.rollback_to_generation(store_id, generation)
+        root = await data_store.get_tree_root(store_id)
         assert root.node_hash == expected_hash
 
 
 @pytest.mark.anyio
-async def test_subscribe_unsubscribe(data_store: DataStore, tree_id: bytes32) -> None:
-    await data_store.subscribe(Subscription(tree_id, [ServerInfo("http://127:0:0:1/8000", 1, 1)]))
+async def test_subscribe_unsubscribe(data_store: DataStore, store_id: bytes32) -> None:
+    await data_store.subscribe(Subscription(store_id, [ServerInfo("http://127:0:0:1/8000", 1, 1)]))
     subscriptions = await data_store.get_subscriptions()
     urls = [server_info.url for subscription in subscriptions for server_info in subscription.servers_info]
     assert urls == ["http://127:0:0:1/8000"]
 
-    await data_store.subscribe(Subscription(tree_id, [ServerInfo("http://127:0:0:1/8001", 2, 2)]))
+    await data_store.subscribe(Subscription(store_id, [ServerInfo("http://127:0:0:1/8001", 2, 2)]))
     subscriptions = await data_store.get_subscriptions()
     urls = [server_info.url for subscription in subscriptions for server_info in subscription.servers_info]
     assert urls == ["http://127:0:0:1/8000", "http://127:0:0:1/8001"]
 
     await data_store.subscribe(
         Subscription(
-            tree_id, [ServerInfo("http://127:0:0:1/8000", 100, 100), ServerInfo("http://127:0:0:1/8001", 200, 200)]
+            store_id, [ServerInfo("http://127:0:0:1/8000", 100, 100), ServerInfo("http://127:0:0:1/8001", 200, 200)]
         )
     )
     subscriptions = await data_store.get_subscriptions()
     assert subscriptions == [
-        Subscription(tree_id, [ServerInfo("http://127:0:0:1/8000", 1, 1), ServerInfo("http://127:0:0:1/8001", 2, 2)]),
+        Subscription(store_id, [ServerInfo("http://127:0:0:1/8000", 1, 1), ServerInfo("http://127:0:0:1/8001", 2, 2)]),
     ]
 
-    await data_store.unsubscribe(tree_id)
+    await data_store.unsubscribe(store_id)
     assert await data_store.get_subscriptions() == []
 
-    tree_id2 = bytes32([0] * 32)
+    store_id2 = bytes32([0] * 32)
     await data_store.subscribe(
         Subscription(
-            tree_id, [ServerInfo("http://127:0:0:1/8000", 100, 100), ServerInfo("http://127:0:0:1/8001", 200, 200)]
+            store_id, [ServerInfo("http://127:0:0:1/8000", 100, 100), ServerInfo("http://127:0:0:1/8001", 200, 200)]
        )
     )
     await data_store.subscribe(
         Subscription(
-            tree_id2, [ServerInfo("http://127:0:0:1/8000", 300, 300), ServerInfo("http://127:0:0:1/8001", 400, 400)]
+            store_id2, [ServerInfo("http://127:0:0:1/8000", 300, 300), ServerInfo("http://127:0:0:1/8001", 400, 400)]
        )
     )
     subscriptions = await data_store.get_subscriptions()
     assert subscriptions == [
         Subscription(
-            tree_id, [ServerInfo("http://127:0:0:1/8000", 100, 100), ServerInfo("http://127:0:0:1/8001", 200, 200)]
+            store_id, [ServerInfo("http://127:0:0:1/8000", 100, 100), ServerInfo("http://127:0:0:1/8001", 200, 200)]
        ),
         Subscription(
-            tree_id2, [ServerInfo("http://127:0:0:1/8000", 300, 300), ServerInfo("http://127:0:0:1/8001", 400, 400)]
+            store_id2, [ServerInfo("http://127:0:0:1/8000", 300, 300), ServerInfo("http://127:0:0:1/8001", 400, 400)]
        ),
     ]
 
 
 @pytest.mark.anyio
-async def test_server_selection(data_store: DataStore, tree_id: bytes32) -> None:
+async def test_server_selection(data_store: DataStore, store_id: bytes32) -> None:
     start_timestamp = 1000
     await data_store.subscribe(
-        Subscription(tree_id, [ServerInfo(f"http://127.0.0.1/{port}", 0, 0) for port in range(8000, 8010)])
+        Subscription(store_id, [ServerInfo(f"http://127.0.0.1/{port}", 0, 0) for port in range(8000, 8010)])
    )
 
     free_servers = {f"http://127.0.0.1/{port}" for port in range(8000, 8010)}
@@ -1288,51 +1290,51 @@ async def test_server_selection(data_store: DataStore, tree_id: bytes32) -> None
     random = Random()
     random.seed(100, version=2)
     while len(free_servers) > 0:
-        servers_info = await data_store.get_available_servers_for_store(tree_id=tree_id, timestamp=start_timestamp)
+        servers_info = await data_store.get_available_servers_for_store(store_id=store_id, timestamp=start_timestamp)
         random.shuffle(servers_info)
         assert servers_info != []
         server_info = servers_info[0]
         assert server_info.ignore_till == 0
-        await data_store.received_incorrect_file(tree_id=tree_id, server_info=server_info, timestamp=start_timestamp)
+        await data_store.received_incorrect_file(store_id=store_id, server_info=server_info, timestamp=start_timestamp)
         assert server_info.url in free_servers
         tried_servers += 1
         free_servers.remove(server_info.url)
 
     assert tried_servers == 10
-    servers_info = await data_store.get_available_servers_for_store(tree_id=tree_id, timestamp=start_timestamp)
+    servers_info = await data_store.get_available_servers_for_store(store_id=store_id, timestamp=start_timestamp)
    assert servers_info == []
 
     current_timestamp = 2000 + 7 * 24 * 3600
     selected_servers = set()
     for _ in range(100):
-        servers_info = await data_store.get_available_servers_for_store(tree_id=tree_id, timestamp=current_timestamp)
+        servers_info = await data_store.get_available_servers_for_store(store_id=store_id, timestamp=current_timestamp)
         random.shuffle(servers_info)
         assert servers_info != []
         selected_servers.add(servers_info[0].url)
     assert selected_servers == {f"http://127.0.0.1/{port}" for port in range(8000, 8010)}
 
     for _ in range(100):
-        servers_info = await data_store.get_available_servers_for_store(tree_id=tree_id, timestamp=current_timestamp)
+        servers_info = await data_store.get_available_servers_for_store(store_id=store_id, timestamp=current_timestamp)
         random.shuffle(servers_info)
         assert servers_info != []
         if servers_info[0].url != "http://127.0.0.1/8000":
             await data_store.received_incorrect_file(
-                tree_id=tree_id, server_info=servers_info[0], timestamp=current_timestamp
+                store_id=store_id, server_info=servers_info[0], timestamp=current_timestamp
            )
 
-    servers_info = await data_store.get_available_servers_for_store(tree_id=tree_id, timestamp=current_timestamp)
+    servers_info = await data_store.get_available_servers_for_store(store_id=store_id, timestamp=current_timestamp)
     random.shuffle(servers_info)
     assert len(servers_info) == 1
     assert servers_info[0].url == "http://127.0.0.1/8000"
-    await data_store.received_correct_file(tree_id=tree_id, server_info=servers_info[0])
+    await data_store.received_correct_file(store_id=store_id, server_info=servers_info[0])
 
     ban_times = [5 * 60] * 3 + [15 * 60] * 3 + [30 * 60] * 2 + [60 * 60] * 10
     for ban_time in ban_times:
-        servers_info = await data_store.get_available_servers_for_store(tree_id=tree_id, timestamp=current_timestamp)
+        servers_info = await data_store.get_available_servers_for_store(store_id=store_id, timestamp=current_timestamp)
         assert len(servers_info) == 1
-        await data_store.server_misses_file(tree_id=tree_id, server_info=servers_info[0], timestamp=current_timestamp)
+        await data_store.server_misses_file(store_id=store_id, server_info=servers_info[0], timestamp=current_timestamp)
         current_timestamp += ban_time
-        servers_info = await data_store.get_available_servers_for_store(tree_id=tree_id, timestamp=current_timestamp)
+        servers_info = await data_store.get_available_servers_for_store(store_id=store_id, timestamp=current_timestamp)
         assert servers_info == []
         current_timestamp += 1
@@ -1343,10 +1345,15 @@ async def test_server_selection(data_store: DataStore, tree_id: bytes32) -> None
 )
 @pytest.mark.anyio
 async def test_server_http_ban(
-    data_store: DataStore, tree_id: bytes32, error: bool, monkeypatch: Any, tmp_path: Path, seeded_random: random.Random
+    data_store: DataStore,
+    store_id: bytes32,
+    error: bool,
+    monkeypatch: Any,
+    tmp_path: Path,
+    seeded_random: random.Random,
 ) -> None:
     sinfo = ServerInfo("http://127.0.0.1/8003", 0, 0)
-    await data_store.subscribe(Subscription(tree_id, [sinfo]))
+    await data_store.subscribe(Subscription(store_id, [sinfo]))
 
     async def mock_http_download(
         client_folder: Path,
@@ -1364,7 +1371,7 @@ async def mock_http_download(
         m.setattr("chia.data_layer.download_data.http_download", mock_http_download)
         success = await insert_from_delta_file(
             data_store=data_store,
-            tree_id=tree_id,
+            store_id=store_id,
             existing_generation=3,
             root_hashes=[bytes32.random(seeded_random)],
             server_info=sinfo,
@@ -1387,7 +1394,7 @@ async def mock_http_download(
         m.setattr("chia.data_layer.download_data.http_download", mock_http_download)
         success = await insert_from_delta_file(
             data_store=data_store,
-            tree_id=tree_id,
+            store_id=store_id,
            existing_generation=3,
             root_hashes=[bytes32.random(seeded_random)],
             server_info=sinfo,
@@ -1409,14 +1416,14 @@ async def mock_http_download(
     [True, False],
 )
 @pytest.mark.anyio
-async def test_data_server_files(data_store: DataStore, tree_id: bytes32, test_delta: bool, tmp_path: Path) -> None:
+async def test_data_server_files(data_store: DataStore, store_id: bytes32, test_delta: bool, tmp_path: Path) -> None:
     roots: List[Root] = []
     num_batches = 10
     num_ops_per_batch = 100
 
     db_uri = generate_in_memory_db_uri()
     async with DataStore.managed(database=db_uri, uri=True) as data_store_server:
-        await data_store_server.create_tree(tree_id, status=Status.COMMITTED)
+        await data_store_server.create_tree(store_id, status=Status.COMMITTED)
         random = Random()
         random.seed(100, version=2)
 
@@ -1436,9 +1443,9 @@ async def test_data_server_files(data_store: DataStore, tree_id: bytes32, test_d
                     keys.remove(key)
                     changelist.append({"action": "delete", "key": key})
                 counter += 1
-            await data_store_server.insert_batch(tree_id, changelist, status=Status.COMMITTED)
-            root = await data_store_server.get_tree_root(tree_id)
-            await write_files_for_root(data_store_server, tree_id, root, tmp_path, 0)
+            await data_store_server.insert_batch(store_id, changelist, status=Status.COMMITTED)
+            root = await data_store_server.get_tree_root(store_id)
+            await write_files_for_root(data_store_server, store_id, root, tmp_path, 0)
             roots.append(root)
 
     generation = 1
 
     for root in roots:
         assert root.node_hash is not None
         if not test_delta:
-            filename = get_full_tree_filename(tree_id, root.node_hash, generation)
+            filename = get_full_tree_filename(store_id, root.node_hash, generation)
         else:
-            filename = get_delta_filename(tree_id, root.node_hash, generation)
+            filename = get_delta_filename(store_id, root.node_hash, generation)
         assert is_filename_valid(filename)
-        await insert_into_data_store_from_file(data_store, tree_id, root.node_hash, tmp_path.joinpath(filename))
-        current_root = await data_store.get_tree_root(tree_id=tree_id)
+        await insert_into_data_store_from_file(data_store, store_id, root.node_hash, tmp_path.joinpath(filename))
+        current_root = await 
data_store.get_tree_root(store_id=store_id) assert current_root.node_hash == root.node_hash generation += 1 @pytest.mark.anyio @pytest.mark.parametrize("pending_status", [Status.PENDING, Status.PENDING_BATCH]) -async def test_pending_roots(data_store: DataStore, tree_id: bytes32, pending_status: Status) -> None: +async def test_pending_roots(data_store: DataStore, store_id: bytes32, pending_status: Status) -> None: key = b"\x01\x02" value = b"abc" await data_store.insert( key=key, value=value, - tree_id=tree_id, + store_id=store_id, reference_node_hash=None, side=None, status=Status.COMMITTED, @@ -1477,22 +1484,22 @@ async def test_pending_roots(data_store: DataStore, tree_id: bytes32, pending_st await data_store.autoinsert( key=key, value=value, - tree_id=tree_id, + store_id=store_id, status=pending_status, ) - pending_root = await data_store.get_pending_root(tree_id=tree_id) + pending_root = await data_store.get_pending_root(store_id=store_id) assert pending_root is not None assert pending_root.generation == 2 and pending_root.status == pending_status - await data_store.clear_pending_roots(tree_id=tree_id) - pending_root = await data_store.get_pending_root(tree_id=tree_id) + await data_store.clear_pending_roots(store_id=store_id) + pending_root = await data_store.get_pending_root(store_id=store_id) assert pending_root is None @pytest.mark.anyio @pytest.mark.parametrize("pending_status", [Status.PENDING, Status.PENDING_BATCH]) async def test_clear_pending_roots_returns_root( - data_store: DataStore, tree_id: bytes32, pending_status: Status + data_store: DataStore, store_id: bytes32, pending_status: Status ) -> None: key = b"\x01\x02" value = b"abc" @@ -1500,14 +1507,14 @@ async def test_clear_pending_roots_returns_root( await data_store.insert( key=key, value=value, - tree_id=tree_id, + store_id=store_id, reference_node_hash=None, side=None, status=pending_status, ) - pending_root = await data_store.get_pending_root(tree_id=tree_id) - cleared_root = await data_store.clear_pending_roots(tree_id=tree_id) + pending_root = await data_store.get_pending_root(store_id=store_id) + cleared_root = await data_store.clear_pending_roots(store_id=store_id) assert cleared_root == pending_root @@ -1565,7 +1572,7 @@ def id(self) -> str: @pytest.mark.anyio async def test_benchmark_batch_insert_speed( data_store: DataStore, - tree_id: bytes32, + store_id: bytes32, benchmark_runner: BenchmarkRunner, case: BatchInsertBenchmarkCase, ) -> None: @@ -1586,14 +1593,14 @@ async def test_benchmark_batch_insert_speed( if case.pre > 0: await data_store.insert_batch( - tree_id=tree_id, + store_id=store_id, changelist=pre, status=Status.COMMITTED, ) with benchmark_runner.assert_runtime(seconds=case.limit): await data_store.insert_batch( - tree_id=tree_id, + store_id=store_id, changelist=batch, ) @@ -1608,7 +1615,7 @@ async def test_benchmark_batch_insert_speed( @pytest.mark.anyio async def test_benchmark_batch_insert_speed_multiple_batches( data_store: DataStore, - tree_id: bytes32, + store_id: bytes32, benchmark_runner: BenchmarkRunner, case: BatchesInsertBenchmarkCase, ) -> None: @@ -1626,7 +1633,7 @@ async def test_benchmark_batch_insert_speed_multiple_batches( for x in range(batch * case.count, (batch + 1) * case.count) ] await data_store.insert_batch( - tree_id=tree_id, + store_id=store_id, changelist=changelist, status=Status.COMMITTED, ) @@ -1634,10 +1641,10 @@ async def test_benchmark_batch_insert_speed_multiple_batches( @pytest.mark.anyio async def test_delete_store_data(raw_data_store: DataStore) -> None: - 
tree_id = bytes32(b"\0" * 32) - tree_id_2 = bytes32(b"\0" * 31 + b"\1") - await raw_data_store.create_tree(tree_id=tree_id, status=Status.COMMITTED) - await raw_data_store.create_tree(tree_id=tree_id_2, status=Status.COMMITTED) + store_id = bytes32(b"\0" * 32) + store_id_2 = bytes32(b"\0" * 31 + b"\1") + await raw_data_store.create_tree(store_id=store_id, status=Status.COMMITTED) + await raw_data_store.create_tree(store_id=store_id_2, status=Status.COMMITTED) total_keys = 4 keys = [key.to_bytes(4, byteorder="big") for key in range(total_keys)] batch1 = [ @@ -1648,9 +1655,9 @@ async def test_delete_store_data(raw_data_store: DataStore) -> None: batch1.append({"action": "insert", "key": keys[2], "value": keys[2]}) batch2.append({"action": "insert", "key": keys[3], "value": keys[3]}) assert batch1 != batch2 - await raw_data_store.insert_batch(tree_id, batch1, status=Status.COMMITTED) - await raw_data_store.insert_batch(tree_id_2, batch2, status=Status.COMMITTED) - keys_values_before = await raw_data_store.get_keys_values(tree_id_2) + await raw_data_store.insert_batch(store_id, batch1, status=Status.COMMITTED) + await raw_data_store.insert_batch(store_id_2, batch2, status=Status.COMMITTED) + keys_values_before = await raw_data_store.get_keys_values(store_id_2) async with raw_data_store.db_wrapper.reader() as reader: result = await reader.execute("SELECT * FROM node") nodes = await result.fetchall() @@ -1659,9 +1666,9 @@ async def test_delete_store_data(raw_data_store: DataStore) -> None: if node["key"] is not None: kv_nodes_before[node["key"]] = node["value"] assert [kv_nodes_before[key] for key in keys] == keys - await raw_data_store.delete_store_data(tree_id) + await raw_data_store.delete_store_data(store_id) # Deleting from `node` table doesn't alter other stores. - keys_values_after = await raw_data_store.get_keys_values(tree_id_2) + keys_values_after = await raw_data_store.get_keys_values(store_id_2) assert keys_values_before == keys_values_after async with raw_data_store.db_wrapper.reader() as reader: result = await reader.execute("SELECT * FROM node") @@ -1676,14 +1683,14 @@ async def test_delete_store_data(raw_data_store: DataStore) -> None: else: # `keys[2]` was only present in the first store. 
assert keys[i] not in kv_nodes_after - assert not await raw_data_store.tree_id_exists(tree_id) - await raw_data_store.delete_store_data(tree_id_2) + assert not await raw_data_store.store_id_exists(store_id) + await raw_data_store.delete_store_data(store_id_2) async with raw_data_store.db_wrapper.reader() as reader: async with reader.execute("SELECT COUNT(*) FROM node") as cursor: row_count = await cursor.fetchone() assert row_count is not None assert row_count[0] == 0 - assert not await raw_data_store.tree_id_exists(tree_id_2) + assert not await raw_data_store.store_id_exists(store_id_2) @pytest.mark.anyio @@ -1693,9 +1700,9 @@ async def test_delete_store_data_multiple_stores(raw_data_store: DataStore) -> N num_stores = 50 total_keys = 150 keys_deleted_per_store = 3 - tree_ids = [bytes32(i.to_bytes(32, byteorder="big")) for i in range(num_stores)] - for tree_id in tree_ids: - await raw_data_store.create_tree(tree_id=tree_id, status=Status.COMMITTED) + store_ids = [bytes32(i.to_bytes(32, byteorder="big")) for i in range(num_stores)] + for store_id in store_ids: + await raw_data_store.create_tree(store_id=store_id, status=Status.COMMITTED) original_keys = [key.to_bytes(4, byteorder="big") for key in range(total_keys)] batches = [] for i in range(num_stores): @@ -1704,8 +1711,8 @@ async def test_delete_store_data_multiple_stores(raw_data_store: DataStore) -> N ] batches.append(batch) - for tree_id, batch in zip(tree_ids, batches): - await raw_data_store.insert_batch(tree_id, batch, status=Status.COMMITTED) + for store_id, batch in zip(store_ids, batches): + await raw_data_store.insert_batch(store_id, batch, status=Status.COMMITTED) for tree_index in range(num_stores): async with raw_data_store.db_wrapper.reader() as reader: @@ -1718,7 +1725,7 @@ async def test_delete_store_data_multiple_stores(raw_data_store: DataStore) -> N keys_before_index = set(original_keys[: tree_index * keys_deleted_per_store]) assert keys_after_index.issubset(keys) assert keys.isdisjoint(keys_before_index) - await raw_data_store.delete_store_data(tree_ids[tree_index]) + await raw_data_store.delete_store_data(store_ids[tree_index]) async with raw_data_store.db_wrapper.reader() as reader: async with reader.execute("SELECT COUNT(*) FROM node") as cursor: @@ -1730,11 +1737,11 @@ async def test_delete_store_data_multiple_stores(raw_data_store: DataStore) -> N @pytest.mark.parametrize("common_keys_count", [1, 250, 499]) @pytest.mark.anyio async def test_delete_store_data_with_common_values(raw_data_store: DataStore, common_keys_count: int) -> None: - tree_id_1 = bytes32(b"\x00" * 31 + b"\x01") - tree_id_2 = bytes32(b"\x00" * 31 + b"\x02") + store_id_1 = bytes32(b"\x00" * 31 + b"\x01") + store_id_2 = bytes32(b"\x00" * 31 + b"\x02") - await raw_data_store.create_tree(tree_id=tree_id_1, status=Status.COMMITTED) - await raw_data_store.create_tree(tree_id=tree_id_2, status=Status.COMMITTED) + await raw_data_store.create_tree(store_id=store_id_1, status=Status.COMMITTED) + await raw_data_store.create_tree(store_id=store_id_2, status=Status.COMMITTED) key_offset = 1000 total_keys_per_store = 500 @@ -1750,10 +1757,10 @@ async def test_delete_store_data_with_common_values(raw_data_store: DataStore, c batch1 = [{"action": "insert", "key": key, "value": key} for key in common_keys.union(unique_keys_1)] batch2 = [{"action": "insert", "key": key, "value": key} for key in common_keys.union(unique_keys_2)] - await raw_data_store.insert_batch(tree_id_1, batch1, status=Status.COMMITTED) - await raw_data_store.insert_batch(tree_id_2, batch2, 
status=Status.COMMITTED) + await raw_data_store.insert_batch(store_id_1, batch1, status=Status.COMMITTED) + await raw_data_store.insert_batch(store_id_2, batch2, status=Status.COMMITTED) - await raw_data_store.delete_store_data(tree_id_1) + await raw_data_store.delete_store_data(store_id_1) async with raw_data_store.db_wrapper.reader() as reader: result = await reader.execute("SELECT * FROM node") nodes = await result.fetchall() @@ -1770,9 +1777,9 @@ async def test_delete_store_data_with_common_values(raw_data_store: DataStore, c async def test_delete_store_data_protects_pending_roots(raw_data_store: DataStore, pending_status: Status) -> None: num_stores = 5 total_keys = 15 - tree_ids = [bytes32(i.to_bytes(32, byteorder="big")) for i in range(num_stores)] - for tree_id in tree_ids: - await raw_data_store.create_tree(tree_id=tree_id, status=Status.COMMITTED) + store_ids = [bytes32(i.to_bytes(32, byteorder="big")) for i in range(num_stores)] + for store_id in store_ids: + await raw_data_store.create_tree(store_id=store_id, status=Status.COMMITTED) original_keys = [key.to_bytes(4, byteorder="big") for key in range(total_keys)] batches = [] keys_per_pending_root = 2 @@ -1782,12 +1789,12 @@ async def test_delete_store_data_protects_pending_roots(raw_data_store: DataStor end_index = (i + 1) * keys_per_pending_root batch = [{"action": "insert", "key": key, "value": key} for key in original_keys[start_index:end_index]] batches.append(batch) - for tree_id, batch in zip(tree_ids, batches): - await raw_data_store.insert_batch(tree_id, batch, status=pending_status) + for store_id, batch in zip(store_ids, batches): + await raw_data_store.insert_batch(store_id, batch, status=pending_status) - tree_id = tree_ids[-1] + store_id = store_ids[-1] batch = [{"action": "insert", "key": key, "value": key} for key in original_keys] - await raw_data_store.insert_batch(tree_id, batch, status=Status.COMMITTED) + await raw_data_store.insert_batch(store_id, batch, status=Status.COMMITTED) async with raw_data_store.db_wrapper.reader() as reader: result = await reader.execute("SELECT * FROM node") @@ -1796,7 +1803,7 @@ async def test_delete_store_data_protects_pending_roots(raw_data_store: DataStor keys = {node["key"] for node in nodes if node["key"] is not None} assert keys == set(original_keys) - await raw_data_store.delete_store_data(tree_id) + await raw_data_store.delete_store_data(store_id) async with raw_data_store.db_wrapper.reader() as reader: result = await reader.execute("SELECT * FROM node") nodes = await result.fetchall() @@ -1805,11 +1812,11 @@ async def test_delete_store_data_protects_pending_roots(raw_data_store: DataStor assert keys == set(original_keys[: (num_stores - 1) * keys_per_pending_root]) for index in range(num_stores - 1): - tree_id = tree_ids[index] - root = await raw_data_store.get_pending_root(tree_id) + store_id = store_ids[index] + root = await raw_data_store.get_pending_root(store_id) assert root is not None await raw_data_store.change_root_status(root, Status.COMMITTED) - kv = await raw_data_store.get_keys_values(tree_id=tree_id) + kv = await raw_data_store.get_keys_values(store_id=store_id) start_index = index * keys_per_pending_root end_index = (index + 1) * keys_per_pending_root assert {pair.key for pair in kv} == set(original_keys[start_index:end_index]) @@ -1824,12 +1831,12 @@ async def test_get_node_by_key_with_overlapping_keys(raw_data_store: DataStore) random = Random() random.seed(100, version=2) - tree_ids = [bytes32(i.to_bytes(32, byteorder="big")) for i in range(num_stores)] 
- for tree_id in tree_ids: - await raw_data_store.create_tree(tree_id=tree_id, status=Status.COMMITTED) + store_ids = [bytes32(i.to_bytes(32, byteorder="big")) for i in range(num_stores)] + for store_id in store_ids: + await raw_data_store.create_tree(store_id=store_id, status=Status.COMMITTED) keys = [key.to_bytes(4, byteorder="big") for key in range(num_keys)] for repetition in range(repetitions): - for index, tree_id in enumerate(tree_ids): + for index, store_id in enumerate(store_ids): values = [ (value + values_offset * repetition).to_bytes(4, byteorder="big") for value in range(index * num_keys, (index + 1) * num_keys) @@ -1837,28 +1844,28 @@ async def test_get_node_by_key_with_overlapping_keys(raw_data_store: DataStore) batch = [] for key, value in zip(keys, values): batch.append({"action": "upsert", "key": key, "value": value}) - await raw_data_store.insert_batch(tree_id, batch, status=Status.COMMITTED) + await raw_data_store.insert_batch(store_id, batch, status=Status.COMMITTED) - for index, tree_id in enumerate(tree_ids): + for index, store_id in enumerate(store_ids): values = [ (value + values_offset * repetition).to_bytes(4, byteorder="big") for value in range(index * num_keys, (index + 1) * num_keys) ] for key, value in zip(keys, values): - node = await raw_data_store.get_node_by_key(tree_id=tree_id, key=key) + node = await raw_data_store.get_node_by_key(store_id=store_id, key=key) assert node.value == value if random.randint(0, 4) == 0: batch = [{"action": "delete", "key": key}] - await raw_data_store.insert_batch(tree_id, batch, status=Status.COMMITTED) + await raw_data_store.insert_batch(store_id, batch, status=Status.COMMITTED) with pytest.raises(KeyNotFoundError, match=f"Key not found: {key.hex()}"): - await raw_data_store.get_node_by_key(tree_id=tree_id, key=key) + await raw_data_store.get_node_by_key(store_id=store_id, key=key) @pytest.mark.anyio async def test_insert_from_delta_file_correct_file_exists( - data_store: DataStore, tree_id: bytes32, tmp_path: Path + data_store: DataStore, store_id: bytes32, tmp_path: Path ) -> None: - await data_store.create_tree(tree_id=tree_id, status=Status.COMMITTED) + await data_store.create_tree(store_id=store_id, status=Status.COMMITTED) num_files = 5 for generation in range(num_files): key = generation.to_bytes(4, byteorder="big") @@ -1866,16 +1873,16 @@ async def test_insert_from_delta_file_correct_file_exists( await data_store.autoinsert( key=key, value=value, - tree_id=tree_id, + store_id=store_id, status=Status.COMMITTED, ) - root = await data_store.get_tree_root(tree_id=tree_id) + root = await data_store.get_tree_root(store_id=store_id) assert root.generation == num_files + 1 root_hashes = [] for generation in range(1, num_files + 2): - root = await data_store.get_tree_root(tree_id=tree_id, generation=generation) - await write_files_for_root(data_store, tree_id, root, tmp_path, 0) + root = await data_store.get_tree_root(store_id=store_id, generation=generation) + await write_files_for_root(data_store, store_id, root, tmp_path, 0) root_hashes.append(bytes32([0] * 32) if root.node_hash is None else root.node_hash) with os.scandir(tmp_path) as entries: filenames = {entry.name for entry in entries} @@ -1886,15 +1893,15 @@ async def test_insert_from_delta_file_correct_file_exists( with os.scandir(tmp_path) as entries: filenames = {entry.name for entry in entries} assert len(filenames) == num_files + 1 - kv_before = await data_store.get_keys_values(tree_id=tree_id) - await data_store.rollback_to_generation(tree_id, 0) - root = await 
data_store.get_tree_root(tree_id=tree_id) + kv_before = await data_store.get_keys_values(store_id=store_id) + await data_store.rollback_to_generation(store_id, 0) + root = await data_store.get_tree_root(store_id=store_id) assert root.generation == 0 sinfo = ServerInfo("http://127.0.0.1/8003", 0, 0) success = await insert_from_delta_file( data_store=data_store, - tree_id=tree_id, + store_id=store_id, existing_generation=0, root_hashes=root_hashes, server_info=sinfo, @@ -1906,21 +1913,21 @@ async def test_insert_from_delta_file_correct_file_exists( ) assert success - root = await data_store.get_tree_root(tree_id=tree_id) + root = await data_store.get_tree_root(store_id=store_id) assert root.generation == num_files + 1 with os.scandir(tmp_path) as entries: filenames = {entry.name for entry in entries} assert len(filenames) == 2 * (num_files + 1) - kv = await data_store.get_keys_values(tree_id=tree_id) + kv = await data_store.get_keys_values(store_id=store_id) assert kv == kv_before @pytest.mark.anyio async def test_insert_from_delta_file_incorrect_file_exists( - data_store: DataStore, tree_id: bytes32, tmp_path: Path + data_store: DataStore, store_id: bytes32, tmp_path: Path ) -> None: - await data_store.create_tree(tree_id=tree_id, status=Status.COMMITTED) - root = await data_store.get_tree_root(tree_id=tree_id) + await data_store.create_tree(store_id=store_id, status=Status.COMMITTED) + root = await data_store.get_tree_root(store_id=store_id) assert root.generation == 1 key = b"a" @@ -1928,13 +1935,13 @@ async def test_insert_from_delta_file_incorrect_file_exists( await data_store.autoinsert( key=key, value=value, - tree_id=tree_id, + store_id=store_id, status=Status.COMMITTED, ) - root = await data_store.get_tree_root(tree_id=tree_id) + root = await data_store.get_tree_root(store_id=store_id) assert root.generation == 2 - await write_files_for_root(data_store, tree_id, root, tmp_path, 0) + await write_files_for_root(data_store, store_id, root, tmp_path, 0) incorrect_root_hash = bytes32([0] * 31 + [1]) with os.scandir(tmp_path) as entries: @@ -1942,18 +1949,18 @@ async def test_insert_from_delta_file_incorrect_file_exists( assert len(filenames) == 2 os.rename( tmp_path.joinpath(filenames[0]), - tmp_path.joinpath(get_delta_filename(tree_id, incorrect_root_hash, 2)), + tmp_path.joinpath(get_delta_filename(store_id, incorrect_root_hash, 2)), ) os.rename( tmp_path.joinpath(filenames[1]), - tmp_path.joinpath(get_full_tree_filename(tree_id, incorrect_root_hash, 2)), + tmp_path.joinpath(get_full_tree_filename(store_id, incorrect_root_hash, 2)), ) - await data_store.rollback_to_generation(tree_id, 1) + await data_store.rollback_to_generation(store_id, 1) sinfo = ServerInfo("http://127.0.0.1/8003", 0, 0) success = await insert_from_delta_file( data_store=data_store, - tree_id=tree_id, + store_id=store_id, existing_generation=1, root_hashes=[incorrect_root_hash], server_info=sinfo, @@ -1965,7 +1972,7 @@ async def test_insert_from_delta_file_incorrect_file_exists( ) assert not success - root = await data_store.get_tree_root(tree_id=tree_id) + root = await data_store.get_tree_root(store_id=store_id) assert root.generation == 1 with os.scandir(tmp_path) as entries: filenames = [entry.name for entry in entries] @@ -1973,18 +1980,18 @@ async def test_insert_from_delta_file_incorrect_file_exists( @pytest.mark.anyio -async def test_insert_key_already_present(data_store: DataStore, tree_id: bytes32) -> None: +async def test_insert_key_already_present(data_store: DataStore, store_id: bytes32) -> None: key = 
b"foo" value = b"bar" await data_store.insert( - key=key, value=value, tree_id=tree_id, reference_node_hash=None, side=None, status=Status.COMMITTED + key=key, value=value, store_id=store_id, reference_node_hash=None, side=None, status=Status.COMMITTED ) with pytest.raises(Exception, match=f"Key already present: {key.hex()}"): - await data_store.insert(key=key, value=value, tree_id=tree_id, reference_node_hash=None, side=None) + await data_store.insert(key=key, value=value, store_id=store_id, reference_node_hash=None, side=None) @pytest.mark.anyio -async def test_update_keys(data_store: DataStore, tree_id: bytes32) -> None: +async def test_update_keys(data_store: DataStore, store_id: bytes32) -> None: num_keys = 10 missing_keys = 50 num_values = 10 @@ -2000,18 +2007,18 @@ async def test_update_keys(data_store: DataStore, tree_id: bytes32) -> None: changelist.append({"action": "insert", "key": bytes_key, "value": bytes_value}) await data_store.insert_batch( - tree_id=tree_id, + store_id=store_id, changelist=changelist, status=Status.COMMITTED, ) for key in range(num_keys): bytes_key = key.to_bytes(4, byteorder="big") - node = await data_store.get_node_by_key(bytes_key, tree_id) + node = await data_store.get_node_by_key(bytes_key, store_id) assert node.value == bytes_value for key in range(num_keys, num_keys + missing_keys): bytes_key = key.to_bytes(4, byteorder="big") with pytest.raises(KeyNotFoundError, match=f"Key not found: {bytes_key.hex()}"): - await data_store.get_node_by_key(bytes_key, tree_id) + await data_store.get_node_by_key(bytes_key, store_id) num_keys += new_keys diff --git a/chia/_tests/core/data_layer/test_data_store_schema.py b/chia/_tests/core/data_layer/test_data_store_schema.py index d0750b947716..46a474f2f095 100644 --- a/chia/_tests/core/data_layer/test_data_store_schema.py +++ b/chia/_tests/core/data_layer/test_data_store_schema.py @@ -14,9 +14,9 @@ @pytest.mark.anyio -async def test_node_update_fails(data_store: DataStore, tree_id: bytes32) -> None: - await add_01234567_example(data_store=data_store, tree_id=tree_id) - node = await data_store.get_node_by_key(key=b"\x04", tree_id=tree_id) +async def test_node_update_fails(data_store: DataStore, store_id: bytes32) -> None: + await add_01234567_example(data_store=data_store, store_id=store_id) + node = await data_store.get_node_by_key(key=b"\x04", store_id=store_id) async with data_store.db_wrapper.writer() as writer: with pytest.raises(sqlite3.IntegrityError, match=r"^updates not allowed to the node table$"): @@ -33,7 +33,7 @@ async def test_node_update_fails(data_store: DataStore, tree_id: bytes32) -> Non @pytest.mark.anyio async def test_node_hash_must_be_32( data_store: DataStore, - tree_id: bytes32, + store_id: bytes32, length: int, valid_node_values: Dict[str, Any], ) -> None: @@ -53,7 +53,7 @@ async def test_node_hash_must_be_32( @pytest.mark.anyio async def test_node_hash_must_not_be_null( data_store: DataStore, - tree_id: bytes32, + store_id: bytes32, valid_node_values: Dict[str, Any], ) -> None: valid_node_values["hash"] = None @@ -91,10 +91,10 @@ async def test_node_type_must_be_valid( @pytest.mark.parametrize(argnames="side", argvalues=Side) @pytest.mark.anyio -async def test_node_internal_child_not_null(data_store: DataStore, tree_id: bytes32, side: Side) -> None: - await add_01234567_example(data_store=data_store, tree_id=tree_id) - node_a = await data_store.get_node_by_key(key=b"\x02", tree_id=tree_id) - node_b = await data_store.get_node_by_key(key=b"\x04", tree_id=tree_id) +async def 
test_node_internal_child_not_null(data_store: DataStore, store_id: bytes32, side: Side) -> None: + await add_01234567_example(data_store=data_store, store_id=store_id) + node_a = await data_store.get_node_by_key(key=b"\x02", store_id=store_id) + node_b = await data_store.get_node_by_key(key=b"\x04", store_id=store_id) values = create_valid_node_values(node_type=NodeType.INTERNAL, left_hash=node_a.hash, right_hash=node_b.hash) @@ -119,13 +119,13 @@ async def test_node_internal_child_not_null(data_store: DataStore, tree_id: byte @pytest.mark.anyio async def test_node_internal_must_be_valid_reference( data_store: DataStore, - tree_id: bytes32, + store_id: bytes32, bad_child_hash: bytes, side: Side, ) -> None: - await add_01234567_example(data_store=data_store, tree_id=tree_id) - node_a = await data_store.get_node_by_key(key=b"\x02", tree_id=tree_id) - node_b = await data_store.get_node_by_key(key=b"\x04", tree_id=tree_id) + await add_01234567_example(data_store=data_store, store_id=store_id) + node_a = await data_store.get_node_by_key(key=b"\x02", store_id=store_id) + node_b = await data_store.get_node_by_key(key=b"\x04", store_id=store_id) values = create_valid_node_values(node_type=NodeType.INTERNAL, left_hash=node_a.hash, right_hash=node_b.hash) @@ -149,8 +149,8 @@ async def test_node_internal_must_be_valid_reference( @pytest.mark.parametrize(argnames="key_or_value", argvalues=["key", "value"]) @pytest.mark.anyio -async def test_node_terminal_key_value_not_null(data_store: DataStore, tree_id: bytes32, key_or_value: str) -> None: - await add_01234567_example(data_store=data_store, tree_id=tree_id) +async def test_node_terminal_key_value_not_null(data_store: DataStore, store_id: bytes32, key_or_value: str) -> None: + await add_01234567_example(data_store=data_store, store_id=store_id) values = create_valid_node_values(node_type=NodeType.TERMINAL) values[key_or_value] = None @@ -168,10 +168,15 @@ async def test_node_terminal_key_value_not_null(data_store: DataStore, tree_id: @pytest.mark.parametrize(argnames="length", argvalues=sorted(set(range(50)) - {32})) @pytest.mark.anyio -async def test_root_tree_id_must_be_32(data_store: DataStore, tree_id: bytes32, length: int) -> None: - example = await add_01234567_example(data_store=data_store, tree_id=tree_id) - bad_tree_id = bytes([0] * length) - values = {"tree_id": bad_tree_id, "generation": 0, "node_hash": example.terminal_nodes[0], "status": Status.PENDING} +async def test_root_store_id_must_be_32(data_store: DataStore, store_id: bytes32, length: int) -> None: + example = await add_01234567_example(data_store=data_store, store_id=store_id) + bad_store_id = bytes([0] * length) + values = { + "tree_id": bad_store_id, + "generation": 0, + "node_hash": example.terminal_nodes[0], + "status": Status.PENDING, + } async with data_store.db_wrapper.writer() as writer: with pytest.raises(sqlite3.IntegrityError, match=r"^CHECK constraint failed:"): @@ -185,8 +190,8 @@ async def test_root_tree_id_must_be_32(data_store: DataStore, tree_id: bytes32, @pytest.mark.anyio -async def test_root_tree_id_must_not_be_null(data_store: DataStore, tree_id: bytes32) -> None: - example = await add_01234567_example(data_store=data_store, tree_id=tree_id) +async def test_root_store_id_must_not_be_null(data_store: DataStore, store_id: bytes32) -> None: + example = await add_01234567_example(data_store=data_store, store_id=store_id) values = {"tree_id": None, "generation": 0, "node_hash": example.terminal_nodes[0], "status": Status.PENDING} async with 
data_store.db_wrapper.writer() as writer: @@ -203,9 +208,9 @@ async def test_root_tree_id_must_not_be_null(data_store: DataStore, tree_id: byt @pytest.mark.parametrize(argnames="generation", argvalues=[-200, -2, -1]) @pytest.mark.anyio async def test_root_generation_must_not_be_less_than_zero( - data_store: DataStore, tree_id: bytes32, generation: int + data_store: DataStore, store_id: bytes32, generation: int ) -> None: - example = await add_01234567_example(data_store=data_store, tree_id=tree_id) + example = await add_01234567_example(data_store=data_store, store_id=store_id) values = { "tree_id": bytes32([0] * 32), "generation": generation, @@ -225,8 +230,8 @@ async def test_root_generation_must_not_be_less_than_zero( @pytest.mark.anyio -async def test_root_generation_must_not_be_null(data_store: DataStore, tree_id: bytes32) -> None: - example = await add_01234567_example(data_store=data_store, tree_id=tree_id) +async def test_root_generation_must_not_be_null(data_store: DataStore, store_id: bytes32) -> None: + example = await add_01234567_example(data_store=data_store, store_id=store_id) values = { "tree_id": bytes32([0] * 32), "generation": None, @@ -262,8 +267,8 @@ async def test_root_node_hash_must_reference(data_store: DataStore) -> None: @pytest.mark.parametrize(argnames="bad_status", argvalues=sorted(set(range(-20, 20)) - {*Status})) @pytest.mark.anyio -async def test_root_status_must_be_valid(data_store: DataStore, tree_id: bytes32, bad_status: int) -> None: - example = await add_01234567_example(data_store=data_store, tree_id=tree_id) +async def test_root_status_must_be_valid(data_store: DataStore, store_id: bytes32, bad_status: int) -> None: + example = await add_01234567_example(data_store=data_store, store_id=store_id) values = { "tree_id": bytes32([0] * 32), "generation": 0, @@ -283,8 +288,8 @@ async def test_root_status_must_be_valid(data_store: DataStore, tree_id: bytes32 @pytest.mark.anyio -async def test_root_status_must_not_be_null(data_store: DataStore, tree_id: bytes32) -> None: - example = await add_01234567_example(data_store=data_store, tree_id=tree_id) +async def test_root_status_must_not_be_null(data_store: DataStore, store_id: bytes32) -> None: + example = await add_01234567_example(data_store=data_store, store_id=store_id) values = {"tree_id": bytes32([0] * 32), "generation": 0, "node_hash": example.terminal_nodes[0], "status": None} async with data_store.db_wrapper.writer() as writer: @@ -299,9 +304,9 @@ async def test_root_status_must_not_be_null(data_store: DataStore, tree_id: byte @pytest.mark.anyio -async def test_root_tree_id_generation_must_be_unique(data_store: DataStore, tree_id: bytes32) -> None: - example = await add_01234567_example(data_store=data_store, tree_id=tree_id) - values = {"tree_id": tree_id, "generation": 0, "node_hash": example.terminal_nodes[0], "status": Status.COMMITTED} +async def test_root_store_id_generation_must_be_unique(data_store: DataStore, store_id: bytes32) -> None: + example = await add_01234567_example(data_store=data_store, store_id=store_id) + values = {"tree_id": store_id, "generation": 0, "node_hash": example.terminal_nodes[0], "status": Status.COMMITTED} async with data_store.db_wrapper.writer() as writer: with pytest.raises(sqlite3.IntegrityError, match=r"^UNIQUE constraint failed: root.tree_id, root.generation$"): @@ -318,7 +323,7 @@ async def test_root_tree_id_generation_must_be_unique(data_store: DataStore, tre @pytest.mark.anyio async def test_ancestors_ancestor_must_be_32( data_store: DataStore, - tree_id: 
bytes32, + store_id: bytes32, length: int, ) -> None: async with data_store.db_wrapper.writer() as writer: @@ -335,9 +340,9 @@ async def test_ancestors_ancestor_must_be_32( @pytest.mark.parametrize(argnames="length", argvalues=sorted(set(range(50)) - {32})) @pytest.mark.anyio -async def test_ancestors_tree_id_must_be_32( +async def test_ancestors_store_id_must_be_32( data_store: DataStore, - tree_id: bytes32, + store_id: bytes32, length: int, ) -> None: async with data_store.db_wrapper.writer() as writer: @@ -354,9 +359,9 @@ async def test_ancestors_tree_id_must_be_32( @pytest.mark.parametrize(argnames="length", argvalues=sorted(set(range(50)) - {32})) @pytest.mark.anyio -async def test_subscriptions_tree_id_must_be_32( +async def test_subscriptions_store_id_must_be_32( data_store: DataStore, - tree_id: bytes32, + store_id: bytes32, length: int, ) -> None: async with data_store.db_wrapper.writer() as writer: diff --git a/chia/_tests/core/data_layer/util.py b/chia/_tests/core/data_layer/util.py index 14c999c0fc9e..a83fbebc5e91 100644 --- a/chia/_tests/core/data_layer/util.py +++ b/chia/_tests/core/data_layer/util.py @@ -28,7 +28,7 @@ async def general_insert( data_store: DataStore, - tree_id: bytes32, + store_id: bytes32, key: bytes, value: bytes, reference_node_hash: bytes32, @@ -37,7 +37,7 @@ async def general_insert( insert_result = await data_store.insert( key=key, value=value, - tree_id=tree_id, + store_id=store_id, reference_node_hash=reference_node_hash, side=side, status=Status.COMMITTED, @@ -51,7 +51,7 @@ class Example: terminal_nodes: List[bytes32] -async def add_0123_example(data_store: DataStore, tree_id: bytes32) -> Example: +async def add_0123_example(data_store: DataStore, store_id: bytes32) -> Example: expected = Program.to( ( ( @@ -65,7 +65,7 @@ async def add_0123_example(data_store: DataStore, tree_id: bytes32) -> Example: ), ) - insert = functools.partial(general_insert, data_store=data_store, tree_id=tree_id) + insert = functools.partial(general_insert, data_store=data_store, store_id=store_id) c_hash = await insert(key=b"\x02", value=b"\x12\x02", reference_node_hash=None, side=None) b_hash = await insert(key=b"\x01", value=b"\x11\x01", reference_node_hash=c_hash, side=Side.LEFT) @@ -75,7 +75,7 @@ async def add_0123_example(data_store: DataStore, tree_id: bytes32) -> Example: return Example(expected=expected, terminal_nodes=[a_hash, b_hash, c_hash, d_hash]) -async def add_01234567_example(data_store: DataStore, tree_id: bytes32) -> Example: +async def add_01234567_example(data_store: DataStore, store_id: bytes32) -> Example: expected = Program.to( ( ( @@ -101,7 +101,7 @@ async def add_01234567_example(data_store: DataStore, tree_id: bytes32) -> Examp ), ) - insert = functools.partial(general_insert, data_store=data_store, tree_id=tree_id) + insert = functools.partial(general_insert, data_store=data_store, store_id=store_id) g_hash = await insert(key=b"\x06", value=b"\x16\x06", reference_node_hash=None, side=None) diff --git a/chia/data_layer/data_layer.py b/chia/data_layer/data_layer.py index bd848f16fb77..d5665b1f0d9c 100644 --- a/chia/data_layer/data_layer.py +++ b/chia/data_layer/data_layer.py @@ -242,31 +242,31 @@ async def wallet_log_in(self, fingerprint: int) -> int: async def create_store( self, fee: uint64, root: bytes32 = bytes32([0] * 32) ) -> Tuple[List[TransactionRecord], bytes32]: - txs, tree_id = await self.wallet_rpc.create_new_dl(root, fee) - res = await self.data_store.create_tree(tree_id=tree_id) + txs, store_id = await 
self.wallet_rpc.create_new_dl(root, fee) + res = await self.data_store.create_tree(store_id=store_id) if res is None: self.log.fatal("failed creating store") self.initialized = True - return txs, tree_id + return txs, store_id async def batch_update( self, - tree_id: bytes32, + store_id: bytes32, changelist: List[Dict[str, Any]], fee: uint64, submit_on_chain: bool = True, ) -> Optional[TransactionRecord]: status = Status.PENDING if submit_on_chain else Status.PENDING_BATCH - await self.batch_insert(tree_id=tree_id, changelist=changelist, status=status) + await self.batch_insert(store_id=store_id, changelist=changelist, status=status) await self.data_store.clean_node_table() if submit_on_chain: - return await self.publish_update(tree_id=tree_id, fee=fee) + return await self.publish_update(store_id=store_id, fee=fee) else: return None - async def _get_publishable_root_hash(self, tree_id: bytes32) -> bytes32: - pending_root: Optional[Root] = await self.data_store.get_pending_root(tree_id=tree_id) + async def _get_publishable_root_hash(self, store_id: bytes32) -> bytes32: + pending_root: Optional[Root] = await self.data_store.get_pending_root(store_id=store_id) if pending_root is None: raise Exception("Latest root is already confirmed.") if pending_root.status == Status.PENDING_BATCH: @@ -290,15 +290,15 @@ async def multistore_batch_update( store_ids.add(store_id) status = Status.PENDING if submit_on_chain else Status.PENDING_BATCH - await self.batch_insert(tree_id=store_id, changelist=changelist, status=status) + await self.batch_insert(store_id=store_id, changelist=changelist, status=status) await self.data_store.clean_node_table() if submit_on_chain: update_dictionary: Dict[bytes32, bytes32] = {} for store_id in store_ids: - await self._update_confirmation_status(tree_id=store_id) - root_hash = await self._get_publishable_root_hash(tree_id=store_id) + await self._update_confirmation_status(store_id=store_id) + root_hash = await self._get_publishable_root_hash(store_id=store_id) update_dictionary[store_id] = root_hash transaction_records = await self.wallet_rpc.dl_update_multiple(update_dictionary=update_dictionary, fee=fee) return transaction_records @@ -307,19 +307,19 @@ async def multistore_batch_update( async def submit_pending_root( self, - tree_id: bytes32, + store_id: bytes32, fee: uint64, ) -> TransactionRecord: - await self._update_confirmation_status(tree_id=tree_id) + await self._update_confirmation_status(store_id=store_id) - pending_root: Optional[Root] = await self.data_store.get_pending_root(tree_id=tree_id) + pending_root: Optional[Root] = await self.data_store.get_pending_root(store_id=store_id) if pending_root is None: raise Exception("Latest root is already confirmed.") if pending_root.status == Status.PENDING: raise Exception("Pending root is already submitted.") await self.data_store.change_root_status(pending_root, Status.PENDING) - return await self.publish_update(tree_id, fee) + return await self.publish_update(store_id, fee) async def submit_all_pending_roots(self, fee: uint64) -> List[TransactionRecord]: pending_roots = await self.data_store.get_all_pending_batches_roots() @@ -328,34 +328,34 @@ async def submit_all_pending_roots(self, fee: uint64) -> List[TransactionRecord] raise Exception("No pending roots found to submit") for pending_root in pending_roots: root_hash = pending_root.node_hash if pending_root.node_hash is not None else self.none_bytes - update_dictionary[pending_root.tree_id] = root_hash + update_dictionary[pending_root.store_id] = root_hash await 
self.data_store.change_root_status(pending_root, Status.PENDING) transaction_records = await self.wallet_rpc.dl_update_multiple(update_dictionary=update_dictionary, fee=fee) return transaction_records async def batch_insert( self, - tree_id: bytes32, + store_id: bytes32, changelist: List[Dict[str, Any]], status: Status = Status.PENDING, enable_batch_autoinsert: Optional[bool] = None, ) -> bytes32: - await self._update_confirmation_status(tree_id=tree_id) + await self._update_confirmation_status(store_id=store_id) async with self.data_store.transaction(): - pending_root: Optional[Root] = await self.data_store.get_pending_root(tree_id=tree_id) + pending_root: Optional[Root] = await self.data_store.get_pending_root(store_id=store_id) if pending_root is not None and pending_root.status == Status.PENDING: raise Exception("Already have a pending root waiting for confirmation.") # check before any DL changes that this singleton is currently owned by this wallet singleton_records: List[SingletonRecord] = await self.get_owned_stores() - if not any(tree_id == singleton.launcher_id for singleton in singleton_records): - raise ValueError(f"Singleton with launcher ID {tree_id} is not owned by DL Wallet") + if not any(store_id == singleton.launcher_id for singleton in singleton_records): + raise ValueError(f"Singleton with launcher ID {store_id} is not owned by DL Wallet") t1 = time.monotonic() if enable_batch_autoinsert is None: enable_batch_autoinsert = self.config.get("enable_batch_autoinsert", True) - batch_hash = await self.data_store.insert_batch(tree_id, changelist, status, enable_batch_autoinsert) + batch_hash = await self.data_store.insert_batch(store_id, changelist, status, enable_batch_autoinsert) t2 = time.monotonic() self.log.info(f"Data store batch update process time: {t2 - t1}.") # todo return empty node hash from get_tree_root @@ -368,13 +368,13 @@ async def batch_insert( async def publish_update( self, - tree_id: bytes32, + store_id: bytes32, fee: uint64, ) -> TransactionRecord: - await self._update_confirmation_status(tree_id=tree_id) - root_hash = await self._get_publishable_root_hash(tree_id=tree_id) + await self._update_confirmation_status(store_id=store_id) + root_hash = await self._get_publishable_root_hash(store_id=store_id) transaction_record = await self.wallet_rpc.dl_update_root( - launcher_id=tree_id, + launcher_id=store_id, new_root=root_hash, fee=fee, ) @@ -386,22 +386,22 @@ async def get_key_value_hash( key: bytes, root_hash: Optional[bytes32] = None, ) -> bytes32: - await self._update_confirmation_status(tree_id=store_id) + await self._update_confirmation_status(store_id=store_id) async with self.data_store.transaction(): - node = await self.data_store.get_node_by_key(tree_id=store_id, key=key, root_hash=root_hash) + node = await self.data_store.get_node_by_key(store_id=store_id, key=key, root_hash=root_hash) return node.hash async def get_value(self, store_id: bytes32, key: bytes, root_hash: Optional[bytes32] = None) -> bytes: - await self._update_confirmation_status(tree_id=store_id) + await self._update_confirmation_status(store_id=store_id) async with self.data_store.transaction(): # this either returns the node or raises an exception - res = await self.data_store.get_node_by_key(tree_id=store_id, key=key, root_hash=root_hash) + res = await self.data_store.get_node_by_key(store_id=store_id, key=key, root_hash=root_hash) return res.value async def get_keys_values(self, store_id: bytes32, root_hash: Optional[bytes32]) -> List[TerminalNode]: - await 
self._update_confirmation_status(tree_id=store_id) + await self._update_confirmation_status(store_id=store_id) res = await self.data_store.get_keys_values(store_id, root_hash) if res is None: @@ -415,7 +415,7 @@ async def get_keys_values_paginated( page: int, max_page_size: Optional[int] = None, ) -> KeysValuesPaginationData: - await self._update_confirmation_status(tree_id=store_id) + await self._update_confirmation_status(store_id=store_id) if max_page_size is None: max_page_size = 40 * 1024 * 1024 @@ -423,7 +423,7 @@ async def get_keys_values_paginated( return res async def get_keys(self, store_id: bytes32, root_hash: Optional[bytes32]) -> List[bytes]: - await self._update_confirmation_status(tree_id=store_id) + await self._update_confirmation_status(store_id=store_id) res = await self.data_store.get_keys(store_id, root_hash) return res @@ -435,7 +435,7 @@ async def get_keys_paginated( page: int, max_page_size: Optional[int] = None, ) -> KeysPaginationData: - await self._update_confirmation_status(tree_id=store_id) + await self._update_confirmation_status(store_id=store_id) if max_page_size is None: max_page_size = 40 * 1024 * 1024 @@ -443,9 +443,9 @@ async def get_keys_paginated( return res async def get_ancestors(self, node_hash: bytes32, store_id: bytes32) -> List[InternalNode]: - await self._update_confirmation_status(tree_id=store_id) + await self._update_confirmation_status(store_id=store_id) - res = await self.data_store.get_ancestors(node_hash=node_hash, tree_id=store_id) + res = await self.data_store.get_ancestors(node_hash=node_hash, store_id=store_id) if res is None: self.log.error("Failed to get ancestors") return res @@ -457,9 +457,9 @@ async def get_root(self, store_id: bytes32) -> Optional[SingletonRecord]: return latest async def get_local_root(self, store_id: bytes32) -> Optional[bytes32]: - await self._update_confirmation_status(tree_id=store_id) + await self._update_confirmation_status(store_id=store_id) - res = await self.data_store.get_tree_root(tree_id=store_id) + res = await self.data_store.get_tree_root(store_id=store_id) if res is None: self.log.error(f"Failed to get root for {store_id.hex()}") return None @@ -477,26 +477,26 @@ async def get_root_history(self, store_id: bytes32) -> List[SingletonRecord]: prev = record return root_history - async def _update_confirmation_status(self, tree_id: bytes32) -> None: + async def _update_confirmation_status(self, store_id: bytes32) -> None: async with self.data_store.transaction(): try: - root = await self.data_store.get_tree_root(tree_id=tree_id) + root = await self.data_store.get_tree_root(store_id=store_id) except Exception: root = None - singleton_record: Optional[SingletonRecord] = await self.wallet_rpc.dl_latest_singleton(tree_id, True) + singleton_record: Optional[SingletonRecord] = await self.wallet_rpc.dl_latest_singleton(store_id, True) if singleton_record is None: return if root is None: - pending_root = await self.data_store.get_pending_root(tree_id=tree_id) + pending_root = await self.data_store.get_pending_root(store_id=store_id) if pending_root is not None and pending_root.status == Status.PENDING: if pending_root.generation == 0 and pending_root.node_hash is None: await self.data_store.change_root_status(pending_root, Status.COMMITTED) - await self.data_store.clear_pending_roots(tree_id=tree_id) + await self.data_store.clear_pending_roots(store_id=store_id) return else: root = None if root is None: - self.log.info(f"Don't have pending root for {tree_id}.") + self.log.info(f"Don't have pending root for 
{store_id}.") return if root.generation == singleton_record.generation: return @@ -507,7 +507,7 @@ async def _update_confirmation_status(self, tree_id: bytes32) -> None: ) return wallet_history = await self.wallet_rpc.dl_history( - launcher_id=tree_id, + launcher_id=store_id, min_generation=uint32(root.generation + 1), max_generation=singleton_record.generation, ) @@ -518,11 +518,11 @@ async def _update_confirmation_status(self, tree_id: bytes32) -> None: generation_shift += 1 new_hashes.pop(0) if generation_shift > 0: - await self.data_store.clear_pending_roots(tree_id=tree_id) - await self.data_store.shift_root_generations(tree_id=tree_id, shift_size=generation_shift) + await self.data_store.clear_pending_roots(store_id=store_id) + await self.data_store.shift_root_generations(store_id=store_id, shift_size=generation_shift) else: expected_root_hash = None if new_hashes[0] == self.none_bytes else new_hashes[0] - pending_root = await self.data_store.get_pending_root(tree_id=tree_id) + pending_root = await self.data_store.get_pending_root(store_id=store_id) if ( pending_root is not None and pending_root.generation == root.generation + 1 @@ -530,31 +530,31 @@ async def _update_confirmation_status(self, tree_id: bytes32) -> None: and pending_root.status == Status.PENDING ): await self.data_store.change_root_status(pending_root, Status.COMMITTED) - await self.data_store.build_ancestor_table_for_latest_root(tree_id=tree_id) - await self.data_store.clear_pending_roots(tree_id=tree_id) + await self.data_store.build_ancestor_table_for_latest_root(store_id=store_id) + await self.data_store.clear_pending_roots(store_id=store_id) - async def fetch_and_validate(self, tree_id: bytes32) -> None: - singleton_record: Optional[SingletonRecord] = await self.wallet_rpc.dl_latest_singleton(tree_id, True) + async def fetch_and_validate(self, store_id: bytes32) -> None: + singleton_record: Optional[SingletonRecord] = await self.wallet_rpc.dl_latest_singleton(store_id, True) if singleton_record is None: - self.log.info(f"Fetch data: No singleton record for {tree_id}.") + self.log.info(f"Fetch data: No singleton record for {store_id}.") return if singleton_record.generation == uint32(0): - self.log.info(f"Fetch data: No data on chain for {tree_id}.") + self.log.info(f"Fetch data: No data on chain for {store_id}.") return - await self._update_confirmation_status(tree_id=tree_id) + await self._update_confirmation_status(store_id=store_id) - if not await self.data_store.tree_id_exists(tree_id=tree_id): - await self.data_store.create_tree(tree_id=tree_id, status=Status.COMMITTED) + if not await self.data_store.store_id_exists(store_id=store_id): + await self.data_store.create_tree(store_id=store_id, status=Status.COMMITTED) timestamp = int(time.time()) - servers_info = await self.data_store.get_available_servers_for_store(tree_id, timestamp) + servers_info = await self.data_store.get_available_servers_for_store(store_id, timestamp) # TODO: maybe append a random object to the whole DataLayer class? random.shuffle(servers_info) for server_info in servers_info: url = server_info.url - root = await self.data_store.get_tree_root(tree_id=tree_id) + root = await self.data_store.get_tree_root(store_id=store_id) if root.generation > singleton_record.generation: self.log.info( "Fetch data: local DL store is ahead of chain generation. 
" @@ -562,18 +562,18 @@ async def fetch_and_validate(self, tree_id: bytes32) -> None: ) break if root.generation == singleton_record.generation: - self.log.info(f"Fetch data: wallet generation matching on-chain generation: {tree_id}.") + self.log.info(f"Fetch data: wallet generation matching on-chain generation: {store_id}.") break self.log.info( - f"Downloading files {tree_id}. " + f"Downloading files {store_id}. " f"Current wallet generation: {root.generation}. " f"Target wallet generation: {singleton_record.generation}. " f"Server used: {url}." ) to_download = await self.wallet_rpc.dl_history( - launcher_id=tree_id, + launcher_id=store_id, min_generation=uint32(root.generation + 1), max_generation=singleton_record.generation, ) @@ -582,7 +582,7 @@ async def fetch_and_validate(self, tree_id: bytes32) -> None: proxy_url = self.config.get("proxy_url", None) success = await insert_from_delta_file( self.data_store, - tree_id, + store_id, root.generation, [record.root for record in reversed(to_download)], server_info, @@ -590,22 +590,22 @@ async def fetch_and_validate(self, tree_id: bytes32) -> None: timeout, self.log, proxy_url, - await self.get_downloader(tree_id, url), + await self.get_downloader(store_id, url), ) if success: self.log.info( - f"Finished downloading and validating {tree_id}. " + f"Finished downloading and validating {store_id}. " f"Wallet generation saved: {singleton_record.generation}. " f"Root hash saved: {singleton_record.root}." ) break except aiohttp.client_exceptions.ClientConnectorError: - self.log.warning(f"Server {url} unavailable for {tree_id}.") + self.log.warning(f"Server {url} unavailable for {store_id}.") except Exception as e: - self.log.warning(f"Exception while downloading files for {tree_id}: {e} {traceback.format_exc()}.") + self.log.warning(f"Exception while downloading files for {store_id}: {e} {traceback.format_exc()}.") - async def get_downloader(self, tree_id: bytes32, url: str) -> Optional[PluginRemote]: - request_json = {"store_id": tree_id.hex(), "url": url} + async def get_downloader(self, store_id: bytes32, url: str) -> Optional[PluginRemote]: + request_json = {"store_id": store_id.hex(), "url": url} for d in self.downloaders: async with aiohttp.ClientSession() as session: try: @@ -621,43 +621,43 @@ async def get_downloader(self, tree_id: bytes32, url: str) -> Optional[PluginRem self.log.error(f"get_downloader could not get response: {type(e).__name__}: {e}") return None - async def clean_old_full_tree_files(self, tree_id: bytes32) -> None: - singleton_record: Optional[SingletonRecord] = await self.wallet_rpc.dl_latest_singleton(tree_id, True) + async def clean_old_full_tree_files(self, store_id: bytes32) -> None: + singleton_record: Optional[SingletonRecord] = await self.wallet_rpc.dl_latest_singleton(store_id, True) if singleton_record is None: return - await self._update_confirmation_status(tree_id=tree_id) + await self._update_confirmation_status(store_id=store_id) - root = await self.data_store.get_tree_root(tree_id=tree_id) + root = await self.data_store.get_tree_root(store_id=store_id) latest_generation = root.generation full_tree_first_publish_generation = max(0, latest_generation - self.maximum_full_file_count + 1) foldername = self.server_files_location for generation in range(full_tree_first_publish_generation - 1, 0, -1): - root = await self.data_store.get_tree_root(tree_id=tree_id, generation=generation) - file_exists = delete_full_file_if_exists(foldername, tree_id, root) + root = await 
self.data_store.get_tree_root(store_id=store_id, generation=generation) + file_exists = delete_full_file_if_exists(foldername, store_id, root) if not file_exists: break - async def upload_files(self, tree_id: bytes32) -> None: - uploaders = await self.get_uploaders(tree_id) - singleton_record: Optional[SingletonRecord] = await self.wallet_rpc.dl_latest_singleton(tree_id, True) + async def upload_files(self, store_id: bytes32) -> None: + uploaders = await self.get_uploaders(store_id) + singleton_record: Optional[SingletonRecord] = await self.wallet_rpc.dl_latest_singleton(store_id, True) if singleton_record is None: - self.log.info(f"Upload files: no on-chain record for {tree_id}.") + self.log.info(f"Upload files: no on-chain record for {store_id}.") return - await self._update_confirmation_status(tree_id=tree_id) + await self._update_confirmation_status(store_id=store_id) - root = await self.data_store.get_tree_root(tree_id=tree_id) + root = await self.data_store.get_tree_root(store_id=store_id) latest_generation = root.generation # Don't store full tree files before this generation. full_tree_first_publish_generation = max(0, latest_generation - self.maximum_full_file_count + 1) publish_generation = min(singleton_record.generation, 0 if root is None else root.generation) # If we make some batch updates, which get confirmed to the chain, we need to create the files. # We iterate back and write the missing files, until we find the files already written. - root = await self.data_store.get_tree_root(tree_id=tree_id, generation=publish_generation) + root = await self.data_store.get_tree_root(store_id=store_id, generation=publish_generation) while publish_generation > 0: write_file_result = await write_files_for_root( self.data_store, - tree_id, + store_id, root, self.server_files_location, full_tree_first_publish_generation, @@ -668,14 +668,14 @@ async def upload_files(self, tree_id: bytes32) -> None: try: if uploaders is not None and len(uploaders) > 0: request_json = { - "store_id": tree_id.hex(), + "store_id": store_id.hex(), "diff_filename": write_file_result.diff_tree.name, } if write_file_result.full_tree is not None: request_json["full_tree_filename"] = write_file_result.full_tree.name for uploader in uploaders: - self.log.info(f"Using uploader {uploader} for store {tree_id.hex()}") + self.log.info(f"Using uploader {uploader} for store {store_id.hex()}") async with aiohttp.ClientSession() as session: async with session.post( uploader.url + "/upload", @@ -685,7 +685,7 @@ async def upload_files(self, tree_id: bytes32) -> None: res_json = await response.json() if res_json["uploaded"]: self.log.info( - f"Uploaded files to {uploader} for store {tree_id.hex()} " + f"Uploaded files to {uploader} for store {store_id.hex()} " f"generation {publish_generation}" ) else: @@ -693,16 +693,16 @@ async def upload_files(self, tree_id: bytes32) -> None: f"Failed to upload files to, will retry later: {uploader} : {res_json}" ) except Exception as e: - self.log.error(f"Exception uploading files, will retry later: tree id {tree_id}") + self.log.error(f"Exception uploading files, will retry later: store id {store_id}") self.log.debug(f"Failed to upload files, cleaning local files: {type(e).__name__}: {e}") if write_file_result.full_tree is not None: os.remove(write_file_result.full_tree) os.remove(write_file_result.diff_tree) publish_generation -= 1 - root = await self.data_store.get_tree_root(tree_id=tree_id, generation=publish_generation) + root = await self.data_store.get_tree_root(store_id=store_id, 
generation=publish_generation) async def add_missing_files(self, store_id: bytes32, overwrite: bool, foldername: Optional[Path]) -> None: - root = await self.data_store.get_tree_root(tree_id=store_id) + root = await self.data_store.get_tree_root(store_id=store_id) latest_generation = root.generation full_tree_first_publish_generation = max(0, latest_generation - self.maximum_full_file_count + 1) singleton_record: Optional[SingletonRecord] = await self.wallet_rpc.dl_latest_singleton(store_id, True) @@ -713,7 +713,7 @@ async def add_missing_files(self, store_id: bytes32, overwrite: bool, foldername server_files_location = foldername if foldername is not None else self.server_files_location files = [] for generation in range(1, max_generation + 1): - root = await self.data_store.get_tree_root(tree_id=store_id, generation=generation) + root = await self.data_store.get_tree_root(store_id=store_id, generation=generation) res = await write_files_for_root( self.data_store, store_id, @@ -745,10 +745,10 @@ async def add_missing_files(self, store_id: bytes32, overwrite: bool, foldername async def subscribe(self, store_id: bytes32, urls: List[str]) -> Subscription: parsed_urls = [url.rstrip("/") for url in urls] subscription = Subscription(store_id, [ServerInfo(url, 0, 0) for url in parsed_urls]) - await self.wallet_rpc.dl_track_new(subscription.tree_id) + await self.wallet_rpc.dl_track_new(subscription.store_id) async with self.subscription_lock: await self.data_store.subscribe(subscription) - self.log.info(f"Done adding subscription: {subscription.tree_id}") + self.log.info(f"Done adding subscription: {subscription.store_id}") return subscription async def remove_subscriptions(self, store_id: bytes32, urls: List[str]) -> None: @@ -756,31 +756,31 @@ async def remove_subscriptions(self, store_id: bytes32, urls: List[str]) -> None async with self.subscription_lock: await self.data_store.remove_subscriptions(store_id, parsed_urls) - async def unsubscribe(self, tree_id: bytes32, retain_data: bool) -> None: + async def unsubscribe(self, store_id: bytes32, retain_data: bool) -> None: async with self.subscription_lock: # Unsubscribe is processed later, after all fetching of data is done, to avoid races. - self.unsubscribe_data_queue.append(UnsubscribeData(tree_id, retain_data)) + self.unsubscribe_data_queue.append(UnsubscribeData(store_id, retain_data)) - async def process_unsubscribe(self, tree_id: bytes32, retain_data: bool) -> None: + async def process_unsubscribe(self, store_id: bytes32, retain_data: bool) -> None: # This function already acquired `subscriptions_lock`. 
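The comment above points at a deliberate two-step design: unsubscribe() only records the request, and the periodic manage-data loop drains the queue under the same lock once fetching is done. A minimal asyncio sketch of that queue-then-drain pattern (Manager is a stand-in for the DataLayer class; only UnsubscribeData and the attribute names are mirrored from the diff):

import asyncio
from dataclasses import dataclass
from typing import List


@dataclass(frozen=True)
class UnsubscribeData:
    store_id: bytes
    retain_data: bool


class Manager:
    def __init__(self) -> None:
        self.subscription_lock = asyncio.Lock()
        self.unsubscribe_data_queue: List[UnsubscribeData] = []

    async def unsubscribe(self, store_id: bytes, retain_data: bool) -> None:
        # Only enqueue; the actual removal happens later, after data fetching,
        # so an in-flight fetch never races with the teardown.
        async with self.subscription_lock:
            self.unsubscribe_data_queue.append(UnsubscribeData(store_id, retain_data))

    async def drain_unsubscribes(self) -> None:
        # Called once per manage-data cycle, with no fetch in progress.
        async with self.subscription_lock:
            for item in self.unsubscribe_data_queue:
                print(f"unsubscribing {item.store_id.hex()} retain={item.retain_data}")
            self.unsubscribe_data_queue.clear()


async def main() -> None:
    manager = Manager()
    await manager.unsubscribe(b"\x01" * 32, retain_data=False)
    await manager.drain_unsubscribes()


asyncio.run(main())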
subscriptions = await self.data_store.get_subscriptions() - if tree_id not in (subscription.tree_id for subscription in subscriptions): - raise RuntimeError("No subscription found for the given tree_id.") + if store_id not in (subscription.store_id for subscription in subscriptions): + raise RuntimeError("No subscription found for the given store_id.") filenames: List[str] = [] - if await self.data_store.tree_id_exists(tree_id) and not retain_data: - generation = await self.data_store.get_tree_generation(tree_id) - all_roots = await self.data_store.get_roots_between(tree_id, 1, generation + 1) + if await self.data_store.store_id_exists(store_id) and not retain_data: + generation = await self.data_store.get_tree_generation(store_id) + all_roots = await self.data_store.get_roots_between(store_id, 1, generation + 1) for root in all_roots: root_hash = root.node_hash if root.node_hash is not None else self.none_bytes - filenames.append(get_full_tree_filename(tree_id, root_hash, root.generation)) - filenames.append(get_delta_filename(tree_id, root_hash, root.generation)) + filenames.append(get_full_tree_filename(store_id, root_hash, root.generation)) + filenames.append(get_delta_filename(store_id, root_hash, root.generation)) # stop tracking first, then unsubscribe from the data store - await self.wallet_rpc.dl_stop_tracking(tree_id) - await self.data_store.unsubscribe(tree_id) + await self.wallet_rpc.dl_stop_tracking(store_id) + await self.data_store.unsubscribe(store_id) if not retain_data: - await self.data_store.delete_store_data(tree_id) + await self.data_store.delete_store_data(store_id) - self.log.info(f"Unsubscribed to {tree_id}") + self.log.info(f"Unsubscribed from {store_id}") for filename in filenames: file_path = self.server_files_location.joinpath(filename) try: @@ -801,30 +801,30 @@ async def add_mirror(self, store_id: bytes32, urls: List[str], amount: uint64, f async def delete_mirror(self, coin_id: bytes32, fee: uint64) -> None: await self.wallet_rpc.dl_delete_mirror(coin_id, fee) - async def get_mirrors(self, tree_id: bytes32) -> List[Mirror]: - mirrors: List[Mirror] = await self.wallet_rpc.dl_get_mirrors(tree_id) + async def get_mirrors(self, store_id: bytes32) -> List[Mirror]: + mirrors: List[Mirror] = await self.wallet_rpc.dl_get_mirrors(store_id) return [mirror for mirror in mirrors if mirror.urls] - async def update_subscriptions_from_wallet(self, tree_id: bytes32) -> None: - mirrors: List[Mirror] = await self.wallet_rpc.dl_get_mirrors(tree_id) + async def update_subscriptions_from_wallet(self, store_id: bytes32) -> None: + mirrors: List[Mirror] = await self.wallet_rpc.dl_get_mirrors(store_id) urls: List[str] = [] for mirror in mirrors: urls = urls + [url.decode("utf8") for url in mirror.urls] urls = [url.rstrip("/") for url in urls] - await self.data_store.update_subscriptions_from_wallet(tree_id, urls) + await self.data_store.update_subscriptions_from_wallet(store_id, urls) async def get_owned_stores(self) -> List[SingletonRecord]: return await self.wallet_rpc.dl_owned_singletons() - async def get_kv_diff(self, tree_id: bytes32, hash_1: bytes32, hash_2: bytes32) -> Set[DiffData]: - return await self.data_store.get_kv_diff(tree_id, hash_1, hash_2) + async def get_kv_diff(self, store_id: bytes32, hash_1: bytes32, hash_2: bytes32) -> Set[DiffData]: + return await self.data_store.get_kv_diff(store_id, hash_1, hash_2) async def get_kv_diff_paginated( - self, tree_id: bytes32, hash_1: bytes32, hash_2: bytes32, page: int, max_page_size: Optional[int] = None + self, store_id: 
bytes32, hash_1: bytes32, hash_2: bytes32, page: int, max_page_size: Optional[int] = None ) -> KVDiffPaginationData: if max_page_size is None: max_page_size = 40 * 1024 * 1024 - return await self.data_store.get_kv_diff_paginated(tree_id, page, max_page_size, hash_1, hash_2) + return await self.data_store.get_kv_diff_paginated(store_id, page, max_page_size, hash_1, hash_2) async def periodically_manage_data(self) -> None: manage_data_interval = self.config.get("manage_data_interval", 60) @@ -833,7 +833,7 @@ async def periodically_manage_data(self) -> None: try: subscriptions = await self.data_store.get_subscriptions() for subscription in subscriptions: - await self.wallet_rpc.dl_track_new(subscription.tree_id) + await self.wallet_rpc.dl_track_new(subscription.store_id) break except aiohttp.client_exceptions.ClientConnectorError: pass @@ -852,11 +852,11 @@ async def periodically_manage_data(self) -> None: async with self.subscription_lock: subscriptions = await self.data_store.get_subscriptions() - # Subscribe to all local tree_ids that we can find on chain. - local_tree_ids = await self.data_store.get_tree_ids() - subscription_tree_ids = {subscription.tree_id for subscription in subscriptions} - for local_id in local_tree_ids: - if local_id not in subscription_tree_ids: + # Subscribe to all local store_ids that we can find on chain. + local_store_ids = await self.data_store.get_store_ids() + subscription_store_ids = {subscription.store_id for subscription in subscriptions} + for local_id in local_store_ids: + if local_id not in subscription_store_ids: try: subscription = await self.subscribe(local_id, []) subscriptions.insert(0, subscription) @@ -882,7 +882,7 @@ async def periodically_manage_data(self) -> None: # Do unsubscribes after the fetching of data is complete, to avoid races. 
async with self.subscription_lock: for unsubscribe_data in self.unsubscribe_data_queue: - await self.process_unsubscribe(unsubscribe_data.tree_id, unsubscribe_data.retain_data) + await self.process_unsubscribe(unsubscribe_data.store_id, unsubscribe_data.retain_data) self.unsubscribe_data_queue.clear() await asyncio.sleep(manage_data_interval) @@ -894,10 +894,10 @@ async def update_subscription( subscription = job.input try: - await self.update_subscriptions_from_wallet(subscription.tree_id) - await self.fetch_and_validate(subscription.tree_id) - await self.upload_files(subscription.tree_id) - await self.clean_old_full_tree_files(subscription.tree_id) + await self.update_subscriptions_from_wallet(subscription.store_id) + await self.fetch_and_validate(subscription.store_id) + await self.upload_files(subscription.store_id) + await self.clean_old_full_tree_files(subscription.store_id) except Exception as e: self.log.error(f"Exception while fetching data: {type(e)} {e} {traceback.format_exc()}.") @@ -939,7 +939,7 @@ async def build_offer_changelist( async def process_offered_stores(self, offer_stores: Tuple[OfferStore, ...]) -> Dict[bytes32, StoreProofs]: for offer_store in offer_stores: - await self._update_confirmation_status(tree_id=offer_store.store_id) + await self._update_confirmation_status(store_id=offer_store.store_id) async with self.data_store.transaction(): our_store_proofs: Dict[bytes32, StoreProofs] = {} @@ -951,7 +951,7 @@ async def process_offered_stores(self, offer_stores: Tuple[OfferStore, ...]) -> if len(changelist) > 0: new_root_hash = await self.batch_insert( - tree_id=offer_store.store_id, + store_id=offer_store.store_id, changelist=changelist, enable_batch_autoinsert=False, ) @@ -973,7 +973,7 @@ async def process_offered_stores(self, offer_stores: Tuple[OfferStore, ...]) -> ) proof_of_inclusion = await self.data_store.get_proof_of_inclusion_by_hash( node_hash=node_hash, - tree_id=offer_store.store_id, + store_id=offer_store.store_id, root_hash=new_root_hash, ) proof = Proof( @@ -1149,14 +1149,14 @@ async def cancel_offer(self, trade_id: bytes32, secure: bool, fee: uint64) -> No if not secure: for store_id in store_ids: - await self.data_store.clear_pending_roots(tree_id=store_id) + await self.data_store.clear_pending_roots(store_id=store_id) async def get_sync_status(self, store_id: bytes32) -> SyncStatus: - await self._update_confirmation_status(tree_id=store_id) + await self._update_confirmation_status(store_id=store_id) - if not await self.data_store.tree_id_exists(tree_id=store_id): - raise Exception(f"No tree id stored in the local database for {store_id}") - root = await self.data_store.get_tree_root(tree_id=store_id) + if not await self.data_store.store_id_exists(store_id=store_id): + raise Exception(f"No store id stored in the local database for {store_id}") + root = await self.data_store.get_tree_root(store_id=store_id) singleton_record = await self.wallet_rpc.dl_latest_singleton(store_id, True) if singleton_record is None: raise Exception(f"No singleton found for {store_id}") @@ -1168,14 +1168,14 @@ async def get_sync_status(self, store_id: bytes32) -> SyncStatus: target_generation=singleton_record.generation, ) - async def get_uploaders(self, tree_id: bytes32) -> List[PluginRemote]: + async def get_uploaders(self, store_id: bytes32) -> List[PluginRemote]: uploaders = [] for uploader in self.uploaders: async with aiohttp.ClientSession() as session: try: async with session.post( uploader.url + "/handle_upload", - json={"store_id": tree_id.hex()}, + 
json={"store_id": store_id.hex()}, headers=uploader.headers, ) as response: res_json = await response.json() diff --git a/chia/data_layer/data_layer_errors.py b/chia/data_layer/data_layer_errors.py index b3bbbc2aadf9..7bb2eb739a67 100644 --- a/chia/data_layer/data_layer_errors.py +++ b/chia/data_layer/data_layer_errors.py @@ -14,11 +14,11 @@ def build_message_with_hashes(message: str, bytes_objects: Iterable[bytes]) -> s class TreeGenerationIncrementingError(IntegrityError): - def __init__(self, tree_ids: List[bytes32]) -> None: + def __init__(self, store_ids: List[bytes32]) -> None: super().__init__( build_message_with_hashes( message="Found trees with generations not properly incrementing:", - bytes_objects=tree_ids, + bytes_objects=store_ids, ) ) diff --git a/chia/data_layer/data_layer_util.py b/chia/data_layer/data_layer_util.py index 8ad6dd709d9b..026236626a92 100644 --- a/chia/data_layer/data_layer_util.py +++ b/chia/data_layer/data_layer_util.py @@ -90,8 +90,8 @@ async def _debug_dump(db: DBWrapper2, description: str = "") -> None: async def _dot_dump(data_store: DataStore, store_id: bytes32, root_hash: bytes32) -> str: - terminal_nodes = await data_store.get_keys_values(tree_id=store_id, root_hash=root_hash) - internal_nodes = await data_store.get_internal_nodes(tree_id=store_id, root_hash=root_hash) + terminal_nodes = await data_store.get_keys_values(store_id=store_id, root_hash=root_hash) + internal_nodes = await data_store.get_internal_nodes(store_id=store_id, root_hash=root_hash) n = 8 @@ -315,7 +315,7 @@ def other_child_side(self, hash: bytes32) -> Side: @dataclass(frozen=True) class Root: - tree_id: bytes32 + store_id: bytes32 node_hash: Optional[bytes32] generation: int status: Status @@ -329,7 +329,7 @@ def from_row(cls, row: aiosqlite.Row) -> Root: node_hash = bytes32(raw_node_hash) return cls( - tree_id=bytes32(row["tree_id"]), + store_id=bytes32(row["tree_id"]), node_hash=node_hash, generation=row["generation"], status=Status(row["status"]), @@ -337,7 +337,7 @@ def from_row(cls, row: aiosqlite.Row) -> Root: def to_row(self) -> Dict[str, Any]: return { - "tree_id": self.tree_id, + "tree_id": self.store_id, "node_hash": self.node_hash, "generation": self.generation, "status": self.status.value, @@ -346,7 +346,7 @@ def to_row(self) -> Dict[str, Any]: @classmethod def unmarshal(cls, marshalled: Dict[str, Any]) -> Root: return cls( - tree_id=bytes32.from_hexstr(marshalled["tree_id"]), + store_id=bytes32.from_hexstr(marshalled["tree_id"]), node_hash=None if marshalled["node_hash"] is None else bytes32.from_hexstr(marshalled["node_hash"]), generation=marshalled["generation"], status=Status(marshalled["status"]), @@ -354,7 +354,7 @@ def unmarshal(cls, marshalled: Dict[str, Any]) -> Root: def marshal(self) -> Dict[str, Any]: return { - "tree_id": self.tree_id.hex(), + "tree_id": self.store_id.hex(), "node_hash": None if self.node_hash is None else self.node_hash.hex(), "generation": self.generation, "status": self.status.value, @@ -376,7 +376,7 @@ class ServerInfo: @dataclass(frozen=True) class Subscription: - tree_id: bytes32 + store_id: bytes32 servers_info: List[ServerInfo] @@ -701,7 +701,7 @@ class ClearPendingRootsResponse: success: bool root: Optional[Root] - # tree_id: bytes32 + # store_id: bytes32 # node_hash: Optional[bytes32] # generation: int # status: Status @@ -765,7 +765,7 @@ class InsertResult: @dataclasses.dataclass(frozen=True) class UnsubscribeData: - tree_id: bytes32 + store_id: bytes32 retain_data: bool diff --git a/chia/data_layer/data_store.py 
b/chia/data_layer/data_store.py index 470fc4be4b04..2355b7162381 100644 --- a/chia/data_layer/data_store.py +++ b/chia/data_layer/data_store.py @@ -217,28 +217,28 @@ async def migrate_db(self) -> None: async def _insert_root( self, - tree_id: bytes32, + store_id: bytes32, node_hash: Optional[bytes32], status: Status, generation: Optional[int] = None, ) -> Root: # This should be replaced by an SQLite schema level check. # https://github.com/Chia-Network/chia-blockchain/pull/9284 - tree_id = bytes32(tree_id) + store_id = bytes32(store_id) async with self.db_wrapper.writer() as writer: if generation is None: try: - existing_generation = await self.get_tree_generation(tree_id=tree_id) + existing_generation = await self.get_tree_generation(store_id=store_id) except Exception as e: - if not str(e).startswith("No generations found for tree ID:"): + if not str(e).startswith("No generations found for store ID:"): raise generation = 0 else: generation = existing_generation + 1 new_root = Root( - tree_id=tree_id, + store_id=store_id, node_hash=None if node_hash is None else node_hash, generation=generation, status=status, @@ -257,7 +257,7 @@ async def _insert_root( if node_hash is not None and status == Status.COMMITTED: values = { "hash": node_hash, - "tree_id": tree_id, + "tree_id": store_id, "generation": generation, } await writer.execute( @@ -350,7 +350,7 @@ async def _insert_ancestor_table( self, left_hash: bytes32, right_hash: bytes32, - tree_id: bytes32, + store_id: bytes32, generation: int, ) -> None: node_hash = internal_hash(left_hash=left_hash, right_hash=right_hash) @@ -360,7 +360,7 @@ async def _insert_ancestor_table( values = { "hash": hash, "ancestor": node_hash, - "tree_id": tree_id, + "tree_id": store_id, "generation": generation, } try: @@ -383,7 +383,7 @@ async def _insert_ancestor_table( WHERE hash == :hash AND generation == :generation AND tree_id == :tree_id LIMIT 1 """, - {"hash": hash, "generation": generation, "tree_id": tree_id}, + {"hash": hash, "generation": generation, "tree_id": store_id}, ) as cursor: result = await cursor.fetchone() @@ -398,7 +398,7 @@ async def _insert_ancestor_table( if result_dict != values: raise Exception( "Requested insertion of ancestor, where ancestor differ, but other values are identical: " - f"{hash} {generation} {tree_id}" + f"{hash} {generation} {store_id}" ) from None async def _insert_terminal_node(self, key: bytes, value: bytes) -> bytes32: @@ -418,7 +418,7 @@ async def _insert_terminal_node(self, key: bytes, value: bytes) -> bytes32: return node_hash - async def get_pending_root(self, tree_id: bytes32) -> Optional[Root]: + async def get_pending_root(self, store_id: bytes32) -> Optional[Root]: async with self.db_wrapper.reader() as reader: cursor = await reader.execute( """ @@ -426,7 +426,7 @@ async def get_pending_root(self, tree_id: bytes32) -> Optional[Root]: AND status IN (:pending_status, :pending_batch_status) LIMIT 2 """, { - "tree_id": tree_id, + "tree_id": store_id, "pending_status": Status.PENDING.value, "pending_batch_status": Status.PENDING_BATCH.value, }, @@ -439,19 +439,19 @@ async def get_pending_root(self, tree_id: bytes32) -> Optional[Root]: maybe_extra_result = await cursor.fetchone() if maybe_extra_result is not None: - raise Exception(f"multiple pending roots found for id: {tree_id.hex()}") + raise Exception(f"multiple pending roots found for id: {store_id.hex()}") return Root.from_row(row=row) - async def clear_pending_roots(self, tree_id: bytes32) -> Optional[Root]: + async def clear_pending_roots(self, store_id: 
bytes32) -> Optional[Root]: async with self.db_wrapper.writer() as writer: - pending_root = await self.get_pending_root(tree_id=tree_id) + pending_root = await self.get_pending_root(store_id=store_id) if pending_root is not None: await writer.execute( "DELETE FROM root WHERE tree_id == :tree_id AND status IN (:pending_status, :pending_batch_status)", { - "tree_id": tree_id, + "tree_id": store_id, "pending_status": Status.PENDING.value, "pending_batch_status": Status.PENDING_BATCH.value, }, @@ -459,11 +459,11 @@ async def clear_pending_roots(self, tree_id: bytes32) -> Optional[Root]: return pending_root - async def shift_root_generations(self, tree_id: bytes32, shift_size: int) -> None: + async def shift_root_generations(self, store_id: bytes32, shift_size: int) -> None: async with self.db_wrapper.writer(): - root = await self.get_tree_root(tree_id=tree_id) + root = await self.get_tree_root(store_id=store_id) for _ in range(shift_size): - await self._insert_root(tree_id=tree_id, node_hash=root.node_hash, status=Status.COMMITTED) + await self._insert_root(store_id=store_id, node_hash=root.node_hash, status=Status.COMMITTED) async def change_root_status(self, root: Root, status: Status = Status.PENDING) -> None: async with self.db_wrapper.writer() as writer: @@ -471,7 +471,7 @@ async def change_root_status(self, root: Root, status: Status = Status.PENDING) "UPDATE root SET status = ? WHERE tree_id=? and generation = ?", ( status.value, - root.tree_id, + root.store_id, root.generation, ), ) @@ -480,7 +480,7 @@ async def change_root_status(self, root: Root, status: Status = Status.PENDING) if root.node_hash is not None and status == Status.COMMITTED: values = { "hash": root.node_hash, - "tree_id": root.tree_id, + "tree_id": root.store_id, "generation": root.generation, } await writer.execute( @@ -503,18 +503,18 @@ async def _check_roots_are_incrementing(self) -> None: roots_by_tree: Dict[bytes32, List[Root]] = defaultdict(list) for root in roots: - roots_by_tree[root.tree_id].append(root) + roots_by_tree[root.store_id].append(root) bad_trees = [] - for tree_id, roots in roots_by_tree.items(): + for store_id, roots in roots_by_tree.items(): current_generation = roots[-1].generation expected_generations = list(range(current_generation + 1)) actual_generations = [root.generation for root in roots] if actual_generations != expected_generations: - bad_trees.append(tree_id) + bad_trees.append(store_id) if len(bad_trees) > 0: - raise TreeGenerationIncrementingError(tree_ids=bad_trees) + raise TreeGenerationIncrementingError(store_ids=bad_trees) async def _check_hashes(self) -> None: async with self.db_wrapper.reader() as reader: @@ -539,29 +539,29 @@ async def _check_hashes(self) -> None: _check_hashes, ) - async def create_tree(self, tree_id: bytes32, status: Status = Status.PENDING) -> bool: - await self._insert_root(tree_id=tree_id, node_hash=None, status=status) + async def create_tree(self, store_id: bytes32, status: Status = Status.PENDING) -> bool: + await self._insert_root(store_id=store_id, node_hash=None, status=status) return True - async def table_is_empty(self, tree_id: bytes32) -> bool: - tree_root = await self.get_tree_root(tree_id=tree_id) + async def table_is_empty(self, store_id: bytes32) -> bool: + tree_root = await self.get_tree_root(store_id=store_id) return tree_root.node_hash is None - async def get_tree_ids(self) -> Set[bytes32]: + async def get_store_ids(self) -> Set[bytes32]: async with self.db_wrapper.reader() as reader: cursor = await reader.execute("SELECT DISTINCT tree_id 
FROM root") - tree_ids = {bytes32(row["tree_id"]) async for row in cursor} + store_ids = {bytes32(row["tree_id"]) async for row in cursor} - return tree_ids + return store_ids - async def get_tree_generation(self, tree_id: bytes32) -> int: + async def get_tree_generation(self, store_id: bytes32) -> int: async with self.db_wrapper.reader() as reader: cursor = await reader.execute( "SELECT MAX(generation) FROM root WHERE tree_id == :tree_id AND status == :status", - {"tree_id": tree_id, "status": Status.COMMITTED.value}, + {"tree_id": store_id, "status": Status.COMMITTED.value}, ) row = await cursor.fetchone() @@ -571,12 +571,12 @@ async def get_tree_generation(self, tree_id: bytes32) -> int: if generation is not None: return generation - raise Exception(f"No generations found for tree ID: {tree_id.hex()}") + raise Exception(f"No generations found for store ID: {store_id.hex()}") - async def get_tree_root(self, tree_id: bytes32, generation: Optional[int] = None) -> Root: + async def get_tree_root(self, store_id: bytes32, generation: Optional[int] = None) -> Root: async with self.db_wrapper.reader() as reader: if generation is None: - generation = await self.get_tree_generation(tree_id=tree_id) + generation = await self.get_tree_generation(store_id=store_id) cursor = await reader.execute( """ SELECT * @@ -584,12 +584,12 @@ async def get_tree_root(self, tree_id: bytes32, generation: Optional[int] = None WHERE tree_id == :tree_id AND generation == :generation AND status == :status LIMIT 1 """, - {"tree_id": tree_id, "generation": generation, "status": Status.COMMITTED.value}, + {"tree_id": store_id, "generation": generation, "status": Status.COMMITTED.value}, ) row = await cursor.fetchone() if row is None: - raise Exception(f"unable to find root for id, generation: {tree_id.hex()}, {generation}") + raise Exception(f"unable to find root for id, generation: {store_id.hex()}, {generation}") return Root.from_row(row=row) @@ -602,16 +602,16 @@ async def get_all_pending_batches_roots(self) -> List[Root]: {"status": Status.PENDING_BATCH.value}, ) roots = [Root.from_row(row=row) async for row in cursor] - tree_ids = [root.tree_id for root in roots] - if len(set(tree_ids)) != len(tree_ids): + store_ids = [root.store_id for root in roots] + if len(set(store_ids)) != len(store_ids): raise Exception("Internal error: multiple pending batches for a store") return roots - async def tree_id_exists(self, tree_id: bytes32) -> bool: + async def store_id_exists(self, store_id: bytes32) -> bool: async with self.db_wrapper.reader() as reader: cursor = await reader.execute( "SELECT 1 FROM root WHERE tree_id == :tree_id AND status == :status LIMIT 1", - {"tree_id": tree_id, "status": Status.COMMITTED.value}, + {"tree_id": store_id, "status": Status.COMMITTED.value}, ) row = await cursor.fetchone() @@ -619,19 +619,19 @@ async def tree_id_exists(self, tree_id: bytes32) -> bool: return False return True - async def get_roots_between(self, tree_id: bytes32, generation_begin: int, generation_end: int) -> List[Root]: + async def get_roots_between(self, store_id: bytes32, generation_begin: int, generation_end: int) -> List[Root]: async with self.db_wrapper.reader() as reader: cursor = await reader.execute( "SELECT * FROM root WHERE tree_id == :tree_id " "AND generation >= :generation_begin AND generation < :generation_end ORDER BY generation ASC", - {"tree_id": tree_id, "generation_begin": generation_begin, "generation_end": generation_end}, + {"tree_id": store_id, "generation_begin": generation_begin, "generation_end": 
generation_end}, ) roots = [Root.from_row(row=row) async for row in cursor] return roots async def get_last_tree_root_by_hash( - self, tree_id: bytes32, hash: Optional[bytes32], max_generation: Optional[int] = None + self, store_id: bytes32, hash: Optional[bytes32], max_generation: Optional[int] = None ) -> Optional[Root]: async with self.db_wrapper.reader() as reader: max_generation_str = f"AND generation < {max_generation} " if max_generation is not None else "" @@ -641,7 +641,7 @@ async def get_last_tree_root_by_hash( f"{max_generation_str}" f"{node_hash_str}" "ORDER BY generation DESC LIMIT 1", - {"tree_id": tree_id, "node_hash": None if hash is None else hash}, + {"tree_id": store_id, "node_hash": None if hash is None else hash}, ) row = await cursor.fetchone() @@ -652,15 +652,15 @@ async def get_last_tree_root_by_hash( async def get_ancestors( self, node_hash: bytes32, - tree_id: bytes32, + store_id: bytes32, root_hash: Optional[bytes32] = None, ) -> List[InternalNode]: async with self.db_wrapper.reader() as reader: if root_hash is None: - root = await self.get_tree_root(tree_id=tree_id) + root = await self.get_tree_root(store_id=store_id) root_hash = root.node_hash if root_hash is None: - raise Exception(f"Root hash is unspecified for tree ID: {tree_id.hex()}") + raise Exception(f"Root hash is unspecified for store ID: {store_id.hex()}") cursor = await reader.execute( """ WITH RECURSIVE @@ -695,21 +695,21 @@ async def get_ancestors( async def get_ancestors_optimized( self, node_hash: bytes32, - tree_id: bytes32, + store_id: bytes32, generation: Optional[int] = None, root_hash: Optional[bytes32] = None, ) -> List[InternalNode]: async with self.db_wrapper.reader(): nodes = [] if root_hash is None: - root = await self.get_tree_root(tree_id=tree_id, generation=generation) + root = await self.get_tree_root(store_id=store_id, generation=generation) root_hash = root.node_hash if root_hash is None: return [] while True: - internal_node = await self._get_one_ancestor(node_hash, tree_id, generation) + internal_node = await self._get_one_ancestor(node_hash, store_id, generation) if internal_node is None: break nodes.append(internal_node) @@ -721,10 +721,10 @@ async def get_ancestors_optimized( return nodes - async def get_internal_nodes(self, tree_id: bytes32, root_hash: Optional[bytes32] = None) -> List[InternalNode]: + async def get_internal_nodes(self, store_id: bytes32, root_hash: Optional[bytes32] = None) -> List[InternalNode]: async with self.db_wrapper.reader() as reader: if root_hash is None: - root = await self.get_tree_root(tree_id=tree_id) + root = await self.get_tree_root(store_id=store_id) root_hash = root.node_hash cursor = await reader.execute( """ @@ -777,10 +777,10 @@ async def get_keys_values_cursor( {"root_hash": root_hash, "node_type": NodeType.TERMINAL}, ) - async def get_keys_values(self, tree_id: bytes32, root_hash: Optional[bytes32] = None) -> List[TerminalNode]: + async def get_keys_values(self, store_id: bytes32, root_hash: Optional[bytes32] = None) -> List[TerminalNode]: async with self.db_wrapper.reader() as reader: if root_hash is None: - root = await self.get_tree_root(tree_id=tree_id) + root = await self.get_tree_root(store_id=store_id) root_hash = root.node_hash cursor = await self.get_keys_values_cursor(reader, root_hash) @@ -806,11 +806,11 @@ async def get_keys_values(self, tree_id: bytes32, root_hash: Optional[bytes32] = return terminal_nodes async def get_keys_values_compressed( - self, tree_id: bytes32, root_hash: Optional[bytes32] = None + self, store_id: 
bytes32, root_hash: Optional[bytes32] = None ) -> KeysValuesCompressed: async with self.db_wrapper.reader() as reader: if root_hash is None: - root = await self.get_tree_root(tree_id=tree_id) + root = await self.get_tree_root(store_id=store_id) root_hash = root.node_hash cursor = await self.get_keys_values_cursor(reader, root_hash) @@ -830,9 +830,9 @@ async def get_keys_values_compressed( return KeysValuesCompressed(keys_values_hashed, key_hash_to_length, leaf_hash_to_length, root_hash) async def get_keys_paginated( - self, tree_id: bytes32, page: int, max_page_size: int, root_hash: Optional[bytes32] = None + self, store_id: bytes32, page: int, max_page_size: int, root_hash: Optional[bytes32] = None ) -> KeysPaginationData: - keys_values_compressed = await self.get_keys_values_compressed(tree_id, root_hash) + keys_values_compressed = await self.get_keys_values_compressed(store_id, root_hash) pagination_data = get_hashes_for_page(page, keys_values_compressed.key_hash_to_length, max_page_size) keys: List[bytes] = [] @@ -850,9 +850,9 @@ async def get_keys_paginated( ) async def get_keys_values_paginated( - self, tree_id: bytes32, page: int, max_page_size: int, root_hash: Optional[bytes32] = None + self, store_id: bytes32, page: int, max_page_size: int, root_hash: Optional[bytes32] = None ) -> KeysValuesPaginationData: - keys_values_compressed = await self.get_keys_values_compressed(tree_id, root_hash) + keys_values_compressed = await self.get_keys_values_compressed(store_id, root_hash) pagination_data = get_hashes_for_page(page, keys_values_compressed.leaf_hash_to_length, max_page_size) keys_values: List[TerminalNode] = [] @@ -869,10 +869,10 @@ async def get_keys_values_paginated( ) async def get_kv_diff_paginated( - self, tree_id: bytes32, page: int, max_page_size: int, hash1: bytes32, hash2: bytes32 + self, store_id: bytes32, page: int, max_page_size: int, hash1: bytes32, hash2: bytes32 ) -> KVDiffPaginationData: - old_pairs = await self.get_keys_values_compressed(tree_id, hash1) - new_pairs = await self.get_keys_values_compressed(tree_id, hash2) + old_pairs = await self.get_keys_values_compressed(store_id, hash1) + new_pairs = await self.get_keys_values_compressed(store_id, hash2) if len(old_pairs.keys_values_hashed) == 0 and hash1 != bytes32([0] * 32): return KVDiffPaginationData(1, 0, []) if len(new_pairs.keys_values_hashed) == 0 and hash2 != bytes32([0] * 32): @@ -919,12 +919,12 @@ async def get_node_type(self, node_hash: bytes32) -> NodeType: return NodeType(raw_node_type["node_type"]) async def get_terminal_node_for_seed( - self, tree_id: bytes32, seed: bytes32, root_hash: Optional[bytes32] = None + self, store_id: bytes32, seed: bytes32, root_hash: Optional[bytes32] = None ) -> Optional[bytes32]: path = "".join(reversed("".join(f"{b:08b}" for b in seed))) async with self.db_wrapper.reader() as reader: if root_hash is None: - root = await self.get_tree_root(tree_id) + root = await self.get_tree_root(store_id) root_hash = root.node_hash if root_hash is None: return None @@ -977,14 +977,14 @@ async def autoinsert( self, key: bytes, value: bytes, - tree_id: bytes32, + store_id: bytes32, use_optimized: bool = True, status: Status = Status.PENDING, root: Optional[Root] = None, ) -> InsertResult: async with self.db_wrapper.writer(): if root is None: - root = await self.get_tree_root(tree_id=tree_id) + root = await self.get_tree_root(store_id=store_id) was_empty = root.node_hash is None @@ -993,13 +993,13 @@ async def autoinsert( side = None else: seed = leaf_hash(key=key, value=value) - 
reference_node_hash = await self.get_terminal_node_for_seed(tree_id, seed, root_hash=root.node_hash) + reference_node_hash = await self.get_terminal_node_for_seed(store_id, seed, root_hash=root.node_hash) side = self.get_side_for_seed(seed) return await self.insert( key=key, value=value, - tree_id=tree_id, + store_id=store_id, reference_node_hash=reference_node_hash, side=side, use_optimized=use_optimized, @@ -1007,14 +1007,14 @@ async def autoinsert( root=root, ) - async def get_keys_values_dict(self, tree_id: bytes32, root_hash: Optional[bytes32] = None) -> Dict[bytes, bytes]: - pairs = await self.get_keys_values(tree_id=tree_id, root_hash=root_hash) + async def get_keys_values_dict(self, store_id: bytes32, root_hash: Optional[bytes32] = None) -> Dict[bytes, bytes]: + pairs = await self.get_keys_values(store_id=store_id, root_hash=root_hash) return {node.key: node.value for node in pairs} - async def get_keys(self, tree_id: bytes32, root_hash: Optional[bytes32] = None) -> List[bytes]: + async def get_keys(self, store_id: bytes32, root_hash: Optional[bytes32] = None) -> List[bytes]: async with self.db_wrapper.reader() as reader: if root_hash is None: - root = await self.get_tree_root(tree_id=tree_id) + root = await self.get_tree_root(store_id=store_id) root_hash = root.node_hash cursor = await reader.execute( """ @@ -1039,7 +1039,7 @@ async def get_keys(self, tree_id: bytes32, root_hash: Optional[bytes32] = None) async def get_ancestors_common( self, node_hash: bytes32, - tree_id: bytes32, + store_id: bytes32, root_hash: Optional[bytes32], generation: Optional[int] = None, use_optimized: bool = True, @@ -1047,19 +1047,19 @@ async def get_ancestors_common( if use_optimized: ancestors: List[InternalNode] = await self.get_ancestors_optimized( node_hash=node_hash, - tree_id=tree_id, + store_id=store_id, generation=generation, root_hash=root_hash, ) else: ancestors = await self.get_ancestors_optimized( node_hash=node_hash, - tree_id=tree_id, + store_id=store_id, generation=generation, root_hash=root_hash, ) ancestors_2: List[InternalNode] = await self.get_ancestors( - node_hash=node_hash, tree_id=tree_id, root_hash=root_hash + node_hash=node_hash, store_id=store_id, root_hash=root_hash ) if ancestors != ancestors_2: raise RuntimeError("Ancestors optimized didn't produce the expected result.") @@ -1070,7 +1070,7 @@ async def get_ancestors_common( async def update_ancestor_hashes_on_insert( self, - tree_id: bytes32, + store_id: bytes32, left: bytes32, right: bytes32, traversal_node_hash: bytes32, @@ -1083,7 +1083,7 @@ async def update_ancestor_hashes_on_insert( new_generation = root.generation + 1 # create first new internal node new_hash = await self._insert_internal_node(left_hash=left, right_hash=right) - insert_ancestors_cache.append((left, right, tree_id)) + insert_ancestors_cache.append((left, right, store_id)) # create updated replacements for the rest of the internal nodes for ancestor in ancestors: @@ -1100,18 +1100,18 @@ async def update_ancestor_hashes_on_insert( traversal_node_hash = ancestor.hash new_hash = await self._insert_internal_node(left_hash=left, right_hash=right) - insert_ancestors_cache.append((left, right, tree_id)) + insert_ancestors_cache.append((left, right, store_id)) new_root = await self._insert_root( - tree_id=tree_id, + store_id=store_id, node_hash=new_hash, status=status, generation=new_generation, ) if status == Status.COMMITTED: - for left_hash, right_hash, tree_id in insert_ancestors_cache: - await self._insert_ancestor_table(left_hash, right_hash, tree_id, 
new_generation) + for left_hash, right_hash, store_id in insert_ancestors_cache: + await self._insert_ancestor_table(left_hash, right_hash, store_id, new_generation) return new_root @@ -1119,7 +1119,7 @@ async def insert( self, key: bytes, value: bytes, - tree_id: bytes32, + store_id: bytes32, reference_node_hash: Optional[bytes32], side: Optional[Side], use_optimized: bool = True, @@ -1128,10 +1128,10 @@ async def insert( ) -> InsertResult: async with self.db_wrapper.writer(): if root is None: - root = await self.get_tree_root(tree_id=tree_id) + root = await self.get_tree_root(store_id=store_id) try: - await self.get_node_by_key(key=key, tree_id=tree_id) + await self.get_node_by_key(key=key, store_id=store_id) raise Exception(f"Key already present: {key.hex()}") except KeyNotFoundError: pass @@ -1139,7 +1139,7 @@ async def insert( was_empty = root.node_hash is None if reference_node_hash is None: if not was_empty: - raise Exception(f"Reference node hash must be specified for non-empty tree: {tree_id.hex()}") + raise Exception(f"Reference node hash must be specified for non-empty tree: {store_id.hex()}") else: reference_node_type = await self.get_node_type(node_hash=reference_node_hash) if reference_node_type == NodeType.INTERNAL: @@ -1153,7 +1153,7 @@ async def insert( raise Exception("Tree was empty so side must be unspecified, got: {side!r}") new_root = await self._insert_root( - tree_id=tree_id, + store_id=store_id, node_hash=new_terminal_node_hash, status=status, ) @@ -1174,13 +1174,13 @@ async def insert( ancestors = await self.get_ancestors_common( node_hash=reference_node_hash, - tree_id=tree_id, + store_id=store_id, root_hash=root.node_hash, generation=root.generation, use_optimized=use_optimized, ) new_root = await self.update_ancestor_hashes_on_insert( - tree_id=tree_id, + store_id=store_id, left=left, right=right, traversal_node_hash=reference_node_hash, @@ -1194,7 +1194,7 @@ async def insert( async def delete( self, key: bytes, - tree_id: bytes32, + store_id: bytes32, use_optimized: bool = True, status: Status = Status.PENDING, root: Optional[Root] = None, @@ -1202,7 +1202,7 @@ async def delete( root_hash = None if root is None else root.node_hash async with self.db_wrapper.writer(): try: - node = await self.get_node_by_key(key=key, tree_id=tree_id) + node = await self.get_node_by_key(key=key, store_id=store_id) node_hash = node.hash assert isinstance(node, TerminalNode) except KeyNotFoundError: @@ -1211,7 +1211,7 @@ async def delete( ancestors: List[InternalNode] = await self.get_ancestors_common( node_hash=node_hash, - tree_id=tree_id, + store_id=store_id, root_hash=root_hash, use_optimized=use_optimized, ) @@ -1219,7 +1219,7 @@ async def delete( if len(ancestors) == 0: # the only node is being deleted return await self._insert_root( - tree_id=tree_id, + store_id=store_id, node_hash=None, status=status, ) @@ -1230,7 +1230,7 @@ async def delete( if len(ancestors) == 1: # the parent is the root so the other side will become the new root return await self._insert_root( - tree_id=tree_id, + store_id=store_id, node_hash=other_hash, status=status, ) @@ -1238,7 +1238,7 @@ async def delete( old_child_hash = parent.hash new_child_hash = other_hash if root is None: - new_generation = await self.get_tree_generation(tree_id) + 1 + new_generation = await self.get_tree_generation(store_id) + 1 else: new_generation = root.generation + 1 # update ancestors after inserting root, to keep table constraints. 
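The "keep table constraints" note above is about insertion order: each ancestors row carries the (tree_id, generation) of a root row, so the new root must land first. A minimal standalone sketch of that ordering requirement (the two-table schema here is illustrative, far smaller than the real DataStore schema):

import sqlite3

conn = sqlite3.connect(":memory:")
conn.execute("PRAGMA foreign_keys = ON")
conn.execute(
    "CREATE TABLE root (tree_id BLOB, generation INTEGER, PRIMARY KEY (tree_id, generation))"
)
conn.execute(
    "CREATE TABLE ancestors (hash BLOB, tree_id BLOB, generation INTEGER, "
    "FOREIGN KEY (tree_id, generation) REFERENCES root (tree_id, generation))"
)
# Root row first; inserting the ancestors row before it would raise IntegrityError.
conn.execute("INSERT INTO root VALUES (?, ?)", (b"store", 1))
conn.execute("INSERT INTO ancestors VALUES (?, ?, ?)", (b"hash", b"store", 1))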
@@ -1255,18 +1255,18 @@ async def delete( raise Exception("Internal error.") new_child_hash = await self._insert_internal_node(left_hash=left_hash, right_hash=right_hash) - insert_ancestors_cache.append((left_hash, right_hash, tree_id)) + insert_ancestors_cache.append((left_hash, right_hash, store_id)) old_child_hash = ancestor.hash new_root = await self._insert_root( - tree_id=tree_id, + store_id=store_id, node_hash=new_child_hash, status=status, generation=new_generation, ) if status == Status.COMMITTED: - for left_hash, right_hash, tree_id in insert_ancestors_cache: - await self._insert_ancestor_table(left_hash, right_hash, tree_id, new_generation) + for left_hash, right_hash, store_id in insert_ancestors_cache: + await self._insert_ancestor_table(left_hash, right_hash, store_id, new_generation) return new_root @@ -1274,23 +1274,23 @@ async def upsert( self, key: bytes, new_value: bytes, - tree_id: bytes32, + store_id: bytes32, use_optimized: bool = True, status: Status = Status.PENDING, root: Optional[Root] = None, ) -> InsertResult: async with self.db_wrapper.writer(): if root is None: - root = await self.get_tree_root(tree_id=tree_id) + root = await self.get_tree_root(store_id=store_id) try: - old_node = await self.get_node_by_key(key=key, tree_id=tree_id) + old_node = await self.get_node_by_key(key=key, store_id=store_id) except KeyNotFoundError: log.debug(f"Key not found: {key.hex()}. Doing an autoinsert instead") return await self.autoinsert( key=key, value=new_value, - tree_id=tree_id, + store_id=store_id, use_optimized=use_optimized, status=status, root=root, @@ -1304,7 +1304,7 @@ async def upsert( ancestors = await self.get_ancestors_common( node_hash=old_node.hash, - tree_id=tree_id, + store_id=store_id, root_hash=root.node_hash, generation=root.generation, use_optimized=use_optimized, @@ -1313,7 +1313,7 @@ async def upsert( # Store contains only the old root, replace it with a new root having the terminal node. 
if len(ancestors) == 0: new_root = await self._insert_root( - tree_id=tree_id, + store_id=store_id, node_hash=new_terminal_node_hash, status=status, ) @@ -1329,7 +1329,7 @@ async def upsert( raise Exception("Internal error.") new_root = await self.update_ancestor_hashes_on_insert( - tree_id=tree_id, + store_id=store_id, left=left, right=right, traversal_node_hash=parent.hash, @@ -1385,14 +1385,14 @@ async def get_leaf_at_minimum_height(self, root_hash: bytes32) -> TerminalNode: async def insert_batch( self, - tree_id: bytes32, + store_id: bytes32, changelist: List[Dict[str, Any]], status: Status = Status.PENDING, enable_batch_autoinsert: bool = True, ) -> Optional[bytes32]: async with self.transaction(): - old_root = await self.get_tree_root(tree_id) - pending_root = await self.get_pending_root(tree_id=tree_id) + old_root = await self.get_tree_root(store_id) + pending_root = await self.get_pending_root(store_id=store_id) if pending_root is None: latest_local_root: Optional[Root] = old_root else: @@ -1401,7 +1401,7 @@ async def insert_batch( if pending_root.generation != old_root.generation + 1: raise Exception("Internal error") await self.change_root_status(pending_root, Status.COMMITTED) - await self.build_ancestor_table_for_latest_root(tree_id=tree_id) + await self.build_ancestor_table_for_latest_root(store_id=store_id) latest_local_root = pending_root else: raise Exception("Internal error") @@ -1439,7 +1439,7 @@ async def insert_batch( pending_autoinsert_hashes.append(terminal_node_hash) continue insert_result = await self.autoinsert( - key, value, tree_id, True, Status.COMMITTED, root=latest_local_root + key, value, store_id, True, Status.COMMITTED, root=latest_local_root ) latest_local_root = insert_result.root else: @@ -1448,7 +1448,7 @@ async def insert_batch( insert_result = await self.insert( key, value, - tree_id, + store_id, reference_node_hash, side, True, @@ -1458,12 +1458,12 @@ async def insert_batch( latest_local_root = insert_result.root elif change["action"] == "delete": key = change["key"] - latest_local_root = await self.delete(key, tree_id, True, Status.COMMITTED, root=latest_local_root) + latest_local_root = await self.delete(key, store_id, True, Status.COMMITTED, root=latest_local_root) elif change["action"] == "upsert": key = change["key"] new_value = change["value"] insert_result = await self.upsert( - key, new_value, tree_id, True, Status.COMMITTED, root=latest_local_root + key, new_value, store_id, True, Status.COMMITTED, root=latest_local_root ) latest_local_root = insert_result.root else: @@ -1486,18 +1486,18 @@ async def insert_batch( if len(pending_autoinsert_hashes): subtree_hash = pending_autoinsert_hashes[0] if latest_local_root is None or latest_local_root.node_hash is None: - await self._insert_root(tree_id=tree_id, node_hash=subtree_hash, status=Status.COMMITTED) + await self._insert_root(store_id=store_id, node_hash=subtree_hash, status=Status.COMMITTED) else: min_height_leaf = await self.get_leaf_at_minimum_height(latest_local_root.node_hash) ancestors = await self.get_ancestors_common( node_hash=min_height_leaf.hash, - tree_id=tree_id, + store_id=store_id, root_hash=latest_local_root.node_hash, generation=latest_local_root.generation, use_optimized=True, ) await self.update_ancestor_hashes_on_insert( - tree_id=tree_id, + store_id=store_id, left=min_height_leaf.hash, right=subtree_hash, traversal_node_hash=min_height_leaf.hash, @@ -1506,19 +1506,19 @@ async def insert_batch( root=latest_local_root, ) - root = await self.get_tree_root(tree_id=tree_id) + 
root = await self.get_tree_root(store_id=store_id) if root.node_hash == old_root.node_hash: if len(changelist) != 0: - await self.rollback_to_generation(tree_id, old_root.generation) + await self.rollback_to_generation(store_id, old_root.generation) raise ValueError("Changelist resulted in no change to tree data") # We delete all "temporary" records stored in root and ancestor tables and store only the final result. - await self.rollback_to_generation(tree_id, old_root.generation) - await self.insert_root_with_ancestor_table(tree_id=tree_id, node_hash=root.node_hash, status=status) + await self.rollback_to_generation(store_id, old_root.generation) + await self.insert_root_with_ancestor_table(store_id=store_id, node_hash=root.node_hash, status=status) if status in (Status.PENDING, Status.PENDING_BATCH): - new_root = await self.get_pending_root(tree_id=tree_id) + new_root = await self.get_pending_root(store_id=store_id) assert new_root is not None elif status == Status.COMMITTED: - new_root = await self.get_tree_root(tree_id=tree_id) + new_root = await self.get_tree_root(store_id=store_id) else: raise Exception(f"No known status: {status}") if new_root.node_hash != root.node_hash: @@ -1535,12 +1535,12 @@ async def insert_batch( async def _get_one_ancestor( self, node_hash: bytes32, - tree_id: bytes32, + store_id: bytes32, generation: Optional[int] = None, ) -> Optional[InternalNode]: async with self.db_wrapper.reader() as reader: if generation is None: - generation = await self.get_tree_generation(tree_id=tree_id) + generation = await self.get_tree_generation(store_id=store_id) cursor = await reader.execute( """ SELECT * from node INNER JOIN ( @@ -1552,53 +1552,53 @@ async def _get_one_ancestor( GROUP BY hash ) asc on asc.hash == node.hash """, - {"hash": node_hash, "tree_id": tree_id, "generation": generation}, + {"hash": node_hash, "tree_id": store_id, "generation": generation}, ) row = await cursor.fetchone() if row is None: return None return InternalNode.from_row(row=row) - async def build_ancestor_table_for_latest_root(self, tree_id: bytes32) -> None: + async def build_ancestor_table_for_latest_root(self, store_id: bytes32) -> None: async with self.db_wrapper.writer(): - root = await self.get_tree_root(tree_id=tree_id) + root = await self.get_tree_root(store_id=store_id) if root.node_hash is None: return previous_root = await self.get_tree_root( - tree_id=tree_id, + store_id=store_id, generation=max(root.generation - 1, 0), ) if previous_root.node_hash is not None: previous_internal_nodes: List[InternalNode] = await self.get_internal_nodes( - tree_id=tree_id, + store_id=store_id, root_hash=previous_root.node_hash, ) known_hashes: Set[bytes32] = {node.hash for node in previous_internal_nodes} else: known_hashes = set() internal_nodes: List[InternalNode] = await self.get_internal_nodes( - tree_id=tree_id, + store_id=store_id, root_hash=root.node_hash, ) for node in internal_nodes: # We already have the same values in ancestor tables, if we have the same internal node. # Don't reinsert it so we can save DB space. 
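The dedup above amounts to a set difference between the internal nodes of the previous root and those of the new one; unchanged subtrees keep their hashes, so only genuinely new internal nodes get ancestors rows. Schematically (plain strings stand in for the bytes32 hashes returned by get_internal_nodes):

# Internal-node hashes already recorded for the previous generation.
known_hashes = {"a1", "b2", "c3"}
# Internal nodes reachable from the new root; "a1" and "b2" are reused subtrees.
current_hashes = ["a1", "b2", "d4", "e5"]
# Only the new hashes need ancestors rows for this generation.
to_insert = [h for h in current_hashes if h not in known_hashes]
assert to_insert == ["d4", "e5"]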
if node.hash not in known_hashes: - await self._insert_ancestor_table(node.left_hash, node.right_hash, tree_id, root.generation) + await self._insert_ancestor_table(node.left_hash, node.right_hash, store_id, root.generation) async def insert_root_with_ancestor_table( - self, tree_id: bytes32, node_hash: Optional[bytes32], status: Status = Status.PENDING + self, store_id: bytes32, node_hash: Optional[bytes32], status: Status = Status.PENDING ) -> None: async with self.db_wrapper.writer(): - await self._insert_root(tree_id=tree_id, node_hash=node_hash, status=status) + await self._insert_root(store_id=store_id, node_hash=node_hash, status=status) # Don't update the ancestor table for non-committed status. if status == Status.COMMITTED: - await self.build_ancestor_table_for_latest_root(tree_id=tree_id) + await self.build_ancestor_table_for_latest_root(store_id=store_id) - async def get_node_by_key_latest_generation(self, key: bytes, tree_id: bytes32) -> TerminalNode: + async def get_node_by_key_latest_generation(self, key: bytes, store_id: bytes32) -> TerminalNode: async with self.db_wrapper.reader() as reader: - root = await self.get_tree_root(tree_id=tree_id) + root = await self.get_tree_root(store_id=store_id) if root.node_hash is None: raise KeyNotFoundError(key=key) @@ -1611,7 +1611,7 @@ async def get_node_by_key_latest_generation(self, key: bytes, tree_id: bytes32) ORDER BY a.generation DESC LIMIT 1 """, - {"key": key, "tree_id": tree_id}, + {"key": key, "tree_id": store_id}, ) row = await cursor.fetchone() @@ -1621,7 +1621,7 @@ async def get_node_by_key_latest_generation(self, key: bytes, tree_id: bytes32) node = await self.get_node(row["hash"]) node_hash = node.hash while True: - internal_node = await self._get_one_ancestor(node_hash, tree_id) + internal_node = await self._get_one_ancestor(node_hash, store_id) if internal_node is None: break node_hash = internal_node.hash @@ -1634,13 +1634,13 @@ async def get_node_by_key_latest_generation(self, key: bytes, tree_id: bytes32) async def get_node_by_key( self, key: bytes, - tree_id: bytes32, + store_id: bytes32, root_hash: Optional[bytes32] = None, ) -> TerminalNode: if root_hash is None: - return await self.get_node_by_key_latest_generation(key, tree_id) + return await self.get_node_by_key_latest_generation(key, store_id) - nodes = await self.get_keys_values(tree_id=tree_id, root_hash=root_hash) + nodes = await self.get_keys_values(store_id=store_id, root_hash=root_hash) for node in nodes: if node.key == key: @@ -1659,9 +1659,9 @@ async def get_node(self, node_hash: bytes32) -> Node: node = row_to_node(row=row) return node - async def get_tree_as_program(self, tree_id: bytes32) -> Program: + async def get_tree_as_program(self, store_id: bytes32) -> Program: async with self.db_wrapper.reader() as reader: - root = await self.get_tree_root(tree_id=tree_id) + root = await self.get_tree_root(store_id=store_id) # TODO: consider actual proper behavior assert root.node_hash is not None root_node = await self.get_node(node_hash=root.node_hash) @@ -1697,7 +1697,7 @@ async def get_tree_as_program(self, tree_id: bytes32) -> Program: async def get_proof_of_inclusion_by_hash( self, node_hash: bytes32, - tree_id: bytes32, + store_id: bytes32, root_hash: Optional[bytes32] = None, use_optimized: bool = False, ) -> ProofOfInclusion: @@ -1709,9 +1709,9 @@ async def get_proof_of_inclusion_by_hash( # when used with use_optimized=False - it will compare both methods in this case and raise an exception. 
# this is undesirable in the DL Offers flow where PENDING roots can cause the optimized code to fail. if use_optimized: - ancestors = await self.get_ancestors_optimized(node_hash=node_hash, tree_id=tree_id, root_hash=root_hash) + ancestors = await self.get_ancestors_optimized(node_hash=node_hash, store_id=store_id, root_hash=root_hash) else: - ancestors = await self.get_ancestors(node_hash=node_hash, tree_id=tree_id, root_hash=root_hash) + ancestors = await self.get_ancestors(node_hash=node_hash, store_id=store_id, root_hash=root_hash) layers: List[ProofOfInclusionLayer] = [] child_hash = node_hash @@ -1738,20 +1738,20 @@ async def get_proof_of_inclusion_by_hash( async def get_proof_of_inclusion_by_key( self, key: bytes, - tree_id: bytes32, + store_id: bytes32, ) -> ProofOfInclusion: """Collect the information for a proof of inclusion of a key and its value in the Merkle tree. """ async with self.db_wrapper.reader(): - node = await self.get_node_by_key(key=key, tree_id=tree_id) - return await self.get_proof_of_inclusion_by_hash(node_hash=node.hash, tree_id=tree_id) + node = await self.get_node_by_key(key=key, store_id=store_id) + return await self.get_proof_of_inclusion_by_hash(node_hash=node.hash, store_id=store_id) - async def get_first_generation(self, node_hash: bytes32, tree_id: bytes32) -> int: + async def get_first_generation(self, node_hash: bytes32, store_id: bytes32) -> int: async with self.db_wrapper.reader() as reader: cursor = await reader.execute( "SELECT MIN(generation) AS generation FROM ancestors WHERE hash == :hash AND tree_id == :tree_id", - {"hash": node_hash, "tree_id": tree_id}, + {"hash": node_hash, "tree_id": store_id}, ) row = await cursor.fetchone() if row is None: @@ -1764,7 +1764,7 @@ async def write_tree_to_file( self, root: Root, node_hash: bytes32, - tree_id: bytes32, + store_id: bytes32, deltas_only: bool, writer: BinaryIO, ) -> None: @@ -1772,15 +1772,15 @@ async def write_tree_to_file( return if deltas_only: - generation = await self.get_first_generation(node_hash, tree_id) + generation = await self.get_first_generation(node_hash, store_id) # Root's generation is not the first time we see this hash, so it's not a new delta. 
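That generation check is what makes the delta files incremental: a node is serialized only into the file for the generation where its hash first appears, since every earlier delta file already carries the older nodes. A toy illustration (the dict plays the role of get_first_generation lookups):

first_generation = {"h1": 1, "h2": 1, "h3": 2}  # earliest generation per node hash

def belongs_in_delta(node_hash: str, root_generation: int) -> bool:
    # Skip nodes that an earlier delta file already contains.
    return first_generation[node_hash] == root_generation

assert [h for h in first_generation if belongs_in_delta(h, 2)] == ["h3"]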
@@ -1764,7 +1764,7 @@ async def write_tree_to_file(
         self,
         root: Root,
         node_hash: bytes32,
-        tree_id: bytes32,
+        store_id: bytes32,
         deltas_only: bool,
         writer: BinaryIO,
     ) -> None:
@@ -1772,15 +1772,15 @@ async def write_tree_to_file(
             return
 
         if deltas_only:
-            generation = await self.get_first_generation(node_hash, tree_id)
+            generation = await self.get_first_generation(node_hash, store_id)
             # Root's generation is not the first time we see this hash, so it's not a new delta.
             if root.generation != generation:
                 return
 
         node = await self.get_node(node_hash)
         to_write = b""
         if isinstance(node, InternalNode):
-            await self.write_tree_to_file(root, node.left_hash, tree_id, deltas_only, writer)
-            await self.write_tree_to_file(root, node.right_hash, tree_id, deltas_only, writer)
+            await self.write_tree_to_file(root, node.left_hash, store_id, deltas_only, writer)
+            await self.write_tree_to_file(root, node.right_hash, store_id, deltas_only, writer)
             to_write = bytes(SerializedNode(False, bytes(node.left_hash), bytes(node.right_hash)))
         elif isinstance(node, TerminalNode):
             to_write = bytes(SerializedNode(True, node.key, node.value))
@@ -1790,19 +1790,19 @@ async def write_tree_to_file(
 
         writer.write(len(to_write).to_bytes(4, byteorder="big"))
         writer.write(to_write)
 
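The two writes above fix the on-disk framing shared by full and delta files: a 4-byte big-endian length followed by a streamable `SerializedNode`. A sketch of the matching reader (the local `path` value is invented; this mirrors the writer rather than quoting an API from this patch):

```python
from pathlib import Path

from chia.data_layer.data_layer_util import SerializedNode

path = Path("example-full-7-v1.0.dat")  # invented local file for illustration
with open(path, "rb") as reader:
    while True:
        size_bytes = reader.read(4)
        if not size_bytes:
            break  # end of file
        size = int.from_bytes(size_bytes, byteorder="big")
        serialized_node = SerializedNode.from_bytes(reader.read(size))
        if serialized_node.is_terminal:
            key, value = serialized_node.value1, serialized_node.value2
        else:
            left_hash, right_hash = serialized_node.value1, serialized_node.value2
```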
-    async def update_subscriptions_from_wallet(self, tree_id: bytes32, new_urls: List[str]) -> None:
+    async def update_subscriptions_from_wallet(self, store_id: bytes32, new_urls: List[str]) -> None:
         async with self.db_wrapper.writer() as writer:
             cursor = await writer.execute(
                 "SELECT * FROM subscriptions WHERE from_wallet == 1 AND tree_id == :tree_id",
                 {
-                    "tree_id": tree_id,
+                    "tree_id": store_id,
                 },
             )
             old_urls = [row["url"] async for row in cursor]
             cursor = await writer.execute(
                 "SELECT * FROM subscriptions WHERE from_wallet == 0 AND tree_id == :tree_id",
                 {
-                    "tree_id": tree_id,
+                    "tree_id": store_id,
                 },
             )
             from_subscriptions_urls = {row["url"] async for row in cursor}
@@ -1813,7 +1813,7 @@ async def update_subscriptions_from_wallet(self, tree_id: bytes32, new_urls: Lis
                     "DELETE FROM subscriptions WHERE url == :url AND tree_id == :tree_id",
                     {
                         "url": url,
-                        "tree_id": tree_id,
+                        "tree_id": store_id,
                     },
                 )
             for url in additions:
@@ -1822,19 +1822,19 @@ async def update_subscriptions_from_wallet(self, tree_id: bytes32, new_urls: Lis
                     "INSERT INTO subscriptions(tree_id, url, ignore_till, num_consecutive_failures, from_wallet) "
                     "VALUES (:tree_id, :url, 0, 0, 1)",
                     {
-                        "tree_id": tree_id,
+                        "tree_id": store_id,
                         "url": url,
                     },
                 )
 
     async def subscribe(self, subscription: Subscription) -> None:
         async with self.db_wrapper.writer() as writer:
-            # Add a fake subscription, so we always have the tree_id, even with no URLs.
+            # Add a fake subscription, so we always have the store_id, even with no URLs.
             await writer.execute(
                 "INSERT INTO subscriptions(tree_id, url, ignore_till, num_consecutive_failures, from_wallet) "
                 "VALUES (:tree_id, NULL, NULL, NULL, 0)",
                 {
-                    "tree_id": subscription.tree_id,
+                    "tree_id": subscription.store_id,
                 },
             )
             all_subscriptions = await self.get_subscriptions()
@@ -1842,7 +1842,7 @@ async def subscribe(self, subscription: Subscription) -> None:
                 (
                     old_subscription
                     for old_subscription in all_subscriptions
-                    if old_subscription.tree_id == subscription.tree_id
+                    if old_subscription.store_id == subscription.store_id
                 ),
                 None,
             )
@@ -1855,25 +1855,25 @@ async def subscribe(self, subscription: Subscription) -> None:
                     "INSERT INTO subscriptions(tree_id, url, ignore_till, num_consecutive_failures, from_wallet) "
                     "VALUES (:tree_id, :url, :ignore_till, :num_consecutive_failures, 0)",
                     {
-                        "tree_id": subscription.tree_id,
+                        "tree_id": subscription.store_id,
                         "url": server_info.url,
                         "ignore_till": server_info.ignore_till,
                         "num_consecutive_failures": server_info.num_consecutive_failures,
                     },
                 )
 
-    async def remove_subscriptions(self, tree_id: bytes32, urls: List[str]) -> None:
+    async def remove_subscriptions(self, store_id: bytes32, urls: List[str]) -> None:
         async with self.db_wrapper.writer() as writer:
             for url in urls:
                 await writer.execute(
                     "DELETE FROM subscriptions WHERE tree_id == :tree_id AND url == :url",
                     {
-                        "tree_id": tree_id,
+                        "tree_id": store_id,
                         "url": url,
                     },
                 )
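For orientation, a hedged lifecycle sketch tying together the subscription helpers above (assumed async context and `data_store`; the URL and timestamps are invented):

```python
from chia.data_layer.data_layer_util import ServerInfo, Subscription

url = "http://example.com:8575"  # invented example
await data_store.subscribe(Subscription(store_id, [ServerInfo(url, 0, 0)]))
assert any(sub.store_id == store_id for sub in await data_store.get_subscriptions())

await data_store.remove_subscriptions(store_id, [url])  # drops the URL, keeps the store
await data_store.unsubscribe(store_id)                  # drops the store entirely
```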
"target_generation": target_generation}, + {"tree_id": store_id, "target_generation": target_generation}, ) - async def update_server_info(self, tree_id: bytes32, server_info: ServerInfo) -> None: + async def update_server_info(self, store_id: bytes32, server_info: ServerInfo) -> None: async with self.db_wrapper.writer() as writer: await writer.execute( "UPDATE subscriptions SET ignore_till = :ignore_till, " @@ -1971,28 +1971,28 @@ async def update_server_info(self, tree_id: bytes32, server_info: ServerInfo) -> { "ignore_till": server_info.ignore_till, "num_consecutive_failures": server_info.num_consecutive_failures, - "tree_id": tree_id, + "tree_id": store_id, "url": server_info.url, }, ) - async def received_incorrect_file(self, tree_id: bytes32, server_info: ServerInfo, timestamp: int) -> None: + async def received_incorrect_file(self, store_id: bytes32, server_info: ServerInfo, timestamp: int) -> None: SEVEN_DAYS_BAN = 7 * 24 * 60 * 60 new_server_info = replace( server_info, num_consecutive_failures=server_info.num_consecutive_failures + 1, ignore_till=max(server_info.ignore_till, timestamp + SEVEN_DAYS_BAN), ) - await self.update_server_info(tree_id, new_server_info) + await self.update_server_info(store_id, new_server_info) - async def received_correct_file(self, tree_id: bytes32, server_info: ServerInfo) -> None: + async def received_correct_file(self, store_id: bytes32, server_info: ServerInfo) -> None: new_server_info = replace( server_info, num_consecutive_failures=0, ) - await self.update_server_info(tree_id, new_server_info) + await self.update_server_info(store_id, new_server_info) - async def server_misses_file(self, tree_id: bytes32, server_info: ServerInfo, timestamp: int) -> ServerInfo: + async def server_misses_file(self, store_id: bytes32, server_info: ServerInfo, timestamp: int) -> ServerInfo: # Max banned time is 1 hour. 
 
-    async def get_available_servers_for_store(self, tree_id: bytes32, timestamp: int) -> List[ServerInfo]:
+    async def get_available_servers_for_store(self, store_id: bytes32, timestamp: int) -> List[ServerInfo]:
         subscriptions = await self.get_subscriptions()
-        subscription = next((subscription for subscription in subscriptions if subscription.tree_id == tree_id), None)
+        subscription = next((subscription for subscription in subscriptions if subscription.store_id == store_id), None)
         if subscription is None:
             return []
         servers_info = []
@@ -2023,20 +2023,20 @@ async def get_subscriptions(self) -> List[Subscription]:
                 "SELECT * from subscriptions",
             )
             async for row in cursor:
-                tree_id = bytes32(row["tree_id"])
+                store_id = bytes32(row["tree_id"])
                 url = row["url"]
                 ignore_till = row["ignore_till"]
                 num_consecutive_failures = row["num_consecutive_failures"]
                 subscription = next(
-                    (subscription for subscription in subscriptions if subscription.tree_id == tree_id), None
+                    (subscription for subscription in subscriptions if subscription.store_id == store_id), None
                 )
                 if subscription is None:
                     if url is not None and num_consecutive_failures is not None and ignore_till is not None:
                         subscriptions.append(
-                            Subscription(tree_id, [ServerInfo(url, num_consecutive_failures, ignore_till)])
+                            Subscription(store_id, [ServerInfo(url, num_consecutive_failures, ignore_till)])
                         )
                     else:
-                        subscriptions.append(Subscription(tree_id, []))
+                        subscriptions.append(Subscription(store_id, []))
                 else:
                     if url is not None and num_consecutive_failures is not None and ignore_till is not None:
                         new_servers_info = subscription.servers_info
@@ -2049,13 +2049,13 @@ async def get_subscriptions(self) -> List[Subscription]:
     async def get_kv_diff(
         self,
-        tree_id: bytes32,
+        store_id: bytes32,
         hash_1: bytes32,
         hash_2: bytes32,
     ) -> Set[DiffData]:
         async with self.db_wrapper.reader():
-            old_pairs = set(await self.get_keys_values(tree_id, hash_1))
-            new_pairs = set(await self.get_keys_values(tree_id, hash_2))
+            old_pairs = set(await self.get_keys_values(store_id, hash_1))
+            new_pairs = set(await self.get_keys_values(store_id, hash_2))
             if len(old_pairs) == 0 and hash_1 != bytes32([0] * 32):
                 return set()
             if len(new_pairs) == 0 and hash_2 != bytes32([0] * 32):
diff --git a/chia/data_layer/download_data.py b/chia/data_layer/download_data.py
index ed56a6a52eb0..b597693e0698 100644
--- a/chia/data_layer/download_data.py
+++ b/chia/data_layer/download_data.py
@@ -15,20 +15,20 @@
 from chia.types.blockchain_format.sized_bytes import bytes32
 
 
-def get_full_tree_filename(tree_id: bytes32, node_hash: bytes32, generation: int) -> str:
-    return f"{tree_id}-{node_hash}-full-{generation}-v1.0.dat"
+def get_full_tree_filename(store_id: bytes32, node_hash: bytes32, generation: int) -> str:
+    return f"{store_id}-{node_hash}-full-{generation}-v1.0.dat"
 
 
-def get_delta_filename(tree_id: bytes32, node_hash: bytes32, generation: int) -> str:
-    return f"{tree_id}-{node_hash}-delta-{generation}-v1.0.dat"
+def get_delta_filename(store_id: bytes32, node_hash: bytes32, generation: int) -> str:
+    return f"{store_id}-{node_hash}-delta-{generation}-v1.0.dat"
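The renamed helpers pin the published file layout to `<store_id hex>-<node_hash hex>-(full|delta)-<generation>-v1.0.dat`; `bytes32` renders as hex in f-strings. An illustration with invented values:

```python
from chia.types.blockchain_format.sized_bytes import bytes32

store_id = bytes32(b"\x11" * 32)   # invented example values
node_hash = bytes32(b"\x22" * 32)
name = get_delta_filename(store_id, node_hash, generation=7)
assert name == f"{'11' * 32}-{'22' * 32}-delta-7-v1.0.dat"
# is_filename_valid below accepts exactly the names these helpers emit.
```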
 
 
 def is_filename_valid(filename: str) -> bool:
     split = filename.split("-")
 
     try:
-        raw_tree_id, raw_node_hash, file_type, raw_generation, raw_version, *rest = split
-        tree_id = bytes32(bytes.fromhex(raw_tree_id))
+        raw_store_id, raw_node_hash, file_type, raw_generation, raw_version, *rest = split
+        store_id = bytes32(bytes.fromhex(raw_store_id))
         node_hash = bytes32(bytes.fromhex(raw_node_hash))
         generation = int(raw_generation)
     except ValueError:
@@ -45,14 +45,14 @@ def is_filename_valid(filename: str) -> bool:
         return False
 
     generate_file_func = get_delta_filename if file_type == "delta" else get_full_tree_filename
-    reformatted = generate_file_func(tree_id=tree_id, node_hash=node_hash, generation=generation)
+    reformatted = generate_file_func(store_id=store_id, node_hash=node_hash, generation=generation)
 
     return reformatted == filename
 
 
 async def insert_into_data_store_from_file(
     data_store: DataStore,
-    tree_id: bytes32,
+    store_id: bytes32,
     root_hash: Optional[bytes32],
     filename: Path,
 ) -> None:
@@ -83,7 +83,7 @@ async def insert_into_data_store_from_file(
             node_type = NodeType.TERMINAL if serialized_node.is_terminal else NodeType.INTERNAL
             await data_store.insert_node(node_type, serialized_node.value1, serialized_node.value2)
 
-    await data_store.insert_root_with_ancestor_table(tree_id=tree_id, node_hash=root_hash, status=Status.COMMITTED)
+    await data_store.insert_root_with_ancestor_table(store_id=store_id, node_hash=root_hash, status=Status.COMMITTED)
 
 
 @dataclass
@@ -95,7 +95,7 @@ class WriteFilesResult:
 
 async def write_files_for_root(
     data_store: DataStore,
-    tree_id: bytes32,
+    store_id: bytes32,
     root: Root,
     foldername: Path,
     full_tree_first_publish_generation: int,
@@ -106,8 +106,8 @@ async def write_files_for_root(
     else:
         node_hash = bytes32([0] * 32)  # todo change
 
-    filename_full_tree = foldername.joinpath(get_full_tree_filename(tree_id, node_hash, root.generation))
-    filename_diff_tree = foldername.joinpath(get_delta_filename(tree_id, node_hash, root.generation))
+    filename_full_tree = foldername.joinpath(get_full_tree_filename(store_id, node_hash, root.generation))
+    filename_diff_tree = foldername.joinpath(get_delta_filename(store_id, node_hash, root.generation))
 
     written = False
     mode: Literal["wb", "xb"] = "wb" if overwrite else "xb"
@@ -116,7 +116,7 @@ async def write_files_for_root(
     if root.generation >= full_tree_first_publish_generation:
         try:
             with open(filename_full_tree, mode) as writer:
-                await data_store.write_tree_to_file(root, node_hash, tree_id, False, writer)
+                await data_store.write_tree_to_file(root, node_hash, store_id, False, writer)
             written = True
             written_full_file = True
         except FileExistsError:
@@ -124,11 +124,11 @@ async def write_files_for_root(
 
     try:
         last_seen_generation = await data_store.get_last_tree_root_by_hash(
-            tree_id, root.node_hash, max_generation=root.generation
+            store_id, root.node_hash, max_generation=root.generation
         )
         if last_seen_generation is None:
             with open(filename_diff_tree, mode) as writer:
-                await data_store.write_tree_to_file(root, node_hash, tree_id, True, writer)
+                await data_store.write_tree_to_file(root, node_hash, store_id, True, writer)
         else:
             open(filename_diff_tree, mode).close()
         written = True
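A design note on `mode` above: exclusive create (`"xb"`) turns re-publishing an existing generation into a caught `FileExistsError` instead of a silent overwrite. The same pattern in isolation (the helper name is invented):

```python
from pathlib import Path
from typing import BinaryIO, Optional

def open_for_publish(path: Path, overwrite: bool) -> Optional[BinaryIO]:
    # "xb" fails with FileExistsError if this generation was already written.
    mode = "wb" if overwrite else "xb"
    try:
        return open(path, mode)
    except FileExistsError:
        return None  # already published; caller can skip rewriting
```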
@@ -140,7 +140,7 @@ async def write_files_for_root(
 
 async def insert_from_delta_file(
     data_store: DataStore,
-    tree_id: bytes32,
+    store_id: bytes32,
     existing_generation: int,
     root_hashes: List[bytes32],
     server_info: ServerInfo,
@@ -153,7 +153,7 @@ async def insert_from_delta_file(
     for root_hash in root_hashes:
         timestamp = int(time.time())
         existing_generation += 1
-        filename = get_delta_filename(tree_id, root_hash, existing_generation)
+        filename = get_delta_filename(store_id, root_hash, existing_generation)
         request_json = {"url": server_info.url, "client_folder": str(client_foldername), "filename": filename}
         target_path = client_foldername.joinpath(filename)
         filename_exists = False
@@ -166,7 +166,7 @@ async def insert_from_delta_file(
             try:
                 await http_download(client_foldername, filename, proxy_url, server_info, timeout, log)
             except (asyncio.TimeoutError, aiohttp.ClientError):
-                new_server_info = await data_store.server_misses_file(tree_id, server_info, timestamp)
+                new_server_info = await data_store.server_misses_file(store_id, server_info, timestamp)
                 log.info(
                     f"Failed to download {filename} from {new_server_info.url}."
                     f"Miss {new_server_info.num_consecutive_failures}."
                 )
                 log.info(f"Next attempt from {new_server_info.url} in {new_server_info.ignore_till - timestamp}s.")
                 return False
         else:
-            log.info(f"Using downloader {downloader} for store {tree_id.hex()}.")
+            log.info(f"Using downloader {downloader} for store {store_id.hex()}.")
             async with aiohttp.ClientSession() as session:
                 async with session.post(
                     downloader.url + "/download",
@@ -189,24 +189,24 @@ async def insert_from_delta_file(
         log.info(f"Successfully downloaded delta file {filename}.")
         try:
             filename_full_tree = client_foldername.joinpath(
-                get_full_tree_filename(tree_id, root_hash, existing_generation)
+                get_full_tree_filename(store_id, root_hash, existing_generation)
             )
 
             await insert_into_data_store_from_file(
                 data_store,
-                tree_id,
+                store_id,
                 None if root_hash == bytes32([0] * 32) else root_hash,
                 client_foldername.joinpath(filename),
             )
             log.info(
                 f"Successfully inserted hash {root_hash} from delta file. "
-                f"Generation: {existing_generation}. Tree id: {tree_id}."
+                f"Generation: {existing_generation}. Store id: {store_id}."
             )
 
-            root = await data_store.get_tree_root(tree_id=tree_id)
+            root = await data_store.get_tree_root(store_id=store_id)
             with open(filename_full_tree, "wb") as writer:
-                await data_store.write_tree_to_file(root, root_hash, tree_id, False, writer)
+                await data_store.write_tree_to_file(root, root_hash, store_id, False, writer)
             log.info(f"Successfully written full tree filename {filename_full_tree}.")
-            await data_store.received_correct_file(tree_id, server_info)
+            await data_store.received_correct_file(store_id, server_info)
         except Exception:
             target_filename = client_foldername.joinpath(filename)
             try:
@@ -219,26 +219,26 @@ async def insert_from_delta_file(
             except FileNotFoundError:
                 pass
 
-            # await data_store.received_incorrect_file(tree_id, server_info, timestamp)
+            # await data_store.received_incorrect_file(store_id, server_info, timestamp)
             # incorrect file bans for 7 days which in practical usage
             # is too long given this file might be incorrect for various reasons
             # therefore, use the misses file logic instead
             if not filename_exists:
                 # Don't penalize this server if we didn't download the file from it.
-                await data_store.server_misses_file(tree_id, server_info, timestamp)
-            await data_store.rollback_to_generation(tree_id, existing_generation - 1)
+                await data_store.server_misses_file(store_id, server_info, timestamp)
+            await data_store.rollback_to_generation(store_id, existing_generation - 1)
             return False
 
     return True
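The failure path above leaves the store where it started: the partially applied generation is rolled back before returning False, so the caller can retry from another server. A hedged sketch of that invariant using only methods from this patch (async context and populated `data_store` assumed):

```python
# Assumed async context; store currently at some generation N.
before = await data_store.get_tree_root(store_id=store_id)
# ...a download or insert of generation N + 1 fails partway...
await data_store.rollback_to_generation(store_id, before.generation)
after = await data_store.get_tree_root(store_id=store_id)
assert after.generation == before.generation  # back to the pre-attempt root
```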
 
 
-def delete_full_file_if_exists(foldername: Path, tree_id: bytes32, root: Root) -> bool:
+def delete_full_file_if_exists(foldername: Path, store_id: bytes32, root: Root) -> bool:
     if root.node_hash is not None:
         node_hash = root.node_hash
     else:
         node_hash = bytes32([0] * 32)  # todo change
 
-    filename_full_tree = foldername.joinpath(get_full_tree_filename(tree_id, node_hash, root.generation))
+    filename_full_tree = foldername.joinpath(get_full_tree_filename(store_id, node_hash, root.generation))
 
     try:
         filename_full_tree.unlink()
     except FileNotFoundError:
diff --git a/chia/data_layer/s3_plugin_service.py b/chia/data_layer/s3_plugin_service.py
index 3e9aa1907c2a..26d4855a67ce 100644
--- a/chia/data_layer/s3_plugin_service.py
+++ b/chia/data_layer/s3_plugin_service.py
@@ -152,12 +152,12 @@ async def upload(self, request: web.Request) -> web.Response:
                 return web.json_response({"uploaded": False})
 
             # Pull the store_id from the filename to make sure we only upload for configured stores
-            full_tree_id = None if full_tree_name is None else bytes32.fromhex(full_tree_name[:64])
-            diff_tree_id = bytes32.fromhex(diff_name[:64])
+            full_store_id = None if full_tree_name is None else bytes32.fromhex(full_tree_name[:64])
+            diff_store_id = bytes32.fromhex(diff_name[:64])
 
-            if full_tree_id is not None and not (full_tree_id == diff_tree_id == store_id):
+            if full_store_id is not None and not (full_store_id == diff_store_id == store_id):
                 return web.json_response({"uploaded": False})
-            if full_tree_id is None and diff_tree_id != store_id:
+            if full_store_id is None and diff_store_id != store_id:
                 return web.json_response({"uploaded": False})
 
             full_tree_path = None if full_tree_name is None else self.server_files_path.joinpath(full_tree_name)
@@ -220,11 +220,11 @@ async def download(self, request: web.Request) -> web.Response:
                 return web.json_response({"downloaded": False})
 
             # Pull the store_id from the filename to make sure we only download for configured stores
-            filename_tree_id = bytes32.fromhex(filename[:64])
+            filename_store_id = bytes32.fromhex(filename[:64])
             parse_result = urlparse(url)
             should_download = False
             for store in self.stores:
-                if store.id == filename_tree_id and parse_result.scheme == "s3" and url in store.urls:
+                if store.id == filename_store_id and parse_result.scheme == "s3" and url in store.urls:
                     should_download = True
                     break
 
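Both plugin handlers rely on the filename layout from download_data.py, where the first 64 hex characters are the store id. A standalone illustration (values invented):

```python
from chia.types.blockchain_format.sized_bytes import bytes32

filename = "11" * 32 + "-" + "22" * 32 + "-delta-7-v1.0.dat"  # invented
assert bytes32.fromhex(filename[:64]) == bytes32(b"\x11" * 32)
```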
diff --git a/chia/data_layer/util/benchmark.py b/chia/data_layer/util/benchmark.py
index 5cabd1e03944..2808060897a3 100644
--- a/chia/data_layer/util/benchmark.py
+++ b/chia/data_layer/util/benchmark.py
@@ -24,8 +24,8 @@
 async def generate_datastore(num_nodes: int, slow_mode: bool) -> None:
     async with DataStore.managed(database=db_path) as data_store:
-        tree_id = bytes32(b"0" * 32)
-        await data_store.create_tree(tree_id)
+        store_id = bytes32(b"0" * 32)
+        await data_store.create_tree(store_id)
 
         insert_time = 0.0
         insert_count = 0
@@ -38,7 +38,7 @@ async def generate_datastore(num_nodes: int, slow_mode: bool) -> None:
             key = i.to_bytes(4, byteorder="big")
             value = (2 * i).to_bytes(4, byteorder="big")
             seed = leaf_hash(key=key, value=value)
-            reference_node_hash: Optional[bytes32] = await data_store.get_terminal_node_for_seed(tree_id, seed)
+            reference_node_hash: Optional[bytes32] = await data_store.get_terminal_node_for_seed(store_id, seed)
             side: Optional[Side] = data_store.get_side_for_seed(seed)
 
             if i == 0:
@@ -50,7 +50,7 @@ async def generate_datastore(num_nodes: int, slow_mode: bool) -> None:
                     await data_store.insert(
                         key=key,
                         value=value,
-                        tree_id=tree_id,
+                        store_id=store_id,
                         reference_node_hash=reference_node_hash,
                         side=side,
                     )
@@ -58,7 +58,7 @@ async def generate_datastore(num_nodes: int, slow_mode: bool) -> None:
                     await data_store.insert(
                         key=key,
                         value=value,
-                        tree_id=tree_id,
+                        store_id=store_id,
                         reference_node_hash=reference_node_hash,
                         side=side,
                         use_optimized=False,
@@ -69,12 +69,12 @@ async def generate_datastore(num_nodes: int, slow_mode: bool) -> None:
             elif i % 3 == 1:
                 t1 = time.time()
                 if not slow_mode:
-                    await data_store.autoinsert(key=key, value=value, tree_id=tree_id)
+                    await data_store.autoinsert(key=key, value=value, store_id=store_id)
                 else:
                     await data_store.autoinsert(
                         key=key,
                         value=value,
-                        tree_id=tree_id,
+                        store_id=store_id,
                         use_optimized=False,
                     )
                 t2 = time.time()
@@ -86,9 +86,9 @@ async def generate_datastore(num_nodes: int, slow_mode: bool) -> None:
                 node = await data_store.get_node(reference_node_hash)
                 assert isinstance(node, TerminalNode)
                 if not slow_mode:
-                    await data_store.delete(key=node.key, tree_id=tree_id)
+                    await data_store.delete(key=node.key, store_id=store_id)
                 else:
-                    await data_store.delete(key=node.key, tree_id=tree_id, use_optimized=False)
+                    await data_store.delete(key=node.key, store_id=store_id, use_optimized=False)
                 t2 = time.time()
                 delete_time += t2 - t1
                 delete_count += 1
@@ -97,7 +97,7 @@ async def generate_datastore(num_nodes: int, slow_mode: bool) -> None:
         print(f"Average autoinsert time: {autoinsert_time / autoinsert_count}")
         print(f"Average delete time: {delete_time / delete_count}")
         print(f"Total time for {num_nodes} operations: {insert_time + autoinsert_time + delete_time}")
-        root = await data_store.get_tree_root(tree_id=tree_id)
+        root = await data_store.get_tree_root(store_id=store_id)
         print(f"Root hash: {root.node_hash}")
 
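For skimmers of the benchmark: the operation is chosen by `i % 3`, giving an even interleave of seeded inserts, autoinserts, and deletes. Restated trivially:

```python
# i % 3 == 0 -> insert at a seed-chosen reference node
# i % 3 == 1 -> autoinsert (the store picks the location)
# otherwise  -> delete a previously inserted key
ops = [("insert", "autoinsert", "delete")[i % 3] for i in range(6)]
assert ops == ["insert", "autoinsert", "delete"] * 2
```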
diff --git a/chia/rpc/data_layer_rpc_api.py b/chia/rpc/data_layer_rpc_api.py
index 6e2f8ab42985..3d559d0d3625 100644
--- a/chia/rpc/data_layer_rpc_api.py
+++ b/chia/rpc/data_layer_rpc_api.py
@@ -401,7 +401,7 @@ async def subscriptions(self, request: Dict[str, Any]) -> EndpointResult:
         if self.service is None:
             raise Exception("Data layer not created")
         subscriptions: List[Subscription] = await self.service.get_subscriptions()
-        return {"store_ids": [sub.tree_id.hex() for sub in subscriptions]}
+        return {"store_ids": [sub.store_id.hex() for sub in subscriptions]}
 
     async def remove_subscriptions(self, request: Dict[str, Any]) -> EndpointResult:
         if self.service is None:
@@ -423,13 +423,13 @@ async def add_missing_files(self, request: Dict[str, Any]) -> EndpointResult:
             ids_bytes = [bytes32.from_hexstr(id) for id in store_ids]
         else:
             subscriptions: List[Subscription] = await self.service.get_subscriptions()
-            ids_bytes = [subscription.tree_id for subscription in subscriptions]
+            ids_bytes = [subscription.store_id for subscription in subscriptions]
         overwrite = request.get("overwrite", False)
         foldername: Optional[Path] = None
         if "foldername" in request:
             foldername = Path(request["foldername"])
-        for tree_id in ids_bytes:
-            await self.service.add_missing_files(tree_id, overwrite, foldername)
+        for store_id in ids_bytes:
+            await self.service.add_missing_files(store_id, overwrite, foldername)
         return {}
 
     async def get_root_history(self, request: Dict[str, Any]) -> EndpointResult:
@@ -575,7 +575,7 @@ async def check_plugins(self, request: Dict[str, Any]) -> EndpointResult:
 
     @marshal()  # type: ignore[arg-type]
     async def clear_pending_roots(self, request: ClearPendingRootsRequest) -> ClearPendingRootsResponse:
-        root = await self.service.data_store.clear_pending_roots(tree_id=request.store_id)
+        root = await self.service.data_store.clear_pending_roots(store_id=request.store_id)
 
         return ClearPendingRootsResponse(success=root is not None, root=root)
 
@@ -587,9 +587,9 @@ async def get_proof(self, request: GetProofRequest) -> GetProofResponse:
         all_proofs: List[HashOnlyProof] = []
 
         for key in request.keys:
-            node = await self.service.data_store.get_node_by_key(tree_id=request.store_id, key=key)
+            node = await self.service.data_store.get_node_by_key(store_id=request.store_id, key=key)
             pi = await self.service.data_store.get_proof_of_inclusion_by_hash(
-                tree_id=request.store_id, node_hash=node.hash, use_optimized=True
+                store_id=request.store_id, node_hash=node.hash, use_optimized=True
             )
 
             proof = HashOnlyProof.from_key_value(