diff --git a/.gitignore b/.gitignore index 3963059..09b5e97 100644 --- a/.gitignore +++ b/.gitignore @@ -1,12 +1,14 @@ rid-lib __pycache__ -*.json *.pem *.yaml +*.ndjson* venv .env prototypes .vscode dist/ docs/ -*.ndjson \ No newline at end of file +tests/ +.rid_cache/ +*.ndjson diff --git a/examples/coordinator.py b/examples/coordinator.py index 5abef99..040491f 100644 --- a/examples/coordinator.py +++ b/examples/coordinator.py @@ -1,65 +1,53 @@ -import logging -from rich.logging import RichHandler -from pydantic import Field from rid_lib.types import KoiNetNode, KoiNetEdge -from koi_net.config import NodeConfig, KoiNetConfig -from koi_net.protocol.node import NodeProfile, NodeProvides, NodeType -from koi_net import NodeInterface -from koi_net.context import HandlerContext -from koi_net.processor.handler import HandlerType +import structlog +from koi_net.config.full_node import ( + FullNodeConfig, + ServerConfig, + KoiNetConfig, + NodeProfile, + NodeProvides +) +from koi_net.core import FullNode +from koi_net.processor.context import HandlerContext +from koi_net.processor.handler import HandlerType, KnowledgeHandler from koi_net.processor.knowledge_object import KnowledgeObject from koi_net.protocol.event import Event, EventType from koi_net.protocol.edge import EdgeType, generate_edge_bundle -logging.basicConfig( - level=logging.INFO, - format="%(asctime)s - %(name)s - %(levelname)s - %(message)s", - datefmt="%Y-%m-%d %H:%M:%S", - handlers=[RichHandler()] -) +log = structlog.stdlib.get_logger() -logging.getLogger("koi_net").setLevel(logging.DEBUG) -logger = logging.getLogger(__name__) -class CoordinatorConfig(NodeConfig): - koi_net: KoiNetConfig = Field(default_factory = lambda: - KoiNetConfig( - node_name="coordinator", - node_profile=NodeProfile( - node_type=NodeType.FULL, - provides=NodeProvides( - event=[KoiNetNode, KoiNetEdge], - state=[KoiNetNode, KoiNetEdge] - ) - ), - cache_directory_path=".coordinator_rid_cache", - 
event_queues_path="coordinator_event_queues.json", - private_key_pem_path="coordinator_priv_key.pem" - ) +class CoordinatorConfig(FullNodeConfig): + server: ServerConfig = ServerConfig(port=8080) + koi_net: KoiNetConfig = KoiNetConfig( + node_name="coordinator", + node_profile=NodeProfile( + provides=NodeProvides( + event=[KoiNetNode, KoiNetEdge], + state=[KoiNetNode, KoiNetEdge] + ) + ), + rid_types_of_interest=[KoiNetNode, KoiNetEdge] ) - -node = NodeInterface( - config=CoordinatorConfig.load_from_yaml("coordinator_config.yaml"), - use_kobj_processor_thread=True -) -@node.processor.pipeline.register_handler(HandlerType.Network, rid_types=[KoiNetNode]) +@KnowledgeHandler.create( + HandlerType.Network, + rid_types=[KoiNetNode]) def handshake_handler(ctx: HandlerContext, kobj: KnowledgeObject): - logger.info("Handling node handshake") + log.info("Handling node handshake") # only respond if node declares itself as NEW if kobj.event_type != EventType.NEW: return - logger.info("Sharing this node's bundle with peer") - identity_bundle = ctx.effector.deref(ctx.identity.rid) - ctx.event_queue.push_event_to( + log.info("Sharing this node's bundle with peer") + identity_bundle = ctx.cache.read(ctx.identity.rid) + ctx.event_queue.push( event=Event.from_bundle(EventType.NEW, identity_bundle), - node=kobj.rid, - flush=True + target=kobj.rid ) - logger.info("Proposing new edge") + log.info("Proposing new edge") # defer handling of proposed edge edge_bundle = generate_edge_bundle( @@ -69,8 +57,12 @@ def handshake_handler(ctx: HandlerContext, kobj: KnowledgeObject): rid_types=[KoiNetNode, KoiNetEdge] ) - ctx.handle(rid=edge_bundle.rid, event_type=EventType.FORGET) - ctx.handle(bundle=edge_bundle) - + ctx.kobj_queue.push(rid=edge_bundle.rid, event_type=EventType.FORGET) + ctx.kobj_queue.push(bundle=edge_bundle) + +class CoordinatorNode(FullNode): + config_schema = CoordinatorConfig + knowledge_handlers = FullNode.knowledge_handlers + [handshake_handler] + if __name__ == 
"__main__": - node.server.run() \ No newline at end of file + CoordinatorNode().run() \ No newline at end of file diff --git a/examples/partial.py b/examples/partial.py index 20ff481..a52e28b 100644 --- a/examples/partial.py +++ b/examples/partial.py @@ -1,37 +1,15 @@ -import logging -from pydantic import Field -from rich.logging import RichHandler -from koi_net import NodeInterface -from koi_net.protocol.node import NodeProfile, NodeType -from koi_net.config import NodeConfig, KoiNetConfig +from koi_net.config.partial_node import PartialNodeConfig, KoiNetConfig, NodeProfile +from koi_net.core import PartialNode -logging.basicConfig( - level=logging.INFO, - format="%(asctime)s - %(name)s - %(levelname)s - %(message)s", - datefmt="%Y-%m-%d %H:%M:%S", - handlers=[RichHandler()] -) -logging.getLogger("koi_net").setLevel(logging.DEBUG) -logger = logging.getLogger(__name__) - - -class PartialNodeConfig(NodeConfig): - koi_net: KoiNetConfig = Field(default_factory = lambda: - KoiNetConfig( - node_name="partial", - node_profile=NodeProfile( - node_type=NodeType.PARTIAL - ), - cache_directory_path=".partial_rid_cache", - event_queues_path="partial_event_queues.json", - private_key_pem_path="partial_priv_key.pem" - ) +class MyPartialNodeConfig(PartialNodeConfig): + koi_net: KoiNetConfig = KoiNetConfig( + node_name="partial", + node_profile=NodeProfile() ) -node = NodeInterface( - config=PartialNodeConfig.load_from_yaml("partial_config.yaml") -) +class MyPartialNode(PartialNode): + config_schema = MyPartialNodeConfig if __name__ == "__main__": - node.poller.run() \ No newline at end of file + MyPartialNode().run() \ No newline at end of file diff --git a/koi-net.config.json b/koi-net.config.json new file mode 100644 index 0000000..4a26945 --- /dev/null +++ b/koi-net.config.json @@ -0,0 +1,79 @@ +{ + "$schema": "https://lnav.org/schemas/format-v1.schema.json", + "koi_net_json_log": { + "title": "KOI-net node logs", + "description": "Detailed logs of node and network behavior", 
+ "file-type": "json", + "body-field": "event", + "level-field": "", + "timestamp-field": "timestamp", + "hide-extra": true, + "line-format": [ + { + "field": "timestamp", + "auto-width": true, + "timestamp-format": "%Y-%m-%d %H:%M:%S" + }, + " ", + { + "field": "level", + "auto-width": true, + "prefix": "[", + "suffix": "]" + }, + " ", + { + "field": "module", + "min-width": 15, + "max-width": 15, + "overflow": "dot-dot", + "align": "right" + }, + " - ", + { + "field": "event" + } + ], + "value": { + "level": { + "kind": "string" + }, + "event": { + "kind": "string" + }, + "module": { + "kind": "string" + } + }, + "highlights": { + "debug": { + "pattern": "\\[(debug) *\\]", + "color": "Blue" + }, + "info": { + "pattern": "\\[(info) *\\]", + "color": "Green" + }, + "warning": { + "pattern": "\\[(warning) *\\]", + "color": "Yellow" + }, + "error": { + "pattern": "\\[(error|critical) *\\]", + "color": "Red" + }, + "timestamp": { + "pattern": "(\\d{4}-\\d{2}-\\d{2} \\d{2}:\\d{2}:\\d{2})", + "color": "Grey" + }, + "obj_name": { + "pattern": "<([a-zA-Z.]+)", + "color": "Purple" + }, + "quotations": { + "pattern": "(['\"][^']*['\"])", + "color": "Green" + } + } + } +} \ No newline at end of file diff --git a/pyproject.toml b/pyproject.toml index 94ca887..ad577c8 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -4,7 +4,7 @@ build-backend = "hatchling.build" [project] name = "koi-net" -version = "1.1.0" +version = "1.2.0" description = "Implementation of KOI-net protocol in Python" authors = [ {name = "Luke Miller", email = "luke@block.science"} @@ -23,6 +23,7 @@ dependencies = [ "fastapi>=0.115.12", "uvicorn>=0.34.2", "rich>=14.1.0", + "structlog>=25.4.0", ] [project.optional-dependencies] diff --git a/requirements.txt b/requirements.txt deleted file mode 100644 index 2c79f86..0000000 --- a/requirements.txt +++ /dev/null @@ -1,9 +0,0 @@ -networkx>=3.4.2 -rid-lib>=3.2.1 -httpx>=0.28.1 -pydantic>=2.10.6 - -# requirements for examples/ -rich -fastapi -uvicorn \ No newline 
at end of file diff --git a/schemas/bundle.schema.json b/schemas/bundle.schema.json new file mode 100644 index 0000000..48ccfed --- /dev/null +++ b/schemas/bundle.schema.json @@ -0,0 +1,16 @@ +{ + "title": "Bundle", + "type": "object", + "properties": { + "manifest": { + "$ref": "./manifest.schema.json" + }, + "contents": { + "type": "object" + } + }, + "required": [ + "manifest", + "contents" + ] +} \ No newline at end of file diff --git a/schemas/bundles_payload.schema.json b/schemas/bundles_payload.schema.json new file mode 100644 index 0000000..69577a6 --- /dev/null +++ b/schemas/bundles_payload.schema.json @@ -0,0 +1,32 @@ +{ + "title": "Bundles Payload", + "type": "object", + "properties": { + "type": { + "type": "string", + "const": "bundles_payload" + }, + "bundles": { + "type": "array", + "items": { + "$ref": "./bundle.schema.json" + } + }, + "not_found": { + "type": "array", + "items": { + "type": "string" + } + }, + "deferred": { + "type": "array", + "items": { + "type": "string" + } + } + }, + "required": [ + "type", + "bundles" + ] +} \ No newline at end of file diff --git a/schemas/edge_profile.schema.json b/schemas/edge_profile.schema.json new file mode 100644 index 0000000..9f0d078 --- /dev/null +++ b/schemas/edge_profile.schema.json @@ -0,0 +1,49 @@ +{ + "$defs": { + "EdgeStatus": { + "title": "Edge Status", + "type": "string", + "enum": [ + "PROPOSED", + "APPROVED" + ] + }, + "EdgeType": { + "title": "Edge Type", + "type": "string", + "enum": [ + "WEBHOOK", + "POLL" + ] + } + }, + "title": "Edge Profile", + "type": "object", + "properties": { + "source": { + "type": "string" + }, + "target": { + "type": "string" + }, + "edge_type": { + "$ref": "#/$defs/EdgeType" + }, + "status": { + "$ref": "#/$defs/EdgeStatus" + }, + "rid_types": { + "type": "array", + "items": { + "type": "string" + } + } + }, + "required": [ + "source", + "target", + "edge_type", + "status", + "rid_types" + ] +} \ No newline at end of file diff --git 
a/schemas/error_response.schema.json b/schemas/error_response.schema.json new file mode 100644 index 0000000..467d394 --- /dev/null +++ b/schemas/error_response.schema.json @@ -0,0 +1,23 @@ +{ + "title": "Error Response", + "type": "object", + "properties": { + "type": { + "type": "string", + "const": "error_response" + }, + "error": { + "type": "string", + "enum": [ + "unknown_node", + "invalid_key", + "invalid_signature", + "invalid_target" + ] + } + }, + "required": [ + "type", + "error" + ] +} \ No newline at end of file diff --git a/schemas/event.schema.json b/schemas/event.schema.json new file mode 100644 index 0000000..b378bd2 --- /dev/null +++ b/schemas/event.schema.json @@ -0,0 +1,33 @@ +{ + "$defs": { + "EventType": { + "title": "EventType", + "type": "string", + "enum": [ + "NEW", + "UPDATE", + "FORGET" + ] + } + }, + "title": "Event", + "type": "object", + "properties": { + "rid": { + "type": "string" + }, + "event_type": { + "$ref": "#/$defs/EventType" + }, + "manifest": { + "$ref": "./manifest.schema.json" + }, + "contents": { + "type": "object" + } + }, + "required": [ + "rid", + "event_type" + ] +} \ No newline at end of file diff --git a/schemas/events_payload.schema.json b/schemas/events_payload.schema.json new file mode 100644 index 0000000..20d395a --- /dev/null +++ b/schemas/events_payload.schema.json @@ -0,0 +1,20 @@ +{ + "title": "Events Payload", + "type": "object", + "properties": { + "type": { + "type": "string", + "const": "events_payload" + }, + "events": { + "type": "array", + "items": { + "$ref": "./event.schema.json" + } + } + }, + "required": [ + "type", + "events" + ] +} \ No newline at end of file diff --git a/schemas/fetch_bundles.schema.json b/schemas/fetch_bundles.schema.json new file mode 100644 index 0000000..17c92ca --- /dev/null +++ b/schemas/fetch_bundles.schema.json @@ -0,0 +1,20 @@ +{ + "title": "Fetch Bundles", + "type": "object", + "properties": { + "type": { + "type": "string", + "const": "fetch_bundles" + }, + "rids": 
{ + "type": "array", + "items": { + "type": "string" + } + } + }, + "required": [ + "type", + "rids" + ] +} \ No newline at end of file diff --git a/schemas/fetch_manifests.schema.json b/schemas/fetch_manifests.schema.json new file mode 100644 index 0000000..9f4c468 --- /dev/null +++ b/schemas/fetch_manifests.schema.json @@ -0,0 +1,25 @@ +{ + "title": "Fetch Manifests", + "type": "object", + "properties": { + "type": { + "type": "string", + "const": "fetch_manifests" + }, + "rid_types": { + "type": "array", + "items": { + "type": "string" + } + }, + "rids": { + "type": "array", + "items": { + "type": "string" + } + } + }, + "required": [ + "type" + ] +} \ No newline at end of file diff --git a/schemas/fetch_rids.schema.json b/schemas/fetch_rids.schema.json new file mode 100644 index 0000000..cf846c8 --- /dev/null +++ b/schemas/fetch_rids.schema.json @@ -0,0 +1,19 @@ +{ + "title": "Fetch RIDs", + "type": "object", + "properties": { + "type": { + "type": "string", + "const": "fetch_rids" + }, + "rid_types": { + "type": "array", + "items": { + "type": "string" + } + } + }, + "required": [ + "type" + ] +} \ No newline at end of file diff --git a/schemas/manifest.schema.json b/schemas/manifest.schema.json new file mode 100644 index 0000000..639cf65 --- /dev/null +++ b/schemas/manifest.schema.json @@ -0,0 +1,21 @@ +{ + "title": "Manifest", + "type": "object", + "properties": { + "rid": { + "type": "string" + }, + "timestamp": { + "type": "string", + "format": "date-time" + }, + "sha256_hash": { + "type": "string" + } + }, + "required": [ + "rid", + "timestamp", + "sha256_hash" + ] +} \ No newline at end of file diff --git a/schemas/manifests_payload.schema.json b/schemas/manifests_payload.schema.json new file mode 100644 index 0000000..4dad3e8 --- /dev/null +++ b/schemas/manifests_payload.schema.json @@ -0,0 +1,26 @@ +{ + "title": "Manifests Payload", + "type": "object", + "properties": { + "type": { + "type": "string", + "const": "manifests_payload" + }, + "manifests": 
{ + "type": "array", + "items": { + "$ref": "./manifest.schema.json" + } + }, + "not_found": { + "type": "array", + "items": { + "type": "string" + } + } + }, + "required": [ + "type", + "manifests" + ] +} \ No newline at end of file diff --git a/schemas/node_profile.schema.json b/schemas/node_profile.schema.json new file mode 100644 index 0000000..832fc20 --- /dev/null +++ b/schemas/node_profile.schema.json @@ -0,0 +1,51 @@ +{ + "$defs": { + "NodeType": { + "title": "Node Type", + "type": "string", + "enum": [ + "FULL", + "PARTIAL" + ] + } + }, + "title": "Node Profile", + "type": "object", + "properties": { + "node_type": { + "$ref": "#/$defs/NodeType" + }, + "base_url": { + "type": "string" + }, + "provides": { + "type": "object", + "properties": { + "event": { + "type": "array", + "items": { + "type": "string" + } + }, + "state": { + "type": "array", + "items": { + "type": "string" + } + } + }, + "required": [ + "event", + "state" + ] + }, + "public_key": { + "type": "string" + } + }, + "required": [ + "node_type", + "provides", + "public_key" + ] +} \ No newline at end of file diff --git a/schemas/poll_events.schema.json b/schemas/poll_events.schema.json new file mode 100644 index 0000000..6e5c458 --- /dev/null +++ b/schemas/poll_events.schema.json @@ -0,0 +1,16 @@ +{ + "title": "Poll Events", + "type": "object", + "properties": { + "type": { + "type": "string", + "const": "poll_events" + }, + "limit": { + "type": "integer" + } + }, + "required": [ + "type" + ] +} \ No newline at end of file diff --git a/schemas/rids_payload.schema.json b/schemas/rids_payload.schema.json new file mode 100644 index 0000000..296a46c --- /dev/null +++ b/schemas/rids_payload.schema.json @@ -0,0 +1,20 @@ +{ + "title": "RIDs Payload", + "type": "object", + "properties": { + "type": { + "type": "string", + "const": "rids_payload" + }, + "rids": { + "type": "array", + "items": { + "type": "string" + } + } + }, + "required": [ + "type", + "rids" + ] +} \ No newline at end of file diff 
--git a/schemas/signed_envelope.schema.json b/schemas/signed_envelope.schema.json new file mode 100644 index 0000000..3d48ce6 --- /dev/null +++ b/schemas/signed_envelope.schema.json @@ -0,0 +1,33 @@ +{ + "title": "Signed Envelope", + "type": "object", + "properties": { + "payload": { + "anyOf": [ + {"$ref": "./poll_events.schema.json"}, + {"$ref": "./fetch_rids.schema.json"}, + {"$ref": "./fetch_manifests.schema.json"}, + {"$ref": "./fetch_bundles.schema.json"}, + {"$ref": "./rids_payload.schema.json"}, + {"$ref": "./manifests_payload.schema.json"}, + {"$ref": "./bundles_payload.schema.json"}, + {"$ref": "./events_payload.schema.json"} + ] + }, + "source_node": { + "type": "string" + }, + "target_node": { + "type": "string" + }, + "signature": { + "type": "string" + } + }, + "required": [ + "payload", + "source_node", + "target_node", + "signature" + ] +} \ No newline at end of file diff --git a/schemas/unsigned_envelope.json b/schemas/unsigned_envelope.json new file mode 100644 index 0000000..e9b1301 --- /dev/null +++ b/schemas/unsigned_envelope.json @@ -0,0 +1,29 @@ +{ + "title": "Unsigned Envelope", + "type": "object", + "properties": { + "payload": { + "anyOf": [ + {"$ref": "./poll_events.schema.json"}, + {"$ref": "./fetch_rids.schema.json"}, + {"$ref": "./fetch_manifests.schema.json"}, + {"$ref": "./fetch_bundles.schema.json"}, + {"$ref": "./rids_payload.schema.json"}, + {"$ref": "./manifests_payload.schema.json"}, + {"$ref": "./bundles_payload.schema.json"}, + {"$ref": "./events_payload.schema.json"} + ] + }, + "source_node": { + "type": "string" + }, + "target_node": { + "type": "string" + } + }, + "required": [ + "payload", + "source_node", + "target_node" + ] +} \ No newline at end of file diff --git a/src/koi_net/__init__.py b/src/koi_net/__init__.py index 61c995d..e69de29 100644 --- a/src/koi_net/__init__.py +++ b/src/koi_net/__init__.py @@ -1 +0,0 @@ -from .core import NodeInterface \ No newline at end of file diff --git a/src/koi_net/actor.py 
b/src/koi_net/actor.py deleted file mode 100644 index 5a0f5ef..0000000 --- a/src/koi_net/actor.py +++ /dev/null @@ -1,75 +0,0 @@ -from logging import getLogger -from rid_lib.types import KoiNetNode -from rid_lib import RIDType -from koi_net.context import HandlerContext -from koi_net.protocol.api_models import ErrorResponse -from .protocol.event import Event, EventType - - -logger = getLogger(__name__) - - -class Actor: - """Basic node actions. - - Functions defined here used by multiple subsystems. - """ - - ctx: HandlerContext - - def set_ctx(self, ctx: HandlerContext): - self.ctx = ctx - - def handshake_with(self, target: KoiNetNode): - """Initiates a handshake with target node. - - Pushes successive `FORGET` and `NEW` events to target node to - reset the target's cache in case it already knew this node. - """ - - logger.debug(f"Initiating handshake with {target}") - self.ctx.event_queue.push_event_to( - Event.from_rid( - event_type=EventType.FORGET, - rid=self.ctx.identity.rid), - node=target - ) - - self.ctx.event_queue.push_event_to( - event=Event.from_bundle( - event_type=EventType.NEW, - bundle=self.ctx.effector.deref(self.ctx.identity.rid)), - node=target - ) - - self.ctx.event_queue.flush_webhook_queue(target) - - def identify_coordinators(self) -> list[KoiNetNode]: - """Returns node's providing state for `orn:koi-net.node`.""" - return self.ctx.resolver.get_state_providers(KoiNetNode) - - def catch_up_with(self, target: KoiNetNode, rid_types: list[RIDType] = []): - """Fetches and processes knowledge objects from target node. 
- - Args: - target: Node to catch up with - rid_types: RID types to fetch from target (all types if list is empty) - """ - logger.debug(f"catching up with {target} on {rid_types or 'all types'}") - - payload = self.ctx.request_handler.fetch_manifests( - node=target, - rid_types=rid_types - ) - if type(payload) == ErrorResponse: - logger.debug("failed to reach node") - return - - for manifest in payload.manifests: - if manifest.rid == self.ctx.identity.rid: - continue - - self.ctx.handle( - manifest=manifest, - source=target - ) \ No newline at end of file diff --git a/src/koi_net/behaviors/handshaker.py b/src/koi_net/behaviors/handshaker.py new file mode 100644 index 0000000..c546af7 --- /dev/null +++ b/src/koi_net/behaviors/handshaker.py @@ -0,0 +1,68 @@ +import structlog +from rid_lib.ext import Cache +from rid_lib.types import KoiNetNode + +from ..network.graph import NetworkGraph +from ..config.core import NodeConfig +from ..identity import NodeIdentity +from ..network.event_queue import EventQueue +from ..protocol.event import Event, EventType + +log = structlog.stdlib.get_logger() + + +class Handshaker: + """Handles handshaking with other nodes.""" + def __init__( + self, + cache: Cache, + identity: NodeIdentity, + event_queue: EventQueue, + config: NodeConfig, + graph: NetworkGraph + ): + self.config = config + self.cache = cache + self.identity = identity + self.event_queue = event_queue + self.graph = graph + + def start(self): + """Attempts handshake with first contact on startup. + + Handshake occurs if first contact is set in the config, the first + contact is not already known to this node, and this node does not + already have incoming edges with node providers. 
+ """ + if not self.config.koi_net.first_contact.rid: + return + + if self.cache.read(self.config.koi_net.first_contact.rid): + return + + if self.graph.get_neighbors( + direction="in", allowed_type=KoiNetNode): + return + + self.handshake_with(self.config.koi_net.first_contact.rid) + + def handshake_with(self, target: KoiNetNode): + """Initiates a handshake with target node. + + Pushes successive `FORGET` and `NEW` events to target node to + reset the target's cache in case it already knew this node. + """ + + log.debug(f"Initiating handshake with {target}") + self.event_queue.push( + Event.from_rid( + event_type=EventType.FORGET, + rid=self.identity.rid), + target=target + ) + self.event_queue.push( + event=Event.from_bundle( + event_type=EventType.NEW, + bundle=self.cache.read(self.identity.rid)), + target=target + ) \ No newline at end of file diff --git a/src/koi_net/behaviors/profile_monitor.py b/src/koi_net/behaviors/profile_monitor.py new file mode 100644 index 0000000..cd84b1d --- /dev/null +++ b/src/koi_net/behaviors/profile_monitor.py @@ -0,0 +1,23 @@ +from rid_lib.ext import Bundle +from ..identity import NodeIdentity +from ..processor.kobj_queue import KobjQueue + + +class ProfileMonitor: + """Processes changes to node profile in the config.""" + def __init__( + self, + kobj_queue: KobjQueue, + identity: NodeIdentity + ): + self.kobj_queue = kobj_queue + self.identity = identity + + def start(self): + """Processes identity bundle generated from config.""" + self_bundle = Bundle.generate( + rid=self.identity.rid, + contents=self.identity.profile.model_dump() + ) + + self.kobj_queue.push(bundle=self_bundle) \ No newline at end of file diff --git a/src/koi_net/behaviors/sync_manager.py b/src/koi_net/behaviors/sync_manager.py new file mode 100644 index 0000000..0ef37ef --- /dev/null +++ b/src/koi_net/behaviors/sync_manager.py @@ -0,0 +1,68 @@ +import structlog +from rid_lib.ext import Cache +from rid_lib.types import KoiNetNode + +from ..network.graph 
import NetworkGraph +from ..network.request_handler import RequestHandler +from ..processor.kobj_queue import KobjQueue +from ..protocol.api_models import ErrorResponse +from ..protocol.node import NodeProfile, NodeType + +log = structlog.stdlib.get_logger() + + +class SyncManager: + """Handles state synchronization actions with other nodes.""" + graph: NetworkGraph + cache: Cache + request_handler: RequestHandler + kobj_queue: KobjQueue + + def __init__( + self, + graph: NetworkGraph, + cache: Cache, + request_handler: RequestHandler, + kobj_queue: KobjQueue + ): + self.graph = graph + self.cache = cache + self.request_handler = request_handler + self.kobj_queue = kobj_queue + + def start(self): + """Catches up with node providers on startup.""" + + node_providers = self.graph.get_neighbors( + direction="in", + allowed_type=KoiNetNode + ) + + if not node_providers: + return + + log.debug(f"Catching up with `orn:koi-net.node` providers: {node_providers}") + self.catch_up_with(node_providers, [KoiNetNode]) + + def catch_up_with(self, nodes, rid_types): + """Catches up with the state of RID types within other nodes.""" + + for node in nodes: + node_bundle = self.cache.read(node) + node_profile = node_bundle.validate_contents(NodeProfile) + + # can't catch up with partial nodes + if node_profile.node_type != NodeType.FULL: + continue + + payload = self.request_handler.fetch_manifests( + node, rid_types=rid_types) + + if type(payload) is ErrorResponse: + continue + + for manifest in payload.manifests: + self.kobj_queue.push( + manifest=manifest, + source=node + ) \ No newline at end of file diff --git a/src/koi_net/build/artifact.py b/src/koi_net/build/artifact.py new file mode 100644 index 0000000..a18830e --- /dev/null +++ b/src/koi_net/build/artifact.py @@ -0,0 +1,205 @@ +import inspect +from collections import deque +from typing import TYPE_CHECKING, Any + +from .consts import ( + COMP_ORDER_OVERRIDE, + COMP_TYPE_OVERRIDE, + START_FUNC_NAME, + START_ORDER_OVERRIDE, 
+ STOP_FUNC_NAME, + STOP_ORDER_OVERRIDE, + CompOrder, + CompType +) + +if TYPE_CHECKING: + from .assembler import NodeAssembler + + +class BuildArtifact: + assembler: "NodeAssembler" + comp_dict: dict[str, Any] + dep_graph: dict[str, list[str]] + comp_types: dict[str, CompType] + init_order: list[str] + start_order: list[str] + stop_order: list[str] + graphviz: str + + def __init__(self, assembler: "NodeAssembler"): + self.assembler = assembler + + def collect_comps(self): + """Collects components from class definition.""" + + self.comp_dict = {} + # adds components from class and all base classes. skips `type`, and runs in reverse so that sub classes override super class values + for base in reversed(inspect.getmro(self.assembler)[:-1]): + for k, v in vars(base).items(): + # excludes built in, private, and `None` attributes + if k.startswith("_") or v is None: + continue + + self.comp_dict[k] = v + + def build_dependencies(self): + """Builds dependency graph and component type map. + + Graph representation is an adjacency list: the key is a component + name, and the value is a tuple containing names of the depedencies. 
+ """ + + self.comp_types = {} + self.dep_graph = {} + for comp_name, comp in self.comp_dict.items(): + + dep_names = [] + + explicit_type = getattr(comp, COMP_TYPE_OVERRIDE, None) + if explicit_type: + self.comp_types[comp_name] = explicit_type + + # non callable components are objects treated "as is" + elif not callable(comp): + self.comp_types[comp_name] = CompType.OBJECT + + # callable components default to singletons + else: + sig = inspect.signature(comp) + self.comp_types[comp_name] = CompType.SINGLETON + dep_names = list(sig.parameters) + + invalid_deps = set(dep_names) - set(self.comp_dict) + if invalid_deps: + raise Exception(f"Dependencies {invalid_deps} of component '{comp_name}' are undefined") + + self.dep_graph[comp_name] = dep_names + + [print(f"{i}: {comp_name} -> {deps}") for i, (comp_name, deps) in enumerate(self.dep_graph.items())] + + def build_init_order(self): + """Builds component initialization order using Kahn's algorithm.""" + + # adj list: n -> outgoing neighbors + adj = self.dep_graph + # reverse adj list: n -> incoming neighbors + r_adj: dict[str, list[str]] = {} + + # computes reverse adjacency list + for node in adj: + r_adj.setdefault(node, []) + for n in adj[node]: + r_adj.setdefault(n, []) + r_adj[n].append(node) + + # how many outgoing edges each node has + out_degree = { + n: len(neighbors) + for n, neighbors in adj.items() + } + + # initializing queue: nodes w/o dependencies + queue = deque() + for node in out_degree: + if out_degree[node] == 0: + queue.append(node) + + self.init_order = [] + while queue: + # removes node from graph + n = queue.popleft() + self.init_order.append(n) + + # updates out degree for nodes dependent on this node + for next_n in r_adj[n]: + out_degree[next_n] -= 1 + # adds nodes now without dependencies to queue + if out_degree[next_n] == 0: + queue.append(next_n) + + if len(self.init_order) != len(self.dep_graph): + cycle_nodes = set(self.dep_graph) - set(self.init_order) + raise Exception(f"Found 
cycle in dependency graph, the following nodes could not be ordered: {cycle_nodes}") + + print("\ninit order") + [print(f"{i}: {comp_name}") for i, comp_name in enumerate(self.init_order)] + + def build_start_order(self): + """Builds component start order. + + Checks if components define a start function in init order. Can + be overridden by setting start order override in the `NodeAssembler`. + """ + + self.start_order = getattr(self.assembler, START_ORDER_OVERRIDE, None) + + if self.start_order: + return + + workers = [] + start_order = [] + for comp_name in self.init_order: + comp = self.comp_dict[comp_name] + if getattr(comp, START_FUNC_NAME, None): + if getattr(comp, COMP_ORDER_OVERRIDE, None) == CompOrder.WORKER: + workers.append(comp_name) + else: + start_order.append(comp_name) + + # order workers first + self.start_order = workers + start_order + + print("\nstart order") + [print(f"{i}: {comp_name}") for i, comp_name in enumerate(self.start_order)] + + def build_stop_order(self): + """Builds component stop order. + + Checks if components define a stop function in init order. Can + be overridden by setting stop order override in the `NodeAssembler`. 
+ """ + self.stop_order = getattr(self.assembler, STOP_ORDER_OVERRIDE, None) + + if self.stop_order: + return + + workers = [] + stop_order = [] + for comp_name in self.init_order: + comp = self.comp_dict[comp_name] + if getattr(comp, STOP_FUNC_NAME, None): + if getattr(comp, COMP_ORDER_OVERRIDE, None) == CompOrder.WORKER: + workers.append(comp_name) + else: + stop_order.append(comp_name) + + # order workers first (last) + self.stop_order = workers + stop_order + # reverse order from start order + self.stop_order.reverse() + + print("\nstop order") + [print(f"{i}: {comp_name}") for i, comp_name in enumerate(self.stop_order)] + + + def visualize(self) -> str: + """Creates representation of dependency graph in Graphviz DOT language.""" + + s = "digraph G {\n" + for node, neighbors in self.dep_graph.items(): + sub_s = node + if neighbors: + sub_s += f"-> {', '.join(neighbors)}" + sub_s = sub_s.replace("graph", "graph_") + ";" + s += " " * 4 + sub_s + "\n" + s += "}" + self.graphviz = s + + def build(self): + self.collect_comps() + self.build_dependencies() + self.build_init_order() + self.build_start_order() + self.build_stop_order() + self.visualize() diff --git a/src/koi_net/build/assembler.py b/src/koi_net/build/assembler.py new file mode 100644 index 0000000..9c60927 --- /dev/null +++ b/src/koi_net/build/assembler.py @@ -0,0 +1,57 @@ + +from typing import Any, Self + +import structlog + +from .artifact import BuildArtifact, CompType +from .container import NodeContainer + +log = structlog.stdlib.get_logger() + + +class NodeAssembler: + _artifact: BuildArtifact = None + + # optional order overrides: + _start_order: list[str] + _stop_order: list[str] + + # annotation hack to show the components and container methods + def __new__(cls) -> Self | NodeContainer: + """Returns assembled node container.""" + + # builds assembly artifact if it doesn't exist + if not cls._artifact: + cls._artifact = BuildArtifact(cls) + cls._artifact.build() + + components = 
cls._build_components(cls._artifact) + + return NodeContainer(cls._artifact, **components) + + @staticmethod + def _build_components(artifact: BuildArtifact): + """Returns assembled components as a dict.""" + + print("\nbuilding components") + components: dict[str, Any] = {} + for comp_name in artifact.init_order: + # for comp_name, (comp_type, dep_names) in dep_graph.items(): + comp = artifact.comp_dict[comp_name] + comp_type = artifact.comp_types[comp_name] + + print(comp, comp_type) + + if comp_type == CompType.OBJECT: + components[comp_name] = comp + + elif comp_type == CompType.SINGLETON: + # builds depedency dict for current component + dependencies = {} + for dep in artifact.dep_graph[comp_name]: + if dep not in components: + raise Exception(f"Couldn't find required component '{dep}'") + dependencies[dep] = components[dep] + components[comp_name] = comp(**dependencies) + + return components diff --git a/src/koi_net/build/comp_order.py b/src/koi_net/build/comp_order.py new file mode 100644 index 0000000..e042f6b --- /dev/null +++ b/src/koi_net/build/comp_order.py @@ -0,0 +1,6 @@ +from koi_net.build.consts import COMP_ORDER_OVERRIDE, CompOrder + + +def worker(cls): + setattr(cls, COMP_ORDER_OVERRIDE, CompOrder.WORKER) + return cls \ No newline at end of file diff --git a/src/koi_net/build/comp_type.py b/src/koi_net/build/comp_type.py new file mode 100644 index 0000000..e34fb77 --- /dev/null +++ b/src/koi_net/build/comp_type.py @@ -0,0 +1,7 @@ +from .consts import COMP_TYPE_OVERRIDE, CompType + + +def object(cls): + """Sets a component's type to `CompType.OBJECT`.""" + setattr(cls, COMP_TYPE_OVERRIDE, CompType.OBJECT) + return cls \ No newline at end of file diff --git a/src/koi_net/build/consts.py b/src/koi_net/build/consts.py new file mode 100644 index 0000000..9101c92 --- /dev/null +++ b/src/koi_net/build/consts.py @@ -0,0 +1,18 @@ +from enum import StrEnum + + +START_FUNC_NAME = "start" +STOP_FUNC_NAME = "stop" + +START_ORDER_OVERRIDE = "_start_order" 
+STOP_ORDER_OVERRIDE = "_stop_order" + +COMP_TYPE_OVERRIDE = "_comp_type" +COMP_ORDER_OVERRIDE = "_comp_order" + +class CompType(StrEnum): + SINGLETON = "SINGLETON" + OBJECT = "OBJECT" + +class CompOrder(StrEnum): + WORKER = "WORKER" \ No newline at end of file diff --git a/src/koi_net/build/container.py b/src/koi_net/build/container.py new file mode 100644 index 0000000..56ace2f --- /dev/null +++ b/src/koi_net/build/container.py @@ -0,0 +1,44 @@ +import structlog + +from ..entrypoints.base import EntryPoint +from .artifact import BuildArtifact +from .consts import START_FUNC_NAME, STOP_FUNC_NAME + +log = structlog.stdlib.get_logger() + + +class NodeContainer: + """Dummy 'shape' for node containers built by assembler.""" + _artifact: BuildArtifact + + entrypoint: EntryPoint + + def __init__(self, artifact, **kwargs): + self._artifact = artifact + + # adds all components as attributes of this instance + for name, comp in kwargs.items(): + setattr(self, name, comp) + + def run(self): + try: + self.start() + self.entrypoint.run() + except KeyboardInterrupt: + log.info("Keyboard interrupt!") + finally: + self.stop() + + def start(self): + log.info("Starting node...") + for comp_name in self._artifact.start_order: + comp = getattr(self, comp_name) + start_func = getattr(comp, START_FUNC_NAME) + start_func() + + def stop(self): + log.info("Stopping node...") + for comp_name in self._artifact.stop_order: + comp = getattr(self, comp_name) + stop_func = getattr(comp, STOP_FUNC_NAME) + stop_func() \ No newline at end of file diff --git a/src/koi_net/cache.py b/src/koi_net/cache.py new file mode 100644 index 0000000..dacb3c7 --- /dev/null +++ b/src/koi_net/cache.py @@ -0,0 +1,81 @@ +import os +import shutil +from rid_lib.core import RID, RIDType +from rid_lib.ext import Bundle +from rid_lib.ext.utils import b64_encode, b64_decode + +from .config.core import NodeConfig + + +class Cache: + def __init__(self, config: NodeConfig): + self.config = config + + @property + def 
directory_path(self): + return self.config.koi_net.cache_directory_path + + def file_path_to(self, rid: RID) -> str: + encoded_rid_str = b64_encode(str(rid)) + return f"{self.directory_path}/{encoded_rid_str}.json" + + def write(self, bundle: Bundle) -> Bundle: + """Writes bundle to cache, returns a Bundle.""" + if not os.path.exists(self.directory_path): + os.makedirs(self.directory_path) + + with open( + file=self.file_path_to(bundle.manifest.rid), + mode="w", + encoding="utf-8" + ) as f: + f.write(bundle.model_dump_json(indent=2)) + + return bundle + + def exists(self, rid: RID) -> bool: + return os.path.exists( + self.file_path_to(rid) + ) + + def read(self, rid: RID) -> Bundle | None: + """Reads and returns CacheEntry from RID cache.""" + try: + with open( + file=self.file_path_to(rid), + mode="r", + encoding="utf-8" + ) as f: + return Bundle.model_validate_json(f.read()) + except FileNotFoundError: + return None + + def list_rids(self, rid_types: list[RIDType] | None = None) -> list[RID]: + if not os.path.exists(self.directory_path): + return [] + + rids = [] + for filename in os.listdir(self.directory_path): + encoded_rid_str = filename.split(".")[0] + rid_str = b64_decode(encoded_rid_str) + rid = RID.from_string(rid_str) + + if not rid_types or type(rid) in rid_types: + rids.append(rid) + + return rids + + def delete(self, rid: RID) -> None: + """Deletes cache bundle.""" + try: + os.remove(self.file_path_to(rid)) + except FileNotFoundError: + return + + def drop(self) -> None: + """Deletes all cache bundles.""" + try: + shutil.rmtree(self.directory_path) + except FileNotFoundError: + return + diff --git a/src/koi_net/config.py b/src/koi_net/config.py deleted file mode 100644 index 8281e21..0000000 --- a/src/koi_net/config.py +++ /dev/null @@ -1,157 +0,0 @@ -import os -from ruamel.yaml import YAML -from pydantic import BaseModel, Field, PrivateAttr -from dotenv import load_dotenv -from rid_lib.ext.utils import sha256_hash -from rid_lib.types import 
KoiNetNode -from .protocol.secure import PrivateKey -from .protocol.node import NodeProfile, NodeType - - -class ServerConfig(BaseModel): - """Config for the node server (full node only).""" - - host: str = "127.0.0.1" - port: int = 8000 - path: str | None = "/koi-net" - - @property - def url(self) -> str: - return f"http://{self.host}:{self.port}{self.path or ''}" - -class NodeContact(BaseModel): - rid: KoiNetNode | None = None - url: str | None = None - -class KoiNetConfig(BaseModel): - """Config for KOI-net.""" - - node_name: str - node_rid: KoiNetNode | None = None - node_profile: NodeProfile - - cache_directory_path: str = ".rid_cache" - event_queues_path: str = "event_queues.json" - private_key_pem_path: str = "priv_key.pem" - polling_interval: int = 5 - - first_contact: NodeContact = Field(default_factory=NodeContact) - - _priv_key: PrivateKey | None = PrivateAttr(default=None) - -class EnvConfig(BaseModel): - """Config for environment variables. - - Values set in the config are the variables names, and are loaded - from the environment at runtime. For example, if the config YAML - sets `priv_key_password: PRIV_KEY_PASSWORD` accessing - `priv_key_password` would retrieve the value of `PRIV_KEY_PASSWORD` - from the environment. - """ - - priv_key_password: str | None = "PRIV_KEY_PASSWORD" - - def __init__(self, **kwargs): - super().__init__(**kwargs) - load_dotenv() - - def __getattribute__(self, name): - value = super().__getattribute__(name) - if name in type(self).model_fields: - env_val = os.getenv(value) - if env_val is None: - raise ValueError(f"Required environment variable {value} not set") - return env_val - return value - -class NodeConfig(BaseModel): - """Base configuration class for all nodes. - - Designed to be extensible for custom node implementations. Classes - inheriting from `NodeConfig` may add additional config groups. 
- """ - - server: ServerConfig = Field(default_factory=ServerConfig) - koi_net: KoiNetConfig - env: EnvConfig = Field(default_factory=EnvConfig) - - _file_path: str = PrivateAttr(default="config.yaml") - _file_content: str | None = PrivateAttr(default=None) - - @classmethod - def load_from_yaml( - cls, - file_path: str = "config.yaml", - generate_missing: bool = True - ): - """Loads config state from YAML file. - - Defaults to `config.yaml`. If `generate_missing` is set to - `True`, a private key and RID will be generated if not already - present in the config. - """ - yaml = YAML() - - try: - with open(file_path, "r") as f: - file_content = f.read() - config_data = yaml.load(file_content) - config = cls.model_validate(config_data) - config._file_content = file_content - - except FileNotFoundError: - # empty_fields = {} - # for name, field in cls.model_fields.items(): - - # if field.default is None or field.default_factory is None: - # print(empty_fields) - config = cls() - - - config._file_path = file_path - - if generate_missing: - if not config.koi_net.node_rid: - priv_key = PrivateKey.generate() - pub_key = priv_key.public_key() - - config.koi_net.node_rid = KoiNetNode( - config.koi_net.node_name, - sha256_hash(pub_key.to_der()) - ) - - with open(config.koi_net.private_key_pem_path, "w") as f: - f.write( - priv_key.to_pem(config.env.priv_key_password) - ) - - config.koi_net.node_profile.public_key = pub_key.to_der() - - if config.koi_net.node_profile.node_type == NodeType.FULL: - config.koi_net.node_profile.base_url = ( - config.koi_net.node_profile.base_url or config.server.url - ) - - config.save_to_yaml() - - return config - - def save_to_yaml(self): - """Saves config state to YAML file. - - File path is set by `load_from_yaml` class method. 
- """ - - yaml = YAML() - - with open(self._file_path, "w") as f: - try: - config_data = self.model_dump(mode="json") - yaml.dump(config_data, f) - except Exception as e: - if self._file_content: - f.seek(0) - f.truncate() - f.write(self._file_content) - raise e - diff --git a/src/koi_net/config/core.py b/src/koi_net/config/core.py new file mode 100644 index 0000000..b64d763 --- /dev/null +++ b/src/koi_net/config/core.py @@ -0,0 +1,113 @@ +import os +from pydantic import BaseModel, model_validator +from dotenv import load_dotenv +from rid_lib import RIDType +from rid_lib.types import KoiNetNode +import structlog + +from ..build import comp_type +from ..protocol.secure import PrivateKey +from ..protocol.node import NodeProfile + +log = structlog.stdlib.get_logger() + + +class EventWorkerConfig(BaseModel): + queue_timeout: float = 0.1 + max_buf_len: int = 5 + max_wait_time: float = 1.0 + +class KobjWorkerConfig(BaseModel): + queue_timeout: float = 0.1 + +class NodeContact(BaseModel): + rid: KoiNetNode | None = None + url: str | None = None + +class KoiNetConfig(BaseModel): + """Config for KOI-net parameters.""" + + node_name: str + node_rid: KoiNetNode | None = None + node_profile: NodeProfile + + rid_types_of_interest: list[RIDType] = [KoiNetNode] + + cache_directory_path: str = ".rid_cache" + private_key_pem_path: str = "priv_key.pem" + + event_worker: EventWorkerConfig = EventWorkerConfig() + kobj_worker: KobjWorkerConfig = KobjWorkerConfig() + + first_contact: NodeContact = NodeContact() + +class EnvConfig(BaseModel): + """Config for environment variables. + + Values set in the config are the variables names, and are loaded + from the environment at runtime. For example, if the config YAML + sets `priv_key_password: "PRIV_KEY_PASSWORD"` accessing + `priv_key_password` would retrieve the value of `PRIV_KEY_PASSWORD` + from the environment variables. 
+ """ + + priv_key_password: str = "PRIV_KEY_PASSWORD" + + def __init__(self, **kwargs): + super().__init__(**kwargs) + load_dotenv() + + def __getattribute__(self, name): + value = super().__getattribute__(name) + if name in type(self).model_fields: + env_val = os.getenv(value) + if env_val is None: + raise ValueError(f"Required environment variable {value} not set") + return env_val + return value + +# marking this component as static, classes are implicitly treated as +# factories, but this needs to be passed as is +@comp_type.object +class NodeConfig(BaseModel): + """Base node config class, intended to be extended.""" + + koi_net: KoiNetConfig + env: EnvConfig = EnvConfig() + + @model_validator(mode="after") + def generate_rid_cascade(self): + """Generates node RID if missing.""" + if self.koi_net.node_rid and self.koi_net.node_profile.public_key: + return self + + log.debug("Node RID or public key not found in config, attempting to generate") + + try: + # attempts to read existing private key PEM file + with open(self.koi_net.private_key_pem_path, "r") as f: + priv_key_pem = f.read() + priv_key = PrivateKey.from_pem( + priv_key_pem, + password=self.env.priv_key_password) + log.debug("Used existing private key from PEM file") + + except FileNotFoundError: + # generates new private key if PEM not found + priv_key = PrivateKey.generate() + + with open(self.koi_net.private_key_pem_path, "w") as f: + f.write(priv_key.to_pem(self.env.priv_key_password)) + log.debug("Generated new private key, no PEM file found") + + pub_key = priv_key.public_key() + self.koi_net.node_rid = pub_key.to_node_rid(self.koi_net.node_name) + log.debug(f"Node RID set to {self.koi_net.node_rid}") + + if self.koi_net.node_profile.public_key != pub_key.to_der(): + if self.koi_net.node_profile.public_key: + log.warning("New private key overwriting old public key!") + + self.koi_net.node_profile.public_key = pub_key.to_der() + + return self \ No newline at end of file diff --git 
a/src/koi_net/config/full_node.py b/src/koi_net/config/full_node.py new file mode 100644 index 0000000..7555473 --- /dev/null +++ b/src/koi_net/config/full_node.py @@ -0,0 +1,45 @@ +from pydantic import BaseModel, model_validator +from .core import NodeConfig, KoiNetConfig as BaseKoiNetConfig +from ..protocol.node import ( + NodeProfile as BaseNodeProfile, + NodeType, + NodeProvides +) + + +class NodeProfile(BaseNodeProfile): + """Node profile config class for full nodes.""" + node_type: NodeType = NodeType.FULL + +class KoiNetConfig(BaseKoiNetConfig): + """KOI-net config class for full nodes.""" + node_profile: NodeProfile + +class ServerConfig(BaseModel): + """Server config for full nodes. + + The parameters in this class represent how a server should be hosted, + not accessed. For example, a node may host a server at + `http://127.0.0.1:8000/koi-net`, but serve through nginx at + `https://example.com/koi-net`. + """ + + host: str = "127.0.0.1" + port: int = 8000 + path: str | None = "/koi-net" + + @property + def url(self) -> str: + return f"http://{self.host}:{self.port}{self.path or ''}" + +class FullNodeConfig(NodeConfig): + """Node config class for full nodes.""" + koi_net: KoiNetConfig + server: ServerConfig = ServerConfig() + + @model_validator(mode="after") + def check_url(self): + """Generates base URL if missing from node profile.""" + if not self.koi_net.node_profile.base_url: + self.koi_net.node_profile.base_url = self.server.url + return self diff --git a/src/koi_net/config/loader.py b/src/koi_net/config/loader.py new file mode 100644 index 0000000..3801808 --- /dev/null +++ b/src/koi_net/config/loader.py @@ -0,0 +1,60 @@ +from ruamel.yaml import YAML + +from koi_net.config.proxy import ConfigProxy +from .core import NodeConfig + + +class ConfigLoader: + """Loads node config from a YAML file, and proxies access to it.""" + + file_path: str = "config.yaml" + file_content: str + + config_schema: type[NodeConfig] + proxy: ConfigProxy + + def __init__( + 
self, + config_schema: type[NodeConfig], + config: ConfigProxy + ): + self.config_schema = config_schema + self.proxy = config + + # this is a special case to allow config state dependent components + # to initialize without a "lazy initialization" approach, in general + # components SHOULD NOT execute code in their init phase + self.load_from_yaml() + + def start(self): + self.save_to_yaml() + + def load_from_yaml(self): + """Loads config from YAML file, or generates it if missing.""" + yaml = YAML() + + try: + with open(self.file_path, "r") as f: + self.file_content = f.read() + config_data = yaml.load(self.file_content) + self.proxy._config = self.config_schema.model_validate(config_data) + + except FileNotFoundError: + self.proxy._config = self.config_schema() + + def save_to_yaml(self): + """Saves config to YAML file.""" + yaml = YAML() + + with open(self.file_path, "w") as f: + try: + config_data = self.proxy._config.model_dump(mode="json") + yaml.dump(config_data, f) + + except Exception as e: + # rewrites original content if YAML dump fails + if self.file_content: + f.seek(0) + f.truncate() + f.write(self.file_content) + raise e \ No newline at end of file diff --git a/src/koi_net/config/partial_node.py b/src/koi_net/config/partial_node.py new file mode 100644 index 0000000..a9aa55b --- /dev/null +++ b/src/koi_net/config/partial_node.py @@ -0,0 +1,26 @@ +from pydantic import BaseModel +from .core import NodeConfig, KoiNetConfig as BaseKoiNetConfig +from ..protocol.node import ( + NodeProfile as BaseNodeProfile, + NodeType, + NodeProvides +) + + +class NodeProfile(BaseNodeProfile): + """Node profile config class for partial nodes.""" + base_url: str | None = None + node_type: NodeType = NodeType.PARTIAL + +class KoiNetConfig(BaseKoiNetConfig): + """KOI-net config class for partial nodes.""" + node_profile: NodeProfile + +class PollerConfig(BaseModel): + """Poller config for partial nodes.""" + polling_interval: int = 5 + +class PartialNodeConfig(NodeConfig): 
+ """Node config class for partial nodes.""" + koi_net: KoiNetConfig + poller: PollerConfig = PollerConfig() \ No newline at end of file diff --git a/src/koi_net/config/proxy.py b/src/koi_net/config/proxy.py new file mode 100644 index 0000000..ad0d94b --- /dev/null +++ b/src/koi_net/config/proxy.py @@ -0,0 +1,20 @@ +from koi_net.config.core import NodeConfig + + +class ConfigProxy: + """Proxy for config access. + + Allows initialization of this component, and updating state without + destroying the original reference. Handled as if it were a config + model by other classes, loaded and saved by the `ConfigLoader`. + """ + _config: NodeConfig + + def __init__(self): + self._config = None + + def __getattr__(self, name): + if not self._config: + raise Exception("Proxy called before config loaded") + + return getattr(self._config, name) \ No newline at end of file diff --git a/src/koi_net/context.py b/src/koi_net/context.py deleted file mode 100644 index ee46b21..0000000 --- a/src/koi_net/context.py +++ /dev/null @@ -1,67 +0,0 @@ -from rid_lib.ext import Cache - -from koi_net.network.resolver import NetworkResolver -from .config import NodeConfig -from .effector import Effector -from .network.graph import NetworkGraph -from .network.event_queue import NetworkEventQueue -from .network.request_handler import RequestHandler -from .identity import NodeIdentity -from .processor.interface import ProcessorInterface - - -class ActionContext: - """Provides action handlers access to other subsystems.""" - - identity: NodeIdentity - effector: Effector - - def __init__( - self, - identity: NodeIdentity, - effector: Effector - ): - self.identity = identity - self.effector = effector - - -class HandlerContext: - """Provides knowledge handlers access to other subsystems.""" - - identity: NodeIdentity - config: NodeConfig - cache: Cache - event_queue: NetworkEventQueue - graph: NetworkGraph - request_handler: RequestHandler - resolver: NetworkResolver - effector: Effector - 
_processor: ProcessorInterface | None - - def __init__( - self, - identity: NodeIdentity, - config: NodeConfig, - cache: Cache, - event_queue: NetworkEventQueue, - graph: NetworkGraph, - request_handler: RequestHandler, - resolver: NetworkResolver, - effector: Effector - ): - self.identity = identity - self.config = config - self.cache = cache - self.event_queue = event_queue - self.graph = graph - self.request_handler = request_handler - self.resolver = resolver - self.effector = effector - self._processor = None - - def set_processor(self, processor: ProcessorInterface): - self._processor = processor - - @property - def handle(self): - return self._processor.handle \ No newline at end of file diff --git a/src/koi_net/core.py b/src/koi_net/core.py index cdb7338..fbf4596 100644 --- a/src/koi_net/core.py +++ b/src/koi_net/core.py @@ -1,215 +1,80 @@ -import logging -from typing import Generic, TypeVar -from rid_lib.ext import Cache -from .network.resolver import NetworkResolver -from .network.event_queue import NetworkEventQueue +from .cache import Cache +from .log_system import LogSystem +from .build.assembler import NodeAssembler +from .config.core import NodeConfig +from .config.proxy import ConfigProxy +from .config.loader import ConfigLoader +from .config.full_node import FullNodeConfig +from .config.partial_node import PartialNodeConfig +from .processor.context import HandlerContext +from .effector import DerefHandler, Effector +from .behaviors.handshaker import Handshaker +from .behaviors.sync_manager import SyncManager +from .identity import NodeIdentity +from .workers import KnowledgeProcessingWorker, EventProcessingWorker +from .network.error_handler import ErrorHandler +from .network.event_queue import EventQueue from .network.graph import NetworkGraph from .network.request_handler import RequestHandler +from .network.resolver import NetworkResolver from .network.response_handler import ResponseHandler -from .network.error_handler import ErrorHandler -from 
.actor import Actor -from .processor.interface import ProcessorInterface -from .processor import default_handlers +from .network.event_buffer import EventBuffer +from .processor.pipeline import KnowledgePipeline +from .processor.kobj_queue import KobjQueue from .processor.handler import KnowledgeHandler -from .processor.knowledge_pipeline import KnowledgePipeline -from .identity import NodeIdentity -from .secure import Secure -from .config import NodeConfig -from .context import HandlerContext, ActionContext -from .effector import Effector -from .server import NodeServer -from .lifecycle import NodeLifecycle -from .poller import NodePoller -from . import default_actions - -logger = logging.getLogger(__name__) - - -T = TypeVar("T", bound=NodeConfig) - -class NodeInterface(Generic[T]): - """Interface for a node's subsystems. - - This class embodies a node, and wires up all of its subsystems to - work together. Currently, node implementations create an instance of - this class and override behavior where needed. Most commonly this - will be creating a new `Config` class, and adding additional knowledge - handlers to the `pipeline`, but all subsystems may be overriden by - passing new class implementations into `__init__`. 
- """ - - config: T - cache: Cache - identity: NodeIdentity - effector: Effector - graph: NetworkGraph - secure: Secure - request_handler: RequestHandler - response_handler: ResponseHandler - resolver: NetworkResolver - event_queue: NetworkEventQueue - actor: Actor - action_context: ActionContext - handler_context: HandlerContext - pipeline: KnowledgePipeline - processor: ProcessorInterface - error_handler: ErrorHandler - lifecycle: NodeLifecycle - server: NodeServer - poller: NodePoller - - use_kobj_processor_thread: bool - - def __init__( - self, - config: T, - use_kobj_processor_thread: bool = False, - handlers: list[KnowledgeHandler] | None = None, - - # optional overrides - CacheOverride: type[Cache] | None = None, - NodeIdentityOverride: type[NodeIdentity] | None = None, - EffectorOverride: type[Effector] | None = None, - NetworkGraphOverride: type[NetworkGraph] | None = None, - SecureOverride: type[Secure] | None = None, - RequestHandlerOverride: type[RequestHandler] | None = None, - ResponseHandlerOverride: type[ResponseHandler] | None = None, - NetworkResolverOverride: type[NetworkResolver] | None = None, - NetworkEventQueueOverride: type[NetworkEventQueue] | None = None, - ActorOverride: type[Actor] | None = None, - ActionContextOverride: type[ActionContext] | None = None, - HandlerContextOverride: type[HandlerContext] | None = None, - KnowledgePipelineOverride: type[KnowledgePipeline] | None = None, - ProcessorInterfaceOverride: type[ProcessorInterface] | None = None, - ErrorHandlerOverride: type[ErrorHandler] | None = None, - NodeLifecycleOverride: type[NodeLifecycle] | None = None, - NodeServerOverride: type[NodeServer] | None = None, - NodePollerOverride: type[NodePoller] | None = None, - ): - self.use_kobj_processor_thread = use_kobj_processor_thread - - self.config = config - self.cache = (CacheOverride or Cache)( - directory_path=self.config.koi_net.cache_directory_path - ) - - self.identity = (NodeIdentityOverride or 
NodeIdentity)(config=self.config) - self.effector = (EffectorOverride or Effector)(cache=self.cache) - - self.graph = (NetworkGraphOverride or NetworkGraph)( - cache=self.cache, - identity=self.identity - ) - - self.secure = (SecureOverride or Secure)( - identity=self.identity, - effector=self.effector, - config=self.config - ) - - self.request_handler = (RequestHandlerOverride or RequestHandler)( - effector=self.effector, - identity=self.identity, - secure=self.secure - ) +from .secure_manager import SecureManager +from .behaviors.profile_monitor import ProfileMonitor +from .entrypoints import NodeServer, NodePoller +from .processor.knowledge_handlers import ( + basic_manifest_handler, + basic_network_output_filter, + basic_rid_handler, + node_contact_handler, + edge_negotiation_handler, + forget_edge_on_node_deletion, + secure_profile_handler +) - self.response_handler = (ResponseHandlerOverride or ResponseHandler)(self.cache, self.effector) +class BaseNode(NodeAssembler): + log_system: LogSystem = LogSystem + kobj_queue: KobjQueue = KobjQueue + event_queue: EventQueue = EventQueue + poll_event_buf: EventBuffer = EventBuffer + broadcast_event_buf: EventBuffer = EventBuffer + config_schema = NodeConfig + config: NodeConfig = ConfigProxy + config_loader: ConfigLoader = ConfigLoader + knowledge_handlers: list[KnowledgeHandler] = [ + basic_rid_handler, + basic_manifest_handler, + secure_profile_handler, + edge_negotiation_handler, + node_contact_handler, + basic_network_output_filter, + forget_edge_on_node_deletion + ] + deref_handlers: list[DerefHandler] = [] + cache: Cache = Cache + identity: NodeIdentity = NodeIdentity + graph: NetworkGraph = NetworkGraph + secure_manager: SecureManager = SecureManager + handshaker: Handshaker = Handshaker + error_handler: ErrorHandler = ErrorHandler + request_handler: RequestHandler = RequestHandler + sync_manager: SyncManager = SyncManager + response_handler: ResponseHandler = ResponseHandler + resolver: NetworkResolver = 
NetworkResolver + handler_context: HandlerContext = HandlerContext + effector: Effector = Effector + pipeline: KnowledgePipeline = KnowledgePipeline + kobj_worker: KnowledgeProcessingWorker = KnowledgeProcessingWorker + event_worker: EventProcessingWorker = EventProcessingWorker + profile_monitor: ProfileMonitor = ProfileMonitor - self.resolver = (NetworkResolverOverride or NetworkResolver)( - config=self.config, - cache=self.cache, - identity=self.identity, - graph=self.graph, - request_handler=self.request_handler, - effector=self.effector - ) +class FullNode(BaseNode): + entrypoint: NodeServer = NodeServer + config: FullNodeConfig - self.event_queue = (NetworkEventQueueOverride or NetworkEventQueue)( - config=self.config, - cache=self.cache, - identity=self.identity, - graph=self.graph, - request_handler=self.request_handler, - effector=self.effector - ) - - self.actor = (ActorOverride or Actor)() - - # pull all handlers defined in default_handlers module - if handlers is None: - handlers = [ - obj for obj in vars(default_handlers).values() - if isinstance(obj, KnowledgeHandler) - ] - - self.action_context = (ActionContextOverride or ActionContext)( - identity=self.identity, - effector=self.effector - ) - - self.handler_context = (HandlerContextOverride or HandlerContext)( - identity=self.identity, - config=self.config, - cache=self.cache, - event_queue=self.event_queue, - graph=self.graph, - request_handler=self.request_handler, - resolver=self.resolver, - effector=self.effector - ) - - self.pipeline = (KnowledgePipelineOverride or KnowledgePipeline)( - handler_context=self.handler_context, - cache=self.cache, - request_handler=self.request_handler, - event_queue=self.event_queue, - graph=self.graph, - default_handlers=handlers - ) - - self.processor = (ProcessorInterfaceOverride or ProcessorInterface)( - pipeline=self.pipeline, - use_kobj_processor_thread=self.use_kobj_processor_thread - ) - - self.error_handler = (ErrorHandlerOverride or ErrorHandler)( - 
processor=self.processor, - actor=self.actor - ) - - self.request_handler.set_error_handler(self.error_handler) - - self.handler_context.set_processor(self.processor) - - self.effector.set_processor(self.processor) - self.effector.set_resolver(self.resolver) - self.effector.set_action_context(self.action_context) - - self.actor.set_ctx(self.handler_context) - - self.lifecycle = (NodeLifecycleOverride or NodeLifecycle)( - config=self.config, - identity=self.identity, - graph=self.graph, - processor=self.processor, - effector=self.effector, - actor=self.actor, - use_kobj_processor_thread=use_kobj_processor_thread - ) - - # if self.config.koi_net.node_profile.node_type == NodeType.FULL: - self.server = (NodeServerOverride or NodeServer)( - config=self.config, - lifecycle=self.lifecycle, - secure=self.secure, - processor=self.processor, - event_queue=self.event_queue, - response_handler=self.response_handler - ) - - self.poller = (NodePollerOverride or NodePoller)( - processor=self.processor, - lifecycle=self.lifecycle, - resolver=self.resolver, - config=self.config - ) +class PartialNode(BaseNode): + entrypoint: NodePoller = NodePoller + config: PartialNodeConfig \ No newline at end of file diff --git a/src/koi_net/default_actions.py b/src/koi_net/default_actions.py deleted file mode 100644 index cd0550c..0000000 --- a/src/koi_net/default_actions.py +++ /dev/null @@ -1,24 +0,0 @@ -"""Implementations of default dereference actions.""" - -from .context import ActionContext -from rid_lib.types import KoiNetNode -from rid_lib.ext import Bundle -from .effector import Effector - - -@Effector.register_default_action(KoiNetNode) -def dereference_koi_node( - ctx: ActionContext, rid: KoiNetNode -) -> Bundle | None: - """Dereference function for this KOI node. - - Generates a bundle from this node's profile data in the config. 
- """ - - if rid != ctx.identity.rid: - return - - return Bundle.generate( - rid=ctx.identity.rid, - contents=ctx.identity.profile.model_dump() - ) \ No newline at end of file diff --git a/src/koi_net/effector.py b/src/koi_net/effector.py index da51e56..e589cd4 100644 --- a/src/koi_net/effector.py +++ b/src/koi_net/effector.py @@ -1,18 +1,32 @@ -import logging +from dataclasses import dataclass from typing import Callable from enum import StrEnum + +import structlog from rid_lib.ext import Cache, Bundle from rid_lib.core import RID, RIDType from rid_lib.types import KoiNetNode -from typing import TYPE_CHECKING +from .processor.context import HandlerContext +from .network.resolver import NetworkResolver +from .processor.kobj_queue import KobjQueue -if TYPE_CHECKING: - from .network.resolver import NetworkResolver - from .processor.interface import ProcessorInterface - from .context import ActionContext +log = structlog.stdlib.get_logger() -logger = logging.getLogger(__name__) +@dataclass +class DerefHandler: + func: Callable[[HandlerContext, RID], Bundle | None] + rid_types: tuple[RIDType] + + def __call__(self, ctx: HandlerContext, rid: RID) -> Bundle | None: + return self.func(ctx, rid) + + @classmethod + def create(cls, rid_types: tuple[RIDType]): + def decorator(func: Callable) -> DerefHandler: + handler = cls(func, rid_types) + return handler + return decorator class BundleSource(StrEnum): @@ -23,106 +37,74 @@ class Effector: """Subsystem for dereferencing RIDs.""" cache: Cache - resolver: "NetworkResolver | None" - processor: "ProcessorInterface | None" - action_context: "ActionContext | None" - _action_table: dict[ - type[RID], - Callable[ - ["ActionContext", RID], - Bundle | None - ] - ] = dict() + resolver: NetworkResolver + kobj_queue: KobjQueue + handler_context: HandlerContext def __init__( self, cache: Cache, + resolver: NetworkResolver, + kobj_queue: KobjQueue, + handler_context: HandlerContext, + deref_handlers: list[DerefHandler] ): self.cache = 
cache - self.resolver = None - self.processor = None - self.action_context = None - self._action_table = self.__class__._action_table.copy() - - def set_processor(self, processor: "ProcessorInterface"): - self.processor = processor - - def set_resolver(self, resolver: "NetworkResolver"): self.resolver = resolver + self.kobj_queue = kobj_queue + self.handler_context = handler_context + self.deref_handlers = deref_handlers - def set_action_context(self, action_context: "ActionContext"): - self.action_context = action_context - - @classmethod - def register_default_action(cls, rid_type: RIDType): - def decorator(func: Callable) -> Callable: - cls._action_table[rid_type] = func - return func - return decorator - - def register_action(self, rid_type: RIDType): - """Registers a new dereference action for an RID type. - - Example: - This function should be used as a decorator on an action function:: - - @node.register_action(KoiNetNode) - def deref_koi_net_node(ctx: ActionContext, rid: KoiNetNode): - # return a Bundle or None - return - """ - def decorator(func: Callable) -> Callable: - self._action_table[rid_type] = func - return func - return decorator + self.handler_context.set_effector(self) def _try_cache(self, rid: RID) -> tuple[Bundle, BundleSource] | None: bundle = self.cache.read(rid) if bundle: - logger.debug("Cache hit") + log.debug("Cache hit") return bundle, BundleSource.CACHE else: - logger.debug("Cache miss") + log.debug("Cache miss") return None - + def _try_action(self, rid: RID) -> tuple[Bundle, BundleSource] | None: - if type(rid) not in self._action_table: - logger.debug("No action available") + action = None + for handler in self.deref_handlers: + if type(rid) not in handler.rid_types: + continue + action = handler + break + + if not action: + log.debug("No action found") return None - logger.debug("Action available") - func = self._action_table[type(rid)] - bundle = func( - ctx=self.action_context, - rid=rid - ) + bundle = 
action(ctx=self.handler_context, rid=rid) if bundle: - logger.debug("Action hit") + log.debug("Action hit") return bundle, BundleSource.ACTION else: - logger.debug("Action miss") + log.debug("Action miss") return None - def _try_network(self, rid: RID) -> tuple[Bundle, KoiNetNode] | None: bundle, source = self.resolver.fetch_remote_bundle(rid) if bundle: - logger.debug("Network hit") + log.debug("Network hit") return bundle, source else: - logger.debug("Network miss") + log.debug("Network miss") return None - def deref( self, rid: RID, refresh_cache: bool = False, use_network: bool = False, - handle_result: bool = True + handle_result: bool = True, + write_through: bool = False ) -> Bundle | None: """Dereferences an RID. @@ -134,10 +116,11 @@ def deref( rid: RID to dereference refresh_cache: skips cache read when `True` use_network: enables fetching from other nodes when `True` - handle_result: handles resulting bundle with knowledge pipeline when `True` + handle_result: sends resulting bundle to kobj queue when `True` + write_through: waits for kobj queue to empty when `True` """ - logger.debug(f"Dereferencing {rid!r}") + log.debug(f"Dereferencing {rid!r}") bundle, source = ( # if `refresh_cache`, skip try cache @@ -153,12 +136,12 @@ def deref( and bundle is not None and source != BundleSource.CACHE ): - self.processor.handle( + self.kobj_queue.push( bundle=bundle, source=source if type(source) is KoiNetNode else None ) - - # TODO: refactor for general solution, param to write through to cache before continuing - # like `self.processor.kobj_queue.join()`` - + + if write_through: + self.kobj_queue.q.join() + return bundle \ No newline at end of file diff --git a/src/koi_net/entrypoints/__init__.py b/src/koi_net/entrypoints/__init__.py new file mode 100644 index 0000000..e3d40bf --- /dev/null +++ b/src/koi_net/entrypoints/__init__.py @@ -0,0 +1,2 @@ +from .poller import NodePoller +from .server import NodeServer \ No newline at end of file diff --git 
a/src/koi_net/entrypoints/base.py b/src/koi_net/entrypoints/base.py new file mode 100644 index 0000000..ed6c504 --- /dev/null +++ b/src/koi_net/entrypoints/base.py @@ -0,0 +1,8 @@ +from abc import ABC, abstractmethod + + +class EntryPoint(ABC): + """Abstract class for entry point components.""" + @abstractmethod + def run(self): + ... \ No newline at end of file diff --git a/src/koi_net/entrypoints/poller.py b/src/koi_net/entrypoints/poller.py new file mode 100644 index 0000000..52de323 --- /dev/null +++ b/src/koi_net/entrypoints/poller.py @@ -0,0 +1,43 @@ + +import time +import structlog + +from .base import EntryPoint +from ..processor.kobj_queue import KobjQueue +from ..network.resolver import NetworkResolver +from ..config.partial_node import PartialNodeConfig + +log = structlog.stdlib.get_logger() + + +class NodePoller(EntryPoint): + """Entry point for partial nodes, manages polling event loop.""" + kobj_queue: KobjQueue + resolver: NetworkResolver + config: PartialNodeConfig + + def __init__( + self, + config: PartialNodeConfig, + kobj_queue: KobjQueue, + resolver: NetworkResolver + ): + self.kobj_queue = kobj_queue + self.resolver = resolver + self.config = config + + def poll(self): + """Polls neighbor nodes and processes returned events.""" + for node_rid, events in self.resolver.poll_neighbors().items(): + for event in events: + self.kobj_queue.push(event=event, source=node_rid) + + def run(self): + """Runs polling event loop.""" + while True: + start_time = time.time() + self.poll() + elapsed = time.time() - start_time + sleep_time = self.config.poller.polling_interval - elapsed + if sleep_time > 0: + time.sleep(sleep_time) \ No newline at end of file diff --git a/src/koi_net/entrypoints/server.py b/src/koi_net/entrypoints/server.py new file mode 100644 index 0000000..10920dc --- /dev/null +++ b/src/koi_net/entrypoints/server.py @@ -0,0 +1,85 @@ +import structlog +import uvicorn +from fastapi import FastAPI, APIRouter +from fastapi.responses import 
JSONResponse + +from .base import EntryPoint +from ..network.response_handler import ResponseHandler +from ..protocol.model_map import API_MODEL_MAP +from ..protocol.api_models import ErrorResponse +from ..protocol.errors import ProtocolError +from ..config.full_node import FullNodeConfig + +log = structlog.stdlib.get_logger() + + +class NodeServer(EntryPoint): + """Entry point for full nodes, manages FastAPI server.""" + config: FullNodeConfig + response_handler: ResponseHandler + app: FastAPI + router: APIRouter + + def __init__( + self, + config: FullNodeConfig, + response_handler: ResponseHandler, + ): + self.config = config + self.response_handler = response_handler + + self.build_app() + + def build_endpoints(self, router: APIRouter): + """Builds endpoints for API router.""" + for path, models in API_MODEL_MAP.items(): + def create_endpoint(path: str): + async def endpoint(req): + return self.response_handler.handle_response(path, req) + + # programmatically setting type hint annotations for FastAPI's model validation + endpoint.__annotations__ = { + "req": models.request_envelope, + "return": models.response_envelope + } + + return endpoint + + router.add_api_route( + path=path, + endpoint=create_endpoint(path), + methods=["POST"], + response_model_exclude_none=True + ) + + def build_app(self): + """Builds FastAPI app.""" + self.app = FastAPI( + title="KOI-net Protocol API", + version="1.1.0" + ) + + self.app.add_exception_handler(ProtocolError, self.protocol_error_handler) + self.router = APIRouter(prefix="/koi-net") + self.build_endpoints(self.router) + self.app.include_router(self.router) + + def protocol_error_handler(self, request, exc: ProtocolError): + """Catches `ProtocolError` and returns an `ErrorResponse` payload.""" + log.error(exc) + resp = ErrorResponse(error=exc.error_type) + log.info(f"Returning error response: {resp}") + return JSONResponse( + status_code=400, + content=resp.model_dump(mode="json") + ) + + def run(self): + """Starts FastAPI 
server and event handler.""" + + uvicorn.run( + app=self.app, + host=self.config.server.host, + port=self.config.server.port, + log_config=None + ) \ No newline at end of file diff --git a/src/koi_net/identity.py b/src/koi_net/identity.py index 449c52b..4723d40 100644 --- a/src/koi_net/identity.py +++ b/src/koi_net/identity.py @@ -1,16 +1,12 @@ -import logging -from rid_lib.types.koi_net_node import KoiNetNode -from .config import NodeConfig +from rid_lib.types import KoiNetNode +from .config.core import NodeConfig from .protocol.node import NodeProfile -logger = logging.getLogger(__name__) - - class NodeIdentity: """Represents a node's identity (RID, profile).""" - config: NodeConfig + config: NodeConfig def __init__(self, config: NodeConfig): self.config = config @@ -21,4 +17,4 @@ def rid(self) -> KoiNetNode: @property def profile(self) -> NodeProfile: - return self.config.koi_net.node_profile \ No newline at end of file + return self.config.koi_net.node_profile diff --git a/src/koi_net/lifecycle.py b/src/koi_net/lifecycle.py deleted file mode 100644 index fac444e..0000000 --- a/src/koi_net/lifecycle.py +++ /dev/null @@ -1,113 +0,0 @@ -import logging -from contextlib import contextmanager, asynccontextmanager - -from rid_lib.types import KoiNetNode - -from .actor import Actor -from .effector import Effector -from .config import NodeConfig -from .processor.interface import ProcessorInterface -from .network.graph import NetworkGraph -from .identity import NodeIdentity - -logger = logging.getLogger(__name__) - - -class NodeLifecycle: - """Manages node startup and shutdown processes.""" - - config: NodeConfig - identity: NodeIdentity - graph: NetworkGraph - processor: ProcessorInterface - effector: Effector - actor: Actor - use_kobj_processor_thread: bool - - def __init__( - self, - config: NodeConfig, - identity: NodeIdentity, - graph: NetworkGraph, - processor: ProcessorInterface, - effector: Effector, - actor: Actor, - use_kobj_processor_thread: bool - ): - 
self.config = config - self.identity = identity - self.graph = graph - self.processor = processor - self.effector = effector - self.actor = actor - self.use_kobj_processor_thread = use_kobj_processor_thread - - @contextmanager - def run(self): - """Synchronous context manager for node startup and shutdown.""" - try: - logger.info("Starting node lifecycle...") - self.start() - yield - except KeyboardInterrupt: - logger.info("Keyboard interrupt!") - finally: - logger.info("Stopping node lifecycle...") - self.stop() - - @asynccontextmanager - async def async_run(self): - """Asynchronous context manager for node startup and shutdown.""" - try: - logger.info("Starting async node lifecycle...") - self.start() - yield - except KeyboardInterrupt: - logger.info("Keyboard interrupt!") - finally: - logger.info("Stopping async node lifecycle...") - self.stop() - - def start(self): - """Starts a node. - - Starts the processor thread (if enabled). Generates network - graph from nodes and edges in cache. Processes any state changes - of node bundle. Initiates handshake with first contact if node - doesn't have any neighbors. Catches up with coordinator state. 
- """ - if self.use_kobj_processor_thread: - logger.info("Starting processor worker thread") - self.processor.worker_thread.start() - - self.graph.generate() - - # refresh to reflect changes (if any) in config.yaml - self.effector.deref(self.identity.rid, refresh_cache=True) - - logger.debug("Waiting for kobj queue to empty") - if self.use_kobj_processor_thread: - self.processor.kobj_queue.join() - else: - self.processor.flush_kobj_queue() - logger.debug("Done") - - if not self.graph.get_neighbors() and self.config.koi_net.first_contact.rid: - logger.debug(f"I don't have any neighbors, reaching out to first contact {self.config.koi_net.first_contact.rid!r}") - - self.actor.handshake_with(self.config.koi_net.first_contact.rid) - - for coordinator in self.actor.identify_coordinators(): - self.actor.catch_up_with(coordinator, rid_types=[KoiNetNode]) - - - def stop(self): - """Stops a node. - - Finishes processing knowledge object queue. - """ - if self.use_kobj_processor_thread: - logger.info(f"Waiting for kobj queue to empty ({self.processor.kobj_queue.unfinished_tasks} tasks remaining)") - self.processor.kobj_queue.join() - else: - self.processor.flush_kobj_queue() \ No newline at end of file diff --git a/src/koi_net/log_system.py b/src/koi_net/log_system.py new file mode 100644 index 0000000..9f6b657 --- /dev/null +++ b/src/koi_net/log_system.py @@ -0,0 +1,153 @@ +import sys +import logging +from logging.handlers import RotatingFileHandler +from datetime import datetime + +import structlog +import colorama + + +class LogSystem: + """Handles initializing the logging system.""" + + COMMON_PROCESSORS = [ + structlog.stdlib.add_logger_name, + structlog.stdlib.add_log_level, + structlog.stdlib.PositionalArgumentsFormatter(), + structlog.processors.TimeStamper(fmt="iso"), + structlog.processors.UnicodeDecoder(), + structlog.processors.CallsiteParameterAdder({ + structlog.processors.CallsiteParameter.MODULE, + structlog.processors.CallsiteParameter.FUNC_NAME + }), + ] + + 
def __init__(self): + self.setup_logging() + + def console_handler(self): + console_handler = logging.StreamHandler(sys.stdout) + console_handler.setFormatter( + structlog.stdlib.ProcessorFormatter( + processor=structlog.dev.ConsoleRenderer( + columns=[ + # Render the timestamp without the key name in yellow. + structlog.dev.Column( + "timestamp", + structlog.dev.KeyValueColumnFormatter( + key_style=None, + value_style=colorama.Style.DIM, + reset_style=colorama.Style.RESET_ALL, + value_repr=lambda t: datetime.fromisoformat(t).strftime("%Y-%m-%d %H:%M:%S"), + ), + ), + structlog.dev.Column( + "level", + structlog.dev.LogLevelColumnFormatter( + level_styles={ + level: colorama.Style.BRIGHT + color + for level, color in { + "critical": colorama.Fore.RED, + "exception": colorama.Fore.RED, + "error": colorama.Fore.RED, + "warn": colorama.Fore.YELLOW, + "warning": colorama.Fore.YELLOW, + "info": colorama.Fore.GREEN, + "debug": colorama.Fore.GREEN, + "notset": colorama.Back.RED, + }.items() + }, + reset_style=colorama.Style.RESET_ALL, + width=9 + ) + ), + # Render the event without the key name in bright magenta. + + # Default formatter for all keys not explicitly mentioned. The key is + # cyan, the value is green. 
+ structlog.dev.Column( + "path", + structlog.dev.KeyValueColumnFormatter( + key_style=None, + value_style=colorama.Fore.MAGENTA, + reset_style=colorama.Style.RESET_ALL, + value_repr=str, + width=30 + ), + ), + # structlog.dev.Column( + # "func_name", + # structlog.dev.KeyValueColumnFormatter( + # key_style=None, + # value_style=colorama.Fore.MAGENTA, + # reset_style=colorama.Style.RESET_ALL, + # value_repr=str, + # prefix="(", + # postfix=")", + # width=15 + # ), + # ), + structlog.dev.Column( + "event", + structlog.dev.KeyValueColumnFormatter( + key_style=None, + value_style=colorama.Fore.WHITE, + reset_style=colorama.Style.RESET_ALL, + value_repr=str, + width=30 + ), + ), + structlog.dev.Column( + "", + structlog.dev.KeyValueColumnFormatter( + key_style=colorama.Fore.BLUE, + value_style=colorama.Fore.GREEN, + reset_style=colorama.Style.RESET_ALL, + value_repr=str, + ), + ) + ] + ), + foreign_pre_chain=self.COMMON_PROCESSORS + ) + ) + + return console_handler + + + + def file_handler(self): + file_handler = RotatingFileHandler( + filename="log.ndjson", + maxBytes=10 * 1024 ** 2, + backupCount=50, + encoding="utf-8" + ) + + file_handler.setFormatter( + structlog.stdlib.ProcessorFormatter( + processor=structlog.processors.JSONRenderer(), + foreign_pre_chain=self.COMMON_PROCESSORS + ) + ) + + return file_handler + + def setup_logging(self): + handlers = [ + self.file_handler(), + self.console_handler() + ] + + logging.basicConfig(level=logging.DEBUG, handlers=handlers) + + structlog.configure( + processors=self.COMMON_PROCESSORS + [ + structlog.stdlib.ProcessorFormatter.wrap_for_formatter], + wrapper_class=structlog.stdlib.BoundLogger, + logger_factory=structlog.stdlib.LoggerFactory(), + cache_logger_on_first_use=True, + ) + + def get_logger(self): + return structlog.stdlib.get_logger() diff --git a/src/koi_net/network/error_handler.py b/src/koi_net/network/error_handler.py index fd7cae5..f6dd775 100644 --- a/src/koi_net/network/error_handler.py +++ 
b/src/koi_net/network/error_handler.py @@ -1,52 +1,62 @@ -from logging import getLogger -from koi_net.protocol.errors import ErrorType -from koi_net.protocol.event import EventType +import structlog from rid_lib.types import KoiNetNode -from ..processor.interface import ProcessorInterface -from ..actor import Actor +from ..behaviors.handshaker import Handshaker +from ..protocol.errors import ErrorType +from ..protocol.event import EventType +from ..processor.kobj_queue import KobjQueue -logger = getLogger(__name__) +log = structlog.stdlib.get_logger() class ErrorHandler: - """Handles network errors that may occur during requests.""" + """Handles network and protocol errors that may occur during requests.""" timeout_counter: dict[KoiNetNode, int] - processor: ProcessorInterface - actor: Actor + kobj_queue: KobjQueue def __init__( self, - processor: ProcessorInterface, - actor: Actor + kobj_queue: KobjQueue, + handshaker: Handshaker ): - self.processor = processor - self.actor = actor + self.kobj_queue = kobj_queue + self.handshaker = handshaker self.timeout_counter = {} - + + def reset_timeout_counter(self, node: KoiNetNode): + """Reset's a node timeout counter to zero.""" + self.timeout_counter[node] = 0 + def handle_connection_error(self, node: KoiNetNode): - """Drops nodes after timing out three times.""" + """Drops nodes after timing out three times. + + TODO: Need a better heuristic for network state. For example, if + a node lost connection to the internet, it would quickly forget + all other nodes. 
+ """ self.timeout_counter.setdefault(node, 0) self.timeout_counter[node] += 1 - logger.debug(f"{node} has timed out {self.timeout_counter[node]} time(s)") + log.debug(f"{node} has timed out {self.timeout_counter[node]} time(s)") if self.timeout_counter[node] > 3: - logger.debug(f"Exceeded time out limit, forgetting node") - self.processor.handle(rid=node, event_type=EventType.FORGET) - # do something - + log.debug(f"Exceeded time out limit, forgetting node") + self.kobj_queue.push(rid=node, event_type=EventType.FORGET) def handle_protocol_error( self, error_type: ErrorType, node: KoiNetNode ): - """Attempts handshake when this node is unknown to target.""" - logger.info(f"Handling protocol error {error_type} for node {node!r}") + """Handles protocol errors that occur during network requests. + + Attempts handshake when this node is unknown to target. + """ + + log.info(f"Handling protocol error {error_type} for node {node!r}") match error_type: case ErrorType.UnknownNode: - logger.info("Peer doesn't know me, attempting handshake...") - self.actor.handshake_with(node) + log.info("Peer doesn't know me, attempting handshake...") + self.handshaker.handshake_with(node) case ErrorType.InvalidKey: ... case ErrorType.InvalidSignature: ... diff --git a/src/koi_net/network/event_buffer.py b/src/koi_net/network/event_buffer.py new file mode 100644 index 0000000..ebd021d --- /dev/null +++ b/src/koi_net/network/event_buffer.py @@ -0,0 +1,53 @@ +import time +from rid_lib.types import KoiNetNode + +from ..protocol.event import Event + + +class EventBuffer: + """Stores outgoing events intended for polling nodes.""" + buffers: dict[KoiNetNode, list[Event]] + start_time: dict[KoiNetNode, float] + + def __init__(self): + self.buffers = {} + self.start_time = {} + + def push(self, node: KoiNetNode, event: Event): + """Pushes event to specified node. + + Sets start time to now if unset. 
+ """ + + if node not in self.buffers: + self.start_time[node] = time.time() + + event_buf = self.buffers.setdefault(node, []) + event_buf.append(event) + + def buf_len(self, node: KoiNetNode): + """Returns the length of a node's event buffer.""" + return len(self.buffers.get(node, [])) + + def flush(self, node: KoiNetNode, limit: int = 0) -> list[Event]: + """Flushes all (or limit) events for a node. + + Resets start time. + """ + + if node in self.start_time: + del self.start_time[node] + + if node not in self.buffers: + return [] + + event_buf = self.buffers[node] + + if limit and len(event_buf) > limit: + flushed_events = event_buf[:limit] + self.buffers[node] = event_buf[limit:] + else: + flushed_events = event_buf.copy() + del self.buffers[node] + + return flushed_events \ No newline at end of file diff --git a/src/koi_net/network/event_queue.py b/src/koi_net/network/event_queue.py index 4f52552..cd12a02 100644 --- a/src/koi_net/network/event_queue.py +++ b/src/koi_net/network/event_queue.py @@ -1,204 +1,31 @@ -import logging +import structlog from queue import Queue -import httpx -from pydantic import BaseModel -from rid_lib import RID -from rid_lib.ext import Cache + from rid_lib.types import KoiNetNode +from pydantic import BaseModel -from .graph import NetworkGraph -from .request_handler import NodeNotFoundError, RequestHandler -from ..protocol.node import NodeProfile, NodeType -from ..protocol.edge import EdgeProfile, EdgeType from ..protocol.event import Event -from ..identity import NodeIdentity -from ..config import NodeConfig -from ..effector import Effector - -logger = logging.getLogger(__name__) +log = structlog.stdlib.get_logger() -class EventQueueModel(BaseModel): - webhook: dict[KoiNetNode, list[Event]] - poll: dict[KoiNetNode, list[Event]] -type EventQueue = dict[RID, Queue[Event]] +class QueuedEvent(BaseModel): + event: Event + target: KoiNetNode -class NetworkEventQueue: - """Handles out going network event queues.""" +class EventQueue: + 
"""Queue for outgoing network events.""" + q: Queue[QueuedEvent] - config: NodeConfig - identity: NodeIdentity - effector: Effector - cache: Cache - graph: NetworkGraph - request_handler: RequestHandler - poll_event_queue: EventQueue - webhook_event_queue: EventQueue + def __init__(self): + self.q = Queue() - def __init__( - self, - config: NodeConfig, - cache: Cache, - identity: NodeIdentity, - effector: Effector, - graph: NetworkGraph, - request_handler: RequestHandler, - ): - self.config = config - self.identity = identity - self.cache = cache - self.graph = graph - self.request_handler = request_handler - self.effector = effector - - self.poll_event_queue = dict() - self.webhook_event_queue = dict() - - def _load_event_queues(self): - """Loads event queues from storage.""" - try: - with open(self.config.koi_net.event_queues_path, "r") as f: - queues = EventQueueModel.model_validate_json(f.read()) - - for node in queues.poll.keys(): - for event in queues.poll[node]: - queue = self.poll_event_queue.setdefault(node, Queue()) - queue.put(event) - - for node in queues.webhook.keys(): - for event in queues.webhook[node]: - queue = self.webhook_event_queue.setdefault(node, Queue()) - queue.put(event) - - except FileNotFoundError: - return - - def _save_event_queues(self): - """Saves event queues to storage.""" - events_model = EventQueueModel( - poll={ - node: list(queue.queue) - for node, queue in self.poll_event_queue.items() - if not queue.empty() - }, - webhook={ - node: list(queue.queue) - for node, queue in self.webhook_event_queue.items() - if not queue.empty() - } - ) - - if len(events_model.poll) == 0 and len(events_model.webhook) == 0: - return - - with open(self.config.koi_net.event_queues_path, "w") as f: - f.write(events_model.model_dump_json(indent=2)) - - def push_event_to(self, event: Event, node: KoiNetNode, flush=False): + def push(self, event: Event, target: KoiNetNode): """Pushes event to queue of specified node. 
- Event will be sent to webhook or poll queue depending on the - node type and edge type of the specified node. If `flush` is set - to `True`, the webhook queued will be flushed after pushing the - event. + Event will be sent to webhook or poll queue by the event worker + depending on the node type and edge type of the specified node. """ - logger.debug(f"Pushing event {event.event_type} {event.rid!r} to {node}") - - node_bundle = self.effector.deref(node) - - # if there's an edge from me to the target node, override broadcast type - edge_rid = self.graph.get_edge( - source=self.identity.rid, - target=node - ) - - edge_bundle = self.effector.deref(edge_rid) if edge_rid else None - - if edge_bundle: - logger.debug(f"Found edge from me to {node!r}") - edge_profile = edge_bundle.validate_contents(EdgeProfile) - if edge_profile.edge_type == EdgeType.WEBHOOK: - event_queue = self.webhook_event_queue - elif edge_profile.edge_type == EdgeType.POLL: - event_queue = self.poll_event_queue - - elif node_bundle: - logger.debug(f"Found bundle for {node!r}") - node_profile = node_bundle.validate_contents(NodeProfile) - if node_profile.node_type == NodeType.FULL: - event_queue = self.webhook_event_queue - elif node_profile.node_type == NodeType.PARTIAL: - event_queue = self.poll_event_queue - - elif node == self.config.koi_net.first_contact.rid: - logger.debug(f"Node {node!r} is my first contact") - # first contact node is always a webhook node - event_queue = self.webhook_event_queue - - else: - logger.warning(f"Node {node!r} unknown to me") - return - queue = event_queue.setdefault(node, Queue()) - queue.put(event) - - if flush and event_queue is self.webhook_event_queue: - self.flush_webhook_queue(node) - - def _flush_queue(self, event_queue: EventQueue, node: KoiNetNode) -> list[Event]: - """Flushes a node's queue, returning list of events.""" - queue = event_queue.get(node) - events = list() - if queue: - while not queue.empty(): - event = queue.get() - 
logger.debug(f"Dequeued {event.event_type} {event.rid!r}") - events.append(event) - - return events - - def flush_poll_queue(self, node: KoiNetNode) -> list[Event]: - """Flushes a node's poll queue, returning list of events.""" - logger.debug(f"Flushing poll queue for {node}") - return self._flush_queue(self.poll_event_queue, node) - - def flush_webhook_queue(self, node: KoiNetNode, requeue_on_fail: bool = True): - """Flushes a node's webhook queue, and broadcasts events. - - If node profile is unknown, or node type is not `FULL`, this - operation will fail silently. If 'requeue_on_fail` is `True` and - the remote node cannot be reached, all events will be requeued. - """ - - logger.debug(f"Flushing webhook queue for {node}") - - # node_bundle = self.effector.deref(node) - - # if not node_bundle: - # logger.warning(f"{node!r} not found") - # return - - # node_profile = node_bundle.validate_contents(NodeProfile) - - # if node_profile.node_type != NodeType.FULL: - # logger.warning(f"{node!r} is a partial node!") - # return - - events = self._flush_queue(self.webhook_event_queue, node) - if not events: return - - logger.debug(f"Broadcasting {len(events)} events") - - try: - self.request_handler.broadcast_events(node, events=events) - - except NodeNotFoundError: - logger.warning("Broadcast failed (node not found)") - - except httpx.ConnectError: - logger.warning("Broadcast failed (couldn't connect)") - - if requeue_on_fail: - for event in events: - self.push_event_to(event, node) \ No newline at end of file + self.q.put(QueuedEvent(target=target, event=event)) + \ No newline at end of file diff --git a/src/koi_net/network/graph.py b/src/koi_net/network/graph.py index 24152ac..3d73531 100644 --- a/src/koi_net/network/graph.py +++ b/src/koi_net/network/graph.py @@ -1,4 +1,4 @@ -import logging +import structlog from typing import Literal import networkx as nx from rid_lib import RIDType @@ -7,7 +7,7 @@ from ..identity import NodeIdentity from ..protocol.edge import 
EdgeProfile, EdgeStatus -logger = logging.getLogger(__name__) +log = structlog.stdlib.get_logger() class NetworkGraph: @@ -21,27 +21,34 @@ def __init__(self, cache: Cache, identity: NodeIdentity): self.cache = cache self.dg = nx.DiGraph() self.identity = identity + + def start(self): + self.generate() def generate(self): """Generates directed graph from cached KOI nodes and edges.""" - logger.debug("Generating network graph") + log.debug("Generating network graph") self.dg.clear() for rid in self.cache.list_rids(): if type(rid) == KoiNetNode: self.dg.add_node(rid) - logger.debug(f"Added node {rid!r}") + log.debug(f"Added node {rid!r}") elif type(rid) == KoiNetEdge: edge_bundle = self.cache.read(rid) if not edge_bundle: - logger.warning(f"Failed to load {rid!r}") + log.warning(f"Failed to load {rid!r}") continue edge_profile = edge_bundle.validate_contents(EdgeProfile) self.dg.add_edge(edge_profile.source, edge_profile.target, rid=rid) - logger.debug(f"Added edge {rid!r} ({edge_profile.source} -> {edge_profile.target})") - logger.debug("Done") + log.debug(f"Added edge {rid!r} ({edge_profile.source} -> {edge_profile.target})") + log.debug("Done") - def get_edge(self, source: KoiNetNode, target: KoiNetNode,) -> KoiNetEdge | None: + def get_edge( + self, + source: KoiNetNode, + target: KoiNetNode + ) -> KoiNetEdge | None: """Returns edge RID given the RIDs of a source and target node.""" if (source, target) in self.dg.edges: edge_data = self.dg.get_edge_data(source, target) @@ -59,16 +66,16 @@ def get_edges( All edges returned by default, specify `direction` to restrict to incoming or outgoing edges only. 
""" - + edges = [] - if direction != "in" and self.dg.out_edges: + if (direction is None or direction == "out") and self.dg.out_edges: out_edges = self.dg.out_edges(self.identity.rid) edges.extend(out_edges) - - if direction != "out" and self.dg.in_edges: + + if (direction is None or direction == "in") and self.dg.in_edges: in_edges = self.dg.in_edges(self.identity.rid) edges.extend(in_edges) - + edge_rids = [] for edge in edges: edge_data = self.dg.get_edge_data(*edge) @@ -97,7 +104,7 @@ def get_neighbors( edge_bundle = self.cache.read(edge_rid) if not edge_bundle: - logger.warning(f"Failed to find edge {edge_rid!r} in cache") + log.warning(f"Failed to find edge {edge_rid!r} in cache") continue edge_profile = edge_bundle.validate_contents(EdgeProfile) @@ -112,6 +119,5 @@ def get_neighbors( neighbors.add(edge_profile.source) elif edge_profile.source == self.identity.rid: neighbors.add(edge_profile.target) - + return list(neighbors) - diff --git a/src/koi_net/network/request_handler.py b/src/koi_net/network/request_handler.py index 1c17383..0f58658 100644 --- a/src/koi_net/network/request_handler.py +++ b/src/koi_net/network/request_handler.py @@ -1,6 +1,7 @@ -import logging +import structlog import httpx from rid_lib import RID +from rid_lib.ext import Cache from rid_lib.types.koi_net_node import KoiNetNode from ..identity import NodeIdentity @@ -17,7 +18,6 @@ ResponseModels, ErrorResponse ) -from ..protocol.envelope import SignedEnvelope from ..protocol.consts import ( BROADCAST_EVENTS_PATH, POLL_EVENTS_PATH, @@ -26,82 +26,70 @@ FETCH_BUNDLES_PATH ) from ..protocol.node import NodeProfile, NodeType -from ..secure import Secure -from ..effector import Effector +from ..protocol.model_map import API_MODEL_MAP +from ..secure_manager import SecureManager +from .error_handler import ErrorHandler -from typing import TYPE_CHECKING -if TYPE_CHECKING: - from .error_handler import ErrorHandler +log = structlog.stdlib.get_logger() -logger = logging.getLogger(__name__) - 
+class KoiNetRequestError(Exception): + pass # Custom error types for request handling -class SelfRequestError(Exception): +class SelfRequestError(KoiNetRequestError): """Raised when a node tries to request itself.""" pass -class PartialNodeQueryError(Exception): +class PartialNodeQueryError(KoiNetRequestError): """Raised when attempting to query a partial node.""" pass -class NodeNotFoundError(Exception): +class NodeNotFoundError(KoiNetRequestError): """Raised when a node URL cannot be found.""" pass -class UnknownPathError(Exception): +class UnknownPathError(KoiNetRequestError): """Raised when an unknown path is requested.""" pass class RequestHandler: """Handles making requests to other KOI nodes.""" - effector: Effector + cache: Cache identity: NodeIdentity - secure: Secure - error_handler: "ErrorHandler" + secure_manager: SecureManager + error_handler: ErrorHandler def __init__( self, - effector: Effector, + cache: Cache, identity: NodeIdentity, - secure: Secure + secure_manager: SecureManager, + error_handler: ErrorHandler ): - self.effector = effector + self.cache = cache self.identity = identity - self.secure = secure - - def set_error_handler(self, error_handler: "ErrorHandler"): + self.secure_manager = secure_manager self.error_handler = error_handler - def get_url(self, node_rid: KoiNetNode) -> str: + def get_base_url(self, node_rid: KoiNetNode) -> str: """Retrieves URL of a node from its RID.""" - logger.debug(f"Getting URL for {node_rid!r}") - node_url = None - - if node_rid == self.identity.rid: - raise SelfRequestError("Don't talk to yourself") - - node_bundle = self.effector.deref(node_rid) - + node_bundle = self.cache.read(node_rid) if node_bundle: node_profile = node_bundle.validate_contents(NodeProfile) - logger.debug(f"Found node profile: {node_profile}") if node_profile.node_type != NodeType.FULL: - raise PartialNodeQueryError("Can't query partial node") + raise PartialNodeQueryError("Partial nodes don't have URLs") node_url = 
node_profile.base_url - else: - if node_rid == self.identity.config.koi_net.first_contact.rid: - logger.debug("Found URL of first contact") - node_url = self.identity.config.koi_net.first_contact.url + elif node_rid == self.identity.config.koi_net.first_contact.rid: + node_url = self.identity.config.koi_net.first_contact.url - if not node_url: - raise NodeNotFoundError("Node not found") + else: + raise NodeNotFoundError(f"URL not found for {node_rid!r}") - logger.debug(f"Resolved {node_rid!r} to {node_url}") + log.debug(f"Resolved {node_rid!r} to {node_url}") return node_url def make_request( @@ -111,18 +99,25 @@ def make_request( request: RequestModels, ) -> ResponseModels | None: """Makes a request to a node.""" - url = self.get_url(node) + path - logger.info(f"Making request to {url}") + if node == self.identity.rid: + raise SelfRequestError("Don't talk to yourself") + + url = self.get_base_url(node) + path + log.info(f"Making request to {url}") - signed_envelope = self.secure.create_envelope( + signed_envelope = self.secure_manager.create_envelope( payload=request, target=node ) + data = signed_envelope.model_dump_json(exclude_none=True) + try: - result = httpx.post(url, data=signed_envelope.model_dump_json(exclude_none=True)) - except httpx.ConnectError as err: - logger.debug("Failed to connect") + result = httpx.post(url, data=data) + self.error_handler.reset_timeout_counter(node) + + except httpx.RequestError as err: + log.debug("Failed to connect") self.error_handler.handle_connection_error(node) raise err @@ -131,21 +126,12 @@ def make_request( self.error_handler.handle_protocol_error(resp.error, node) return resp - if path == BROADCAST_EVENTS_PATH: - return None - elif path == POLL_EVENTS_PATH: - EnvelopeModel = SignedEnvelope[EventsPayload] - elif path == FETCH_RIDS_PATH: - EnvelopeModel = SignedEnvelope[RidsPayload] - elif path == FETCH_MANIFESTS_PATH: - EnvelopeModel = SignedEnvelope[ManifestsPayload] - elif path == FETCH_BUNDLES_PATH: - EnvelopeModel 
= SignedEnvelope[BundlesPayload] - else: - raise UnknownPathError(f"Unknown path '{path}'") + resp_env_model = API_MODEL_MAP[path].response_envelope + if not resp_env_model: + return - resp_envelope = EnvelopeModel.model_validate_json(result.text) - self.secure.validate_envelope(resp_envelope) + resp_envelope = resp_env_model.model_validate_json(result.text) + self.secure_manager.validate_envelope(resp_envelope) return resp_envelope.payload @@ -161,14 +147,14 @@ def broadcast_events( """ request = req or EventsPayload.model_validate(kwargs) self.make_request(node, BROADCAST_EVENTS_PATH, request) - logger.info(f"Broadcasted {len(request.events)} event(s) to {node!r}") + log.info(f"Broadcasted {len(request.events)} event(s) to {node!r}") def poll_events( self, node: RID, req: PollEvents | None = None, **kwargs - ) -> EventsPayload: + ) -> EventsPayload | ErrorResponse: """Polls events from a node. Pass `PollEvents` object as `req` or fields as kwargs. @@ -176,7 +162,7 @@ def poll_events( request = req or PollEvents.model_validate(kwargs) resp = self.make_request(node, POLL_EVENTS_PATH, request) if type(resp) != ErrorResponse: - logger.info(f"Polled {len(resp.events)} events from {node!r}") + log.info(f"Polled {len(resp.events)} events from {node!r}") return resp def fetch_rids( @@ -184,7 +170,7 @@ def fetch_rids( node: RID, req: FetchRids | None = None, **kwargs - ) -> RidsPayload: + ) -> RidsPayload | ErrorResponse: """Fetches RIDs from a node. Pass `FetchRids` object as `req` or fields as kwargs. 
@@ -192,7 +178,7 @@ def fetch_rids( request = req or FetchRids.model_validate(kwargs) resp = self.make_request(node, FETCH_RIDS_PATH, request) if type(resp) != ErrorResponse: - logger.info(f"Fetched {len(resp.rids)} RID(s) from {node!r}") + log.info(f"Fetched {len(resp.rids)} RID(s) from {node!r}") return resp def fetch_manifests( @@ -200,7 +186,7 @@ def fetch_manifests( node: RID, req: FetchManifests | None = None, **kwargs - ) -> ManifestsPayload: + ) -> ManifestsPayload | ErrorResponse: """Fetches manifests from a node. Pass `FetchManifests` object as `req` or fields as kwargs. @@ -208,7 +194,7 @@ def fetch_manifests( request = req or FetchManifests.model_validate(kwargs) resp = self.make_request(node, FETCH_MANIFESTS_PATH, request) if type(resp) != ErrorResponse: - logger.info(f"Fetched {len(resp.manifests)} manifest(s) from {node!r}") + log.info(f"Fetched {len(resp.manifests)} manifest(s) from {node!r}") return resp def fetch_bundles( @@ -216,7 +202,7 @@ def fetch_bundles( node: RID, req: FetchBundles | None = None, **kwargs - ) -> BundlesPayload: + ) -> BundlesPayload | ErrorResponse: """Fetches bundles from a node. Pass `FetchBundles` object as `req` or fields as kwargs. 
@@ -224,5 +210,5 @@ def fetch_bundles( request = req or FetchBundles.model_validate(kwargs) resp = self.make_request(node, FETCH_BUNDLES_PATH, request) if type(resp) != ErrorResponse: - logger.info(f"Fetched {len(resp.bundles)} bundle(s) from {node!r}") + log.info(f"Fetched {len(resp.bundles)} bundle(s) from {node!r}") return resp \ No newline at end of file diff --git a/src/koi_net/network/resolver.py b/src/koi_net/network/resolver.py index 5cc12c8..df5f94f 100644 --- a/src/koi_net/network/resolver.py +++ b/src/koi_net/network/resolver.py @@ -1,4 +1,4 @@ -import logging +import structlog import httpx from rid_lib import RID from rid_lib.core import RIDType @@ -11,10 +11,9 @@ from ..protocol.event import Event from ..protocol.api_models import ErrorResponse from ..identity import NodeIdentity -from ..config import NodeConfig -from ..effector import Effector +from ..config.core import NodeConfig -logger = logging.getLogger(__name__) +log = structlog.stdlib.get_logger() class NetworkResolver: @@ -22,7 +21,6 @@ class NetworkResolver: config: NodeConfig identity: NodeIdentity - effector: Effector cache: Cache graph: NetworkGraph request_handler: RequestHandler @@ -32,7 +30,6 @@ def __init__( config: NodeConfig, cache: Cache, identity: NodeIdentity, - effector: Effector, graph: NetworkGraph, request_handler: RequestHandler, ): @@ -41,7 +38,6 @@ def __init__( self.cache = cache self.graph = graph self.request_handler = request_handler - self.effector = effector self.poll_event_queue = dict() self.webhook_event_queue = dict() @@ -49,86 +45,99 @@ def __init__( def get_state_providers(self, rid_type: RIDType) -> list[KoiNetNode]: """Returns list of node RIDs which provide state for specified RID type.""" - logger.debug(f"Looking for state providers of {rid_type}") + log.debug(f"Looking for state providers of {rid_type}") provider_nodes = [] for node_rid in self.cache.list_rids(rid_types=[KoiNetNode]): if node_rid == self.identity.rid: continue node_bundle = 
self.cache.read(node_rid) - node_profile = node_bundle.validate_contents(NodeProfile) - if (node_profile.node_type == NodeType.FULL) and (rid_type in node_profile.provides.state): - logger.debug(f"Found provider {node_rid!r}") - provider_nodes.append(node_rid) + if node_profile.node_type != NodeType.FULL: + continue + + if rid_type not in node_profile.provides.state: + continue + + provider_nodes.append(node_rid) - if not provider_nodes: - logger.debug("Failed to find providers") + if provider_nodes: + log.debug(f"Found provider(s) {provider_nodes}") + else: + log.debug("Failed to find providers") + return provider_nodes def fetch_remote_bundle(self, rid: RID) -> tuple[Bundle | None, KoiNetNode | None]: """Attempts to fetch a bundle by RID from known peer nodes.""" - logger.debug(f"Fetching remote bundle {rid!r}") + log.debug(f"Fetching remote bundle {rid!r}") remote_bundle, node_rid = None, None for node_rid in self.get_state_providers(type(rid)): payload = self.request_handler.fetch_bundles( node=node_rid, rids=[rid]) + if type(payload) == ErrorResponse: + continue + if payload.bundles: remote_bundle = payload.bundles[0] - logger.debug(f"Got bundle from {node_rid!r}") + log.debug(f"Got bundle from {node_rid!r}") break if not remote_bundle: - logger.warning("Failed to fetch remote bundle") + log.warning("Failed to fetch remote bundle") return remote_bundle, node_rid def fetch_remote_manifest(self, rid: RID) -> tuple[Bundle | None, KoiNetNode | None]: """Attempts to fetch a manifest by RID from known peer nodes.""" - logger.debug(f"Fetching remote manifest {rid!r}") + log.debug(f"Fetching remote manifest {rid!r}") remote_manifest, node_rid = None, None for node_rid in self.get_state_providers(type(rid)): payload = self.request_handler.fetch_manifests( node=node_rid, rids=[rid]) + if type(payload) == ErrorResponse: + continue + if payload.manifests: remote_manifest = payload.manifests[0] - logger.debug(f"Got bundle from {node_rid!r}") + log.debug(f"Got bundle from 
{node_rid!r}") break if not remote_manifest: - logger.warning("Failed to fetch remote bundle") + log.warning("Failed to fetch remote bundle") return remote_manifest, node_rid def poll_neighbors(self) -> dict[KoiNetNode, list[Event]]: """Polls all neighbor nodes and returns compiled list of events. - Neighbor nodes also include the first contact, regardless of - whether the first contact profile is known to this node. - """ + Neighbor nodes include any node this node shares an edge with, + or the first contact, if no neighbors are found. - graph_neighbors = self.graph.get_neighbors() - neighbors = [] + NOTE: This function does not poll nodes that don't share edges + with this node. Events sent by non neighboring nodes will not + be polled. + """ - if graph_neighbors: - for node_rid in graph_neighbors: - node_bundle = self.cache.read(node_rid) - if not node_bundle: - continue - node_profile = node_bundle.validate_contents(NodeProfile) - if node_profile.node_type != NodeType.FULL: - continue - neighbors.append(node_rid) + neighbors: list[KoiNetNode] = [] + for node_rid in self.graph.get_neighbors(): + node_bundle = self.cache.read(node_rid) + if not node_bundle: + continue + node_profile = node_bundle.validate_contents(NodeProfile) + if node_profile.node_type != NodeType.FULL: + continue + neighbors.append(node_rid) - elif self.config.koi_net.first_contact.rid: + if not neighbors and self.config.koi_net.first_contact.rid: neighbors.append(self.config.koi_net.first_contact.rid) - event_dict = dict() + event_dict: dict[KoiNetNode, list[Event]] = {} for node_rid in neighbors: try: payload = self.request_handler.poll_events( @@ -138,14 +147,13 @@ def poll_neighbors(self) -> dict[KoiNetNode, list[Event]]: if type(payload) == ErrorResponse: continue - + if payload.events: - logger.debug(f"Received {len(payload.events)} events from {node_rid!r}") - + log.debug(f"Received {len(payload.events)} events from {node_rid!r}") event_dict[node_rid] = payload.events - except 
httpx.ConnectError: - logger.debug(f"Failed to reach node {node_rid!r}") + except httpx.RequestError: + log.debug(f"Failed to reach node {node_rid!r}") continue return event_dict \ No newline at end of file diff --git a/src/koi_net/network/response_handler.py b/src/koi_net/network/response_handler.py index 2e5c3fe..2e3144b 100644 --- a/src/koi_net/network/response_handler.py +++ b/src/koi_net/network/response_handler.py @@ -1,10 +1,18 @@ -import logging +import structlog from rid_lib import RID from rid_lib.types import KoiNetNode from rid_lib.ext import Manifest, Cache from rid_lib.ext.bundle import Bundle +from koi_net.network.event_buffer import EventBuffer +from koi_net.processor.kobj_queue import KobjQueue +from koi_net.protocol.consts import BROADCAST_EVENTS_PATH, FETCH_BUNDLES_PATH, FETCH_MANIFESTS_PATH, FETCH_RIDS_PATH, POLL_EVENTS_PATH +from koi_net.protocol.envelope import SignedEnvelope +from koi_net.secure_manager import SecureManager + from ..protocol.api_models import ( + EventsPayload, + PollEvents, RidsPayload, ManifestsPayload, BundlesPayload, @@ -12,60 +20,110 @@ FetchManifests, FetchBundles, ) -from ..effector import Effector -logger = logging.getLogger(__name__) +log = structlog.stdlib.get_logger() class ResponseHandler: """Handles generating responses to requests from other KOI nodes.""" cache: Cache - effector: Effector + kobj_queue: KobjQueue + poll_event_buf: EventBuffer def __init__( self, - cache: Cache, - effector: Effector, + cache: Cache, + kobj_queue: KobjQueue, + poll_event_buf: EventBuffer, + secure_manager: SecureManager ): self.cache = cache - self.effector = effector + self.kobj_queue = kobj_queue + self.poll_event_buf = poll_event_buf + self.secure_manager = secure_manager + + def handle_response(self, path: str, req: SignedEnvelope): + self.secure_manager.validate_envelope(req) + + response_map = { + BROADCAST_EVENTS_PATH: self.broadcast_events_handler, + POLL_EVENTS_PATH: self.poll_events_handler, + FETCH_RIDS_PATH: 
self.fetch_rids_handler, + FETCH_MANIFESTS_PATH: self.fetch_manifests_handler, + FETCH_BUNDLES_PATH: self.fetch_bundles_handler + } + + response = response_map[path](req.payload, req.source_node) + + if response is None: + return + + return self.secure_manager.create_envelope( + payload=response, + target=req.source_node + ) + + def broadcast_events_handler(self, req: EventsPayload, source: KoiNetNode): + log.info(f"Request to broadcast events, received {len(req.events)} event(s)") + + for event in req.events: + self.kobj_queue.push(event=event, source=source) - def fetch_rids(self, req: FetchRids, source: KoiNetNode) -> RidsPayload: + def poll_events_handler( + self, + req: PollEvents, + source: KoiNetNode + ) -> EventsPayload: + events = self.poll_event_buf.flush(source, limit=req.limit) + log.info(f"Request to poll events, returning {len(events)} event(s)") + return EventsPayload(events=events) + + def fetch_rids_handler( + self, + req: FetchRids, + source: KoiNetNode + ) -> RidsPayload: """Returns response to fetch RIDs request.""" - logger.info(f"Request to fetch rids, allowed types {req.rid_types}") rids = self.cache.list_rids(req.rid_types) - + log.info(f"Request to fetch rids, allowed types {req.rid_types}, returning {len(rids)} RID(s)") return RidsPayload(rids=rids) - def fetch_manifests(self, req: FetchManifests, source: KoiNetNode) -> ManifestsPayload: - """Returns response to fetch manifests request.""" - logger.info(f"Request to fetch manifests, allowed types {req.rid_types}, rids {req.rids}") - + def fetch_manifests_handler( + self, + req: FetchManifests, + source: KoiNetNode + ) -> ManifestsPayload: + """Returns response to fetch manifests request.""" manifests: list[Manifest] = [] not_found: list[RID] = [] for rid in (req.rids or self.cache.list_rids(req.rid_types)): - bundle = self.effector.deref(rid) + bundle = self.cache.read(rid) if bundle: manifests.append(bundle.manifest) else: not_found.append(rid) + log.info(f"Request to fetch manifests, 
allowed types {req.rid_types}, rids {req.rids}, returning {len(manifests)} manifest(s)") return ManifestsPayload(manifests=manifests, not_found=not_found) - def fetch_bundles(self, req: FetchBundles, source: KoiNetNode) -> BundlesPayload: + def fetch_bundles_handler( + self, + req: FetchBundles, + source: KoiNetNode + ) -> BundlesPayload: """Returns response to fetch bundles request.""" - logger.info(f"Request to fetch bundles, requested rids {req.rids}") bundles: list[Bundle] = [] not_found: list[RID] = [] for rid in req.rids: - bundle = self.effector.deref(rid) + bundle = self.cache.read(rid) if bundle: bundles.append(bundle) else: not_found.append(rid) - + + log.info(f"Request to fetch bundles, requested rids {req.rids}, returning {len(bundles)} bundle(s)") return BundlesPayload(bundles=bundles, not_found=not_found) \ No newline at end of file diff --git a/src/koi_net/poller.py b/src/koi_net/poller.py deleted file mode 100644 index 2ff54e4..0000000 --- a/src/koi_net/poller.py +++ /dev/null @@ -1,48 +0,0 @@ - -import time -import logging -from .processor.interface import ProcessorInterface -from .lifecycle import NodeLifecycle -from .network.resolver import NetworkResolver -from .config import NodeConfig - -logger = logging.getLogger(__name__) - - -class NodePoller: - """Manages polling based event loop for partial nodes.""" - processor: ProcessorInterface - lifecycle: NodeLifecycle - resolver: NetworkResolver - config: NodeConfig - - def __init__( - self, - processor: ProcessorInterface, - lifecycle: NodeLifecycle, - resolver: NetworkResolver, - config: NodeConfig - ): - self.processor = processor - self.lifecycle = lifecycle - self.resolver = resolver - self.config = config - - def poll(self): - """Polls neighbors and processes returned events.""" - neighbors = self.resolver.poll_neighbors() - for node_rid in neighbors: - for event in neighbors[node_rid]: - self.processor.handle(event=event, source=node_rid) - self.processor.flush_kobj_queue() - - def 
run(self): - """Runs polling event loop.""" - with self.lifecycle.run(): - while True: - start_time = time.time() - self.poll() - elapsed = time.time() - start_time - sleep_time = self.config.koi_net.polling_interval - elapsed - if sleep_time > 0: - time.sleep(sleep_time) \ No newline at end of file diff --git a/src/koi_net/processor/context.py b/src/koi_net/processor/context.py new file mode 100644 index 0000000..f7cac21 --- /dev/null +++ b/src/koi_net/processor/context.py @@ -0,0 +1,55 @@ +from typing import TYPE_CHECKING +from rid_lib.ext import Cache + +from ..network.resolver import NetworkResolver +from ..config.core import NodeConfig +from ..config.loader import ConfigLoader +from ..network.graph import NetworkGraph +from ..network.event_queue import EventQueue +from ..network.request_handler import RequestHandler +from ..identity import NodeIdentity +from .kobj_queue import KobjQueue + +if TYPE_CHECKING: + from ..effector import Effector + + +class HandlerContext: + """Context object provides knowledge handlers access to other components.""" + + identity: NodeIdentity + config: NodeConfig + config_loader: ConfigLoader + cache: Cache + event_queue: EventQueue + kobj_queue: KobjQueue + graph: NetworkGraph + request_handler: RequestHandler + resolver: NetworkResolver + effector: "Effector" + + def __init__( + self, + identity: NodeIdentity, + config: NodeConfig, + config_loader: ConfigLoader, + cache: Cache, + event_queue: EventQueue, + kobj_queue: KobjQueue, + graph: NetworkGraph, + request_handler: RequestHandler, + resolver: NetworkResolver + ): + self.identity = identity + self.config = config + self.config_loader = config_loader + self.cache = cache + self.event_queue = event_queue + self.kobj_queue = kobj_queue + self.graph = graph + self.request_handler = request_handler + self.resolver = resolver + + def set_effector(self, effector: "Effector"): + """Post initialization injection of effector component.""" + self.effector = effector \ No newline at end 
of file diff --git a/src/koi_net/processor/handler.py b/src/koi_net/processor/handler.py index 6ded184..528d4cd 100644 --- a/src/koi_net/processor/handler.py +++ b/src/koi_net/processor/handler.py @@ -1,12 +1,15 @@ from dataclasses import dataclass from enum import StrEnum from typing import Callable -from rid_lib import RIDType +from rid_lib.core import RIDType + from ..protocol.event import EventType +from .knowledge_object import KnowledgeObject +from .context import HandlerContext class StopChain: - """Class for a sentinel value by knowledge handlers.""" + """Class for STOP_CHAIN sentinel returned by knowledge handlers.""" pass STOP_CHAIN = StopChain() @@ -32,26 +35,27 @@ class HandlerType(StrEnum): class KnowledgeHandler: """Handles knowledge processing events of the provided types.""" - func: Callable + func: Callable[[HandlerContext, KnowledgeObject], None | KnowledgeObject | StopChain] handler_type: HandlerType - rid_types: list[RIDType] | None - event_types: list[EventType | None] | None = None + rid_types: tuple[RIDType] = () + event_types: tuple[EventType | None] = () + + def __call__( + self, + ctx: HandlerContext, + kobj: KnowledgeObject + ) -> None | KnowledgeObject | StopChain: + return self.func(ctx, kobj) @classmethod def create( cls, handler_type: HandlerType, - rid_types: list[RIDType] | None = None, - event_types: list[EventType | None] | None = None + rid_types: tuple[RIDType] = (), + event_types: tuple[EventType | None] = () ): - """Decorator wraps a function, returns a KnowledgeHandler. - - The function symbol will redefined as a `KnowledgeHandler`, - which can be passed into the `ProcessorInterface` constructor. - This is used to register default handlers. 
- """ + """Decorator wraps a function, returns a KnowledgeHandler.""" def decorator(func: Callable) -> KnowledgeHandler: handler = cls(func, handler_type, rid_types, event_types) return handler return decorator - diff --git a/src/koi_net/processor/interface.py b/src/koi_net/processor/interface.py deleted file mode 100644 index a23b81f..0000000 --- a/src/koi_net/processor/interface.py +++ /dev/null @@ -1,106 +0,0 @@ -import logging -import queue -import threading -from rid_lib.core import RID -from rid_lib.ext import Bundle, Manifest -from rid_lib.types import KoiNetNode -from ..protocol.event import Event, EventType -from .knowledge_object import KnowledgeObject -from .knowledge_pipeline import KnowledgePipeline - - -logger = logging.getLogger(__name__) - - -class ProcessorInterface: - """Provides access to this node's knowledge processing pipeline.""" - pipeline: KnowledgePipeline - kobj_queue: queue.Queue[KnowledgeObject] - use_kobj_processor_thread: bool - worker_thread: threading.Thread | None = None - - def __init__( - self, - pipeline: KnowledgePipeline, - use_kobj_processor_thread: bool, - ): - self.pipeline = pipeline - self.use_kobj_processor_thread = use_kobj_processor_thread - self.kobj_queue = queue.Queue() - - if self.use_kobj_processor_thread: - self.worker_thread = threading.Thread( - target=self.kobj_processor_worker, - daemon=True - ) - - def flush_kobj_queue(self): - """Flushes all knowledge objects from queue and processes them. - - NOTE: ONLY CALL THIS METHOD IN SINGLE THREADED NODES, OTHERWISE - THIS WILL CAUSE RACE CONDITIONS. 
- """ - if self.use_kobj_processor_thread: - logger.warning("You are using a worker thread, calling this method can cause race conditions!") - - while not self.kobj_queue.empty(): - kobj = self.kobj_queue.get() - logger.debug(f"Dequeued {kobj!r}") - - try: - self.pipeline.process(kobj) - finally: - self.kobj_queue.task_done() - logger.debug("Done") - - def kobj_processor_worker(self, timeout=0.1): - while True: - try: - kobj = self.kobj_queue.get(timeout=timeout) - logger.debug(f"Dequeued {kobj!r}") - - try: - self.pipeline.process(kobj) - finally: - self.kobj_queue.task_done() - logger.debug("Done") - - except queue.Empty: - pass - - except Exception as e: - logger.warning(f"Error processing kobj: {e}") - - def handle( - self, - rid: RID | None = None, - manifest: Manifest | None = None, - bundle: Bundle | None = None, - event: Event | None = None, - kobj: KnowledgeObject | None = None, - event_type: EventType | None = None, - source: KoiNetNode | None = None - ): - """Queues knowledge object to be handled by processing pipeline. - - Knowledge may take the form of an RID, manifest, bundle, event, - or knowledge object (with an optional event type for RID, - manifest, or bundle objects). All objects will be normalized - to knowledge objects and queued. If `flush` is `True`, the queue - will be flushed immediately after adding the new knowledge. 
- """ - if rid: - _kobj = KnowledgeObject.from_rid(rid, event_type, source) - elif manifest: - _kobj = KnowledgeObject.from_manifest(manifest, event_type, source) - elif bundle: - _kobj = KnowledgeObject.from_bundle(bundle, event_type, source) - elif event: - _kobj = KnowledgeObject.from_event(event, source) - elif kobj: - _kobj = kobj - else: - raise ValueError("One of 'rid', 'manifest', 'bundle', 'event', or 'kobj' must be provided") - - self.kobj_queue.put(_kobj) - logger.debug(f"Queued {_kobj!r}") diff --git a/src/koi_net/processor/default_handlers.py b/src/koi_net/processor/knowledge_handlers.py similarity index 51% rename from src/koi_net/processor/default_handlers.py rename to src/koi_net/processor/knowledge_handlers.py index 28b95a8..50662af 100644 --- a/src/koi_net/processor/default_handlers.py +++ b/src/koi_net/processor/knowledge_handlers.py @@ -1,18 +1,19 @@ """Implementation of default knowledge handlers.""" -import logging +import structlog from rid_lib.ext import Bundle from rid_lib.ext.utils import sha256_hash from rid_lib.types import KoiNetNode, KoiNetEdge from koi_net.protocol.node import NodeType from .handler import KnowledgeHandler, HandlerType, STOP_CHAIN from .knowledge_object import KnowledgeObject -from ..context import HandlerContext +from .context import HandlerContext from ..protocol.event import Event, EventType from ..protocol.edge import EdgeProfile, EdgeStatus, EdgeType, generate_edge_bundle from ..protocol.node import NodeProfile -logger = logging.getLogger(__name__) +log = structlog.stdlib.get_logger() + # RID handlers @@ -20,42 +21,45 @@ def basic_rid_handler(ctx: HandlerContext, kobj: KnowledgeObject): """Default RID handler. - Blocks external events about this node. Allows `FORGET` events if - RID is known to this node. + Blocks external events about this node. Sets normalized event type + for `FORGET` events. 
""" - if (kobj.rid == ctx.identity.rid and kobj.source): - logger.debug("Don't let anyone else tell me who I am!") + + if (kobj.rid == ctx.identity.rid and kobj.source is not None): + log.debug("Externally sourced events about this node not allowed.") return STOP_CHAIN if kobj.event_type == EventType.FORGET: kobj.normalized_event_type = EventType.FORGET return kobj + # Manifest handlers @KnowledgeHandler.create(HandlerType.Manifest) def basic_manifest_handler(ctx: HandlerContext, kobj: KnowledgeObject): - """Decider based on incoming manifest and cache state. + """Normalized event decider based on manifest and cache state. - Blocks manifests which have the same hash, or aren't newer than the - cached version. Sets the normalized event type to `NEW` or `UPDATE` - depending on whether the RID was previously known. + Stops processing for manifests which have the same hash, or aren't + newer than the cached version. Sets the normalized event type to + `NEW` or `UPDATE` depending on whether the RID was previously known. 
""" + prev_bundle = ctx.cache.read(kobj.rid) if prev_bundle: if kobj.manifest.sha256_hash == prev_bundle.manifest.sha256_hash: - logger.debug("Hash of incoming manifest is same as existing knowledge, ignoring") + log.debug("Hash of incoming manifest is same as existing knowledge, ignoring") return STOP_CHAIN if kobj.manifest.timestamp <= prev_bundle.manifest.timestamp: - logger.debug("Timestamp of incoming manifest is the same or older than existing knowledge, ignoring") + log.debug("Timestamp of incoming manifest is the same or older than existing knowledge, ignoring") return STOP_CHAIN - logger.debug("RID previously known to me, labeling as 'UPDATE'") + log.debug("RID previously known to me, labeling as 'UPDATE'") kobj.normalized_event_type = EventType.UPDATE else: - logger.debug("RID previously unknown to me, labeling as 'NEW'") + log.debug("RID previously unknown to me, labeling as 'NEW'") kobj.normalized_event_type = EventType.NEW return kobj @@ -65,52 +69,53 @@ def basic_manifest_handler(ctx: HandlerContext, kobj: KnowledgeObject): @KnowledgeHandler.create( handler_type=HandlerType.Bundle, - rid_types=[KoiNetNode], - event_types=[EventType.NEW, EventType.UPDATE]) + rid_types=(KoiNetNode,), + event_types=(EventType.NEW, EventType.UPDATE)) def secure_profile_handler(ctx: HandlerContext, kobj: KnowledgeObject): """Maintains security of cached node profiles. Blocks bundles with a mismatching public keys in their node profile - and RID from continuing through the pipeline. + and RID from being written to cache. 
""" node_profile = kobj.bundle.validate_contents(NodeProfile) node_rid: KoiNetNode = kobj.rid if sha256_hash(node_profile.public_key) != node_rid.hash: - logger.warning(f"Public key hash mismatch for {node_rid!r}!") + log.warning(f"Public key hash mismatch for {node_rid!r}!") return STOP_CHAIN @KnowledgeHandler.create( handler_type=HandlerType.Bundle, - rid_types=[KoiNetEdge], - event_types=[EventType.NEW, EventType.UPDATE]) + rid_types=(KoiNetEdge,), + event_types=(EventType.NEW, EventType.UPDATE)) def edge_negotiation_handler(ctx: HandlerContext, kobj: KnowledgeObject): - """Handles basic edge negotiation process. + """Handles edge negotiation process. Automatically approves proposed edges if they request RID types this - node can provide (or KOI nodes/edges). Validates the edge type is - allowed for the node type (partial nodes cannot use webhooks). If + node can provide (or KOI node, edge RIDs). Validates the edge type + is allowed for the node type (partial nodes cannot use webhooks). If edge is invalid, a `FORGET` event is sent to the other node. 
""" - # only respond when source is another node - if kobj.source is None: return + # only handle incoming events (ignore internal edge knowledge objects) + if kobj.source is None: + return edge_profile = kobj.bundle.validate_contents(EdgeProfile) - # indicates peer subscribing to me - if edge_profile.source == ctx.identity.rid: + # indicates peer subscribing to this node + if edge_profile.source == ctx.identity.rid: if edge_profile.status != EdgeStatus.PROPOSED: return - logger.debug("Handling edge negotiation") + log.debug("Handling edge negotiation") peer_rid = edge_profile.target - peer_bundle = ctx.effector.deref(peer_rid) + peer_bundle = ctx.cache.read(peer_rid) if not peer_bundle: - logger.warning(f"Peer {peer_rid!r} unknown to me") + log.warning(f"Peer {peer_rid!r} unknown to me") return STOP_CHAIN peer_profile = peer_bundle.validate_contents(NodeProfile) @@ -121,140 +126,153 @@ def edge_negotiation_handler(ctx: HandlerContext, kobj: KnowledgeObject): KoiNetNode, KoiNetEdge ) - abort = False if (edge_profile.edge_type == EdgeType.WEBHOOK and peer_profile.node_type == NodeType.PARTIAL): - logger.debug("Partial nodes cannot use webhooks") + log.debug("Partial nodes cannot use webhooks") abort = True if not set(edge_profile.rid_types).issubset(provided_events): - logger.debug("Requested RID types not provided by this node") + log.debug("Requested RID types not provided by this node") abort = True if abort: event = Event.from_rid(EventType.FORGET, kobj.rid) - ctx.event_queue.push_event_to(event, peer_rid, flush=True) + ctx.event_queue.push(event, peer_rid, flush=True) return STOP_CHAIN - + else: - # approve edge profile - logger.debug("Approving proposed edge") + log.debug("Approving proposed edge") edge_profile.status = EdgeStatus.APPROVED updated_bundle = Bundle.generate(kobj.rid, edge_profile.model_dump()) - ctx.handle(bundle=updated_bundle, event_type=EventType.UPDATE) + ctx.kobj_queue.push(bundle=updated_bundle, event_type=EventType.UPDATE) return elif 
edge_profile.target == ctx.identity.rid: if edge_profile.status == EdgeStatus.APPROVED: - logger.debug("Edge approved by other node!") + log.debug("Edge approved by other node!") # Network handlers -@KnowledgeHandler.create(HandlerType.Network, rid_types=[KoiNetNode]) -def coordinator_contact(ctx: HandlerContext, kobj: KnowledgeObject): - """Makes contact with identified coordinator nodes. +@KnowledgeHandler.create(HandlerType.Network, rid_types=(KoiNetNode,)) +def node_contact_handler(ctx: HandlerContext, kobj: KnowledgeObject): + """Makes contact with providers of RID types of interest. When an incoming node knowledge object is identified as a provider - of `orn:koi-net.node`, and not already known to the node, this - handler will propose a new edge subscribing to future node events, - and fetch existing nodes to catch up to the current state. + of an RID type of interest, this handler will propose a new edge + subscribing to future node events, and fetch existing nodes to catch + up to the current state. 
""" - node_profile = kobj.bundle.validate_contents(NodeProfile) - - # looking for event provider of nodes - if KoiNetNode not in node_profile.provides.event: + # prevents nodes from attempting to form a self loop + if kobj.rid == ctx.identity.rid: return - # prevents coordinators from attempting to form a self loop - if kobj.rid == ctx.identity.rid: + node_profile = kobj.bundle.validate_contents(NodeProfile) + + available_rid_types = list( + set(ctx.config.koi_net.rid_types_of_interest) & + set(node_profile.provides.event) + ) + + if not available_rid_types: return - # already have an edge established - if ctx.graph.get_edge( + edge_rid = ctx.graph.get_edge( source=kobj.rid, target=ctx.identity.rid, - ) is not None: - return + ) - logger.info("Identified a coordinator!") - logger.info("Proposing new edge") + # already have an edge established + if edge_rid: + prev_edge_bundle = ctx.cache.read(edge_rid) + edge_profile = prev_edge_bundle.validate_contents(EdgeProfile) + + if set(edge_profile.rid_types) == set(available_rid_types): + # no change in rid types + return + + log.info(f"Proposing updated edge with node provider {available_rid_types}") + + edge_profile.rid_types = available_rid_types + edge_profile.status = EdgeStatus.PROPOSED + edge_bundle = Bundle.generate(edge_rid, edge_profile.model_dump()) - if ctx.identity.profile.node_type == NodeType.FULL: - edge_type = EdgeType.WEBHOOK + # no existing edge else: - edge_type = EdgeType.POLL + log.info(f"Proposing new edge with node provider {available_rid_types}") + edge_bundle = generate_edge_bundle( + source=kobj.rid, + target=ctx.identity.rid, + rid_types=available_rid_types, + edge_type=( + EdgeType.WEBHOOK + if ctx.identity.profile.node_type == NodeType.FULL + else EdgeType.POLL + ) + ) # queued for processing - ctx.handle(bundle=generate_edge_bundle( - source=kobj.rid, - target=ctx.identity.rid, - edge_type=edge_type, - rid_types=[KoiNetNode] - )) - - logger.info("Catching up on network state") + 
ctx.kobj_queue.push(bundle=edge_bundle) + log.info("Catching up on network state") payload = ctx.request_handler.fetch_rids( node=kobj.rid, - rid_types=[KoiNetNode] + rid_types=available_rid_types ) for rid in payload.rids: if rid == ctx.identity.rid: - logger.info("Skipping myself") + log.info("Skipping myself") continue if ctx.cache.exists(rid): - logger.info(f"Skipping known RID {rid!r}") + log.info(f"Skipping known RID {rid!r}") continue # marked as external since we are handling RIDs from another node # will fetch remotely instead of checking local cache - ctx.handle(rid=rid, source=kobj.rid) - logger.info("Done") - + ctx.kobj_queue.push(rid=rid, source=kobj.rid) + log.info("Done") @KnowledgeHandler.create(HandlerType.Network) def basic_network_output_filter(ctx: HandlerContext, kobj: KnowledgeObject): - """Adds subscriber nodes to network targetes. + """Sets network targets of outgoing event for knowledge object. Allows broadcasting of all RID types this node is an event provider for (set in node profile), and other nodes have subscribed to. All - nodes will also broadcast about their own (internally sourced) KOI - node, and KOI edges that they are part of, regardless of their node - profile configuration. Finally, nodes will also broadcast about - edges to the other node involved (regardless of if they are - subscribed). + nodes will also broadcast events about their own (internally sourced) + KOI node, and KOI edges that they are part of, regardless of their + node profile configuration. Finally, nodes will also broadcast about + edges to the other node involved (regardless of if they are subscribed). 
""" - involves_me = False + involves_this_node = False + # internally source knowledge objects if kobj.source is None: - if (type(kobj.rid) == KoiNetNode): + if type(kobj.rid) is KoiNetNode: if (kobj.rid == ctx.identity.rid): - involves_me = True + involves_this_node = True - elif type(kobj.rid) == KoiNetEdge: + elif type(kobj.rid) is KoiNetEdge: edge_profile = kobj.bundle.validate_contents(EdgeProfile) if edge_profile.source == ctx.identity.rid: - logger.debug(f"Adding edge target '{edge_profile.target!r}' to network targets") - kobj.network_targets.update([edge_profile.target]) - involves_me = True + log.debug(f"Adding edge target '{edge_profile.target!r}' to network targets") + kobj.network_targets.add(edge_profile.target) + involves_this_node = True elif edge_profile.target == ctx.identity.rid: - logger.debug(f"Adding edge source '{edge_profile.source!r}' to network targets") - kobj.network_targets.update([edge_profile.source]) - involves_me = True + log.debug(f"Adding edge source '{edge_profile.source!r}' to network targets") + kobj.network_targets.add(edge_profile.source) + involves_this_node = True - if (type(kobj.rid) in ctx.identity.profile.provides.event or involves_me): - # broadcasts to subscribers if I'm an event provider of this RID type OR it involves me + if (type(kobj.rid) in ctx.identity.profile.provides.event or involves_this_node): subscribers = ctx.graph.get_neighbors( direction="out", allowed_type=type(kobj.rid) ) - logger.debug(f"Updating network targets with '{type(kobj.rid)}' subscribers: {subscribers}") + log.debug(f"Updating network targets with '{type(kobj.rid)}' subscribers: {subscribers}") kobj.network_targets.update(subscribers) return kobj @@ -268,9 +286,10 @@ def forget_edge_on_node_deletion(ctx: HandlerContext, kobj: KnowledgeObject): for edge_rid in ctx.graph.get_edges(): edge_bundle = ctx.cache.read(edge_rid) - if not edge_bundle: continue + if not edge_bundle: + continue edge_profile = edge_bundle.validate_contents(EdgeProfile) 
if kobj.rid in (edge_profile.source, edge_profile.target): - logger.debug("Identified edge with forgotten node") - ctx.handle(rid=edge_rid, event_type=EventType.FORGET) \ No newline at end of file + log.debug("Identified edge with forgotten node") + ctx.kobj_queue.push(rid=edge_rid, event_type=EventType.FORGET) \ No newline at end of file diff --git a/src/koi_net/processor/knowledge_object.py b/src/koi_net/processor/knowledge_object.py index 82713b3..c6c17d2 100644 --- a/src/koi_net/processor/knowledge_object.py +++ b/src/koi_net/processor/knowledge_object.py @@ -1,8 +1,7 @@ from pydantic import BaseModel from rid_lib import RID -from rid_lib.ext import Manifest -from rid_lib.ext.bundle import Bundle -from rid_lib.types.koi_net_node import KoiNetNode +from rid_lib.ext import Manifest, Bundle +from rid_lib.types import KoiNetNode from ..protocol.event import Event, EventType @@ -11,20 +10,22 @@ class KnowledgeObject(BaseModel): Represents an RID, manifest, bundle, or event. Contains three fields (`normalized_event_type`, `source`, `network_targets`) used for - decision making in the knowledge processing pipeline. The source - indicates which node this object originated from, or `None` if it - was generated by this node. + decision making in the knowledge processing pipeline. + + The source indicates which node this object originated from, or + `None` if it was generated by this node. The normalized event type indicates how the knowledge object is viewed from the perspective of this node, and what cache actions will take place. (`NEW`, `UPDATE`) -> cache write, `FORGET` -> - cache delete, `None` -> no cache action. + cache delete, `None` -> no cache action, and end of pipeline. The network targets indicate other nodes in the network this knowledge object will be sent to. The event sent to them will be constructed from this knowledge object's RID, manifest, contents, and normalized event type. 
""" + rid: RID manifest: Manifest | None = None contents: dict | None = None @@ -44,6 +45,7 @@ def from_rid( source: KoiNetNode | None = None ) -> "KnowledgeObject": """Creates a `KnowledgeObject` from an `RID`.""" + return cls( rid=rid, event_type=event_type, @@ -58,6 +60,7 @@ def from_manifest( source: KoiNetNode | None = None ) -> "KnowledgeObject": """Creates a `KnowledgeObject` from a `Manifest`.""" + return cls( rid=manifest.rid, manifest=manifest, @@ -73,6 +76,7 @@ def from_bundle( source: KoiNetNode | None = None ) -> "KnowledgeObject": """Creates a `KnowledgeObject` from a `Bundle`.""" + return cls( rid=bundle.rid, manifest=bundle.manifest, @@ -88,6 +92,7 @@ def from_event( source: KoiNetNode | None = None ) -> "KnowledgeObject": """Creates a `KnowledgeObject` from an `Event`.""" + return cls( rid=event.rid, manifest=event.manifest, @@ -97,10 +102,11 @@ def from_event( ) @property - def bundle(self): + def bundle(self) -> Bundle: """Bundle representation of knowledge object.""" + if self.manifest is None or self.contents is None: - return + raise ValueError("Knowledge object missing manifest or contents, cannot convert to `Bundle`.") return Bundle( manifest=self.manifest, @@ -108,8 +114,9 @@ def bundle(self): ) @property - def normalized_event(self): + def normalized_event(self) -> Event: """Event representation of knowledge object.""" + if self.normalized_event_type is None: raise ValueError("Internal event's normalized event type is None, cannot convert to Event") @@ -119,7 +126,7 @@ def normalized_event(self): event_type=EventType.FORGET ) - else: + else: return Event( rid=self.rid, event_type=self.normalized_event_type, diff --git a/src/koi_net/processor/kobj_queue.py b/src/koi_net/processor/kobj_queue.py new file mode 100644 index 0000000..bfad24c --- /dev/null +++ b/src/koi_net/processor/kobj_queue.py @@ -0,0 +1,51 @@ +import structlog +from queue import Queue +from rid_lib.core import RID +from rid_lib.ext import Bundle, Manifest +from rid_lib.types 
import KoiNetNode +from ..protocol.event import Event, EventType +from .knowledge_object import KnowledgeObject + +log = structlog.stdlib.get_logger() + + +class KobjQueue: + """Queue for knowledge objects entering the processing pipeline.""" + q: Queue[KnowledgeObject] + + def __init__(self): + self.q = Queue() + + def push( + self, + rid: RID | None = None, + manifest: Manifest | None = None, + bundle: Bundle | None = None, + event: Event | None = None, + kobj: KnowledgeObject | None = None, + event_type: EventType | None = None, + source: KoiNetNode | None = None + ): + """Pushes knowledge object to queue. + + Input may take the form of an RID, manifest, bundle, event, + or knowledge object (with an optional event type for RID, + manifest, or bundle objects). All objects will be normalized + to knowledge objects and queued. + """ + + if rid: + _kobj = KnowledgeObject.from_rid(rid, event_type, source) + elif manifest: + _kobj = KnowledgeObject.from_manifest(manifest, event_type, source) + elif bundle: + _kobj = KnowledgeObject.from_bundle(bundle, event_type, source) + elif event: + _kobj = KnowledgeObject.from_event(event, source) + elif kobj: + _kobj = kobj + else: + raise ValueError("One of 'rid', 'manifest', 'bundle', 'event', or 'kobj' must be provided") + + self.q.put(_kobj) + log.debug(f"Queued {_kobj!r}") diff --git a/src/koi_net/processor/knowledge_pipeline.py b/src/koi_net/processor/pipeline.py similarity index 57% rename from src/koi_net/processor/knowledge_pipeline.py rename to src/koi_net/processor/pipeline.py index cd0d0af..e1e8339 100644 --- a/src/koi_net/processor/knowledge_pipeline.py +++ b/src/koi_net/processor/pipeline.py @@ -1,11 +1,9 @@ -import logging -from typing import Callable -from rid_lib.core import RIDType +import structlog from rid_lib.types import KoiNetEdge, KoiNetNode from rid_lib.ext import Cache from ..protocol.event import EventType from ..network.request_handler import RequestHandler -from ..network.event_queue import 
NetworkEventQueue +from ..network.event_queue import EventQueue from ..network.graph import NetworkGraph from ..identity import NodeIdentity from .handler import ( @@ -15,55 +13,36 @@ StopChain ) from .knowledge_object import KnowledgeObject +from .context import HandlerContext -from typing import TYPE_CHECKING -if TYPE_CHECKING: - from ..context import HandlerContext - -logger = logging.getLogger(__name__) +log = structlog.stdlib.get_logger() class KnowledgePipeline: - handler_context: "HandlerContext" + handler_context: HandlerContext cache: Cache identity: NodeIdentity request_handler: RequestHandler - event_queue: NetworkEventQueue + event_queue: EventQueue graph: NetworkGraph - handlers: list[KnowledgeHandler] + knowledge_handlers: list[KnowledgeHandler] def __init__( self, - handler_context: "HandlerContext", + handler_context: HandlerContext, cache: Cache, request_handler: RequestHandler, - event_queue: NetworkEventQueue, + event_queue: EventQueue, graph: NetworkGraph, - default_handlers: list[KnowledgeHandler] = [] + knowledge_handlers: list[KnowledgeHandler] ): self.handler_context = handler_context self.cache = cache self.request_handler = request_handler self.event_queue = event_queue self.graph = graph - self.handlers = default_handlers + self.knowledge_handlers = knowledge_handlers - def add_handler(self, handler: KnowledgeHandler): - self.handlers.append(handler) - - def register_handler( - self, - handler_type: HandlerType, - rid_types: list[RIDType] | None = None, - event_types: list[EventType | None] | None = None - ): - """Assigns decorated function as handler for this processor.""" - def decorator(func: Callable) -> Callable: - handler = KnowledgeHandler(func, handler_type, rid_types, event_types) - self.add_handler(handler) - return func - return decorator - def call_handler_chain( self, handler_type: HandlerType, @@ -79,7 +58,7 @@ def call_handler_chain( Handlers will only be called in the chain if their handler and RID type match that of the 
inputted knowledge object. """ - for handler in self.handlers: + for handler in self.knowledge_handlers: if handler_type != handler.handler_type: continue @@ -89,72 +68,95 @@ def call_handler_chain( if handler.event_types and kobj.event_type not in handler.event_types: continue - logger.debug(f"Calling {handler_type} handler '{handler.func.__name__}'") + log.debug(f"Calling {handler_type} handler '{handler.func.__name__}'") - resp = handler.func( + resp = handler( ctx=self.handler_context, kobj=kobj.model_copy() ) # stops handler chain execution if resp is STOP_CHAIN: - logger.debug(f"Handler chain stopped by {handler.func.__name__}") + log.debug(f"Handler chain stopped by {handler.func.__name__}") return STOP_CHAIN + # kobj unmodified elif resp is None: continue + # kobj modified by handler elif isinstance(resp, KnowledgeObject): kobj = resp - logger.debug(f"Knowledge object modified by {handler.func.__name__}") + log.debug(f"Knowledge object modified by {handler.func.__name__}") + else: raise ValueError(f"Handler {handler.func.__name__} returned invalid response '{resp}'") - + return kobj def process(self, kobj: KnowledgeObject): - """Sends provided knowledge obejct through knowledge processing pipeline. + """Sends knowledge object through knowledge processing pipeline. - Handler chains are called in between major events in the pipeline, indicated by their handler type. Each handler type is guaranteed to have access to certain knowledge, and may affect a subsequent action in the pipeline. The five handler types are as follows: - - RID - provided RID; if event type is `FORGET`, this handler decides whether to delete the knowledge from the cache by setting the normalized event type to `FORGET`, otherwise this handler decides whether to validate the manifest (and fetch it if not provided). - - Manifest - provided RID, manifest; decides whether to validate the bundle (and fetch it if not provided). 
- - Bundle - provided RID, manifest, contents (bundle); decides whether to write knowledge to the cache by setting the normalized event type to `NEW` or `UPDATE`. - - Network - provided RID, manifest, contents (bundle); decides which nodes (if any) to broadcast an event about this knowledge to. (Note, if event type is `FORGET`, the manifest and contents will be retrieved from the local cache, and indicate the last state of the knowledge before it was deleted.) - - Final - provided RID, manifests, contents (bundle); final action taken after network broadcast. + Handler chains are called in between major events in the + pipeline, indicated by their handler type. Each handler type is + guaranteed to have access to certain knowledge, and may affect a + subsequent action in the pipeline. The five handler types are as + follows: + - RID - provided RID; if event type is `FORGET`, this handler + decides whether to delete the knowledge from the cache by + setting the normalized event type to `FORGET`, otherwise this + handler decides whether to validate the manifest (and fetch it + if not provided). After processing, if event type is `FORGET`, + the manifest and contents will be retrieved from the local cache, + and indicate the last state of the knowledge before it was + deleted. + - Manifest - provided RID, manifest; decides whether to validate + the bundle (and fetch it if not provided). + - Bundle - provided RID, manifest, contents (bundle); decides + whether to write knowledge to the cache by setting the + normalized event type to `NEW` or `UPDATE`. + - Network - provided RID, manifest, contents (bundle); decides + which nodes (if any) to broadcast an event about this knowledge + to. + - Final - provided RID, manifests, contents (bundle); final + action taken after network broadcast. - The pipeline may be stopped by any point by a single handler returning the `STOP_CHAIN` sentinel. In that case, the process will exit immediately. 
Further handlers of that type and later handler chains will not be called. + The pipeline may be stopped at any point by a single handler + returning the `STOP_CHAIN` sentinel. In that case, the process + will exit immediately. Further handlers of that type and later + handler chains will not be called. """ - logger.debug(f"Handling {kobj!r}") + log.debug(f"Handling {kobj!r}") kobj = self.call_handler_chain(HandlerType.RID, kobj) if kobj is STOP_CHAIN: return if kobj.event_type == EventType.FORGET: bundle = self.cache.read(kobj.rid) - if not bundle: - logger.debug("Local bundle not found") + if not bundle: + log.debug("Local bundle not found") return # the bundle (to be deleted) attached to kobj for downstream analysis - logger.debug("Adding local bundle (to be deleted) to knowledge object") + log.debug("Adding local bundle (to be deleted) to knowledge object") kobj.manifest = bundle.manifest kobj.contents = bundle.contents else: # attempt to retrieve manifest if not kobj.manifest: - logger.debug("Manifest not found") + log.debug("Manifest not found") if not kobj.source: return - logger.debug("Attempting to fetch remote manifest from source") + log.debug("Attempting to fetch remote manifest from source") payload = self.request_handler.fetch_manifests( node=kobj.source, rids=[kobj.rid] ) - + if not payload.manifests: - logger.debug("Failed to find manifest") + log.debug("Failed to find manifest") return kobj.manifest = payload.manifests[0] @@ -163,58 +165,57 @@ def process(self, kobj: KnowledgeObject): if kobj is STOP_CHAIN: return # attempt to retrieve bundle - if not kobj.bundle: - logger.debug("Bundle not found") + if not kobj.contents: + log.debug("Bundle not found") if kobj.source is None: return - logger.debug("Attempting to fetch remote bundle from source") + log.debug("Attempting to fetch remote bundle from source") payload = self.request_handler.fetch_bundles( node=kobj.source, rids=[kobj.rid] ) if not payload.bundles: - logger.debug("Failed to find 
bundle") + log.debug("Failed to find bundle") return - bundle = payload.bundles[0] + bundle = payload.bundles[0] if kobj.manifest != bundle.manifest: - logger.warning("Retrieved bundle contains a different manifest") + log.warning("Retrieved bundle contains a different manifest") kobj.manifest = bundle.manifest - kobj.contents = bundle.contents + kobj.contents = bundle.contents kobj = self.call_handler_chain(HandlerType.Bundle, kobj) if kobj is STOP_CHAIN: return if kobj.normalized_event_type in (EventType.UPDATE, EventType.NEW): - logger.info(f"Writing to cache: {kobj!r}") + log.info(f"Writing to cache: {kobj!r}") self.cache.write(kobj.bundle) elif kobj.normalized_event_type == EventType.FORGET: - logger.info(f"Deleting from cache: {kobj!r}") + log.info(f"Deleting from cache: {kobj!r}") self.cache.delete(kobj.rid) else: - logger.debug("Normalized event type was never set, no cache or network operations will occur") + log.debug("Normalized event type was not set, no cache or network operations will occur") return if type(kobj.rid) in (KoiNetNode, KoiNetEdge): - logger.debug("Change to node or edge, regenerating network graph") + log.debug("Change to node or edge, regenerating network graph") self.graph.generate() kobj = self.call_handler_chain(HandlerType.Network, kobj) if kobj is STOP_CHAIN: return if kobj.network_targets: - logger.debug(f"Broadcasting event to {len(kobj.network_targets)} network target(s)") + log.debug(f"Broadcasting event to {len(kobj.network_targets)} network target(s)") else: - logger.debug("No network targets set") + log.debug("No network targets set") for node in kobj.network_targets: - self.event_queue.push_event_to(kobj.normalized_event, node) - self.event_queue.flush_webhook_queue(node) + self.event_queue.push(kobj.normalized_event, node) kobj = self.call_handler_chain(HandlerType.Final, kobj) diff --git a/src/koi_net/protocol/api_models.py b/src/koi_net/protocol/api_models.py index ae11f72..c6d1152 100644 --- 
a/src/koi_net/protocol/api_models.py +++ b/src/koi_net/protocol/api_models.py @@ -1,6 +1,6 @@ """Pydantic models for request and response objects in the KOI-net API.""" -from typing import Literal +from typing import Annotated, Literal from pydantic import BaseModel, Field from rid_lib import RID, RIDType from rid_lib.ext import Bundle, Manifest @@ -60,4 +60,8 @@ class ErrorResponse(BaseModel): type RequestModels = EventsPayload | PollEvents | FetchRids | FetchManifests | FetchBundles type ResponseModels = RidsPayload | ManifestsPayload | BundlesPayload | EventsPayload | ErrorResponse -type ApiModels = RequestModels | ResponseModels \ No newline at end of file + +type ApiModels = Annotated[ + RequestModels | ResponseModels, + Field(discriminator="type") +] \ No newline at end of file diff --git a/src/koi_net/protocol/edge.py b/src/koi_net/protocol/edge.py index daeecf0..aded064 100644 --- a/src/koi_net/protocol/edge.py +++ b/src/koi_net/protocol/edge.py @@ -28,9 +28,12 @@ def generate_edge_bundle( rid_types: list[RIDType], edge_type: EdgeType ) -> Bundle: + """Returns edge bundle.""" + edge_rid = KoiNetEdge(sha256_hash( str(source) + str(target) )) + edge_profile = EdgeProfile( source=source, target=target, @@ -38,8 +41,10 @@ def generate_edge_bundle( edge_type=edge_type, status=EdgeStatus.PROPOSED ) + edge_bundle = Bundle.generate( edge_rid, edge_profile.model_dump() ) + return edge_bundle \ No newline at end of file diff --git a/src/koi_net/protocol/envelope.py b/src/koi_net/protocol/envelope.py index 0d77d6a..3cce4fe 100644 --- a/src/koi_net/protocol/envelope.py +++ b/src/koi_net/protocol/envelope.py @@ -1,4 +1,4 @@ -import logging +import structlog from typing import Generic, TypeVar from pydantic import BaseModel, ConfigDict from rid_lib.types import KoiNetNode @@ -6,45 +6,52 @@ from .secure import PrivateKey, PublicKey from .api_models import RequestModels, ResponseModels - -logger = logging.getLogger(__name__) +log = structlog.stdlib.get_logger() T = 
TypeVar("T", bound=RequestModels | ResponseModels) class SignedEnvelope(BaseModel, Generic[T]): - model_config = ConfigDict(exclude_none=True) - payload: T source_node: KoiNetNode target_node: KoiNetNode signature: str - def verify_with(self, pub_key: PublicKey): + model_config = ConfigDict(exclude_none=True) + + def verify_with(self, pub_key: PublicKey): + """Verifies signed envelope with public key. + + Raises `cryptography.exceptions.InvalidSignature` on failure. + """ + # IMPORTANT: calling `model_dump()` loses all typing! when converting between SignedEnvelope and UnsignedEnvelope, use the Pydantic classes, not the dictionary form + unsigned_envelope = UnsignedEnvelope[T]( payload=self.payload, source_node=self.source_node, target_node=self.target_node ) - logger.debug(f"Verifying envelope: {unsigned_envelope.model_dump_json(exclude_none=True)}") - + log.debug(f"Verifying envelope: {unsigned_envelope.model_dump_json(exclude_none=True)}") + pub_key.verify( self.signature, unsigned_envelope.model_dump_json(exclude_none=True).encode() ) class UnsignedEnvelope(BaseModel, Generic[T]): - model_config = ConfigDict(exclude_none=True) - payload: T source_node: KoiNetNode target_node: KoiNetNode + model_config = ConfigDict(exclude_none=True) + def sign_with(self, priv_key: PrivateKey) -> SignedEnvelope[T]: - logger.debug(f"Signing envelope: {self.model_dump_json(exclude_none=True)}") - logger.debug(f"Type: [{type(self.payload)}]") + """Signs with private key and returns `SignedEnvelope`.""" + + log.debug(f"Signing envelope: {self.model_dump_json(exclude_none=True)}") + log.debug(f"Type: [{type(self.payload)}]") signature = priv_key.sign( self.model_dump_json(exclude_none=True).encode() diff --git a/src/koi_net/protocol/errors.py b/src/koi_net/protocol/errors.py index 2d1ebe7..1784097 100644 --- a/src/koi_net/protocol/errors.py +++ b/src/koi_net/protocol/errors.py @@ -1,3 +1,5 @@ +"""Defines KOI-net protocol errors.""" + from enum import StrEnum diff --git 
a/src/koi_net/protocol/model_map.py b/src/koi_net/protocol/model_map.py new file mode 100644 index 0000000..192cb38 --- /dev/null +++ b/src/koi_net/protocol/model_map.py @@ -0,0 +1,62 @@ +from typing import NamedTuple +from pydantic import BaseModel +from .envelope import SignedEnvelope +from .consts import ( + BROADCAST_EVENTS_PATH, + POLL_EVENTS_PATH, + FETCH_BUNDLES_PATH, + FETCH_MANIFESTS_PATH, + FETCH_RIDS_PATH +) +from .api_models import ( + EventsPayload, + PollEvents, + FetchBundles, + BundlesPayload, + FetchManifests, + ManifestsPayload, + FetchRids, + RidsPayload +) + + +class Models(NamedTuple): + request: type[BaseModel] + response: type[BaseModel] | None + request_envelope: type[SignedEnvelope] + response_envelope: type[SignedEnvelope] | None + + +# maps API paths to request and response models +API_MODEL_MAP: dict[str, Models] = { + BROADCAST_EVENTS_PATH: Models( + request=EventsPayload, + response=None, + request_envelope=SignedEnvelope[EventsPayload], + response_envelope=None + ), + POLL_EVENTS_PATH: Models( + request=PollEvents, + response=EventsPayload, + request_envelope=SignedEnvelope[PollEvents], + response_envelope=SignedEnvelope[EventsPayload] + ), + FETCH_BUNDLES_PATH: Models( + request=FetchBundles, + response=BundlesPayload, + request_envelope=SignedEnvelope[FetchBundles], + response_envelope=SignedEnvelope[BundlesPayload] + ), + FETCH_MANIFESTS_PATH: Models( + request=FetchManifests, + response=ManifestsPayload, + request_envelope=SignedEnvelope[FetchManifests], + response_envelope=SignedEnvelope[ManifestsPayload] + ), + FETCH_RIDS_PATH: Models( + request=FetchRids, + response=RidsPayload, + request_envelope=SignedEnvelope[FetchRids], + response_envelope=SignedEnvelope[RidsPayload] + ) +} \ No newline at end of file diff --git a/src/koi_net/protocol/secure.py b/src/koi_net/protocol/secure.py index 683f8be..8aa9d36 100644 --- a/src/koi_net/protocol/secure.py +++ b/src/koi_net/protocol/secure.py @@ -1,4 +1,5 @@ -import logging +from 
rid_lib.types import KoiNetNode +import structlog from base64 import b64decode, b64encode from cryptography.hazmat.primitives import hashes from cryptography.hazmat.primitives.asymmetric import ec @@ -9,11 +10,11 @@ encode_dss_signature ) -logger = logging.getLogger(__name__) +log = structlog.stdlib.get_logger() def der_to_raw_signature(der_signature: bytes, curve=ec.SECP256R1()) -> bytes: - """Convert a DER-encoded signature to raw r||s format.""" + """Converts a DER-encoded signature to raw r||s format.""" # Decode the DER signature to get r and s r, s = decode_dss_signature(der_signature) @@ -30,7 +31,7 @@ def der_to_raw_signature(der_signature: bytes, curve=ec.SECP256R1()) -> bytes: def raw_to_der_signature(raw_signature: bytes, curve=ec.SECP256R1()) -> bytes: - """Convert a raw r||s signature to DER format.""" + """Converts a raw r||s signature to DER format.""" # Determine byte length based on curve bit size byte_length = (curve.key_size + 7) // 8 @@ -58,13 +59,16 @@ def __init__(self, priv_key): @classmethod def generate(cls): + """Generates a new `PrivateKey`.""" return cls(priv_key=ec.generate_private_key(ec.SECP256R1())) def public_key(self) -> "PublicKey": + """Returns instance of `PublicKey` derived from this private key.""" return PublicKey(self.priv_key.public_key()) @classmethod def from_pem(cls, priv_key_pem: str, password: str): + """Loads `PrivateKey` from encrypted PEM string.""" return cls( priv_key=serialization.load_pem_private_key( data=priv_key_pem.encode(), @@ -73,6 +77,7 @@ def from_pem(cls, priv_key_pem: str, password: str): ) def to_pem(self, password: str) -> str: + """Saves `PrivateKey` to encrypted PEM string.""" return self.priv_key.private_bytes( encoding=serialization.Encoding.PEM, format=serialization.PrivateFormat.PKCS8, @@ -80,6 +85,7 @@ def to_pem(self, password: str) -> str: ).decode() def sign(self, message: bytes) -> str: + """Returns base64 encoded raw signature bytes of the form r||s.""" hashed_message = 
sha256_hash(message.decode()) der_signature_bytes = self.priv_key.sign( @@ -91,12 +97,12 @@ def sign(self, message: bytes) -> str: signature = b64encode(raw_signature_bytes).decode() - logger.debug(f"Signing message with [{self.public_key().to_der()}]") - logger.debug(f"hash: {hashed_message}") - logger.debug(f"signature: {signature}") + log.debug(f"Signing message with [{self.public_key().to_der()}]") + log.debug(f"hash: {hashed_message}") + log.debug(f"signature: {signature}") return signature - + class PublicKey: pub_key: ec.EllipticCurvePublicKey @@ -106,6 +112,7 @@ def __init__(self, pub_key): @classmethod def from_pem(cls, pub_key_pem: str): + """Loads `PublicKey` from PEM string.""" return cls( pub_key=serialization.load_pem_public_key( data=pub_key_pem.encode() @@ -113,13 +120,15 @@ def from_pem(cls, pub_key_pem: str): ) def to_pem(self) -> str: + """Saves `PublicKey` to PEM string.""" return self.pub_key.public_bytes( encoding=serialization.Encoding.PEM, format=serialization.PublicFormat.SubjectPublicKeyInfo ).decode() @classmethod - def from_der(cls, pub_key_der: str): + def from_der(cls, pub_key_der: str): + """Loads `PublicKey` from base64 encoded DER string.""" return cls( pub_key=serialization.load_der_public_key( data=b64decode(pub_key_der) @@ -127,6 +136,7 @@ def from_der(cls, pub_key_der: str): ) def to_der(self) -> str: + """Saves `PublicKey` to base64 encoded DER string.""" return b64encode( self.pub_key.public_bytes( encoding=serialization.Encoding.DER, @@ -134,25 +144,22 @@ def to_der(self) -> str: ) ).decode() + def to_node_rid(self, name) -> KoiNetNode: + """Returns an orn:koi-net.node RID from hashed DER string.""" + return KoiNetNode( + name=name, + hash=sha256_hash(self.to_der()) + ) - def verify(self, signature: str, message: bytes) -> bool: - # hashed_message = sha256_hash(message.decode()) + def verify(self, signature: str, message: bytes): + """Verifies a signature for a message. 
- # print(message.hex()) - # print() - # print(hashed_message) - # print() - # print(message.decode()) - - # logger.debug(f"Verifying message with [{self.to_der()}]") - # logger.debug(f"hash: {hashed_message}") - # logger.debug(f"signature: {signature}") + Raises `cryptography.exceptions.InvalidSignature` on failure. + """ raw_signature_bytes = b64decode(signature) der_signature_bytes = raw_to_der_signature(raw_signature_bytes) - # NOTE: throws cryptography.exceptions.InvalidSignature on failure - self.pub_key.verify( signature=der_signature_bytes, data=message, diff --git a/src/koi_net/secure.py b/src/koi_net/secure_manager.py similarity index 70% rename from src/koi_net/secure.py rename to src/koi_net/secure_manager.py index 98f0044..fb49c20 100644 --- a/src/koi_net/secure.py +++ b/src/koi_net/secure_manager.py @@ -1,8 +1,6 @@ -import logging -from functools import wraps - +import structlog import cryptography.exceptions -from rid_lib.ext import Bundle +from rid_lib.ext import Bundle, Cache from rid_lib.ext.utils import sha256_hash from rid_lib.types import KoiNetNode from .identity import NodeIdentity @@ -18,42 +16,44 @@ InvalidSignatureError, InvalidTargetError ) -from .effector import Effector -from .config import NodeConfig +from .config.core import NodeConfig -logger = logging.getLogger(__name__) +log = structlog.stdlib.get_logger() -class Secure: +class SecureManager: """Subsystem handling secure protocol logic.""" identity: NodeIdentity - effector: Effector + cache: Cache config: NodeConfig priv_key: PrivateKey def __init__( self, identity: NodeIdentity, - effector: Effector, + cache: Cache, config: NodeConfig ): self.identity = identity - self.effector = effector + self.cache = cache self.config = config - - self.priv_key = self._load_priv_key() - def _load_priv_key(self) -> PrivateKey: + def start(self): + self.load_priv_key() + + def load_priv_key(self) -> PrivateKey: """Loads private key from PEM file path in config.""" + + # TODO: handle missing 
private key with open(self.config.koi_net.private_key_pem_path, "r") as f: priv_key_pem = f.read() - return PrivateKey.from_pem( + self.priv_key = PrivateKey.from_pem( priv_key_pem=priv_key_pem, password=self.config.env.priv_key_password ) - def _handle_unknown_node(self, envelope: SignedEnvelope) -> Bundle | None: + def handle_unknown_node(self, envelope: SignedEnvelope) -> Bundle | None: """Attempts to find node profile in proided envelope. If an unknown node sends an envelope, it may still be able to be @@ -89,8 +89,8 @@ def validate_envelope(self, envelope: SignedEnvelope): """Validates signed envelope from another node.""" node_bundle = ( - self.effector.deref(envelope.source_node) or - self._handle_unknown_node(envelope) + self.cache.read(envelope.source_node) or + self.handle_unknown_node(envelope) ) if not node_bundle: @@ -112,27 +112,4 @@ def validate_envelope(self, envelope: SignedEnvelope): # check that this node is the target of the envelope if envelope.target_node != self.identity.rid: raise InvalidTargetError(f"Envelope target {envelope.target_node!r} is not me") - - def envelope_handler(self, func): - """Wrapper function validates envelopes for server endpoints. - - Validates incoming envelope and passes payload to endpoint - handler. Resulting payload is returned as a signed envelope. 
- """ - @wraps(func) - async def wrapper(req: SignedEnvelope, *args, **kwargs) -> SignedEnvelope | None: - logger.info("Validating envelope") - - self.validate_envelope(req) - logger.info("Calling endpoint handler") - - result = await func(req, *args, **kwargs) - - if result is not None: - logger.info("Creating response envelope") - return self.create_envelope( - payload=result, - target=req.source_node - ) - return wrapper diff --git a/src/koi_net/server.py b/src/koi_net/server.py deleted file mode 100644 index 86f0347..0000000 --- a/src/koi_net/server.py +++ /dev/null @@ -1,144 +0,0 @@ -import logging -import uvicorn -from contextlib import asynccontextmanager -from fastapi import FastAPI, APIRouter -from fastapi.responses import JSONResponse -from .network.event_queue import NetworkEventQueue -from .network.response_handler import ResponseHandler -from .processor.interface import ProcessorInterface -from .protocol.api_models import ( - PollEvents, - FetchRids, - FetchManifests, - FetchBundles, - EventsPayload, - RidsPayload, - ManifestsPayload, - BundlesPayload, - ErrorResponse -) -from .protocol.errors import ProtocolError -from .protocol.envelope import SignedEnvelope -from .protocol.consts import ( - BROADCAST_EVENTS_PATH, - POLL_EVENTS_PATH, - FETCH_RIDS_PATH, - FETCH_MANIFESTS_PATH, - FETCH_BUNDLES_PATH -) -from .secure import Secure -from .lifecycle import NodeLifecycle -from .config import NodeConfig - -logger = logging.getLogger(__name__) - - -class NodeServer: - """Manages FastAPI server and event handling for full nodes.""" - config: NodeConfig - lifecycle: NodeLifecycle - secure: Secure - processor: ProcessorInterface - event_queue: NetworkEventQueue - response_handler: ResponseHandler - app: FastAPI - router: APIRouter - - def __init__( - self, - config: NodeConfig, - lifecycle: NodeLifecycle, - secure: Secure, - processor: ProcessorInterface, - event_queue: NetworkEventQueue, - response_handler: ResponseHandler - ): - self.config = config - 
self.lifecycle = lifecycle - self.secure = secure - self.processor = processor - self.event_queue = event_queue - self.response_handler = response_handler - self._build_app() - - def _build_app(self): - """Builds FastAPI app and adds endpoints.""" - @asynccontextmanager - async def lifespan(*args, **kwargs): - async with self.lifecycle.async_run(): - yield - - self.app = FastAPI( - lifespan=lifespan, - title="KOI-net Protocol API", - version="1.0.0" - ) - - self.router = APIRouter(prefix="/koi-net") - self.app.add_exception_handler(ProtocolError, self.protocol_error_handler) - - def _add_endpoint(path, func): - self.router.add_api_route( - path=path, - endpoint=self.secure.envelope_handler(func), - methods=["POST"], - response_model_exclude_none=True - ) - - _add_endpoint(BROADCAST_EVENTS_PATH, self.broadcast_events) - _add_endpoint(POLL_EVENTS_PATH, self.poll_events) - _add_endpoint(FETCH_RIDS_PATH, self.fetch_rids) - _add_endpoint(FETCH_MANIFESTS_PATH, self.fetch_manifests) - _add_endpoint(FETCH_BUNDLES_PATH, self.fetch_bundles) - - self.app.include_router(self.router) - - def run(self): - """Starts FastAPI server and event handler.""" - uvicorn.run( - app=self.app, - host=self.config.server.host, - port=self.config.server.port - ) - - def protocol_error_handler(self, request, exc: ProtocolError): - """Catches `ProtocolError` and returns as `ErrorResponse`.""" - logger.info(f"caught protocol error: {exc}") - resp = ErrorResponse(error=exc.error_type) - logger.info(f"returning error response: {resp}") - return JSONResponse( - status_code=400, - content=resp.model_dump(mode="json") - ) - - async def broadcast_events(self, req: SignedEnvelope[EventsPayload]): - """Handles events broadcast endpoint.""" - logger.info(f"Request to {BROADCAST_EVENTS_PATH}, received {len(req.payload.events)} event(s)") - for event in req.payload.events: - self.processor.handle(event=event, source=req.source_node) - - async def poll_events( - self, req: SignedEnvelope[PollEvents] - ) -> 
import threading

from koi_net.build import comp_order


class End:
    """Sentinel type for the STOP_WORKER marker pushed to worker queues."""


# Unique sentinel instance; workers compare dequeued items against it
# with `is` to detect a shutdown request.
STOP_WORKER = End()


@comp_order.worker
class ThreadWorker:
    """Base class for thread workers.

    Subclasses override `run()` with their processing loop; `start()`
    launches it on a dedicated thread.
    """

    thread: threading.Thread

    def __init__(self):
        # Name the thread after the concrete worker class so it is
        # identifiable in thread dumps and debuggers.
        self.thread = threading.Thread(
            target=self.run,
            name=type(self).__name__
        )

    def start(self):
        """Starts the worker's thread."""
        self.thread.start()

    def run(self):
        """Processing loop for thread; subclasses override this."""
class EventProcessingWorker(ThreadWorker):
    """Thread worker that processes the `event_queue`.

    Dequeues (event, target) items, routes each event into either the
    broadcast buffer (full nodes) or the poll buffer (partial nodes),
    and flushes broadcast buffers when they grow past `max_buf_len` or
    sit unflushed longer than `max_wait_time`.
    """

    def __init__(
        self,
        config: NodeConfig,
        cache: Cache,
        event_queue: EventQueue,
        request_handler: RequestHandler,
        poll_event_buf: EventBuffer,
        broadcast_event_buf: EventBuffer
    ):
        self.event_queue = event_queue
        self.request_handler = request_handler

        self.config = config
        self.cache = cache
        self.poll_event_buf = poll_event_buf
        self.broadcast_event_buf = broadcast_event_buf

        super().__init__()

    def flush_and_broadcast(self, target: KoiNetNode):
        """Broadcasts all events to target in event buffer.

        Failures are printed and swallowed so one unreachable peer
        cannot stall the worker loop.
        """
        # NOTE: the docstring previously sat *after* this statement,
        # making it a dead string literal; moved to the top.
        events = self.broadcast_event_buf.flush(target)

        try:
            self.request_handler.broadcast_events(target, events=events)
        except Exception:
            traceback.print_exc()

    def stop(self):
        """Requests shutdown; run() flushes all buffers before exiting."""
        self.event_queue.q.put(STOP_WORKER)

    def run(self):
        """Processing loop: dequeue, route, and flush on size/time limits."""
        log.info("Started event worker")
        while True:
            try:
                item = self.event_queue.q.get(
                    timeout=self.config.koi_net.event_worker.queue_timeout)

                try:
                    if item is STOP_WORKER:
                        log.info("Received 'STOP_WORKER' signal, flushing all buffers...")
                        for target in list(self.broadcast_event_buf.buffers.keys()):
                            self.flush_and_broadcast(target)
                        return

                    log.info(f"Dequeued {item.event!r} -> {item.target!r}")

                    # determines which buffer to push event to based on target node type
                    node_bundle = self.cache.read(item.target)
                    if node_bundle:
                        node_profile = node_bundle.validate_contents(NodeProfile)

                        if node_profile.node_type == NodeType.FULL:
                            self.broadcast_event_buf.push(item.target, item.event)

                        elif node_profile.node_type == NodeType.PARTIAL:
                            # partial nodes poll for events; no broadcast flush needed
                            self.poll_event_buf.push(item.target, item.event)
                            continue

                    elif item.target == self.config.koi_net.first_contact.rid:
                        self.broadcast_event_buf.push(item.target, item.event)

                    else:
                        log.warning(f"Couldn't handle event {item.event!r} in queue, node {item.target!r} unknown to me")
                        continue

                    if self.broadcast_event_buf.buf_len(item.target) > self.config.koi_net.event_worker.max_buf_len:
                        # BUG FIX: was `self.flush_and_broadcast(target)` with
                        # `target` undefined in this scope -> NameError once a
                        # buffer exceeded max_buf_len.
                        self.flush_and_broadcast(item.target)

                finally:
                    self.event_queue.q.task_done()

            except queue.Empty:
                # On timeout, check all buffers for max wait time
                for target in list(self.broadcast_event_buf.buffers.keys()):
                    start_time = self.broadcast_event_buf.start_time.get(target)
                    now = time.time()

                    if (start_time is None) or (self.broadcast_event_buf.buf_len(target) == 0):
                        continue

                    if (now - start_time) >= self.config.koi_net.event_worker.max_wait_time:
                        self.flush_and_broadcast(target)

            except Exception:
                traceback.print_exc()
class KnowledgeProcessingWorker(ThreadWorker):
    """Thread worker that processes the `kobj_queue`.

    Dequeues knowledge objects and runs each through the knowledge
    pipeline until the STOP_WORKER sentinel is received.
    """

    def __init__(
        self,
        config: NodeConfig,
        kobj_queue: KobjQueue,
        pipeline: KnowledgePipeline
    ):
        self.config = config
        self.kobj_queue = kobj_queue
        self.pipeline = pipeline

        super().__init__()

    def stop(self):
        """Requests shutdown by pushing the sentinel onto the queue."""
        self.kobj_queue.q.put(STOP_WORKER)

    def run(self):
        """Processing loop: dequeue kobjs and feed them to the pipeline."""
        log.info("Started kobj worker")
        while True:
            try:
                item = self.kobj_queue.q.get(
                    timeout=self.config.koi_net.kobj_worker.queue_timeout)
                try:
                    if item is STOP_WORKER:
                        log.info("Received 'STOP_WORKER' signal, shutting down...")
                        return

                    log.info(f"Dequeued {item!r}")

                    self.pipeline.process(item)
                finally:
                    # mark the item done even if the pipeline raised
                    self.kobj_queue.q.task_done()

            except queue.Empty:
                # timeout is expected; loop again so stop() is noticed promptly
                pass

            except Exception:
                # keep the worker alive on pipeline errors; print the traceback
                traceback.print_exc()
"https://files.pythonhosted.org/packages/79/b9/6e672db4fec07349e7a8a8172c1a6ae235c58679ca29c3f86a61b5e59ff3/structlog-25.4.0.tar.gz", hash = "sha256:186cd1b0a8ae762e29417095664adf1d6a31702160a46dacb7796ea82f7409e4", size = 1369138, upload-time = "2025-06-02T08:21:12.971Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/a0/4a/97ee6973e3a73c74c8120d59829c3861ea52210667ec3e7a16045c62b64d/structlog-25.4.0-py3-none-any.whl", hash = "sha256:fe809ff5c27e557d14e613f45ca441aabda051d119ee5a0102aaba6ce40eed2c", size = 68720, upload-time = "2025-06-02T08:21:11.43Z" }, +] + [[package]] name = "tomli" version = "2.2.1"