diff --git a/docker/pyproject.deps.toml b/docker/pyproject.deps.toml index e58924f..7b069ca 100644 --- a/docker/pyproject.deps.toml +++ b/docker/pyproject.deps.toml @@ -1,6 +1,6 @@ [project] name = "mcp-plex" -version = "2.0.7" +version = "2.0.9" requires-python = ">=3.11,<3.13" dependencies = [ "fastmcp>=2.11.2", diff --git a/mcp_plex/common/cache.py b/mcp_plex/common/cache.py index e0aa9b5..7680523 100644 --- a/mcp_plex/common/cache.py +++ b/mcp_plex/common/cache.py @@ -1,4 +1,5 @@ """In-memory LRU cache for media payload and artwork data.""" + from __future__ import annotations from collections import OrderedDict diff --git a/mcp_plex/common/types.py b/mcp_plex/common/types.py index 0e1a342..a64cf62 100644 --- a/mcp_plex/common/types.py +++ b/mcp_plex/common/types.py @@ -1,4 +1,5 @@ """Type definitions for Plex metadata and external services.""" + from __future__ import annotations from dataclasses import dataclass @@ -221,6 +222,7 @@ class ExternalIDs: imdb: Optional[str] = None tmdb: Optional[str] = None + __all__ = [ "IMDbRating", "IMDbTitle", @@ -244,4 +246,3 @@ class ExternalIDs: JSONValue: TypeAlias = JSONScalar | Sequence["JSONValue"] | Mapping[str, "JSONValue"] JSONMapping: TypeAlias = Mapping[str, JSONValue] MutableJSONMapping: TypeAlias = MutableMapping[str, JSONValue] - diff --git a/mcp_plex/loader/__init__.py b/mcp_plex/loader/__init__.py index 8ca8bbf..c41a328 100644 --- a/mcp_plex/loader/__init__.py +++ b/mcp_plex/loader/__init__.py @@ -1,4 +1,5 @@ """Loader orchestration utilities and staged pipeline helpers.""" + from __future__ import annotations import asyncio @@ -183,7 +184,9 @@ def _build_loader_orchestrator( max_concurrent_upserts: int, imdb_config: IMDbRuntimeConfig, qdrant_config: QdrantRuntimeConfig, -) -> tuple[LoaderOrchestrator, list[AggregatedItem], asyncio.Queue[list[models.PointStruct]]]: +) -> tuple[ + LoaderOrchestrator, list[AggregatedItem], asyncio.Queue[list[models.PointStruct]] +]: """Wire the staged loader pipeline and return the orchestrator helpers.""" from .pipeline.ingestion import IngestionStage @@ -206,8 +209,7 @@ async def _upsert_aggregated( return items.extend(batch) points = [ - build_point(item, dense_model_name, sparse_model_name) - for item in batch + build_point(item, dense_model_name, sparse_model_name) for item in batch ] for point_chunk in chunk_sequence(points, upsert_buffer_size): await _upsert_in_batches( @@ -332,7 +334,9 @@ async def run( require_positive(max_concurrent_upserts, name="max_concurrent_upserts") require_positive(qdrant_retry_attempts, name="qdrant_retry_attempts") - imdb_retry_queue = _load_imdb_retry_queue(imdb_queue_path) if imdb_queue_path else IMDbRetryQueue() + imdb_retry_queue = ( + _load_imdb_retry_queue(imdb_queue_path) if imdb_queue_path else IMDbRetryQueue() + ) imdb_config = IMDbRuntimeConfig( cache=imdb_cache, max_retries=imdb_max_retries, @@ -443,7 +447,9 @@ async def run( if imdb_queue_path: _persist_imdb_retry_queue(imdb_queue_path, imdb_config.retry_queue) - json.dump([item.model_dump(mode="json") for item in items], fp=sys.stdout, indent=2) + json.dump( + [item.model_dump(mode="json") for item in items], fp=sys.stdout, indent=2 + ) sys.stdout.write("\n") finally: await client.close() @@ -483,9 +489,7 @@ async def load_media( """Orchestrate one or more runs of :func:`run`.""" if delay < 0: - raise ValueError( - f"Delay between runs must be non-negative; received {delay!r}" - ) + raise ValueError(f"Delay between runs must be non-negative; received {delay!r}") while True: await run( @@ -521,4 +525,3 @@ 
async def load_media( break await asyncio.sleep(delay) - diff --git a/mcp_plex/loader/__main__.py b/mcp_plex/loader/__main__.py index b9e282f..e44f388 100644 --- a/mcp_plex/loader/__main__.py +++ b/mcp_plex/loader/__main__.py @@ -1,4 +1,5 @@ """Module entrypoint for ``python -m mcp_plex.loader``.""" + from __future__ import annotations from .cli import main diff --git a/mcp_plex/loader/cli.py b/mcp_plex/loader/cli.py index bb6c8aa..da90390 100644 --- a/mcp_plex/loader/cli.py +++ b/mcp_plex/loader/cli.py @@ -1,4 +1,5 @@ """Command-line interface for the loader pipeline.""" + from __future__ import annotations import asyncio @@ -159,7 +160,10 @@ "--log-level", envvar="LOG_LEVEL", show_envvar=True, - type=click.Choice(["critical", "error", "warning", "info", "debug", "notset"], case_sensitive=False), + type=click.Choice( + ["critical", "error", "warning", "info", "debug", "notset"], + case_sensitive=False, + ), default="info", show_default=True, help="Logging level for console output", diff --git a/mcp_plex/loader/pipeline/channels.py b/mcp_plex/loader/pipeline/channels.py index 85326d6..ab5f41c 100644 --- a/mcp_plex/loader/pipeline/channels.py +++ b/mcp_plex/loader/pipeline/channels.py @@ -5,6 +5,7 @@ constants. The loader still emits ``None`` as a completion token for compatibility while downstream components migrate to sentinel-only signaling. """ + from __future__ import annotations import asyncio @@ -74,9 +75,7 @@ class SampleBatch: IngestBatch = MovieBatch | EpisodeBatch | SampleBatch IngestQueueItem: TypeAlias = IngestBatch | None | IngestSentinel -PersistenceQueueItem: TypeAlias = ( - PersistencePayload | None | PersistenceSentinel -) +PersistenceQueueItem: TypeAlias = PersistencePayload | None | PersistenceSentinel IngestQueue: TypeAlias = asyncio.Queue[IngestQueueItem] PersistenceQueue: TypeAlias = asyncio.Queue[PersistenceQueueItem] diff --git a/mcp_plex/loader/pipeline/enrichment.py b/mcp_plex/loader/pipeline/enrichment.py index 90838d5..da1c243 100644 --- a/mcp_plex/loader/pipeline/enrichment.py +++ b/mcp_plex/loader/pipeline/enrichment.py @@ -67,11 +67,9 @@ async def get( *, params: Mapping[str, str] | None = None, headers: Mapping[str, str] | None = None, - ) -> httpx.Response: - ... + ) -> httpx.Response: ... - async def aclose(self) -> None: - ... + async def aclose(self) -> None: ... 
HTTPClientResource = ( @@ -80,9 +78,7 @@ async def aclose(self) -> None: | AbstractContextManager[AsyncHTTPClient] ) -HTTPClientFactory = Callable[ - [], HTTPClientResource | Awaitable[HTTPClientResource] -] +HTTPClientFactory = Callable[[], HTTPClientResource | Awaitable[HTTPClientResource]] def _extract_external_ids(item: PlexPartialObject) -> ExternalIDs: @@ -207,9 +203,7 @@ async def _fetch_tmdb_episode( ) -> TMDBEpisode | None: """Fetch TMDb data for a TV episode.""" - url = ( - f"https://api.themoviedb.org/3/tv/{show_id}/season/{season_number}/episode/{episode_number}" - ) + url = f"https://api.themoviedb.org/3/tv/{show_id}/season/{season_number}/episode/{episode_number}" try: resp = await client.get(url, headers={"Authorization": f"Bearer {api_key}"}) except httpx.HTTPError: @@ -245,9 +239,7 @@ async def _fetch_tmdb_episode_chunk( url, params=params, headers={"Authorization": f"Bearer {api_key}"} ) except httpx.HTTPError: - LOGGER.exception( - "HTTP error fetching TMDb episode chunk for show %s", show_id - ) + LOGGER.exception("HTTP error fetching TMDb episode chunk for show %s", show_id) return {} if not resp.is_success: return {} @@ -364,9 +356,7 @@ def __init__( int(imdb_batch_limit), name="imdb_batch_limit" ) if imdb_requests_per_window is not None and imdb_requests_per_window <= 0: - raise ValueError( - "imdb_requests_per_window must be positive when provided" - ) + raise ValueError("imdb_requests_per_window must be positive when provided") if imdb_window_seconds <= 0: raise ValueError("imdb_window_seconds must be positive") self._imdb_throttle = _RequestThrottler( @@ -416,9 +406,7 @@ async def run(self) -> None: got_item = True try: if batch is None: - self._logger.debug( - "Received legacy completion token; ignoring." - ) + self._logger.debug("Received legacy completion token; ignoring.") continue if batch is INGEST_DONE: @@ -450,9 +438,7 @@ async def run(self) -> None: ) await self._handle_sample_batch(batch) else: # pragma: no cover - defensive logging for future types - self._logger.warning( - "Received unsupported batch type: %r", batch - ) + self._logger.warning("Received unsupported batch type: %r", batch) finally: if got_item: self._ingest_queue.task_done() @@ -526,7 +512,9 @@ async def _acquire_http_client(self) -> AsyncIterator[AsyncHTTPClient]: return if hasattr(resource, "__aenter__") and hasattr(resource, "__aexit__"): - async with cast(AbstractAsyncContextManager[AsyncHTTPClient], resource) as client: + async with cast( + AbstractAsyncContextManager[AsyncHTTPClient], resource + ) as client: yield client return @@ -592,9 +580,7 @@ async def _enrich_movies( if not ids.tmdb: continue tmdb_tasks.append( - asyncio.create_task( - _fetch_tmdb_movie(client, ids.tmdb, api_key) - ) + asyncio.create_task(_fetch_tmdb_movie(client, ids.tmdb, api_key)) ) imdb_map: dict[str, IMDbTitle | None] = {} @@ -604,8 +590,7 @@ async def _enrich_movies( combined_results = await asyncio.gather(imdb_future, *tmdb_tasks) imdb_map = cast(dict[str, IMDbTitle | None], combined_results[0]) tmdb_results = [ - cast(TMDBMovie | None, result) - for result in combined_results[1:] + cast(TMDBMovie | None, result) for result in combined_results[1:] ] retry_snapshot = set(self._imdb_retry_queue.snapshot()) elif tmdb_tasks: @@ -686,9 +671,7 @@ async def _enrich_episodes( retry_snapshot: set[str] = set() tmdb_results: list[TMDBEpisode | None] = [None] * len(episodes) if imdb_future and tmdb_future: - imdb_map, tmdb_results = await asyncio.gather( - imdb_future, tmdb_future - ) + imdb_map, tmdb_results = 
await asyncio.gather(imdb_future, tmdb_future) retry_snapshot = set(self._imdb_retry_queue.snapshot()) elif imdb_future: imdb_map = await imdb_future @@ -723,9 +706,7 @@ async def _get_tmdb_show( tmdb_id_str = str(tmdb_id) if tmdb_id_str in self._show_tmdb_cache: return self._show_tmdb_cache[tmdb_id_str] - show = await _fetch_tmdb_show( - client, tmdb_id_str, self._tmdb_api_key or "" - ) + show = await _fetch_tmdb_show(client, tmdb_id_str, self._tmdb_api_key or "") self._show_tmdb_cache[tmdb_id_str] = show return show @@ -972,9 +953,7 @@ async def _fetch_imdb_batch( try: response = await client.get(url, params=params) except httpx.HTTPError: - LOGGER.exception( - "HTTP error fetching IMDb IDs %s", ",".join(chunk) - ) + LOGGER.exception("HTTP error fetching IMDb IDs %s", ",".join(chunk)) for imdb_id in chunk: results[imdb_id] = None break diff --git a/mcp_plex/loader/pipeline/ingestion.py b/mcp_plex/loader/pipeline/ingestion.py index 795f560..cbb0e17 100644 --- a/mcp_plex/loader/pipeline/ingestion.py +++ b/mcp_plex/loader/pipeline/ingestion.py @@ -5,6 +5,7 @@ having the stage skeleton in place allows other components to depend on the interface. """ + from __future__ import annotations import asyncio @@ -80,7 +81,9 @@ async def run(self) -> None: else: await self._run_plex_ingestion() - self._logger.debug("Publishing ingestion completion sentinels to downstream stages.") + self._logger.debug( + "Publishing ingestion completion sentinels to downstream stages." + ) await enqueue_nowait(self._output_queue, None) await enqueue_nowait(self._output_queue, self._completion_sentinel) self._logger.info( @@ -346,9 +349,7 @@ def _fetch_shows(start: int) -> Sequence[Show]: episode_total, ) - async def _enqueue_sample_batches( - self, items: Sequence[AggregatedItem] - ) -> None: + async def _enqueue_sample_batches(self, items: Sequence[AggregatedItem]) -> None: """Place sample items onto the ingest queue in configured batch sizes.""" for chunk in chunk_sequence(items, self._sample_batch_size): @@ -356,9 +357,7 @@ async def _enqueue_sample_batches( if not batch_items: continue - await enqueue_nowait( - self._output_queue, SampleBatch(items=batch_items) - ) + await enqueue_nowait(self._output_queue, SampleBatch(items=batch_items)) self._items_ingested += len(batch_items) self._batches_ingested += 1 self._logger.debug( diff --git a/mcp_plex/loader/pipeline/orchestrator.py b/mcp_plex/loader/pipeline/orchestrator.py index 126e71a..0c57a06 100644 --- a/mcp_plex/loader/pipeline/orchestrator.py +++ b/mcp_plex/loader/pipeline/orchestrator.py @@ -31,18 +31,15 @@ def __init__(self, spec: _StageSpec, error: BaseException) -> None: class IngestionStageProtocol(Protocol): - async def run(self) -> None: - ... + async def run(self) -> None: ... class EnrichmentStageProtocol(Protocol): - async def run(self) -> None: - ... + async def run(self) -> None: ... class PersistenceStageProtocol(Protocol): - async def run(self, worker_id: int) -> None: - ... + async def run(self, worker_id: int) -> None: ... 
class LoaderOrchestrator: @@ -127,9 +124,7 @@ async def _run_stage( self._logger.debug("%s cancelled.", stage_name) raise except BaseException as exc: - self._logger.debug( - "%s raised %s", stage_name, exc, exc_info=exc - ) + self._logger.debug("%s raised %s", stage_name, exc, exc_info=exc) raise _StageFailure(spec, exc) from exc else: self._logger.info("%s completed successfully.", stage_name) diff --git a/mcp_plex/loader/pipeline/persistence.py b/mcp_plex/loader/pipeline/persistence.py index 8c3519e..1d491a0 100644 --- a/mcp_plex/loader/pipeline/persistence.py +++ b/mcp_plex/loader/pipeline/persistence.py @@ -17,6 +17,7 @@ try: # pragma: no cover - allow import to fail when qdrant_client is absent from qdrant_client import AsyncQdrantClient, models except ModuleNotFoundError: # pragma: no cover - tooling without qdrant installed + class AsyncQdrantClient: # type: ignore[too-few-public-methods] """Fallback stub used when qdrant_client is unavailable.""" @@ -166,9 +167,7 @@ def _drain_additional_sentinels(self) -> int: return drained - async def enqueue_points( - self, points: Sequence["models.PointStruct"] - ) -> None: + async def enqueue_points(self, points: Sequence["models.PointStruct"]) -> None: """Chunk *points* and place them on the persistence queue.""" if not points: @@ -229,14 +228,10 @@ async def run(self, worker_id: int) -> None: outstanding_workers = max( self._worker_count - self._shutdown_tokens_seen, 0 ) - additional_tokens = max( - outstanding_workers - drained_sentinels, 0 - ) + additional_tokens = max(outstanding_workers - drained_sentinels, 0) if additional_tokens: for _ in range(additional_tokens): - await enqueue_nowait( - self._persistence_queue, PERSIST_DONE - ) + await enqueue_nowait(self._persistence_queue, PERSIST_DONE) self._logger.debug( "Persistence queue sentinel received; finishing run for worker %d.", worker_id, diff --git a/mcp_plex/loader/qdrant.py b/mcp_plex/loader/qdrant.py index 8ec0dd9..f08f4cb 100644 --- a/mcp_plex/loader/qdrant.py +++ b/mcp_plex/loader/qdrant.py @@ -1,4 +1,5 @@ """Qdrant helper utilities shared across the loader pipeline.""" + from __future__ import annotations import asyncio @@ -60,7 +61,9 @@ async def _ensure_collection( if not await client.collection_exists(collection_name): await client.create_collection( collection_name=collection_name, - vectors_config={"dense": models.VectorParams(size=dense_size, distance=dense_distance)}, + vectors_config={ + "dense": models.VectorParams(size=dense_size, distance=dense_distance) + }, sparse_vectors_config={"sparse": models.SparseVectorParams()}, ) created_collection = True diff --git a/mcp_plex/loader/samples.py b/mcp_plex/loader/samples.py index 1ab3466..d25fb07 100644 --- a/mcp_plex/loader/samples.py +++ b/mcp_plex/loader/samples.py @@ -1,4 +1,5 @@ """Helpers for working with built-in sample data files.""" + from __future__ import annotations import json @@ -68,7 +69,9 @@ def _load_plex_movie(data: dict[str, Any]) -> PlexItem: summary=data.get("summary"), year=data.get("year"), added_at=data.get("addedAt"), - guids=[PlexGuid(id=str(guid.get("id", ""))) for guid in data.get("Guid", []) or []], + guids=[ + PlexGuid(id=str(guid.get("id", ""))) for guid in data.get("Guid", []) or [] + ], thumb=data.get("thumb"), art=data.get("art"), tagline=data.get("tagline"), @@ -100,7 +103,9 @@ def _load_plex_episode(data: dict[str, Any]) -> PlexItem: summary=data.get("summary"), year=data.get("year"), added_at=data.get("addedAt"), - guids=[PlexGuid(id=str(guid.get("id", ""))) for guid in data.get("Guid", 
[]) or []], + guids=[ + PlexGuid(id=str(guid.get("id", ""))) for guid in data.get("Guid", []) or [] + ], thumb=data.get("thumb"), art=data.get("art"), tagline=data.get("tagline"), @@ -127,7 +132,9 @@ def _load_from_sample(sample_dir: Path) -> list[AggregatedItem]: imdb_movie = IMDbTitle.model_validate(_read_json(movie_dir / "imdb.json")) tmdb_movie = TMDBMovie.model_validate(_read_json(movie_dir / "tmdb.json")) - episode_data = _read_json(episode_dir / "plex.tv.json")["MediaContainer"]["Metadata"][0] + episode_data = _read_json(episode_dir / "plex.tv.json")["MediaContainer"][ + "Metadata" + ][0] imdb_episode = IMDbTitle.model_validate(_read_json(episode_dir / "imdb.tv.json")) tmdb_show = TMDBShow.model_validate(_read_json(episode_dir / "tmdb.tv.json")) diff --git a/mcp_plex/server/__init__.py b/mcp_plex/server/__init__.py index 7bc051d..ba75056 100644 --- a/mcp_plex/server/__init__.py +++ b/mcp_plex/server/__init__.py @@ -1,4 +1,5 @@ """FastMCP server exposing Plex metadata tools.""" + from __future__ import annotations import asyncio @@ -117,9 +118,7 @@ def reranker(self) -> CrossEncoder | None: return None if not self._reranker_loaded: try: - self._reranker = CrossEncoder( - "cross-encoder/ms-marco-MiniLM-L-6-v2" - ) + self._reranker = CrossEncoder("cross-encoder/ms-marco-MiniLM-L-6-v2") except Exception as exc: logger.warning( "Failed to initialize CrossEncoder reranker: %s", @@ -201,7 +200,9 @@ def _request_model(name: str, fn: Callable[..., object]) -> type[BaseModel] | No if not fields: return None - model_name = "".join(part.capitalize() for part in name.replace("-", "_").split("_")) + model_name = "".join( + part.capitalize() for part in name.replace("-", "_").split("_") + ) model_name = f"{model_name or 'Request'}Request" request_model = create_model(model_name, **fields) # type: ignore[arg-type] return request_model @@ -299,11 +300,7 @@ def _collect_alias(identifier: str | None) -> None: display_name = ( friendly_names[0] if friendly_names - else name - or product - or machine_id - or client_id - or "Unknown player" + else name or product or machine_id or client_id or "Unknown player" ) if display_name not in friendly_names: @@ -336,7 +333,9 @@ def _collect_alias(identifier: str | None) -> None: _FUZZY_MATCH_THRESHOLD = 70 -def _match_player(query: str, players: Sequence[PlexPlayerMetadata]) -> PlexPlayerMetadata: +def _match_player( + query: str, players: Sequence[PlexPlayerMetadata] +) -> PlexPlayerMetadata: """Locate a Plex player by friendly name or identifier.""" normalized_query = query.strip() @@ -368,9 +367,8 @@ def _match_player(query: str, players: Sequence[PlexPlayerMetadata]) -> PlexPlay candidate_entries.append((candidate_str, candidate_lower, player)) if candidate_lower == normalized: return player - def _process_choice( - choice: str | tuple[str, str, dict[str, Any]] - ) -> str: + + def _process_choice(choice: str | tuple[str, str, dict[str, Any]]) -> str: if isinstance(choice, tuple): return choice[1] return str(choice).strip().lower() @@ -505,8 +503,10 @@ async def _tool_stub(payload: request_model) -> None: # type: ignore[name-defin app.post(f"/rest/{name}")(_tool_stub) for name, prompt in server._prompt_manager._prompts.items(): + async def _p_stub(**kwargs): # noqa: ARG001 pass + _p_stub.__name__ = f"prompt_{name.replace('-', '_')}" _p_stub.__doc__ = prompt.fn.__doc__ request_model = _request_model(name, prompt.fn) @@ -529,9 +529,13 @@ async def _p_stub(**kwargs): # noqa: ARG001 app.post(f"/rest/prompt/{name}")(_p_stub) for uri, resource in 
server._resource_manager._templates.items(): path = uri.replace("resource://", "") + async def _r_stub(**kwargs): # noqa: ARG001 pass - _r_stub.__name__ = f"resource_{path.replace('/', '_').replace('{', '').replace('}', '')}" + + _r_stub.__name__ = ( + f"resource_{path.replace('/', '_').replace('{', '').replace('}', '')}" + ) _r_stub.__doc__ = resource.fn.__doc__ _r_stub.__signature__ = inspect.signature(resource.fn).replace( return_annotation=inspect.Signature.empty @@ -549,9 +553,10 @@ async def openapi_json(request: Request) -> Response: # noqa: ARG001 return JSONResponse(_OPENAPI_SCHEMA) - def _register_rest_endpoints() -> None: - def _register(path: str, method: str, handler: Callable, fn: Callable, name: str) -> None: + def _register( + path: str, method: str, handler: Callable, fn: Callable, name: str + ) -> None: handler.__name__ = name handler.__doc__ = fn.__doc__ handler.__signature__ = inspect.signature(fn).replace( @@ -560,6 +565,7 @@ def _register(path: str, method: str, handler: Callable, fn: Callable, name: str server.custom_route(path, methods=[method])(handler) for name, tool in server._tool_manager._tools.items(): + async def _rest_tool(request: Request, _tool=tool) -> Response: # noqa: ARG001 try: arguments = await request.json() @@ -578,6 +584,7 @@ async def _rest_tool(request: Request, _tool=tool) -> Response: # noqa: ARG001 ) for name, prompt in server._prompt_manager._prompts.items(): + async def _rest_prompt(request: Request, _prompt=prompt) -> Response: # noqa: ARG001 try: arguments = await request.json() @@ -598,7 +605,9 @@ async def _rest_prompt(request: Request, _prompt=prompt) -> Response: # noqa: A for uri, resource in server._resource_manager._templates.items(): path = uri.replace("resource://", "") - async def _rest_resource(request: Request, _uri_template=uri, _resource=resource) -> Response: + async def _rest_resource( + request: Request, _uri_template=uri, _resource=resource + ) -> Response: formatted = _uri_template for key, value in request.path_params.items(): formatted = formatted.replace(f"{{{key}}}", value) @@ -611,7 +620,9 @@ async def _rest_resource(request: Request, _uri_template=uri, _resource=resource except Exception: return PlainTextResponse(str(data), media_type=_resource.mime_type) - handler_name = f"rest_resource_{path.replace('/', '_').replace('{', '').replace('}', '')}" + handler_name = ( + f"rest_resource_{path.replace('/', '_').replace('{', '').replace('}', '')}" + ) _register( f"/rest/resource/{path}", "GET", @@ -624,8 +635,6 @@ async def _rest_resource(request: Request, _uri_template=uri, _resource=resource _register_rest_endpoints() - - def main(argv: list[str] | None = None) -> None: """Entry point retained for backwards compatibility.""" diff --git a/mcp_plex/server/config.py b/mcp_plex/server/config.py index 04c8465..3bc6cfc 100644 --- a/mcp_plex/server/config.py +++ b/mcp_plex/server/config.py @@ -17,25 +17,15 @@ class Settings(BaseSettings): """Application configuration settings.""" - qdrant_url: str | None = Field( - default=None, validation_alias="QDRANT_URL" - ) - qdrant_api_key: str | None = Field( - default=None, validation_alias="QDRANT_API_KEY" - ) - qdrant_host: str | None = Field( - default=None, validation_alias="QDRANT_HOST" - ) + qdrant_url: str | None = Field(default=None, validation_alias="QDRANT_URL") + qdrant_api_key: str | None = Field(default=None, validation_alias="QDRANT_API_KEY") + qdrant_host: str | None = Field(default=None, validation_alias="QDRANT_HOST") qdrant_port: int = Field(default=6333, 
validation_alias="QDRANT_PORT") - qdrant_grpc_port: int = Field( - default=6334, validation_alias="QDRANT_GRPC_PORT" - ) + qdrant_grpc_port: int = Field(default=6334, validation_alias="QDRANT_GRPC_PORT") qdrant_prefer_grpc: bool = Field( default=False, validation_alias="QDRANT_PREFER_GRPC" ) - qdrant_https: bool | None = Field( - default=None, validation_alias="QDRANT_HTTPS" - ) + qdrant_https: bool | None = Field(default=None, validation_alias="QDRANT_HTTPS") dense_model: str = Field( default="BAAI/bge-small-en-v1.5", validation_alias="DENSE_MODEL" ) @@ -91,7 +81,9 @@ def _items_from_sequence(value: Sequence[Any]) -> RawAliasItems: for entry in value: if isinstance(entry, Mapping): items.extend(entry.items()) - elif isinstance(entry, Sequence) and not isinstance(entry, (str, bytes, bytearray)): + elif isinstance(entry, Sequence) and not isinstance( + entry, (str, bytes, bytearray) + ): entry_list = list(entry) if len(entry_list) != 2: raise ValueError( diff --git a/mcp_plex/server/media.py b/mcp_plex/server/media.py index c7c7f50..143f7ba 100644 --- a/mcp_plex/server/media.py +++ b/mcp_plex/server/media.py @@ -1,4 +1,5 @@ """Media helper functions for Plex server tools.""" + from __future__ import annotations from typing import Any, Mapping, TYPE_CHECKING, cast @@ -95,9 +96,7 @@ def _extract_plex_metadata(media: AggregatedMediaItem) -> PlexMediaMetadata: return cast(PlexMediaMetadata, {}) -async def _get_media_data( - server: "PlexServer", identifier: str -) -> AggregatedMediaItem: +async def _get_media_data(server: "PlexServer", identifier: str) -> AggregatedMediaItem: """Return the first matching media record's payload.""" cached = server.cache.get_payload(identifier) @@ -106,9 +105,7 @@ async def _get_media_data( records = await _find_records(server, identifier, limit=1) if not records: raise ValueError("Media item not found") - payload = _flatten_payload( - cast(Mapping[str, JSONValue] | None, records[0].payload) - ) + payload = _flatten_payload(cast(Mapping[str, JSONValue] | None, records[0].payload)) data = payload cache_keys: set[str] = set() diff --git a/mcp_plex/server/models.py b/mcp_plex/server/models.py index e8e8b20..3981aa6 100644 --- a/mcp_plex/server/models.py +++ b/mcp_plex/server/models.py @@ -1,4 +1,5 @@ """Typed models shared across the Plex server package.""" + from __future__ import annotations from typing import NotRequired, TypedDict diff --git a/mcp_plex/server/tools/media_library.py b/mcp_plex/server/tools/media_library.py index e450acf..5354cfc 100644 --- a/mcp_plex/server/tools/media_library.py +++ b/mcp_plex/server/tools/media_library.py @@ -1,4 +1,5 @@ """Media discovery and metadata tools for the Plex MCP server.""" + from __future__ import annotations import asyncio @@ -28,7 +29,7 @@ async def get_media( description="Rating key, IMDb/TMDb ID, or media title", examples=["49915", "tt8367814", "The Gentlemen"], ), - ] + ], ) -> list[AggregatedMediaItem]: """Retrieve media items by rating key, IMDb/TMDb ID or title.""" @@ -95,9 +96,7 @@ async def _prefetch(hit: models.ScoredPoint) -> None: plex_info.get("rating_key") ) if rating_key: - server.cache.set_payload( - rating_key, cast(dict[str, JSONValue], data) - ) + server.cache.set_payload(rating_key, cast(dict[str, JSONValue], data)) thumb = plex_info.get("thumb") if isinstance(thumb, str) and thumb: server.cache.set_poster(rating_key, thumb) @@ -233,7 +232,9 @@ async def query_media( ] = None, actors: Annotated[ Sequence[str] | None, - Field(description="Match actors by name", examples=[["Matthew 
McConaughey"]]), + Field( + description="Match actors by name", examples=[["Matthew McConaughey"]] + ), ] = None, directors: Annotated[ Sequence[str] | None, @@ -249,7 +250,10 @@ async def query_media( ] = None, collections: Annotated[ Sequence[str] | None, - Field(description="Match Plex collection names", examples=[["John Wick Collection"]]), + Field( + description="Match Plex collection names", + examples=[["John Wick Collection"]], + ), ] = None, show_title: Annotated[ str | None, @@ -337,14 +341,18 @@ def _listify(value: Sequence[str] | str | None) -> list[str]: vector_queries.append( ( "dense", - models.Document(text=dense_query, model=server.settings.dense_model), + models.Document( + text=dense_query, model=server.settings.dense_model + ), ) ) if sparse_query: vector_queries.append( ( "sparse", - models.Document(text=sparse_query, model=server.settings.sparse_model), + models.Document( + text=sparse_query, model=server.settings.sparse_model + ), ) ) @@ -352,7 +360,9 @@ def _listify(value: Sequence[str] | str | None) -> list[str]: keyword_prefetch_conditions: list[models.FieldCondition] = [] if title: - must.append(models.FieldCondition(key="title", match=models.MatchText(text=title))) + must.append( + models.FieldCondition(key="title", match=models.MatchText(text=title)) + ) media_type = type if media_type: condition = models.FieldCondition( @@ -361,7 +371,9 @@ def _listify(value: Sequence[str] | str | None) -> list[str]: must.append(condition) keyword_prefetch_conditions.append(condition) if year is not None: - must.append(models.FieldCondition(key="year", match=models.MatchValue(value=year))) + must.append( + models.FieldCondition(key="year", match=models.MatchValue(value=year)) + ) if year_from is not None or year_to is not None: rng: dict[str, int] = {} if year_from is not None: @@ -375,7 +387,9 @@ def _listify(value: Sequence[str] | str | None) -> list[str]: rng_at["gte"] = added_after if added_before is not None: rng_at["lte"] = added_before - must.append(models.FieldCondition(key="added_at", range=models.Range(**rng_at))) + must.append( + models.FieldCondition(key="added_at", range=models.Range(**rng_at)) + ) for actor in _listify(actors): condition = models.FieldCondition( @@ -429,21 +443,31 @@ def _listify(value: Sequence[str] | str | None) -> list[str]: if summary: must.append( - models.FieldCondition(key="summary", match=models.MatchText(text=summary)) + models.FieldCondition( + key="summary", match=models.MatchText(text=summary) + ) ) if overview: must.append( - models.FieldCondition(key="overview", match=models.MatchText(text=overview)) + models.FieldCondition( + key="overview", match=models.MatchText(text=overview) + ) ) if plot: - must.append(models.FieldCondition(key="plot", match=models.MatchText(text=plot))) + must.append( + models.FieldCondition(key="plot", match=models.MatchText(text=plot)) + ) if tagline: must.append( - models.FieldCondition(key="tagline", match=models.MatchText(text=tagline)) + models.FieldCondition( + key="tagline", match=models.MatchText(text=tagline) + ) ) if reviews: must.append( - models.FieldCondition(key="reviews", match=models.MatchText(text=reviews)) + models.FieldCondition( + key="reviews", match=models.MatchText(text=reviews) + ) ) if plex_rating_key: diff --git a/pyproject.toml b/pyproject.toml index a49b1b8..491006f 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -4,7 +4,7 @@ build-backend = "setuptools.build_meta" [project] name = "mcp-plex" -version = "2.0.7" +version = "2.0.9" description = "Plex-Oriented Model Context 
Protocol Server" requires-python = ">=3.11,<3.13" diff --git a/tests/test_common_additional.py b/tests/test_common_additional.py new file mode 100644 index 0000000..0ebaaf6 --- /dev/null +++ b/tests/test_common_additional.py @@ -0,0 +1,18 @@ +from __future__ import annotations + +from mcp_plex.common.validation import coerce_plex_tag_id +from mcp_plex.common.types import TMDBEpisode + + +class _BadInt: + def __int__(self) -> int: + raise TypeError("no int") + + +def test_coerce_plex_tag_id_handles_bad_objects(): + assert coerce_plex_tag_id(_BadInt()) == 0 + + +def test_tmdb_episode_normalise_non_mapping(): + payload = ("value",) + assert TMDBEpisode._normalise_episode_id(payload) is payload diff --git a/tests/test_config.py b/tests/test_config.py index 7b4e128..882f86a 100644 --- a/tests/test_config.py +++ b/tests/test_config.py @@ -20,10 +20,7 @@ def test_settings_invalid_cache_size(monkeypatch): def test_settings_player_aliases(monkeypatch): monkeypatch.setenv( "PLEX_PLAYER_ALIASES", - ( - "{\"machine-1\": [\"Living Room TV\", \"Living Room\"]," - " \"client-2\": \"Bedroom\"}" - ), + ('{"machine-1": ["Living Room TV", "Living Room"], "client-2": "Bedroom"}'), ) settings = Settings() assert settings.plex_player_aliases == { @@ -71,7 +68,5 @@ def test_settings_aliases_from_sequence(): def test_settings_invalid_alias_sequence(): with pytest.raises(ValidationError): Settings.model_validate( - { - "PLEX_PLAYER_ALIASES": [("machine-1", "Living Room", "Extra")] - } + {"PLEX_PLAYER_ALIASES": [("machine-1", "Living Room", "Extra")]} ) diff --git a/tests/test_enrichment_helpers.py b/tests/test_enrichment_helpers.py index 29071d8..4a252d6 100644 --- a/tests/test_enrichment_helpers.py +++ b/tests/test_enrichment_helpers.py @@ -55,7 +55,9 @@ def test_build_plex_item_handles_full_metadata(): contentRating="R", directors=[types.SimpleNamespace(id=1, tag="Lana Wachowski", thumb="lana.jpg")], writers=[types.SimpleNamespace(id=2, tag="Lilly Wachowski", thumb="lilly.jpg")], - actors=[types.SimpleNamespace(id=3, tag="Keanu Reeves", thumb="neo.jpg", role="Neo")], + actors=[ + types.SimpleNamespace(id=3, tag="Keanu Reeves", thumb="neo.jpg", role="Neo") + ], ) item = _build_plex_item(raw) @@ -211,9 +213,7 @@ async def main(): assert chunk["season/1/episode/1"].guest_stars[0].character == "Self" assert chunk["season/1/episode/1"].id == "1/season/1/episode/1" assert ( - await _fetch_tmdb_episode_chunk( - client, 1, ["season/1/episode/3"], "k" - ) + await _fetch_tmdb_episode_chunk(client, 1, ["season/1/episode/3"], "k") ) == {} asyncio.run(main()) @@ -232,9 +232,10 @@ async def main() -> None: async with httpx.AsyncClient(transport=transport) as client: assert await _fetch_tmdb_episode(client, 1, 1, 1, "k") is None async with httpx.AsyncClient(transport=transport) as client: - assert await _fetch_tmdb_episode_chunk( - client, 1, ["season/1/episode/1"], "k" - ) == {} + assert ( + await _fetch_tmdb_episode_chunk(client, 1, ["season/1/episode/1"], "k") + == {} + ) asyncio.run(main()) diff --git a/tests/test_enrichment_stage.py b/tests/test_enrichment_stage.py index 11cc4ea..4aad58a 100644 --- a/tests/test_enrichment_stage.py +++ b/tests/test_enrichment_stage.py @@ -121,10 +121,12 @@ async def fake_fetch_imdb_batch(client, imdb_ids, **kwargs): async def fake_fetch_tmdb_movie(client, tmdb_id, api_key): events["tmdb_started"].set() await asyncio.wait_for(events["imdb_started"].wait(), timeout=1) - return TMDBMovie.model_validate({ - "id": int(tmdb_id), - "title": f"TMDb {tmdb_id}", - }) + return 
TMDBMovie.model_validate( + { + "id": int(tmdb_id), + "title": f"TMDb {tmdb_id}", + } + ) monkeypatch.setattr( "mcp_plex.loader.pipeline.enrichment._fetch_imdb_batch", @@ -269,10 +271,12 @@ async def fake_fetch_imdb_batch(client, imdb_ids, **kwargs): async def fake_fetch_tmdb_movie(client, tmdb_id, api_key): tmdb_requests.append(tmdb_id) - return TMDBMovie.model_validate({ - "id": int(tmdb_id), - "title": f"TMDb {tmdb_id}", - }) + return TMDBMovie.model_validate( + { + "id": int(tmdb_id), + "title": f"TMDb {tmdb_id}", + } + ) monkeypatch.setattr( EnrichmentStage, "_handle_episode_batch", lambda self, batch: asyncio.sleep(0) @@ -419,9 +423,13 @@ async def fake_fetch_tmdb_movie(client, tmdb_id, api_key): async def fake_fetch_tmdb_show(client, tmdb_id, api_key): show_calls.append(tmdb_id) - return TMDBShow.model_validate({"id": int(tmdb_id), "name": tmdb_id, "seasons": []}) + return TMDBShow.model_validate( + {"id": int(tmdb_id), "name": tmdb_id, "seasons": []} + ) - async def fake_fetch_tmdb_episode(client, show_id, season_number, episode_number, api_key): + async def fake_fetch_tmdb_episode( + client, show_id, season_number, episode_number, api_key + ): episode_calls.append((show_id, season_number, episode_number)) return TMDBEpisode.model_validate( { @@ -627,11 +635,17 @@ async def scenario() -> list[list[AggregatedItem] | None]: show = _FakeShow("show", tmdb_id="301") episodes_first = [ - _FakeEpisode("e1", show=show, season_index=1, episode_index=1, imdb_id="ttA"), - _FakeEpisode("e2", show=show, season_index=1, episode_index=2, imdb_id="ttB"), + _FakeEpisode( + "e1", show=show, season_index=1, episode_index=1, imdb_id="ttA" + ), + _FakeEpisode( + "e2", show=show, season_index=1, episode_index=2, imdb_id="ttB" + ), ] episodes_second = [ - _FakeEpisode("e3", show=show, season_index=1, episode_index=3, imdb_id="ttC"), + _FakeEpisode( + "e3", show=show, season_index=1, episode_index=3, imdb_id="ttC" + ), ] await ingest_queue.put(EpisodeBatch(show=show, episodes=episodes_first)) @@ -663,7 +677,11 @@ async def scenario() -> list[list[AggregatedItem] | None]: assert [item.plex.rating_key for item in first] == ["e1", "e2"] assert [item.plex.rating_key for item in second] == ["e3"] assert all(item.tmdb for item in first + second) - assert [item.tmdb.episode_number for item in first + second if isinstance(item.tmdb, TMDBEpisode)] == [1, 2, 3] + assert [ + item.tmdb.episode_number + for item in first + second + if isinstance(item.tmdb, TMDBEpisode) + ] == [1, 2, 3] def test_enrichment_stage_falls_back_to_individual_episode_fetch(monkeypatch): @@ -740,8 +758,12 @@ async def scenario() -> list[list[AggregatedItem] | None]: show = _FakeShow("show", tmdb_id="302") episodes = [ - _FakeEpisode("e1", show=show, season_index=1, episode_index=1, imdb_id="ttA"), - _FakeEpisode("e2", show=show, season_index=1, episode_index=2, imdb_id="ttB"), + _FakeEpisode( + "e1", show=show, season_index=1, episode_index=1, imdb_id="ttA" + ), + _FakeEpisode( + "e2", show=show, season_index=1, episode_index=2, imdb_id="ttB" + ), ] await ingest_queue.put(EpisodeBatch(show=show, episodes=episodes)) @@ -760,7 +782,9 @@ async def scenario() -> list[list[AggregatedItem] | None]: payloads = asyncio.run(scenario()) assert show_requests == ["302"] - assert chunk_requests == [(302, ("season/1/episode/1", "season/1/episode/2"), "token")] + assert chunk_requests == [ + (302, ("season/1/episode/1", "season/1/episode/2"), "token") + ] assert fallback_requests == [ (302, 1, 1, "token"), (302, 1, 2, "token"), @@ -770,15 +794,16 @@ async 
def scenario() -> list[list[AggregatedItem] | None]: first, sentinel = payloads assert sentinel is PERSIST_DONE assert [ - item.tmdb.episode_number - for item in first - if isinstance(item.tmdb, TMDBEpisode) + item.tmdb.episode_number for item in first if isinstance(item.tmdb, TMDBEpisode) ] == [1, 2] + def test_enrichment_stage_sample_batches_pass_through(monkeypatch): handler = _ListHandler() - async def scenario() -> tuple[list[list[AggregatedItem] | None], list[Any], list[AggregatedItem]]: + async def scenario() -> tuple[ + list[list[AggregatedItem] | None], list[Any], list[AggregatedItem] + ]: ingest_queue: asyncio.Queue = asyncio.Queue() persistence_queue: _RecordingQueue = _RecordingQueue() @@ -910,7 +935,9 @@ async def fake_fetch_imdb_batch(client, imdb_ids, **kwargs): fake_fetch_imdb_batch, ) - async def scenario() -> tuple[list[list[AggregatedItem] | None], int, list[list[str]]]: + async def scenario() -> tuple[ + list[list[AggregatedItem] | None], int, list[list[str]] + ]: ingest_queue: asyncio.Queue = asyncio.Queue() persistence_queue: asyncio.Queue = asyncio.Queue() retry_queue = IMDbRetryQueue() diff --git a/tests/test_entrypoints.py b/tests/test_entrypoints.py new file mode 100644 index 0000000..10fcd54 --- /dev/null +++ b/tests/test_entrypoints.py @@ -0,0 +1,29 @@ +from __future__ import annotations + +import runpy + + +def test_loader_module_entrypoint(monkeypatch): + captured: dict[str, object] = {} + + def fake_main(argv=None): + captured["argv"] = argv + + monkeypatch.setattr("mcp_plex.loader.cli.main", fake_main) + + runpy.run_module("mcp_plex.loader", run_name="__main__") + + assert captured["argv"] is None + + +def test_server_module_entrypoint(monkeypatch): + captured: dict[str, object] = {} + + def fake_main(argv=None): + captured["argv"] = argv + + monkeypatch.setattr("mcp_plex.server.cli.main", fake_main) + + runpy.run_module("mcp_plex.server", run_name="__main__") + + assert captured["argv"] is None diff --git a/tests/test_imdb_cache.py b/tests/test_imdb_cache.py index a6fb72f..7525cd6 100644 --- a/tests/test_imdb_cache.py +++ b/tests/test_imdb_cache.py @@ -22,9 +22,7 @@ def test_imdb_cache_loads_existing_and_persists(tmp_path: Path): "primaryTitle": "The Shawshank Redemption", } - cache.set( - "tt0068646", {"id": "tt0068646", "primaryTitle": "The Godfather"} - ) + cache.set("tt0068646", {"id": "tt0068646", "primaryTitle": "The Godfather"}) assert json.loads(path.read_text()) == { "tt0111161": { "id": "tt0111161", diff --git a/tests/test_ingestion_stage.py b/tests/test_ingestion_stage.py index 2d4fb9a..092d356 100644 --- a/tests/test_ingestion_stage.py +++ b/tests/test_ingestion_stage.py @@ -47,7 +47,9 @@ async def scenario() -> str: def test_ingestion_stage_sample_empty_batches() -> None: - async def scenario() -> tuple[SampleBatch | None, None | IngestSentinel, bool, int, int]: + async def scenario() -> tuple[ + SampleBatch | None, None | IngestSentinel, bool, int, int + ]: queue: asyncio.Queue = asyncio.Queue() stage = IngestionStage( plex_server=None, @@ -63,7 +65,13 @@ async def scenario() -> tuple[SampleBatch | None, None | IngestSentinel, bool, i first = await queue.get() second = await queue.get() - return first, second, queue.empty(), stage.items_ingested, stage.batches_ingested + return ( + first, + second, + queue.empty(), + stage.items_ingested, + stage.batches_ingested, + ) first, second, empty, items_ingested, batches_ingested = asyncio.run(scenario()) @@ -103,7 +111,13 @@ async def scenario() -> tuple[ batches = [await queue.get(), await 
queue.get()] first_token = await queue.get() second_token = await queue.get() - return batches, first_token, second_token, stage.items_ingested, stage.batches_ingested + return ( + batches, + first_token, + second_token, + stage.items_ingested, + stage.batches_ingested, + ) batches, first_token, second_token, items_ingested, batches_ingested = asyncio.run( scenario() @@ -238,12 +252,16 @@ def movie_search(*, container_start=None, container_size=None, **_kwargs): movie_section.search.side_effect = movie_search def _episodes(titles: list[str]) -> list[Episode]: - return [create_autospec(Episode, instance=True, title=title) for title in titles] + return [ + create_autospec(Episode, instance=True, title=title) for title in titles + ] show_a_season_1 = create_autospec(Season, instance=True) show_a_s1_eps = _episodes(["S01E01", "S01E02"]) - def show_a_s1_side_effect(*, container_start=None, container_size=None, **_kwargs): + def show_a_s1_side_effect( + *, container_start=None, container_size=None, **_kwargs + ): start = container_start or 0 size = container_size or len(show_a_s1_eps) return show_a_s1_eps[start : start + size] @@ -253,7 +271,9 @@ def show_a_s1_side_effect(*, container_start=None, container_size=None, **_kwarg show_a_season_2 = create_autospec(Season, instance=True) show_a_s2_eps = _episodes(["S01E03"]) - def show_a_s2_side_effect(*, container_start=None, container_size=None, **_kwargs): + def show_a_s2_side_effect( + *, container_start=None, container_size=None, **_kwargs + ): start = container_start or 0 size = container_size or len(show_a_s2_eps) return show_a_s2_eps[start : start + size] @@ -266,7 +286,9 @@ def show_a_s2_side_effect(*, container_start=None, container_size=None, **_kwarg show_b_season_1 = create_autospec(Season, instance=True) show_b_s1_eps = _episodes(["S01E01", "S01E02"]) - def show_b_s1_side_effect(*, container_start=None, container_size=None, **_kwargs): + def show_b_s1_side_effect( + *, container_start=None, container_size=None, **_kwargs + ): start = container_start or 0 size = container_size or len(show_b_s1_eps) return show_b_s1_eps[start : start + size] @@ -448,7 +470,9 @@ def movie_search(*, container_start=None, container_size=None, **_kwargs): ] def _make_side_effect(eps: list[Episode]): - def _side_effect(*, container_start=None, container_size=None, **_kwargs): + def _side_effect( + *, container_start=None, container_size=None, **_kwargs + ): start = container_start or 0 size = container_size or len(eps) return eps[start : start + size] diff --git a/tests/test_loader_channels_additional.py b/tests/test_loader_channels_additional.py new file mode 100644 index 0000000..25f6f67 --- /dev/null +++ b/tests/test_loader_channels_additional.py @@ -0,0 +1,23 @@ +from __future__ import annotations + +import asyncio + +from mcp_plex.loader.pipeline import channels + + +def test_enqueue_nowait_applies_backpressure(): + async def _run(): + queue: asyncio.Queue[int] = asyncio.Queue(maxsize=1) + await queue.put(1) + + enqueue_task = asyncio.create_task(channels.enqueue_nowait(queue, 2)) + await asyncio.sleep(0) + assert not enqueue_task.done() + + await queue.get() + await enqueue_task + + assert queue.qsize() == 1 + assert queue.get_nowait() == 2 + + asyncio.run(_run()) diff --git a/tests/test_loader_imdb_cache_additional.py b/tests/test_loader_imdb_cache_additional.py new file mode 100644 index 0000000..eba9ddb --- /dev/null +++ b/tests/test_loader_imdb_cache_additional.py @@ -0,0 +1,23 @@ +from __future__ import annotations + +import json + +from 
mcp_plex.loader.imdb_cache import IMDbCache + + +def test_imdb_cache_accepts_non_dict_payload(tmp_path): + cache_path = tmp_path / "imdb.json" + cache_path.write_text(json.dumps({"tt123": ["raw"]})) + + cache = IMDbCache(cache_path) + + assert cache.get("tt123") == ["raw"] + + +def test_imdb_cache_warns_on_non_object(tmp_path, caplog): + cache_path = tmp_path / "imdb.json" + cache_path.write_text(json.dumps([1, 2, 3])) + + IMDbCache(cache_path) + + assert "did not contain an object" in caplog.text diff --git a/tests/test_loader_integration.py b/tests/test_loader_integration.py index 71e35a9..9f9f98e 100644 --- a/tests/test_loader_integration.py +++ b/tests/test_loader_integration.py @@ -89,7 +89,10 @@ def test_run_writes_points(monkeypatch): assert any("Directed by" in t for t in texts) assert any("Starring" in t for t in texts) movie_point = next(p for p in captured if p.payload["type"] == "movie") - assert "directors" in movie_point.payload and "Guy Ritchie" in movie_point.payload["directors"] + assert ( + "directors" in movie_point.payload + and "Guy Ritchie" in movie_point.payload["directors"] + ) assert "writers" in movie_point.payload and movie_point.payload["writers"] assert "genres" in movie_point.payload and movie_point.payload["genres"] assert movie_point.payload.get("summary") @@ -102,9 +105,11 @@ def test_run_writes_points(monkeypatch): assert episode_point.payload.get("season_title") == "Season 1" assert episode_point.payload.get("season_number") == 1 assert episode_point.payload.get("episode_number") == 4 - episode_vector = next( - p for p in captured if p.payload.get("type") == "episode" - ).vector["dense"].text + episode_vector = ( + next(p for p in captured if p.payload.get("type") == "episode") + .vector["dense"] + .text + ) assert "Alien: Earth" in episode_vector assert "S01E04" in episode_vector diff --git a/tests/test_loader_logging.py b/tests/test_loader_logging.py index 8628b0b..e0461e5 100644 --- a/tests/test_loader_logging.py +++ b/tests/test_loader_logging.py @@ -18,6 +18,10 @@ def __init__(self, *args, **kwargs): def _get_model_params(self, model_name): return 1, models.Distance.COSINE + async def close(self): + """Match the AsyncQdrantClient interface used by the loader.""" + return None + async def collection_exists(self, name): return False @@ -146,6 +150,7 @@ async def fake_ensure(*args, **kwargs): monkeypatch.setattr(loader, "_ensure_collection", fake_ensure) sample_dir = Path(__file__).resolve().parents[1] / "sample-data" + async def fake_run(self): order.append("execute") diff --git a/tests/test_loader_runtime_additional.py b/tests/test_loader_runtime_additional.py new file mode 100644 index 0000000..d4d2a61 --- /dev/null +++ b/tests/test_loader_runtime_additional.py @@ -0,0 +1,51 @@ +from __future__ import annotations + +import asyncio + +import pytest + +from mcp_plex import loader + + +def test_imdb_runtime_config_creates_throttle(): + config = loader.IMDbRuntimeConfig( + cache=None, + max_retries=1, + backoff=1.0, + retry_queue=loader.IMDbRetryQueue(), + requests_per_window=2, + window_seconds=3.0, + ) + + throttle = config.get_throttle() + assert throttle is not None + assert config.get_throttle() is throttle + + +class _DummyClient: + async def close(self) -> None: + return None + + +def test_run_requires_plex_configuration(monkeypatch): + async def _run() -> None: + monkeypatch.setattr(loader, "AsyncQdrantClient", lambda *a, **k: _DummyClient()) + + async def _noop(*args, **kwargs): + return None + + monkeypatch.setattr(loader, "_ensure_collection", 
_noop) + + with pytest.raises( + RuntimeError, match="PLEX_URL and PLEX_TOKEN must be provided" + ): + await loader.run( + plex_url=None, + plex_token=None, + tmdb_api_key=None, + sample_dir=None, + qdrant_url=None, + qdrant_api_key=None, + ) + + asyncio.run(_run()) diff --git a/tests/test_loader_unit.py b/tests/test_loader_unit.py index ab094c6..4b954c7 100644 --- a/tests/test_loader_unit.py +++ b/tests/test_loader_unit.py @@ -81,6 +81,8 @@ def fake_import(name, globals=None, locals=None, fromlist=(), level=0): assert module.PlexServer is PlexServer assert module.PlexPartialObject is PlexPartialObject assert not hasattr(module, "PartialPlexObject") + + def test_load_from_sample_returns_items(): sample_dir = Path(__file__).resolve().parents[1] / "sample-data" items = loader_samples._load_from_sample(sample_dir) @@ -102,7 +104,9 @@ async def imdb_mock(request): ) async def main(): - async with httpx.AsyncClient(transport=httpx.MockTransport(imdb_mock)) as client: + async with httpx.AsyncClient( + transport=httpx.MockTransport(imdb_mock) + ) as client: result = await _fetch_imdb(client, "tt1", config) assert result is not None @@ -123,7 +127,9 @@ async def error_mock(request): raise AssertionError("network should not be called") async def main(): - async with httpx.AsyncClient(transport=httpx.MockTransport(error_mock)) as client: + async with httpx.AsyncClient( + transport=httpx.MockTransport(error_mock) + ) as client: result = await _fetch_imdb(client, "tt1", config) assert result is not None assert result.id == "tt1" @@ -158,7 +164,9 @@ async def fake_sleep(seconds: float) -> None: monkeypatch.setattr(asyncio, "sleep", fake_sleep) async def main(): - async with httpx.AsyncClient(transport=httpx.MockTransport(imdb_mock)) as client: + async with httpx.AsyncClient( + transport=httpx.MockTransport(imdb_mock) + ) as client: result = await _fetch_imdb(client, "tt1", config) assert result is not None @@ -192,7 +200,9 @@ async def first_run() -> IMDbRuntimeConfig: backoff=0, retry_queue=queue, ) - async with httpx.AsyncClient(transport=httpx.MockTransport(first_transport)) as client: + async with httpx.AsyncClient( + transport=httpx.MockTransport(first_transport) + ) as client: await _process_imdb_retry_queue(client, config) await _fetch_imdb(client, "tt0111161", config) _persist_imdb_retry_queue(queue_path, config.retry_queue) @@ -211,7 +221,9 @@ async def second_run() -> IMDbRuntimeConfig: ) assert config.retry_queue.qsize() == 1 assert config.retry_queue.snapshot() == ["tt0111161"] - async with httpx.AsyncClient(transport=httpx.MockTransport(second_transport)) as client: + async with httpx.AsyncClient( + transport=httpx.MockTransport(second_transport) + ) as client: await _process_imdb_retry_queue(client, config) _persist_imdb_retry_queue(queue_path, config.retry_queue) return config @@ -245,6 +257,8 @@ async def run_test(): asyncio.run(run_test()) assert queue.qsize() == 1 assert queue.snapshot() == ["tt0111161"] + + def test_upsert_in_batches_handles_errors(): class DummyClient: def __init__(self): @@ -309,9 +323,7 @@ async def upsert(self, collection_name: str, points, **kwargs): retry_queue: asyncio.Queue[list[models.PointStruct]] = asyncio.Queue() async def main() -> None: - retry_queue.put_nowait( - [models.PointStruct(id=1, vector={}, payload={})] - ) + retry_queue.put_nowait([models.PointStruct(id=1, vector={}, payload={})]) config = QdrantRuntimeConfig(retry_attempts=2, retry_backoff=0.01) sleeps: list[float] = [] diff --git a/tests/test_server.py b/tests/test_server.py index 
71c4e95..c269fd8 100644 --- a/tests/test_server.py +++ b/tests/test_server.py @@ -42,6 +42,10 @@ def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) self.__class__._initialized = True + async def close(self) -> None: # type: ignore[override] + """Prevent sample data teardown from closing the shared instance.""" + return None + monkeypatch.setattr(loader, "AsyncQdrantClient", SharedClient) monkeypatch.setattr(async_qdrant_client, "AsyncQdrantClient", SharedClient) sample_dir = Path(__file__).resolve().parents[1] / "sample-data" @@ -61,6 +65,7 @@ def test_qdrant_env_config(monkeypatch): class CaptureClient: def __init__(self, *args, **kwargs): captured.update(kwargs) + async def close(self): pass @@ -107,8 +112,7 @@ def test_server_tools(monkeypatch): item = json.loads(asyncio.run(server.media_item.fn(identifier=movie_id))) assert item["plex"]["rating_key"] == movie_id assert ( - server.server.cache.get_payload(movie_id)["plex"]["rating_key"] - == movie_id + server.server.cache.get_payload(movie_id)["plex"]["rating_key"] == movie_id ) ids = json.loads(asyncio.run(server.media_ids.fn(identifier=movie_id))) @@ -140,7 +144,10 @@ def test_server_tools(monkeypatch): limit=1, ) ) - assert episode_structured and episode_structured[0]["plex"]["rating_key"] == "61960" + assert ( + episode_structured + and episode_structured[0]["plex"]["rating_key"] == "61960" + ) assert episode_structured[0]["show_title"] == "Alien: Earth" rec = asyncio.run(server.recommend_media.fn(identifier=movie_id, limit=1)) @@ -164,9 +171,7 @@ def test_get_media_data_caches_external_ids(monkeypatch): original_find_records = media_helpers._find_records - async def _counting_find_records( - plex_server, identifier: str, limit: int = 1 - ): + async def _counting_find_records(plex_server, identifier: str, limit: int = 1): nonlocal call_count call_count += 1 return await original_find_records(plex_server, identifier, limit=limit) @@ -223,9 +228,7 @@ def test_actor_movies(monkeypatch): def test_play_media_requires_configuration(monkeypatch): with _load_server(monkeypatch) as server: with pytest.raises(RuntimeError): - asyncio.run( - server.play_media.fn(identifier="49915", player="Living Room") - ) + asyncio.run(server.play_media.fn(identifier="49915", player="Living Room")) def test_play_media_with_alias(monkeypatch): @@ -346,9 +349,7 @@ def fetchItem(self, key: str) -> Any: with _load_server(monkeypatch) as server: monkeypatch.setattr(server, "PlexServerClient", FakePlex) with pytest.raises(ValueError, match="cannot be controlled for playback"): - asyncio.run( - server.play_media.fn(identifier="49915", player="machine-999") - ) + asyncio.run(server.play_media.fn(identifier="49915", player="machine-999")) def test_match_player_fuzzy_alias_resolution(): @@ -420,8 +421,7 @@ def fake_import(name, *args, **kwargs): module = importlib.reload(importlib.import_module("mcp_plex.server")) assert module.server.reranker is None assert any( - "Failed to import CrossEncoder" in message - for message in caplog.messages + "Failed to import CrossEncoder" in message for message in caplog.messages ) asyncio.run(module.server.close()) @@ -465,6 +465,7 @@ def test_rest_endpoints(monkeypatch): assert resp.json()["rating_key"] == "49915" spec = client.get("/openapi.json").json() + def _resolve(schema: dict): if "$ref" in schema: ref = schema["$ref"].split("/")[-1] @@ -478,17 +479,15 @@ def _resolve(schema: dict): "schema" ] get_media_schema = _resolve(get_media_schema) - assert ( - 
get_media_schema["properties"]["identifier"]["description"].startswith( - "Rating key" - ) + assert get_media_schema["properties"]["identifier"]["description"].startswith( + "Rating key" ) search_media = spec["paths"]["/rest/search-media"]["post"] assert "parameters" not in search_media or not search_media["parameters"] - search_schema = search_media["requestBody"]["content"][ - "application/json" - ]["schema"] + search_schema = search_media["requestBody"]["content"]["application/json"][ + "schema" + ] search_schema = _resolve(search_schema) assert "query" in search_schema["required"] assert "/rest/prompt/media-info" in spec["paths"] @@ -566,6 +565,7 @@ def test_run_config_to_kwargs(): def test_find_records_handles_retrieve_error(monkeypatch): with _load_server(monkeypatch) as module: + async def fail_retrieve(*args, **kwargs): raise RuntimeError("boom") @@ -587,8 +587,12 @@ def test_media_resources_cache_hits(monkeypatch): poster_cached = asyncio.run(module.media_poster.fn(identifier=rating_key)) assert poster_cached == poster_first - background_first = asyncio.run(module.media_background.fn(identifier=rating_key)) - background_cached = asyncio.run(module.media_background.fn(identifier=rating_key)) + background_first = asyncio.run( + module.media_background.fn(identifier=rating_key) + ) + background_cached = asyncio.run( + module.media_background.fn(identifier=rating_key) + ) assert background_cached == background_first @@ -623,6 +627,7 @@ async def fake_render(self, arguments): def test_rest_resource_content_types(monkeypatch): with _load_server(monkeypatch) as module: + async def fake_read_resource(formatted: str): if formatted.endswith("binary"): return b"binary" @@ -663,7 +668,9 @@ async def fake_query_points(*args, **kwargs): async def immediate_to_thread(fn, *args, **kwargs): return fn(*args, **kwargs) - monkeypatch.setattr(module.server.qdrant_client, "query_points", fake_query_points) + monkeypatch.setattr( + module.server.qdrant_client, "query_points", fake_query_points + ) monkeypatch.setattr(module.asyncio, "to_thread", immediate_to_thread) monkeypatch.setattr(module.server, "_reranker", None) monkeypatch.setattr(module.server, "_reranker_loaded", True) @@ -713,7 +720,9 @@ class DummyReranker: def predict(self, pairs): return [0.9, 0.1] - monkeypatch.setattr(module.server.qdrant_client, "query_points", fake_query_points) + monkeypatch.setattr( + module.server.qdrant_client, "query_points", fake_query_points + ) monkeypatch.setattr(module.asyncio, "to_thread", immediate_to_thread) monkeypatch.setattr(module.server, "_reranker", DummyReranker()) monkeypatch.setattr(module.server, "_reranker_loaded", True) @@ -730,9 +739,13 @@ def test_query_media_filters(monkeypatch): async def fake_query_points(*args, **kwargs): captured.update(kwargs) payload = {"title": "Result", "plex": {"rating_key": "1"}} - return types.SimpleNamespace(points=[types.SimpleNamespace(payload=payload, score=1.0)]) + return types.SimpleNamespace( + points=[types.SimpleNamespace(payload=payload, score=1.0)] + ) - monkeypatch.setattr(module.server.qdrant_client, "query_points", fake_query_points) + monkeypatch.setattr( + module.server.qdrant_client, "query_points", fake_query_points + ) result = asyncio.run( module.query_media.fn( @@ -800,7 +813,9 @@ async def fake_query_points(*args, **kwargs): points=[types.SimpleNamespace(payload=payload, score=1.0)] ) - monkeypatch.setattr(module.server.qdrant_client, "query_points", fake_query_points) + monkeypatch.setattr( + module.server.qdrant_client, "query_points", 
+            module.server.qdrant_client, "query_points", fake_query_points
+        )
 
         result = asyncio.run(
             module.query_media.fn(
diff --git a/tests/test_server_cli.py b/tests/test_server_cli.py
index 479062f..54f369d 100644
--- a/tests/test_server_cli.py
+++ b/tests/test_server_cli.py
@@ -34,18 +34,33 @@ def test_main_mount_disallowed_for_stdio():
 
 def test_main_http_with_mount_runs():
     with patch.object(server.server, "run") as mock_run:
-        server.main(["--transport", "sse", "--bind", "0.0.0.0", "--port", "8000", "--mount", "/mcp"])
-        mock_run.assert_called_once_with(transport="sse", host="0.0.0.0", port=8000, path="/mcp")
+        server.main(
+            [
+                "--transport",
+                "sse",
+                "--bind",
+                "0.0.0.0",
+                "--port",
+                "8000",
+                "--mount",
+                "/mcp",
+            ]
+        )
+        mock_run.assert_called_once_with(
+            transport="sse", host="0.0.0.0", port=8000, path="/mcp"
+        )
 
 
 def test_main_model_overrides():
     with patch.object(server.server, "run") as mock_run:
-        server.main([
-            "--dense-model",
-            "foo",
-            "--sparse-model",
-            "bar",
-        ])
+        server.main(
+            [
+                "--dense-model",
+                "foo",
+                "--sparse-model",
+                "bar",
+            ]
+        )
     assert server.settings.dense_model == "foo"
     assert server.settings.sparse_model == "bar"
     mock_run.assert_called_once_with(transport="stdio")
@@ -95,9 +110,7 @@ def test_env_only_http_configuration(monkeypatch):
     monkeypatch.setenv("MCP_PORT", "8000")
     with patch.object(server.server, "run") as mock_run:
         server.main([])
-    mock_run.assert_called_once_with(
-        transport="sse", host="0.0.0.0", port=8000
-    )
+    mock_run.assert_called_once_with(transport="sse", host="0.0.0.0", port=8000)
 
 
 def test_env_invalid_port(monkeypatch):
diff --git a/tests/test_server_config_additional.py b/tests/test_server_config_additional.py
new file mode 100644
index 0000000..0648644
--- /dev/null
+++ b/tests/test_server_config_additional.py
@@ -0,0 +1,43 @@
+from __future__ import annotations
+
+import pytest
+
+from mcp_plex.server.config import Settings
+
+
+def test_parse_aliases_rejects_invalid_json():
+    with pytest.raises(ValueError):
+        Settings._parse_aliases("not json")
+
+
+def test_parse_aliases_rejects_unexpected_type():
+    with pytest.raises(ValueError):
+        Settings._parse_aliases(123)  # type: ignore[arg-type]
+
+
+def test_parse_aliases_handles_empty_input():
+    assert Settings._parse_aliases("") == {}
+
+
+def test_parse_aliases_rejects_non_collection_json():
+    with pytest.raises(ValueError):
+        Settings._parse_aliases("123")
+
+
+def test_parse_aliases_skips_blank_keys():
+    assert Settings._parse_aliases({"": ["alias"]}) == {}
+
+
+def test_items_from_sequence_requires_pairs():
+    with pytest.raises(ValueError):
+        Settings._items_from_sequence([["only-one"]])
+
+
+def test_normalize_alias_values_rejects_invalid_type():
+    with pytest.raises(ValueError):
+        Settings._normalize_alias_values(123)  # type: ignore[arg-type]
+
+
+def test_items_from_sequence_rejects_invalid_entry_type():
+    with pytest.raises(ValueError):
+        Settings._items_from_sequence(["string-entry"])  # type: ignore[list-item]
diff --git a/tests/test_server_internal_additional.py b/tests/test_server_internal_additional.py
new file mode 100644
index 0000000..0a23185
--- /dev/null
+++ b/tests/test_server_internal_additional.py
@@ -0,0 +1,171 @@
+from __future__ import annotations
+
+import asyncio
+import importlib
+import importlib.metadata as metadata
+from types import SimpleNamespace
+
+import pytest
+from plexapi.exceptions import PlexApiException
+
+from mcp_plex import server as server_module
+from mcp_plex.server import PlexServer
+
+
+def test_version_fallback(monkeypatch):
+    def _raise(*args, **kwargs):
+        raise metadata.PackageNotFoundError
+
+    monkeypatch.setattr(metadata, "version", _raise)
+    reloaded = importlib.reload(server_module)
+    try:
+        assert reloaded.__version__ == "0.0.0"
+    finally:
+        importlib.reload(reloaded)
+
+
+def test_clear_plex_identity_cache_resets_state():
+    class DummyClient:
+        async def close(self) -> None:
+            return None
+
+    settings = server_module.server.settings.model_copy(
+        update={"qdrant_url": ":memory:"}
+    )
+    plex_server = PlexServer(settings=settings, qdrant_client=DummyClient())
+    plex_server._plex_identity = {"machineIdentifier": "abc"}
+    plex_server._plex_client = SimpleNamespace()
+
+    plex_server.clear_plex_identity_cache()
+
+    assert plex_server._plex_identity is None
+    assert plex_server._plex_client is None
+
+
+def test_request_model_skips_variadic_params():
+    def _callable(arg, *args, **kwargs):
+        return arg
+
+    model = server_module._request_model("test", _callable)
+    assert model is not None
+    assert list(model.model_fields) == ["arg"]
+
+
+def test_request_model_returns_none_for_no_params():
+    def _empty():
+        return None
+
+    assert server_module._request_model("noop", _empty) is None
+
+
+def test_ensure_plex_configuration_requires_settings(monkeypatch):
+    original_settings = server_module.server.settings
+    modified = original_settings.model_copy(
+        update={"plex_url": None, "plex_token": None}
+    )
+    monkeypatch.setattr(server_module.server, "_settings", modified)
+    try:
+        with pytest.raises(RuntimeError):
+            server_module._ensure_plex_configuration()
+    finally:
+        monkeypatch.setattr(server_module.server, "_settings", original_settings)
+
+
+def test_fetch_plex_identity_requires_identifier(monkeypatch):
+    async def _fake_get_client():
+        return SimpleNamespace(machineIdentifier=None)
+
+    monkeypatch.setattr(server_module, "_get_plex_client", _fake_get_client)
+    server_module.server._plex_identity = None
+
+    with pytest.raises(RuntimeError):
+        asyncio.run(server_module._fetch_plex_identity())
+
+
+def test_get_plex_players_handles_non_iterable_provides(monkeypatch):
+    class StubClient:
+        provides = 123
+        machineIdentifier = "machine"
+        clientIdentifier = "client"
+        address = "127.0.0.1"
+        port = "32400"
+        title = "Living Room"
+        product = "Plex"
+
+    async def _fake_get_client():
+        return SimpleNamespace(clients=lambda: [StubClient()])
+
+    monkeypatch.setattr(server_module, "_get_plex_client", _fake_get_client)
+    server_module.server._plex_client = None
+
+    players = asyncio.run(server_module._get_plex_players())
+    assert players[0]["display_name"] == "Living Room"
+
+
+def test_match_player_skips_blank_candidates():
+    player = {
+        "display_name": "Living Room",
+        "friendly_names": ["", "Lounge"],
+        "provides": {"player"},
+    }
+
+    selected = server_module._match_player("lounge", [player])
+    assert selected["display_name"] == "Living Room"
+
+
+def test_start_playback_requires_client():
+    with pytest.raises(ValueError):
+        asyncio.run(
+            server_module._start_playback(
+                "1",
+                {"display_name": "Living Room", "provides": {"player"}},
+                0,
+            )
+        )
+
+
+def test_start_playback_wraps_plex_errors(monkeypatch):
+    class PlexClientStub:
+        def playMedia(self, *args, **kwargs):
+            raise PlexApiException("boom")
+
+    async def _fake_get_client():
+        return SimpleNamespace(fetchItem=lambda path: object())
+
+    async def _fake_fetch_identity():
+        return {"machineIdentifier": "abc"}
+
+    monkeypatch.setattr(server_module, "_get_plex_client", _fake_get_client)
+    monkeypatch.setattr(server_module, "_fetch_plex_identity", _fake_fetch_identity)
+
+    with pytest.raises(RuntimeError):
+        asyncio.run(
+            server_module._start_playback(
+                "1",
+                {
+                    "display_name": "Living Room",
+                    "provides": {"player"},
+                    "client": PlexClientStub(),
+                },
+                0,
+            )
+        )
+
+
+def test_server_main_invokes_cli(monkeypatch):
+    captured: dict[str, object] = {}
+
+    def fake_main(argv=None):
+        captured["argv"] = argv
+
+    from mcp_plex.server import cli as server_cli
+
+    monkeypatch.setattr(server_cli, "main", fake_main)
+    server_module.main(["--help"])
+    assert captured["argv"] == ["--help"]
+
+
+def test_module_getattr_exposes_runconfig():
+    from mcp_plex.server.cli import RunConfig
+
+    assert server_module.RunConfig is RunConfig
diff --git a/tests/test_server_media_resources_additional.py b/tests/test_server_media_resources_additional.py
new file mode 100644
index 0000000..f999150
--- /dev/null
+++ b/tests/test_server_media_resources_additional.py
@@ -0,0 +1,34 @@
+from __future__ import annotations
+
+import asyncio
+
+import pytest
+
+from mcp_plex.server import media as media_helpers
+from mcp_plex.server import media_background, media_poster
+
+
+def test_media_poster_raises_when_missing(monkeypatch):
+    async def _run() -> None:
+        async def _fake_get_media(*args, **kwargs):
+            return {"plex": {"rating_key": "missing"}}
+
+        monkeypatch.setattr(media_helpers, "_get_media_data", _fake_get_media)
+
+        with pytest.raises(ValueError, match="Poster not available"):
+            await media_poster.fn(identifier="missing")
+
+    asyncio.run(_run())
+
+
+def test_media_background_raises_when_missing(monkeypatch):
+    async def _run() -> None:
+        async def _fake_get_media(*args, **kwargs):
+            return {"plex": {"rating_key": "missing"}}
+
+        monkeypatch.setattr(media_helpers, "_get_media_data", _fake_get_media)
+
+        with pytest.raises(ValueError, match="Background not available"):
+            await media_background.fn(identifier="missing")
+
+    asyncio.run(_run())
diff --git a/tests/test_server_media_utils.py b/tests/test_server_media_utils.py
new file mode 100644
index 0000000..7720ef3
--- /dev/null
+++ b/tests/test_server_media_utils.py
@@ -0,0 +1,19 @@
+from __future__ import annotations
+
+from mcp_plex.server import media as media_helpers
+
+
+def test_flatten_payload_with_none():
+    assert media_helpers._flatten_payload(None) == {}
+
+
+def test_normalize_identifier_handles_bad_string():
+    class BadStr:
+        def __str__(self) -> str:
+            raise RuntimeError("boom")
+
+    assert media_helpers._normalize_identifier(BadStr()) is None
+
+
+def test_extract_plex_metadata_returns_empty_when_missing():
+    assert media_helpers._extract_plex_metadata({"plex": "not-a-dict"}) == {}
diff --git a/uv.lock b/uv.lock
index c878390..ee42ebe 100644
--- a/uv.lock
+++ b/uv.lock
@@ -730,7 +730,7 @@ wheels = [
 
 [[package]]
 name = "mcp-plex"
-version = "2.0.7"
+version = "2.0.9"
 source = { editable = "." }
 dependencies = [
     { name = "fastapi" },