2 changes: 1 addition & 1 deletion docker/pyproject.deps.toml
@@ -1,6 +1,6 @@
 [project]
 name = "mcp-plex"
-version = "1.0.0"
+version = "1.0.1"
 requires-python = ">=3.11,<3.13"
 dependencies = [
     "fastmcp>=2.11.2",
3 changes: 2 additions & 1 deletion mcp_plex/common/__init__.py
@@ -3,5 +3,6 @@
 from __future__ import annotations

 from .cache import MediaCache
+from .validation import require_positive

-__all__ = ["MediaCache"]
+__all__ = ["MediaCache", "require_positive"]
16 changes: 16 additions & 0 deletions mcp_plex/common/validation.py
@@ -0,0 +1,16 @@
"""Validation helpers shared across packages."""

from __future__ import annotations


def require_positive(value: int, *, name: str) -> int:
"""Return *value* if it is a positive integer, otherwise raise an error."""

if not isinstance(value, int) or isinstance(value, bool):
raise TypeError(f"{name} must be an int")
if value <= 0:
raise ValueError(f"{name} must be positive")
return value


__all__ = ["require_positive"]
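For orientation, a minimal usage sketch of the new helper (behavior taken from the module above; the parameter name is only an example). It is also re-exported from `mcp_plex.common`, so either import path works:

```python
from mcp_plex.common.validation import require_positive  # or: from mcp_plex.common import require_positive

# Returns the value unchanged when it is a positive int.
workers = require_positive(4, name="enrichment_workers")

# Non-positive values raise a ValueError that names the offending parameter.
try:
    require_positive(0, name="enrichment_workers")
except ValueError as exc:
    print(exc)  # enrichment_workers must be positive
```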
106 changes: 53 additions & 53 deletions mcp_plex/loader/__init__.py
@@ -23,8 +23,8 @@
     IngestQueue,
     PersistenceQueue,
     chunk_sequence,
-    require_positive,
 )
+from ..common.validation import require_positive
 from .pipeline.orchestrator import LoaderOrchestrator
 from .pipeline.persistence import PersistenceStage as _PersistenceStage
 from ..common.types import (
@@ -1096,31 +1096,31 @@ def main(

     asyncio.run(
         load_media(
-            plex_url,
-            plex_token,
-            tmdb_api_key,
-            sample_dir,
-            qdrant_url,
-            qdrant_api_key,
-            qdrant_host,
-            qdrant_port,
-            qdrant_grpc_port,
-            qdrant_https,
-            qdrant_prefer_grpc,
-            dense_model,
-            sparse_model,
-            continuous,
-            delay,
-            imdb_cache,
-            imdb_max_retries,
-            imdb_backoff,
-            imdb_requests_per_window,
-            imdb_window_seconds,
-            imdb_queue,
-            upsert_buffer_size,
-            plex_chunk_size,
-            enrichment_batch_size,
-            enrichment_workers,
+            plex_url=plex_url,
+            plex_token=plex_token,
+            tmdb_api_key=tmdb_api_key,
+            sample_dir=sample_dir,
+            qdrant_url=qdrant_url,
+            qdrant_api_key=qdrant_api_key,
+            qdrant_host=qdrant_host,
+            qdrant_port=qdrant_port,
+            qdrant_grpc_port=qdrant_grpc_port,
+            qdrant_https=qdrant_https,
+            qdrant_prefer_grpc=qdrant_prefer_grpc,
+            dense_model_name=dense_model,
+            sparse_model_name=sparse_model,
+            continuous=continuous,
+            delay=delay,
+            imdb_cache=imdb_cache,
+            imdb_max_retries=imdb_max_retries,
+            imdb_backoff=imdb_backoff,
+            imdb_requests_per_window=imdb_requests_per_window,
+            imdb_window_seconds=imdb_window_seconds,
+            imdb_queue=imdb_queue,
+            upsert_buffer_size=upsert_buffer_size,
+            plex_chunk_size=plex_chunk_size,
+            enrichment_batch_size=enrichment_batch_size,
+            enrichment_workers=enrichment_workers,
         )
     )

@@ -1160,33 +1160,33 @@ async def load_media(

     while True:
         await run(
-            plex_url,
-            plex_token,
-            tmdb_api_key,
-            sample_dir,
-            qdrant_url,
-            qdrant_api_key,
-            qdrant_host,
-            qdrant_port,
-            qdrant_grpc_port,
-            qdrant_https,
-            qdrant_prefer_grpc,
-            dense_model_name,
-            sparse_model_name,
-            imdb_cache,
-            imdb_max_retries,
-            imdb_backoff,
-            imdb_requests_per_window,
-            imdb_window_seconds,
-            imdb_queue,
-            upsert_buffer_size,
-            plex_chunk_size,
-            enrichment_batch_size,
-            enrichment_workers,
-            qdrant_batch_size,
-            max_concurrent_upserts,
-            qdrant_retry_attempts,
-            qdrant_retry_backoff,
+            plex_url=plex_url,
+            plex_token=plex_token,
+            tmdb_api_key=tmdb_api_key,
+            sample_dir=sample_dir,
+            qdrant_url=qdrant_url,
+            qdrant_api_key=qdrant_api_key,
+            qdrant_host=qdrant_host,
+            qdrant_port=qdrant_port,
+            qdrant_grpc_port=qdrant_grpc_port,
+            qdrant_https=qdrant_https,
+            qdrant_prefer_grpc=qdrant_prefer_grpc,
+            dense_model_name=dense_model_name,
+            sparse_model_name=sparse_model_name,
+            imdb_cache_path=imdb_cache,
+            imdb_max_retries=imdb_max_retries,
+            imdb_backoff=imdb_backoff,
+            imdb_queue_path=imdb_queue,
+            imdb_requests_per_window=imdb_requests_per_window,
+            imdb_window_seconds=imdb_window_seconds,
+            upsert_buffer_size=upsert_buffer_size,
+            plex_chunk_size=plex_chunk_size,
+            enrichment_batch_size=enrichment_batch_size,
+            enrichment_workers=enrichment_workers,
+            qdrant_batch_size=qdrant_batch_size,
+            max_concurrent_upserts=max_concurrent_upserts,
+            qdrant_retry_attempts=qdrant_retry_attempts,
+            qdrant_retry_backoff=qdrant_retry_backoff,
         )
         if not continuous:
             break
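A side note on the switch from positional to keyword arguments in these call sites: it makes the mapping from CLI-facing names to `run()`'s parameters explicit (e.g. `imdb_cache` feeding `imdb_cache_path`, `imdb_queue` feeding `imdb_queue_path`) and prevents silent mis-binding if the callee's long signature is ever reordered. A minimal sketch of the failure mode keyword calls guard against, using a deliberately simplified, hypothetical stand-in for the real entry point:

```python
def run(dense_model_name: str, sparse_model_name: str) -> None:
    """Stand-in for the real loader entry point; two parameters for brevity."""
    print(f"dense={dense_model_name} sparse={sparse_model_name}")

# Positional call: if the callee's parameters are ever reordered, the values
# swap silently with no error.
run("dense-model", "sparse-model")

# Keyword call: immune to reordering, and a renamed parameter fails loudly
# with a TypeError instead of binding to the wrong slot.
run(dense_model_name="dense-model", sparse_model_name="sparse-model")
```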
2 changes: 1 addition & 1 deletion mcp_plex/loader/pipeline/__init__.py
@@ -16,8 +16,8 @@
     PersistenceQueue,
     SampleBatch,
     chunk_sequence,
-    require_positive,
 )
+from ...common.validation import require_positive

 if TYPE_CHECKING:
     from .enrichment import EnrichmentStage
10 changes: 1 addition & 9 deletions mcp_plex/loader/pipeline/channels.py
@@ -13,6 +13,7 @@
 from typing import TYPE_CHECKING, Any, Final, Iterable, Sequence, TypeVar, TypeAlias

 from ...common.types import AggregatedItem
+from ...common.validation import require_positive

 try:  # Only import plexapi when available; the sample data mode does not require it.
     from plexapi.base import PlexPartialObject
@@ -72,14 +72,6 @@ class SampleBatch:
 PersistenceQueue: TypeAlias = asyncio.Queue[PersistenceQueueItem]


-def require_positive(value: int, *, name: str) -> int:
-    """Return *value* if positive, otherwise raise a ``ValueError``."""
-
-    if value <= 0:
-        raise ValueError(f"{name} must be positive")
-    return value
-
-
 def chunk_sequence(items: Sequence[T], size: int) -> Iterable[Sequence[T]]:
     """Yield ``items`` in chunks of at most ``size`` elements."""

@@ -133,7 +126,6 @@ def snapshot(self) -> list[str]:
"PERSIST_DONE",
"IngestQueue",
"PersistenceQueue",
"require_positive",
"chunk_sequence",
"IMDbRetryQueue",
]
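Worth flagging, though probably intentional: the retired pipeline-local helper only checked the sign, so non-int values that happen to compare against 0 (floats, bools) used to pass through, whereas the shared replacement rejects them with a `TypeError` before the sign check. A small illustration (the parameter name is arbitrary):

```python
from mcp_plex.common.validation import require_positive

for bad in (2.5, True):  # previously returned unchanged by the old helper
    try:
        require_positive(bad, name="plex_chunk_size")  # type: ignore[arg-type]
    except TypeError as exc:
        print(exc)  # plex_chunk_size must be an int
```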
2 changes: 1 addition & 1 deletion mcp_plex/loader/pipeline/enrichment.py
@@ -29,8 +29,8 @@
     PersistenceQueue,
     SampleBatch,
     chunk_sequence,
-    require_positive,
 )
+from ...common.validation import require_positive

 from ...common.types import (
     AggregatedItem,
2 changes: 1 addition & 1 deletion mcp_plex/loader/pipeline/persistence.py
@@ -10,8 +10,8 @@
     PERSIST_DONE,
     PersistenceQueue,
     chunk_sequence,
-    require_positive,
 )
+from ...common.validation import require_positive

 if TYPE_CHECKING:  # pragma: no cover - typing helpers only
     from qdrant_client import AsyncQdrantClient, models
2 changes: 1 addition & 1 deletion pyproject.toml
@@ -4,7 +4,7 @@ build-backend = "setuptools.build_meta"

 [project]
 name = "mcp-plex"
-version = "1.0.0"
+version = "1.0.1"

 description = "Plex-Oriented Model Context Protocol Server"
 requires-python = ">=3.11,<3.13"
19 changes: 19 additions & 0 deletions tests/test_common_validation.py
@@ -0,0 +1,19 @@
+import pytest
+
+from mcp_plex.common.validation import require_positive
+
+
+def test_require_positive_accepts_positive_int():
+    assert require_positive(5, name="value") == 5
+
+
+@pytest.mark.parametrize("bad", [0, -1, -100])
+def test_require_positive_rejects_non_positive_int(bad):
+    with pytest.raises(ValueError, match="value must be positive"):
+        require_positive(bad, name="value")
+
+
+@pytest.mark.parametrize("bad_type", [1.5, "1", None, object(), True])
+def test_require_positive_enforces_int_type(bad_type):
+    with pytest.raises(TypeError, match="value must be an int"):
+        require_positive(bad_type, name="value")  # type: ignore[arg-type]
61 changes: 55 additions & 6 deletions tests/test_loader_cli.py
@@ -89,9 +89,9 @@ async def invoke():
 def test_cli_model_overrides(monkeypatch):
     captured: dict[str, str] = {}

-    async def fake_run(*args, **kwargs):
-        captured["dense"] = args[11]
-        captured["sparse"] = args[12]
+    async def fake_run(**kwargs):
+        captured["dense"] = kwargs["dense_model_name"]
+        captured["sparse"] = kwargs["sparse_model_name"]

     monkeypatch.setattr(loader, "run", fake_run)

@@ -114,9 +114,9 @@ def test_cli_model_env(monkeypatch):
 def test_cli_model_env(monkeypatch):
     captured: dict[str, str] = {}

-    async def fake_run(*args, **kwargs):
-        captured["dense"] = args[11]
-        captured["sparse"] = args[12]
+    async def fake_run(**kwargs):
+        captured["dense"] = kwargs["dense_model_name"]
+        captured["sparse"] = kwargs["sparse_model_name"]

     monkeypatch.setattr(loader, "run", fake_run)

@@ -138,6 +138,55 @@ async def fake_run(*args, **kwargs):
assert captured["sparse"] == "bar"


def test_load_media_passes_imdb_queue_path(monkeypatch, tmp_path):
imdb_queue = tmp_path / "queue.json"
imdb_cache = tmp_path / "cache.json"

captured_kwargs: dict[str, object] = {}

async def fake_run(**kwargs):
captured_kwargs.update(kwargs)

monkeypatch.setattr(loader, "run", fake_run)

asyncio.run(
loader.load_media(
plex_url="http://localhost",
plex_token="token",
tmdb_api_key="key",
sample_dir=None,
qdrant_url=":memory:",
qdrant_api_key=None,
qdrant_host=None,
qdrant_port=6333,
qdrant_grpc_port=6334,
qdrant_https=False,
qdrant_prefer_grpc=False,
dense_model_name="dense",
sparse_model_name="sparse",
continuous=False,
delay=0.0,
imdb_cache=imdb_cache,
imdb_max_retries=3,
imdb_backoff=1.0,
imdb_requests_per_window=None,
imdb_window_seconds=1.0,
imdb_queue=imdb_queue,
upsert_buffer_size=1,
plex_chunk_size=1,
enrichment_batch_size=1,
enrichment_workers=1,
qdrant_batch_size=1,
max_concurrent_upserts=1,
qdrant_retry_attempts=1,
qdrant_retry_backoff=1.0,
)
)

assert captured_kwargs["imdb_queue_path"] == imdb_queue
assert captured_kwargs["imdb_cache_path"] == imdb_cache


def test_loader_script_entrypoint(monkeypatch):
monkeypatch.setattr(sys, "argv", ["loader", "--help"])
module = sys.modules.pop("mcp_plex.loader", None)
2 changes: 1 addition & 1 deletion uv.lock

Some generated files are not rendered by default.