diff --git a/.github/workflows/impl-generate.yml b/.github/workflows/impl-generate.yml
index 0c57c04ada..b2d032e832 100644
--- a/.github/workflows/impl-generate.yml
+++ b/.github/workflows/impl-generate.yml
@@ -534,7 +534,7 @@ jobs:
# Upload PNG (with watermark)
if [ -f "$IMPL_DIR/plot.png" ]; then
- gsutil cp "$IMPL_DIR/plot.png" "${STAGING_PATH}/plot.png"
+ gsutil -h "Cache-Control:public, max-age=86400" cp "$IMPL_DIR/plot.png" "${STAGING_PATH}/plot.png"
gsutil acl ch -u AllUsers:R "${STAGING_PATH}/plot.png" 2>/dev/null || true
echo "png_url=${PUBLIC_URL}/plot.png" >> $GITHUB_OUTPUT
echo "uploaded=true" >> $GITHUB_OUTPUT
@@ -545,7 +545,7 @@ jobs:
# Upload thumbnail
if [ -f "$IMPL_DIR/plot_thumb.png" ]; then
- gsutil cp "$IMPL_DIR/plot_thumb.png" "${STAGING_PATH}/plot_thumb.png"
+ gsutil -h "Cache-Control:public, max-age=86400" cp "$IMPL_DIR/plot_thumb.png" "${STAGING_PATH}/plot_thumb.png"
gsutil acl ch -u AllUsers:R "${STAGING_PATH}/plot_thumb.png" 2>/dev/null || true
echo "thumb_url=${PUBLIC_URL}/plot_thumb.png" >> $GITHUB_OUTPUT
echo "::notice::Uploaded thumbnail"
@@ -553,7 +553,7 @@ jobs:
# Upload HTML (interactive libraries)
if [ -f "$IMPL_DIR/plot.html" ]; then
- gsutil cp "$IMPL_DIR/plot.html" "${STAGING_PATH}/plot.html"
+ gsutil -h "Cache-Control:public, max-age=86400" cp "$IMPL_DIR/plot.html" "${STAGING_PATH}/plot.html"
gsutil acl ch -u AllUsers:R "${STAGING_PATH}/plot.html" 2>/dev/null || true
echo "html_url=${PUBLIC_URL}/plot.html" >> $GITHUB_OUTPUT
fi
diff --git a/.github/workflows/impl-merge.yml b/.github/workflows/impl-merge.yml
index 0307b0ce5d..0fc033f5cf 100644
--- a/.github/workflows/impl-merge.yml
+++ b/.github/workflows/impl-merge.yml
@@ -201,7 +201,7 @@ jobs:
PRODUCTION="gs://pyplots-images/plots/${SPEC_ID}/${LIBRARY}"
# Copy from staging to production
- gsutil -m cp -r "${STAGING}/*" "${PRODUCTION}/" 2>/dev/null || echo "No staging files to promote"
+ gsutil -m -h "Cache-Control:public, max-age=86400" cp -r "${STAGING}/*" "${PRODUCTION}/" 2>/dev/null || echo "No staging files to promote"
# Make production files public
gsutil -m acl ch -r -u AllUsers:R "${PRODUCTION}/" 2>/dev/null || true
diff --git a/.github/workflows/impl-repair.yml b/.github/workflows/impl-repair.yml
index 21a455a68c..29cfc1f7ce 100644
--- a/.github/workflows/impl-repair.yml
+++ b/.github/workflows/impl-repair.yml
@@ -187,17 +187,17 @@ jobs:
gcloud auth activate-service-account --key-file=/tmp/gcs-key.json
if [ -f "$IMPL_DIR/plot.png" ]; then
- gsutil cp "$IMPL_DIR/plot.png" "${STAGING_PATH}/plot.png"
+ gsutil -h "Cache-Control:public, max-age=86400" cp "$IMPL_DIR/plot.png" "${STAGING_PATH}/plot.png"
gsutil acl ch -u AllUsers:R "${STAGING_PATH}/plot.png" 2>/dev/null || true
fi
if [ -f "$IMPL_DIR/plot_thumb.png" ]; then
- gsutil cp "$IMPL_DIR/plot_thumb.png" "${STAGING_PATH}/plot_thumb.png"
+ gsutil -h "Cache-Control:public, max-age=86400" cp "$IMPL_DIR/plot_thumb.png" "${STAGING_PATH}/plot_thumb.png"
gsutil acl ch -u AllUsers:R "${STAGING_PATH}/plot_thumb.png" 2>/dev/null || true
fi
if [ -f "$IMPL_DIR/plot.html" ]; then
- gsutil cp "$IMPL_DIR/plot.html" "${STAGING_PATH}/plot.html"
+ gsutil -h "Cache-Control:public, max-age=86400" cp "$IMPL_DIR/plot.html" "${STAGING_PATH}/plot.html"
gsutil acl ch -u AllUsers:R "${STAGING_PATH}/plot.html" 2>/dev/null || true
fi
diff --git a/api/cloudbuild.yaml b/api/cloudbuild.yaml
index ac5363f49b..106108a8a2 100644
--- a/api/cloudbuild.yaml
+++ b/api/cloudbuild.yaml
@@ -3,7 +3,7 @@ substitutions:
_SERVICE_NAME: pyplots-backend
_REGION: europe-west4
_MEMORY: 512Mi
- _CPU: "2"
+ _CPU: "1"
_MIN_INSTANCES: "1"
_MAX_INSTANCES: "3"
@@ -55,6 +55,7 @@ steps:
- "--add-cloudsql-instances=pyplots:europe-west4:pyplots-db"
- "--set-secrets=DATABASE_URL=DATABASE_URL:latest"
- "--set-env-vars=ENVIRONMENT=production"
+ - "--execution-environment=gen2"
- "--set-env-vars=GOOGLE_CLOUD_PROJECT=$PROJECT_ID"
- "--set-env-vars=GCS_BUCKET=pyplots-images"
id: "deploy"
diff --git a/api/routers/plots.py b/api/routers/plots.py
index 5a74bc1f3f..29d98dd1c5 100644
--- a/api/routers/plots.py
+++ b/api/routers/plots.py
@@ -3,7 +3,7 @@
import logging
from collections.abc import Callable
-from fastapi import APIRouter, Depends, Request
+from fastapi import APIRouter, Depends, Query, Request
from sqlalchemy.exc import SQLAlchemyError
from sqlalchemy.ext.asyncio import AsyncSession
@@ -267,6 +267,8 @@ def _build_cache_key(filter_groups: list[dict]) -> str:
"""
Build cache key from filter groups.
+ Groups are sorted by category so key is stable regardless of query param order.
+
Args:
filter_groups: List of filter group dicts
@@ -276,7 +278,8 @@ def _build_cache_key(filter_groups: list[dict]) -> str:
if not filter_groups:
return "filter:all"
- cache_parts = [f"{g['category']}={','.join(sorted(g['values']))}" for g in filter_groups]
+ normalized = sorted(filter_groups, key=lambda g: g["category"])
+ cache_parts = [f"{g['category']}={','.join(sorted(g['values']))}" for g in normalized]
return f"filter:{':'.join(cache_parts)}"
@@ -370,7 +373,12 @@ def _filter_images(
@router.get("/plots/filter", response_model=FilteredPlotsResponse)
-async def get_filtered_plots(request: Request, db: AsyncSession = Depends(require_db)):
+async def get_filtered_plots(
+ request: Request,
+ db: AsyncSession = Depends(require_db),
+ limit: int | None = Query(None, ge=1),
+ offset: int = Query(0, ge=0),
+):
"""
Get filtered plot images with counts for all filter categories.
@@ -398,55 +406,65 @@ async def get_filtered_plots(request: Request, db: AsyncSession = Depends(requir
# Parse query parameters
filter_groups = _parse_filter_groups(request)
- # Check cache
+ # Check cache (cache stores unpaginated result; pagination applied after)
cache_key = _build_cache_key(filter_groups)
+ cached: FilteredPlotsResponse | None = None
try:
cached = get_cache(cache_key)
- if cached:
- return cached
except Exception as e:
- # Cache failures are non-fatal, log and continue
logger.warning("Cache read failed for key %s: %s", cache_key, e)
- # Fetch data from database
- try:
- repo = SpecRepository(db)
- all_specs = await repo.get_all()
- except SQLAlchemyError as e:
- logger.error("Database query failed in get_filtered_plots: %s", e)
- raise DatabaseQueryError("fetch_specs", str(e)) from e
-
- # Build data structures
- spec_lookup = _build_spec_lookup(all_specs)
- impl_lookup = _build_impl_lookup(all_specs)
- all_images = _collect_all_images(all_specs)
- spec_id_to_tags = {spec_id: spec_data["tags"] for spec_id, spec_data in spec_lookup.items()}
-
- # Filter images
- filtered_images = _filter_images(all_images, filter_groups, spec_lookup, impl_lookup)
-
- # Calculate counts
- global_counts = _calculate_global_counts(all_specs)
- counts = _calculate_contextual_counts(filtered_images, spec_id_to_tags, impl_lookup)
- or_counts = _calculate_or_counts(filter_groups, all_images, spec_id_to_tags, spec_lookup, impl_lookup)
-
- # Build spec_id -> title mapping for search/tooltips
- spec_titles = {spec_id: data["spec"].title for spec_id, data in spec_lookup.items() if data["spec"].title}
-
- # Build and cache response
- result = FilteredPlotsResponse(
- total=len(filtered_images),
- images=filtered_images,
- counts=counts,
- globalCounts=global_counts,
- orCounts=or_counts,
- specTitles=spec_titles,
+ if cached is None:
+ # Fetch data from database
+ try:
+ repo = SpecRepository(db)
+ all_specs = await repo.get_all()
+ except SQLAlchemyError as e:
+ logger.error("Database query failed in get_filtered_plots: %s", e)
+ raise DatabaseQueryError("fetch_specs", str(e)) from e
+
+ # Build data structures
+ spec_lookup = _build_spec_lookup(all_specs)
+ impl_lookup = _build_impl_lookup(all_specs)
+ all_images = _collect_all_images(all_specs)
+ spec_id_to_tags = {spec_id: spec_data["tags"] for spec_id, spec_data in spec_lookup.items()}
+
+ # Filter images
+ filtered_images = _filter_images(all_images, filter_groups, spec_lookup, impl_lookup)
+
+ # Calculate counts (always from ALL filtered images, not paginated)
+ global_counts = _calculate_global_counts(all_specs)
+ counts = _calculate_contextual_counts(filtered_images, spec_id_to_tags, impl_lookup)
+ or_counts = _calculate_or_counts(filter_groups, all_images, spec_id_to_tags, spec_lookup, impl_lookup)
+
+ # Build spec_id -> title mapping for search/tooltips
+ spec_titles = {spec_id: data["spec"].title for spec_id, data in spec_lookup.items() if data["spec"].title}
+
+ # Cache the full (unpaginated) result
+ cached = FilteredPlotsResponse(
+ total=len(filtered_images),
+ images=filtered_images,
+ counts=counts,
+ globalCounts=global_counts,
+ orCounts=or_counts,
+ specTitles=spec_titles,
+ )
+
+ try:
+ set_cache(cache_key, cached)
+ except Exception as e:
+ logger.warning("Cache write failed for key %s: %s", cache_key, e)
+
+ # Apply pagination on top of (possibly cached) result
+ paginated = cached.images[offset : offset + limit] if limit else cached.images[offset:]
+
+ return FilteredPlotsResponse(
+ total=cached.total,
+ images=paginated,
+ counts=cached.counts,
+ globalCounts=cached.globalCounts,
+ orCounts=cached.orCounts,
+ specTitles=cached.specTitles,
+ offset=offset,
+ limit=limit,
)
-
- try:
- set_cache(cache_key, result)
- except Exception as e:
- # Cache failures are non-fatal, log and continue
- logger.warning("Cache write failed for key %s: %s", cache_key, e)
-
- return result
diff --git a/api/schemas.py b/api/schemas.py
index c18d47df09..acac9b9167 100644
--- a/api/schemas.py
+++ b/api/schemas.py
@@ -98,6 +98,8 @@ class FilteredPlotsResponse(BaseModel):
globalCounts: dict[str, dict[str, int]] # Same structure for global counts
orCounts: list[dict[str, int]] # Per-group OR counts
specTitles: dict[str, str] = {} # Mapping spec_id -> title for search/tooltips
+ offset: int = 0
+ limit: int | None = None
class LibraryInfo(BaseModel):
diff --git a/app/cloudbuild.yaml b/app/cloudbuild.yaml
index bf656b9acd..d610fe3145 100644
--- a/app/cloudbuild.yaml
+++ b/app/cloudbuild.yaml
@@ -42,7 +42,7 @@ steps:
- "--memory"
- "256Mi"
- "--cpu"
- - "2"
+ - "1"
- "--timeout"
- "60"
- "--min-instances"
diff --git a/app/index.html b/app/index.html
index 5c61ac0edf..99e5f8318c 100644
--- a/app/index.html
+++ b/app/index.html
@@ -23,6 +23,8 @@
+
+
diff --git a/app/src/components/SpecTabs.tsx b/app/src/components/SpecTabs.tsx
index 2e07a99409..cabdf571af 100644
--- a/app/src/components/SpecTabs.tsx
+++ b/app/src/components/SpecTabs.tsx
@@ -15,8 +15,11 @@ import ContentCopyIcon from '@mui/icons-material/ContentCopy';
import CheckIcon from '@mui/icons-material/Check';
import ExpandMoreIcon from '@mui/icons-material/ExpandMore';
import ExpandLessIcon from '@mui/icons-material/ExpandLess';
-import { Prism as SyntaxHighlighter } from 'react-syntax-highlighter';
+import SyntaxHighlighter from 'react-syntax-highlighter/dist/esm/prism-light';
import { oneLight } from 'react-syntax-highlighter/dist/esm/styles/prism';
+import python from 'react-syntax-highlighter/dist/esm/languages/prism/python';
+
+SyntaxHighlighter.registerLanguage('python', python);
// Map tag category names to URL parameter names
const SPEC_TAG_PARAM_MAP: Record<string, string> = {
diff --git a/app/src/router.tsx b/app/src/router.tsx
index 7a3dc6b97d..45642921b0 100644
--- a/app/src/router.tsx
+++ b/app/src/router.tsx
@@ -1,34 +1,37 @@
import { createBrowserRouter, RouterProvider } from 'react-router-dom';
import { HelmetProvider } from 'react-helmet-async';
+import Box from '@mui/material/Box';
+import CircularProgress from '@mui/material/CircularProgress';
import { Layout, AppDataProvider } from './components/Layout';
import { ErrorBoundary } from './components/ErrorBoundary';
import { HomePage } from './pages/HomePage';
import { SpecPage } from './pages/SpecPage';
-import { CatalogPage } from './pages/CatalogPage';
-import { InteractivePage } from './pages/InteractivePage';
-import { DebugPage } from './pages/DebugPage';
-import { LegalPage } from './pages/LegalPage';
-import { McpPage } from './pages/McpPage';
import { NotFoundPage } from './pages/NotFoundPage';
+const LazyFallback = () => (
+  <Box sx={{ display: 'flex', justifyContent: 'center', py: 8 }}>
+    <CircularProgress />
+  </Box>
+);
+
const router = createBrowserRouter([
{
path: '/',
element: <Layout />,
children: [
{ index: true, element: <HomePage /> },
- { path: 'catalog', element: },
- { path: 'legal', element: },
- { path: 'mcp', element: },
+ { path: 'catalog', lazy: () => import('./pages/CatalogPage').then(m => ({ Component: m.CatalogPage, HydrateFallback: LazyFallback })) },
+ { path: 'legal', lazy: () => import('./pages/LegalPage').then(m => ({ Component: m.LegalPage, HydrateFallback: LazyFallback })) },
+ { path: 'mcp', lazy: () => import('./pages/McpPage').then(m => ({ Component: m.McpPage, HydrateFallback: LazyFallback })) },
{ path: ':specId', element: <SpecPage /> },
{ path: ':specId/:library', element: <SpecPage /> },
{ path: '*', element: <NotFoundPage /> },
],
},
// Fullscreen interactive view (outside Layout but inside AppDataProvider)
- { path: 'interactive/:specId/:library', element: <InteractivePage /> },
+ { path: 'interactive/:specId/:library', lazy: () => import('./pages/InteractivePage').then(m => ({ Component: m.InteractivePage, HydrateFallback: LazyFallback })) },
// Hidden debug dashboard (outside Layout - no header/footer)
- { path: 'debug', element: <DebugPage /> },
+ { path: 'debug', lazy: () => import('./pages/DebugPage').then(m => ({ Component: m.DebugPage, HydrateFallback: LazyFallback })) },
{ path: '*', element: <NotFoundPage /> },
]);
diff --git a/app/vite.config.ts b/app/vite.config.ts
index 0e52a684d4..647f3c90d5 100644
--- a/app/vite.config.ts
+++ b/app/vite.config.ts
@@ -11,4 +11,14 @@ export default defineConfig({
port: 3000,
host: true,
},
+ build: {
+ rollupOptions: {
+ output: {
+ manualChunks(id) {
+ if (id.includes('node_modules/@mui/')) return 'mui';
+ if (id.includes('node_modules/react/') || id.includes('node_modules/react-dom/') || id.includes('node_modules/react-router-dom/')) return 'vendor';
+ },
+ },
+ },
+ },
});
diff --git a/tests/unit/api/mcp/test_tools.py b/tests/unit/api/mcp/test_tools.py
index 4d556f3d85..8c2e002590 100644
--- a/tests/unit/api/mcp/test_tools.py
+++ b/tests/unit/api/mcp/test_tools.py
@@ -9,22 +9,15 @@
import pytest
# Import the tool functions from the module
-# Note: These are FunctionTool objects, we need to access .fn to get the actual callable
-from api.mcp.server import get_implementation as get_implementation_tool
-from api.mcp.server import get_spec_detail as get_spec_detail_tool
-from api.mcp.server import get_tag_values as get_tag_values_tool
-from api.mcp.server import list_libraries as list_libraries_tool
-from api.mcp.server import list_specs as list_specs_tool
-from api.mcp.server import search_specs_by_tags as search_specs_by_tags_tool
-
-
-# Extract the actual functions from the FunctionTool wrappers
-list_specs = list_specs_tool.fn
-search_specs_by_tags = search_specs_by_tags_tool.fn
-get_spec_detail = get_spec_detail_tool.fn
-get_implementation = get_implementation_tool.fn
-list_libraries = list_libraries_tool.fn
-get_tag_values = get_tag_values_tool.fn
+# With FastMCP's @mcp_server.tool() decorator, these are plain async functions
+from api.mcp.server import (
+ get_implementation,
+ get_spec_detail,
+ get_tag_values,
+ list_libraries,
+ list_specs,
+ search_specs_by_tags,
+)
@pytest.fixture
@@ -358,7 +351,8 @@ async def test_all_tools_registered(self):
"""MCP server should have all 6 tools registered."""
from api.mcp.server import mcp_server
- tool_names = await mcp_server.get_tools()
+ tools = await mcp_server.list_tools()
+ tool_names = {t.name for t in tools}
expected = {
"list_specs",
"search_specs_by_tags",
@@ -367,31 +361,29 @@ async def test_all_tools_registered(self):
"list_libraries",
"get_tag_values",
}
- assert set(tool_names) == expected
+ assert tool_names == expected
@pytest.mark.asyncio
async def test_tool_objects_have_correct_type(self):
"""Each registered tool should be a FunctionTool with a callable fn."""
from api.mcp.server import mcp_server
- tool_names = await mcp_server.get_tools()
- for name in tool_names:
- tool = await mcp_server.get_tool(name)
- assert hasattr(tool, "fn"), f"Tool {name} has no 'fn' attribute"
- assert callable(tool.fn), f"Tool {name}.fn is not callable"
+ tools = await mcp_server.list_tools()
+ for tool in tools:
+ assert hasattr(tool, "fn"), f"Tool {tool.name} has no 'fn' attribute"
+ assert callable(tool.fn), f"Tool {tool.name}.fn is not callable"
@pytest.mark.asyncio
async def test_tool_schemas_are_valid(self):
"""Each tool should have a valid JSON Schema for its parameters."""
from api.mcp.server import mcp_server
- tool_names = await mcp_server.get_tools()
- for name in tool_names:
- tool = await mcp_server.get_tool(name)
+ tools = await mcp_server.list_tools()
+ for tool in tools:
schema = tool.parameters
- assert isinstance(schema, dict), f"Tool {name} schema is not a dict"
- assert "properties" in schema, f"Tool {name} schema has no 'properties'"
- assert schema.get("type") == "object", f"Tool {name} schema type is not 'object'"
+ assert isinstance(schema, dict), f"Tool {tool.name} schema is not a dict"
+ assert "properties" in schema, f"Tool {tool.name} schema has no 'properties'"
+ assert schema.get("type") == "object", f"Tool {tool.name} schema type is not 'object'"
@pytest.mark.asyncio
async def test_get_tag_values_via_call_tool(self):
diff --git a/tests/unit/api/test_routers.py b/tests/unit/api/test_routers.py
index cad09c5ba2..21c058695f 100644
--- a/tests/unit/api/test_routers.py
+++ b/tests/unit/api/test_routers.py
@@ -841,6 +841,8 @@ def test_filter_cached(self, client: TestClient) -> None:
cached_response.globalCounts = {}
cached_response.orCounts = []
cached_response.specTitles = {}
+ cached_response.offset = 0
+ cached_response.limit = None
with (
patch(DB_CONFIG_PATCH, return_value=True),
@@ -849,6 +851,115 @@ def test_filter_cached(self, client: TestClient) -> None:
response = client.get("/plots/filter")
assert response.status_code == 200
+ def test_filter_with_limit(self, client: TestClient, mock_spec) -> None:
+ """Filter with limit should return limited images but total of all."""
+ # Add a second impl to have 2 images
+ mock_impl2 = MagicMock()
+ mock_impl2.library_id = "seaborn"
+ mock_impl2.preview_url = TEST_IMAGE_URL
+ mock_impl2.preview_thumb = TEST_THUMB_URL
+ mock_impl2.preview_html = None
+ mock_impl2.quality_score = 85.0
+ mock_impl2.impl_tags = {}
+ mock_spec.impls.append(mock_impl2)
+
+ mock_spec_repo = MagicMock()
+ mock_spec_repo.get_all = AsyncMock(return_value=[mock_spec])
+
+ with (
+ patch(DB_CONFIG_PATCH, return_value=True),
+ patch("api.routers.plots.get_cache", return_value=None),
+ patch("api.routers.plots.set_cache"),
+ patch("api.routers.plots.SpecRepository", return_value=mock_spec_repo),
+ ):
+ response = client.get("/plots/filter?limit=1")
+ assert response.status_code == 200
+ data = response.json()
+ assert len(data["images"]) == 1
+ assert data["total"] == 2
+ assert data["limit"] == 1
+ assert data["offset"] == 0
+
+ def test_filter_with_offset(self, client: TestClient, mock_spec) -> None:
+ """Filter with offset should skip images."""
+ mock_impl2 = MagicMock()
+ mock_impl2.library_id = "seaborn"
+ mock_impl2.preview_url = TEST_IMAGE_URL
+ mock_impl2.preview_thumb = TEST_THUMB_URL
+ mock_impl2.preview_html = None
+ mock_impl2.quality_score = 85.0
+ mock_impl2.impl_tags = {}
+ mock_spec.impls.append(mock_impl2)
+
+ mock_spec_repo = MagicMock()
+ mock_spec_repo.get_all = AsyncMock(return_value=[mock_spec])
+
+ with (
+ patch(DB_CONFIG_PATCH, return_value=True),
+ patch("api.routers.plots.get_cache", return_value=None),
+ patch("api.routers.plots.set_cache"),
+ patch("api.routers.plots.SpecRepository", return_value=mock_spec_repo),
+ ):
+ response = client.get("/plots/filter?offset=1")
+ assert response.status_code == 200
+ data = response.json()
+ assert len(data["images"]) == 1
+ assert data["total"] == 2
+ assert data["offset"] == 1
+
+ def test_filter_with_limit_and_offset(self, client: TestClient, mock_spec) -> None:
+ """Filter with limit and offset combined."""
+ mock_impl2 = MagicMock()
+ mock_impl2.library_id = "seaborn"
+ mock_impl2.preview_url = TEST_IMAGE_URL
+ mock_impl2.preview_thumb = TEST_THUMB_URL
+ mock_impl2.preview_html = None
+ mock_impl2.quality_score = 85.0
+ mock_impl2.impl_tags = {}
+ mock_impl3 = MagicMock()
+ mock_impl3.library_id = "plotly"
+ mock_impl3.preview_url = TEST_IMAGE_URL
+ mock_impl3.preview_thumb = TEST_THUMB_URL
+ mock_impl3.preview_html = None
+ mock_impl3.quality_score = 80.0
+ mock_impl3.impl_tags = {}
+ mock_spec.impls.extend([mock_impl2, mock_impl3])
+
+ mock_spec_repo = MagicMock()
+ mock_spec_repo.get_all = AsyncMock(return_value=[mock_spec])
+
+ with (
+ patch(DB_CONFIG_PATCH, return_value=True),
+ patch("api.routers.plots.get_cache", return_value=None),
+ patch("api.routers.plots.set_cache"),
+ patch("api.routers.plots.SpecRepository", return_value=mock_spec_repo),
+ ):
+ response = client.get("/plots/filter?offset=1&limit=1")
+ assert response.status_code == 200
+ data = response.json()
+ assert len(data["images"]) == 1
+ assert data["total"] == 3
+ assert data["offset"] == 1
+ assert data["limit"] == 1
+
+ def test_filter_default_returns_all(self, client: TestClient, mock_spec) -> None:
+ """Filter without pagination params returns all images (backward compat)."""
+ mock_spec_repo = MagicMock()
+ mock_spec_repo.get_all = AsyncMock(return_value=[mock_spec])
+
+ with (
+ patch(DB_CONFIG_PATCH, return_value=True),
+ patch("api.routers.plots.get_cache", return_value=None),
+ patch("api.routers.plots.set_cache"),
+ patch("api.routers.plots.SpecRepository", return_value=mock_spec_repo),
+ ):
+ response = client.get("/plots/filter")
+ assert response.status_code == 200
+ data = response.json()
+ assert len(data["images"]) == data["total"]
+ assert data["offset"] == 0
+ assert data["limit"] is None
+
class TestPlotsHelperFunctions:
"""Tests for plots.py helper functions."""