[FrontendSpec] Adding TTLCache to try_get_grafana_service_url function (#5543)
roei3000b committed May 15, 2024
1 parent 9d3152d commit a1899d4
Showing 5 changed files with 74 additions and 37 deletions.
3 changes: 2 additions & 1 deletion server/api/api/endpoints/client_spec.py
@@ -19,11 +19,12 @@
import mlrun.common.schemas
import server.api.api.utils
import server.api.crud
import server.api.utils.helpers

router = APIRouter()


-@server.api.api.utils.lru_cache_with_ttl(maxsize=32, ttl_seconds=60 * 5)
+@server.api.utils.helpers.lru_cache_with_ttl(maxsize=32, ttl_seconds=60 * 5)
def get_cached_client_spec(
client_version: typing.Optional[str] = Header(
None, alias=mlrun.common.schemas.HeaderNames.client_version
36 changes: 0 additions & 36 deletions server/api/api/utils.py
@@ -15,10 +15,8 @@
import asyncio
import collections
import copy
import functools
import json
import re
import time
import traceback
import typing
import uuid
@@ -67,40 +65,6 @@ def log_and_raise(status=HTTPStatus.BAD_REQUEST.value, **kw):
raise HTTPException(status_code=status, detail=kw)


def lru_cache_with_ttl(maxsize=128, typed=False, ttl_seconds=60):
"""
Thread-safe least-recently-used cache with a time-to-live (ttl_seconds) limit.
https://stackoverflow.com/a/71634221/5257501
"""

class Result:
__slots__ = ("value", "death")

def __init__(self, value, death):
self.value = value
self.death = death

def decorator(func):
@functools.lru_cache(maxsize=maxsize, typed=typed)
def cached_func(*args, **kwargs):
value = func(*args, **kwargs)
death = time.monotonic() + ttl_seconds
return Result(value, death)

@functools.wraps(func)
def wrapper(*args, **kwargs):
result = cached_func(*args, **kwargs)
if result.death < time.monotonic():
result.value = func(*args, **kwargs)
result.death = time.monotonic() + ttl_seconds
return result.value

wrapper.cache_clear = cached_func.cache_clear
return wrapper

return decorator


def log_path(project, uid) -> Path:
return project_logs_path(project) / uid

2 changes: 2 additions & 0 deletions server/api/utils/clients/iguazio.py
@@ -35,6 +35,7 @@
import mlrun.errors
import mlrun.utils.helpers
import mlrun.utils.singleton
import server.api.utils.helpers
import server.api.utils.projects.remotes.leader as project_leader
from mlrun.utils import get_in, logger

@@ -340,6 +341,7 @@ def is_sync(self):
"""
return True

@server.api.utils.helpers.lru_cache_with_ttl(maxsize=1, ttl_seconds=60 * 2)
def try_get_grafana_service_url(self, session: str) -> typing.Optional[str]:
"""
Try to find a ready grafana app service, and return its URL
36 changes: 36 additions & 0 deletions server/api/utils/helpers.py
@@ -14,7 +14,9 @@
#
import asyncio
import datetime
import functools
import re
import time
from typing import Optional

import semver
@@ -136,3 +138,37 @@ def string_to_timedelta(date_str, raise_on_error=True):
return None

return datetime.timedelta(seconds=seconds)


def lru_cache_with_ttl(maxsize=128, typed=False, ttl_seconds=60):
"""
Thread-safe least-recently-used cache with a time-to-live (ttl_seconds) limit.
https://stackoverflow.com/a/71634221/5257501
"""

class Result:
__slots__ = ("value", "death")

def __init__(self, value, death):
self.value = value
self.death = death

def decorator(func):
@functools.lru_cache(maxsize=maxsize, typed=typed)
def cached_func(*args, **kwargs):
value = func(*args, **kwargs)
death = time.monotonic() + ttl_seconds
return Result(value, death)

@functools.wraps(func)
def wrapper(*args, **kwargs):
result = cached_func(*args, **kwargs)
if result.death < time.monotonic():
result.value = func(*args, **kwargs)
result.death = time.monotonic() + ttl_seconds
return result.value

wrapper.cache_clear = cached_func.cache_clear
return wrapper

return decorator
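
A minimal sketch of how the relocated lru_cache_with_ttl helper behaves, assuming the repository root is importable; fetch_url, the calls list, and the 2-second TTL are illustrative only and are not part of this commit. With maxsize=1 (the setting used on try_get_grafana_service_url), a call with a different argument evicts the single cached entry, and an entry past its TTL is recomputed in place:

import time

# Assumed import path, matching where this commit places the helper;
# requires the mlrun repository (and its dependencies) to be importable.
from server.api.utils.helpers import lru_cache_with_ttl

calls = []  # records every time the wrapped function actually runs


@lru_cache_with_ttl(maxsize=1, ttl_seconds=2)
def fetch_url(session):  # illustrative stand-in for an expensive lookup
    calls.append(session)
    return f"url-for-{session}"


fetch_url("session-a")
fetch_url("session-a")          # same key, within TTL: served from the cache
assert calls == ["session-a"]

fetch_url("session-b")          # different key evicts the single cached entry
fetch_url("session-a")          # "session-a" was evicted, so it is recomputed
assert calls == ["session-a", "session-b", "session-a"]

time.sleep(2.5)                 # wait for the TTL to elapse
fetch_url("session-a")          # expired entry is refreshed in place
assert calls.count("session-a") == 3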
34 changes: 34 additions & 0 deletions tests/api/utils/clients/test_iguazio.py
@@ -215,6 +215,40 @@ async def test_get_grafana_service_url_success(
assert grafana_url == expected_grafana_url


@pytest.mark.parametrize("iguazio_client", ("async", "sync"), indirect=True)
@pytest.mark.asyncio
async def test_get_grafana_service_url_cache(
api_url: str,
iguazio_client: server.api.utils.clients.iguazio.Client,
requests_mock: requests_mock_package.Mocker,
):
expected_grafana_url = (
"https://grafana.default-tenant.app.hedingber-301-1.iguazio-cd2.com"
)
grafana_service = {
"spec": {"kind": "grafana"},
"status": {
"state": "ready",
"urls": [
{"kind": "http", "url": "https-has-precedence"},
{"kind": "https", "url": expected_grafana_url},
],
},
}
response_body = _generate_app_services_manifests_body([grafana_service])
requests_mock.get(f"{api_url}/api/app_services_manifests", json=response_body)
grafana_url = await maybe_coroutine(
iguazio_client.try_get_grafana_service_url("session-cookie")
)
assert grafana_url == expected_grafana_url

grafana_url = await maybe_coroutine(
iguazio_client.try_get_grafana_service_url("session-cookie")
)
assert requests_mock.called_once
assert grafana_url == expected_grafana_url


@pytest.mark.parametrize("iguazio_client", ("async", "sync"), indirect=True)
@pytest.mark.asyncio
async def test_get_grafana_service_url_ignoring_disabled_service(
