Merged
2 changes: 1 addition & 1 deletion ads/aqua/config/config.py
@@ -12,7 +12,7 @@
 DEFAULT_EVALUATION_CONTAINER = "odsc-llm-evaluate"
 
 
-def evaluation_service_config(
+def get_evaluation_service_config(
     container: Optional[str] = DEFAULT_EVALUATION_CONTAINER,
 ) -> EvaluationServiceConfig:
     """
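Note: for context, a minimal usage sketch of the renamed helper. The import path, the default container name, and `to_dict()` are taken from the diffs in this PR; everything else is illustrative.

```python
# Minimal sketch, assuming the module layout shown in this PR.
from ads.aqua.config.config import get_evaluation_service_config

# With no argument, the default evaluation container ("odsc-llm-evaluate") is used.
default_config = get_evaluation_service_config()

# A specific container family can also be requested explicitly.
container_config = get_evaluation_service_config(container="odsc-llm-evaluate")
print(container_config.to_dict())
```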
8 changes: 4 additions & 4 deletions ads/aqua/evaluation/evaluation.py
@@ -45,7 +45,7 @@
     is_valid_ocid,
     upload_local_to_os,
 )
-from ads.aqua.config.config import evaluation_service_config
+from ads.aqua.config.config import get_evaluation_service_config
 from ads.aqua.config.evaluation.evaluation_service_config import EvaluationServiceConfig
 from ads.aqua.constants import (
     CONSOLE_LINK_RESOURCE_TYPE_MAPPING,
@@ -176,7 +176,7 @@ def create(
         # The model to evaluate
         evaluation_source = None
         # The evaluation service config
-        evaluation_config: EvaluationServiceConfig = evaluation_service_config()
+        evaluation_config: EvaluationServiceConfig = get_evaluation_service_config()
         # The evaluation inference configuration. The inference configuration will be extracted
         # based on the inferencing container family.
         eval_inference_configuration: Dict = {}
@@ -931,7 +931,7 @@ def get_status(self, eval_id: str) -> dict:
     def get_supported_metrics(self) -> dict:
         """Gets a list of supported metrics for evaluation."""
         return [
-            item.to_dict() for item in evaluation_service_config().ui_config.metrics
+            item.to_dict() for item in get_evaluation_service_config().ui_config.metrics
         ]
 
     @telemetry(entry_point="plugin=evaluation&action=load_metrics", name="aqua")
@@ -1218,7 +1218,7 @@ def load_evaluation_config(self, container: Optional[str] = None) -> Dict:
         """Loads evaluation config."""
 
         # retrieve the evaluation config by container family name
-        evaluation_config = evaluation_service_config(container)
+        evaluation_config = get_evaluation_service_config(container)
 
         # convert the new config representation to the old one
         return {
4 changes: 2 additions & 2 deletions tests/unitary/with_extras/aqua/test_config.py
@@ -7,7 +7,7 @@
 from unittest.mock import patch
 
 from ads.aqua.common.entities import ContainerSpec
-from ads.aqua.config.config import evaluation_service_config
+from ads.aqua.config.config import get_evaluation_service_config
 
 
 class TestConfig:
@@ -32,7 +32,7 @@ def test_evaluation_service_config(self, mock_get_container_config):
 
         mock_get_container_config.return_value = expected_result
 
-        test_result = evaluation_service_config(container="test_container")
+        test_result = get_evaluation_service_config(container="test_container")
         assert (
             test_result.to_dict()
             == expected_result[ContainerSpec.CONTAINER_SPEC]["test_container"]
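Note: the assertion above indexes the mocked container config by `ContainerSpec.CONTAINER_SPEC` and the container name. A hedged sketch of that shape; only the nesting comes from the test, the inner payload is illustrative.

```python
# Hypothetical shape of the mocked container config used by this test.
from ads.aqua.common.entities import ContainerSpec

expected_result = {
    ContainerSpec.CONTAINER_SPEC: {
        "test_container": {
            # evaluation service settings for this container family (illustrative)
        }
    }
}
```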
16 changes: 10 additions & 6 deletions tests/unitary/with_extras/aqua/test_evaluation.py
@@ -426,6 +426,7 @@ def assert_payload(self, response, response_type):
                 continue
             assert rdict.get(attr), f"{attr} is empty"
 
+    @patch("ads.aqua.evaluation.evaluation.get_evaluation_service_config")
     @patch.object(Job, "run")
     @patch("ads.jobs.ads_job.Job.name", new_callable=PropertyMock)
     @patch("ads.jobs.ads_job.Job.id", new_callable=PropertyMock)
@@ -444,6 +445,7 @@ def test_create_evaluation(
         mock_job_id,
         mock_job_name,
         mock_job_run,
+        mock_get_evaluation_service_config,
     ):
         foundation_model = MagicMock()
         foundation_model.display_name = "test_foundation_model"
@@ -473,6 +475,8 @@
         evaluation_job_run.lifecycle_state = "IN_PROGRESS"
         mock_job_run.return_value = evaluation_job_run
 
+        mock_get_evaluation_service_config.return_value = EvaluationServiceConfig()
+
         self.app.ds_client.update_model = MagicMock()
         self.app.ds_client.update_model_provenance = MagicMock()
 
@@ -883,8 +887,8 @@ def test_extract_job_lifecycle_details(self, input, expect_output):
         msg = self.app._extract_job_lifecycle_details(input)
         assert msg == expect_output, msg
 
-    @patch("ads.aqua.evaluation.evaluation.evaluation_service_config")
-    def test_get_supported_metrics(self, mock_evaluation_service_config):
+    @patch("ads.aqua.evaluation.evaluation.get_evaluation_service_config")
+    def test_get_supported_metrics(self, mock_get_evaluation_service_config):
         """
         Tests getting a list of supported metrics for evaluation.
         """
@@ -905,16 +909,16 @@ def test_get_supported_metrics(self, mock_evaluation_service_config):
                 ]
             )
         )
-        mock_evaluation_service_config.return_value = test_evaluation_service_config
+        mock_get_evaluation_service_config.return_value = test_evaluation_service_config
         response = self.app.get_supported_metrics()
         assert isinstance(response, list)
         assert len(response) == len(test_evaluation_service_config.ui_config.metrics)
         assert response == [
             item.to_dict() for item in test_evaluation_service_config.ui_config.metrics
         ]
 
-    @patch("ads.aqua.evaluation.evaluation.evaluation_service_config")
-    def test_load_evaluation_config(self, mock_evaluation_service_config):
+    @patch("ads.aqua.evaluation.evaluation.get_evaluation_service_config")
+    def test_load_evaluation_config(self, mock_get_evaluation_service_config):
         """
         Tests loading default config for evaluation.
         This method currently hardcoded the return value.
@@ -952,7 +956,7 @@ def test_load_evaluation_config(self, mock_evaluation_service_config):
                 ],
             )
         )
-        mock_evaluation_service_config.return_value = test_evaluation_service_config
+        mock_get_evaluation_service_config.return_value = test_evaluation_service_config
 
         expected_result = {
             "model_params": {
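Note: the test changes above patch the helper at the path where `ads.aqua.evaluation.evaluation` imports it, not where it is defined in `ads.aqua.config.config`. A minimal sketch of that pattern under the same assumption; the test name is hypothetical.

```python
# Sketch: patch get_evaluation_service_config where it is consumed.
from unittest.mock import patch

from ads.aqua.config.evaluation.evaluation_service_config import EvaluationServiceConfig


@patch("ads.aqua.evaluation.evaluation.get_evaluation_service_config")
def test_uses_service_config(mock_get_evaluation_service_config):  # hypothetical test
    # Return an empty config so the code under test has a valid object to read,
    # mirroring the return_value assignment added in test_create_evaluation.
    mock_get_evaluation_service_config.return_value = EvaluationServiceConfig()
    # ... exercise the evaluation code path that reads the service config ...
```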