diff --git a/src/models/config.py b/src/models/config.py
index 1b923728..31cd1551 100644
--- a/src/models/config.py
+++ b/src/models/config.py
@@ -1,5 +1,6 @@
 """Model with service configuration."""

+from pathlib import Path
 from typing import Optional

 from pydantic import BaseModel, model_validator, FilePath, AnyHttpUrl, PositiveInt
@@ -82,6 +83,10 @@ def check_llama_stack_model(self) -> Self:
                 raise ValueError(
                     "LLama stack library client mode is enabled but a configuration file path is not specified"  # noqa: C0301
                 )
+            # the configuration file must exist and be a regular, readable file
+            checks.file_check(
+                Path(self.library_client_config_path), "Llama Stack configuration file"
+            )
         return self


diff --git a/tests/configuration/run.yaml b/tests/configuration/run.yaml
new file mode 100644
index 00000000..6bb77600
--- /dev/null
+++ b/tests/configuration/run.yaml
@@ -0,0 +1,125 @@
+version: '2'
+image_name: minimal-viable-llama-stack-configuration
+
+apis:
+  - agents
+  - datasetio
+  - eval
+  - inference
+  - post_training
+  - safety
+  - scoring
+  - telemetry
+  - tool_runtime
+  - vector_io
+benchmarks: []
+container_image: null
+datasets: []
+external_providers_dir: null
+inference_store:
+  db_path: .llama/distributions/ollama/inference_store.db
+  type: sqlite
+logging: null
+metadata_store:
+  db_path: .llama/distributions/ollama/registry.db
+  namespace: null
+  type: sqlite
+providers:
+  agents:
+    - config:
+        persistence_store:
+          db_path: .llama/distributions/ollama/agents_store.db
+          namespace: null
+          type: sqlite
+        responses_store:
+          db_path: .llama/distributions/ollama/responses_store.db
+          type: sqlite
+      provider_id: meta-reference
+      provider_type: inline::meta-reference
+  datasetio:
+    - config:
+        kvstore:
+          db_path: .llama/distributions/ollama/huggingface_datasetio.db
+          namespace: null
+          type: sqlite
+      provider_id: huggingface
+      provider_type: remote::huggingface
+    - config:
+        kvstore:
+          db_path: .llama/distributions/ollama/localfs_datasetio.db
+          namespace: null
+          type: sqlite
+      provider_id: localfs
+      provider_type: inline::localfs
+  eval:
+    - config:
+        kvstore:
+          db_path: .llama/distributions/ollama/meta_reference_eval.db
+          namespace: null
+          type: sqlite
+      provider_id: meta-reference
+      provider_type: inline::meta-reference
+  inference:
+    - provider_id: openai
+      provider_type: remote::openai
+      config:
+        api_key: ${env.OPENAI_API_KEY}
+  post_training:
+    - config:
+        checkpoint_format: huggingface
+        device: cpu
+        distributed_backend: null
+      provider_id: huggingface
+      provider_type: inline::huggingface
+  safety:
+    - config:
+        excluded_categories: []
+      provider_id: llama-guard
+      provider_type: inline::llama-guard
+  scoring:
+    - config: {}
+      provider_id: basic
+      provider_type: inline::basic
+    - config: {}
+      provider_id: llm-as-judge
+      provider_type: inline::llm-as-judge
+    - config:
+        openai_api_key: '********'
+      provider_id: braintrust
+      provider_type: inline::braintrust
+  telemetry:
+    - config:
+        service_name: ''
+        sinks: sqlite
+        sqlite_db_path: .llama/distributions/ollama/trace_store.db
+      provider_id: meta-reference
+      provider_type: inline::meta-reference
+  tool_runtime:
+    - provider_id: model-context-protocol
+      provider_type: remote::model-context-protocol
+      config: {}
+  vector_io:
+    - config:
+        kvstore:
+          db_path: .llama/distributions/ollama/faiss_store.db
+          namespace: null
+          type: sqlite
+      provider_id: faiss
+      provider_type: inline::faiss
+scoring_fns: []
+server:
+  auth: null
+  host: null
+  port: 8321
+  quota: null
+  tls_cafile: null
+  tls_certfile: null
+  tls_keyfile: null
+shields: []
+vector_dbs: []
+
+models:
+  - model_id: gpt-4-turbo
+    provider_id: openai
+    model_type: llm
+    provider_model_id: gpt-4-turbo
diff --git a/tests/unit/models/test_config.py b/tests/unit/models/test_config.py
index 376fbbf1..1f25d38f 100644
--- a/tests/unit/models/test_config.py
+++ b/tests/unit/models/test_config.py
@@ -25,6 +25,8 @@
     DataCollectorConfiguration,
 )

+from utils.checks import InvalidConfigurationError
+

 def test_service_configuration_constructor() -> None:
     """Test the ServiceConfiguration constructor."""
@@ -58,7 +60,8 @@ def test_service_configuration_workers_value() -> None:

 def test_llama_stack_configuration_constructor() -> None:
     """Test the LLamaStackConfiguration constructor."""
     llama_stack_configuration = LlamaStackConfiguration(
-        use_as_library_client=True, library_client_config_path="foo"
+        use_as_library_client=True,
+        library_client_config_path="tests/configuration/run.yaml",
     )
     assert llama_stack_configuration is not None
@@ -76,6 +79,18 @@ def test_llama_stack_configuration_constructor() -> None:
     assert llama_stack_configuration is not None


+def test_llama_stack_configuration_no_run_yaml() -> None:
+    """Test the LlamaStackConfiguration constructor when the run.yaml path is not a file."""
+    with pytest.raises(
+        InvalidConfigurationError,
+        match="Llama Stack configuration file 'not a file' is not a file",
+    ):
+        LlamaStackConfiguration(
+            use_as_library_client=True,
+            library_client_config_path="not a file",
+        )
+
+
 def test_llama_stack_wrong_configuration_constructor_no_url() -> None:
     """Test the LLamaStackConfiguration constructor."""
     with pytest.raises(
@@ -298,7 +313,8 @@ def test_configuration_empty_mcp_servers() -> None:
         name="test_name",
         service=ServiceConfiguration(),
         llama_stack=LlamaStackConfiguration(
-            use_as_library_client=True, library_client_config_path="foo"
+            use_as_library_client=True,
+            library_client_config_path="tests/configuration/run.yaml",
         ),
         user_data_collection=UserDataCollection(
             feedback_disabled=True, feedback_storage=None
@@ -319,7 +335,8 @@ def test_configuration_single_mcp_server() -> None:
         name="test_name",
         service=ServiceConfiguration(),
         llama_stack=LlamaStackConfiguration(
-            use_as_library_client=True, library_client_config_path="foo"
+            use_as_library_client=True,
+            library_client_config_path="tests/configuration/run.yaml",
         ),
         user_data_collection=UserDataCollection(
             feedback_disabled=True, feedback_storage=None
@@ -346,7 +363,8 @@ def test_configuration_multiple_mcp_servers() -> None:
         name="test_name",
         service=ServiceConfiguration(),
         llama_stack=LlamaStackConfiguration(
-            use_as_library_client=True, library_client_config_path="foo"
+            use_as_library_client=True,
+            library_client_config_path="tests/configuration/run.yaml",
         ),
         user_data_collection=UserDataCollection(
             feedback_disabled=True, feedback_storage=None
@@ -368,7 +386,8 @@ def test_dump_configuration(tmp_path) -> None:
         name="test_name",
         service=ServiceConfiguration(),
         llama_stack=LlamaStackConfiguration(
-            use_as_library_client=True, library_client_config_path="foo"
+            use_as_library_client=True,
+            library_client_config_path="tests/configuration/run.yaml",
         ),
         user_data_collection=UserDataCollection(
             feedback_disabled=True, feedback_storage=None
@@ -413,7 +432,7 @@ def test_dump_configuration(tmp_path) -> None:
             "url": None,
             "api_key": None,
             "use_as_library_client": True,
-            "library_client_config_path": "foo",
+            "library_client_config_path": "tests/configuration/run.yaml",
         },
         "user_data_collection": {
             "feedback_disabled": True,
@@ -450,7 +469,8 @@ def test_dump_configuration_with_one_mcp_server(tmp_path) -> None:
         name="test_name",
         service=ServiceConfiguration(),
         llama_stack=LlamaStackConfiguration(
-            use_as_library_client=True, library_client_config_path="foo"
+            use_as_library_client=True,
+            library_client_config_path="tests/configuration/run.yaml",
         ),
         user_data_collection=UserDataCollection(
             feedback_disabled=True, feedback_storage=None
@@ -490,7 +510,7 @@ def test_dump_configuration_with_one_mcp_server(tmp_path) -> None:
             "url": None,
             "api_key": None,
             "use_as_library_client": True,
-            "library_client_config_path": "foo",
+            "library_client_config_path": "tests/configuration/run.yaml",
         },
         "user_data_collection": {
             "feedback_disabled": True,
@@ -535,7 +555,8 @@ def test_dump_configuration_with_more_mcp_servers(tmp_path) -> None:
         name="test_name",
         service=ServiceConfiguration(),
         llama_stack=LlamaStackConfiguration(
-            use_as_library_client=True, library_client_config_path="foo"
+            use_as_library_client=True,
+            library_client_config_path="tests/configuration/run.yaml",
         ),
         user_data_collection=UserDataCollection(
             feedback_disabled=True, feedback_storage=None
@@ -581,7 +602,7 @@ def test_dump_configuration_with_more_mcp_servers(tmp_path) -> None:
             "url": None,
             "api_key": None,
             "use_as_library_client": True,
-            "library_client_config_path": "foo",
+            "library_client_config_path": "tests/configuration/run.yaml",
         },
         "user_data_collection": {
             "feedback_disabled": True,
diff --git a/tests/unit/utils/test_common.py b/tests/unit/utils/test_common.py
index 8a0b845f..145e4314 100644
--- a/tests/unit/utils/test_common.py
+++ b/tests/unit/utils/test_common.py
@@ -269,7 +269,7 @@ async def test_register_mcp_servers_async_with_library_client(mocker):
         service=ServiceConfiguration(),
         llama_stack=LlamaStackConfiguration(
             use_as_library_client=True,
-            library_client_config_path="/path/to/config.yaml",
+            library_client_config_path="tests/configuration/run.yaml",
         ),
         user_data_collection=UserDataCollection(feedback_disabled=True),
         mcp_servers=[mcp_server],
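For reference, the `checks.file_check` helper called by the new validator in `src/models/config.py` is not part of this diff. Below is a minimal sketch of what it presumably looks like, assuming it lives in `utils/checks.py`: only the `InvalidConfigurationError` name and the "... is not a file" message format are confirmed by `test_llama_stack_configuration_no_run_yaml`; the readability branch and its message are assumptions based on the "regular, readable file" comment.

```python
# Hypothetical sketch of utils/checks.py -- not the actual implementation.
import os
from pathlib import Path


class InvalidConfigurationError(Exception):
    """Raised when a configuration value fails validation."""


def file_check(path: Path, desc: str) -> None:
    """Ensure the path points at an existing, regular, readable file."""
    if not path.is_file():
        # Matches the message asserted in the unit tests:
        # "Llama Stack configuration file 'not a file' is not a file"
        raise InvalidConfigurationError(f"{desc} '{path}' is not a file")
    if not os.access(path, os.R_OK):
        # Assumed check; this message's wording is not confirmed by the tests.
        raise InvalidConfigurationError(f"{desc} '{path}' is not readable")
```

Raising a dedicated `InvalidConfigurationError` rather than a bare `ValueError` is what lets the new unit test pin down the failure precisely with `pytest.raises(..., match=...)`.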