1
1
from unittest .mock import Mock , patch
2
-
2
+ import os
3
3
import pytest
4
4
5
5
from mem0 .configs .llms .base import BaseLlmConfig
@@ -14,6 +14,30 @@ def mock_openai_client():
14
14
yield mock_client
15
15
16
16
17
def test_openai_llm_base_url():
    """Verify how OpenAILLM resolves its client base URL.

    Precedence checked, lowest to highest:
      1. the OpenAI official default (``https://api.openai.com/v1``),
      2. the ``OPENAI_API_BASE`` environment variable,
      3. ``config.openai_base_url``.

    Each case runs under ``patch.dict(os.environ, ...)`` so the environment
    is restored afterwards — the original version assigned to
    ``os.environ["OPENAI_API_BASE"]`` directly, which leaked the variable
    into every subsequent test in the session (and case 1 would have failed
    on machines that already export OPENAI_API_BASE).
    """
    # case1: default config: with openai official base url.
    # clear=True guarantees a pre-existing OPENAI_API_BASE in the developer's
    # shell cannot influence this case; patch.dict restores everything on exit.
    with patch.dict(os.environ, {}, clear=True):
        config = BaseLlmConfig(model="gpt-4o", temperature=0.7, max_tokens=100, top_p=1.0, api_key="api_key")
        llm = OpenAILLM(config)
        # Note: openai client will parse the raw base_url into a URL object,
        # which will have a trailing slash
        assert str(llm.client.base_url) == "https://api.openai.com/v1/"

    # case2: with env variable OPENAI_API_BASE
    provider_base_url = "https://api.provider.com/v1"
    with patch.dict(os.environ, {"OPENAI_API_BASE": provider_base_url}):
        config = BaseLlmConfig(model="gpt-4o", temperature=0.7, max_tokens=100, top_p=1.0, api_key="api_key")
        llm = OpenAILLM(config)
        # Note: openai client will parse the raw base_url into a URL object,
        # which will have a trailing slash
        assert str(llm.client.base_url) == provider_base_url + "/"

    # case3: config.openai_base_url wins even when OPENAI_API_BASE is set
    # (the env var is deliberately present here to assert the precedence).
    config_base_url = "https://api.config.com/v1"
    with patch.dict(os.environ, {"OPENAI_API_BASE": provider_base_url}):
        config = BaseLlmConfig(
            model="gpt-4o",
            temperature=0.7,
            max_tokens=100,
            top_p=1.0,
            api_key="api_key",
            openai_base_url=config_base_url,
        )
        llm = OpenAILLM(config)
        # Note: openai client will parse the raw base_url into a URL object,
        # which will have a trailing slash
        assert str(llm.client.base_url) == config_base_url + "/"
17
41
def test_generate_response_without_tools (mock_openai_client ):
18
42
config = BaseLlmConfig (model = "gpt-4o" , temperature = 0.7 , max_tokens = 100 , top_p = 1.0 )
19
43
llm = OpenAILLM (config )
0 commit comments