🐛 Describe the bug

The mem0 documentation says to use the litellm config for integrating Azure OpenAI LLMs, but mem0's current LiteLLM integration does not support Azure OpenAI deployments with custom deployment names. When the deployment name is not listed in litellm's model_prices_and_context_window.json file, an exception is thrown. Link to the JSON file
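The failure can be reproduced in isolation: litellm's supports_function_calling() (the call shown in the traceback below) looks the model name up in that JSON file and raises when there is no entry. A minimal sketch, using the same placeholder deployment name as the repro below:

```python
import litellm

# Raises: Exception: Model not in model_prices_and_context_window.json.
# The custom Azure deployment name has no entry in litellm's model map.
litellm.supports_function_calling("azure/gpt-4o-custom-deployment")
```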
Here's the code to reproduce the error (deployment name redacted for privacy):
```python
import os

import dotenv

from mem0 import Memory

dotenv.load_dotenv()

config = {
    "llm": {
        "provider": "litellm",
        "config": {
            "model": "azure/gpt-4o-custom-deployment",
        },
    },
    "embedder": {
        "provider": "huggingface",
    },
}

m = Memory.from_config(config)
result = m.add("I am working on improving my tennis skills. Suggest some online courses.", user_id="alice", metadata={"category": "hobbies"})
print(result)
```
Here's the error message (env paths are redacted for privacy):
```
---------------------------------------------------------------------------
Exception Traceback (most recent call last)
/tmp/ipykernel_171823/3167584660.py in ?()
26 }
27 }
28
29 m = Memory.from_config(config)
---> 30 result = m.add("I am working on improving my tennis skills. Suggest some online courses.", user_id="alice", metadata={"category": "hobbies"})
31 print(result)
./venv/lib/python3.10/site-packages/mem0/memory/main.py in ?(self, data, user_id, agent_id, run_id, metadata, filters, prompt)
139 filters["run_id"] = metadata["run_id"] = run_id
140
141 if not prompt:
142 prompt = MEMORY_DEDUCTION_PROMPT.format(user_input=data, metadata=metadata)
--> 143 extracted_memories = self.llm.generate_response(
144 messages=[
145 {
146 "role": "system",
./venv/lib/python3.10/site-packages/mem0/llms/litellm.py in ?(self, messages, response_format, tools, tool_choice)
63
64 Returns:
65 str: The generated response.
66 """
---> 67 if not litellm.supports_function_calling(self.config.model):
68 raise ValueError(f"Model '{self.config.model}' in litellm does not support function calling.")
69
70 params = {
./venv/lib/python3.10/site-packages/litellm/utils.py in ?(model)
4202 if model_info.get("supports_function_calling", False):
4203 return True
4204 return False
4205 else:
-> 4206 raise Exception(
4207 f"Model not in model_prices_and_context_window.json. You passed model={model}."
4208 )
Exception: Model not in model_prices_and_context_window.json. You passed model=azure/gpt-4o-custom-deployment
```
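A possible workaround (my own assumption, not something the mem0 docs suggest) is to register the custom deployment in litellm's runtime model map with litellm.register_model() before building the Memory; the entry below uses placeholder cost and context values:

```python
import litellm

# Hypothetical workaround: extend litellm's runtime model map so that
# supports_function_calling() finds the custom deployment instead of raising.
litellm.register_model({
    "azure/gpt-4o-custom-deployment": {
        "max_tokens": 4096,            # placeholder context window
        "input_cost_per_token": 0.0,   # placeholder pricing
        "output_cost_per_token": 0.0,
        "litellm_provider": "azure",
        "mode": "chat",
        "supports_function_calling": True,
    }
})
```

With the deployment registered, the supports_function_calling() check should pass and mem0 should proceed to the actual completion call.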