diff --git a/libs/oci/README.md b/libs/oci/README.md
index 2266d10..bd5f8b3 100644
--- a/libs/oci/README.md
+++ b/libs/oci/README.md
@@ -30,7 +30,15 @@ This repository includes two main integration categories:
 ```python
 from langchain_oci import ChatOCIGenAI
 
-llm = ChatOCIGenAI()
+llm = ChatOCIGenAI(
+    model_id="MY_MODEL_ID",
+    service_endpoint="MY_SERVICE_ENDPOINT",
+    compartment_id="MY_COMPARTMENT_ID",
+    model_kwargs={"max_tokens": 1024},  # Use max_completion_tokens instead of max_tokens for OpenAI models
+    auth_profile="MY_AUTH_PROFILE",
+    is_stream=True,
+    auth_type="SECURITY_TOKEN",
+)
 llm.invoke("Sing a ballad of LangChain.")
 ```

diff --git a/libs/oci/langchain_oci/chat_models/oci_generative_ai.py b/libs/oci/langchain_oci/chat_models/oci_generative_ai.py
index c4d8dc3..e6f620d 100644
--- a/libs/oci/langchain_oci/chat_models/oci_generative_ai.py
+++ b/libs/oci/langchain_oci/chat_models/oci_generative_ai.py
@@ -1058,6 +1058,15 @@ def _prepare_request(
         if stop is not None:
             _model_kwargs[self._provider.stop_sequence_key] = stop
 
+        # Warn if using max_tokens with OpenAI models, which expect max_completion_tokens
+        if self.model_id and self.model_id.startswith("openai.") and "max_tokens" in _model_kwargs:
+            import warnings
+            warnings.warn(
+                "OpenAI models require 'max_completion_tokens' instead of 'max_tokens'.",
+                UserWarning,
+                stacklevel=2,
+            )
+
         chat_params = {**_model_kwargs, **kwargs, **oci_params}
 
         if not self.model_id:
diff --git a/libs/oci/pyproject.toml b/libs/oci/pyproject.toml
index c5bda85..150fba6 100644
--- a/libs/oci/pyproject.toml
+++ b/libs/oci/pyproject.toml
@@ -14,7 +14,7 @@ license = "UPL"
 python = ">=3.9,<4.0"
 langchain-core = ">=0.3.20,<1.0.0"
 langchain = ">=0.3.20,<1.0.0"
-oci = ">=2.144.0"
+oci = ">=2.161.0"
 pydantic = ">=2,<3"
 aiohttp = ">=3.12.14"
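For reference, a minimal sketch of the call pattern the new warning steers users toward: passing `max_completion_tokens` rather than `max_tokens` in `model_kwargs` when the model ID carries the `openai.` prefix. The model ID, endpoint, compartment, and profile below are placeholders, not values confirmed by this diff.

```python
from langchain_oci import ChatOCIGenAI

# All identifiers below are placeholders; substitute values from your own OCI tenancy.
llm = ChatOCIGenAI(
    model_id="openai.MY_OPENAI_MODEL_ID",  # hypothetical "openai."-prefixed model ID
    service_endpoint="MY_SERVICE_ENDPOINT",
    compartment_id="MY_COMPARTMENT_ID",
    # OpenAI-hosted models take max_completion_tokens; passing max_tokens here
    # would trigger the UserWarning added in _prepare_request above.
    model_kwargs={"max_completion_tokens": 1024},
    auth_profile="MY_AUTH_PROFILE",
    auth_type="SECURITY_TOKEN",
)
llm.invoke("Sing a ballad of LangChain.")
```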