libs/oci/README.md (9 changes: 8 additions & 1 deletion)

@@ -30,7 +30,14 @@ This repository includes two main integration categories:
```python
from langchain_oci import ChatOCIGenAI

llm = ChatOCIGenAI(
    model_id="MY_MODEL_ID",
    service_endpoint="MY_SERVICE_ENDPOINT",
    compartment_id="MY_COMPARTMENT_ID",
    model_kwargs={"max_tokens": 1024},  # Use max_completion_tokens instead of max_tokens for OpenAI models
    auth_profile="MY_AUTH_PROFILE",
    is_stream=True,
    auth_type="SECURITY_TOKEN",
)
llm.invoke("Sing a ballad of LangChain.")
```
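
Since the comment above calls out `max_completion_tokens` for OpenAI models, here is a minimal sketch of the equivalent call against an OpenAI-hosted model. The `openai.gpt-4o` model ID and the placeholder endpoint/profile values are assumptions for illustration, not taken from this PR:

```python
from langchain_oci import ChatOCIGenAI

# Hypothetical openai.* model ID; OpenAI-hosted models take
# max_completion_tokens rather than max_tokens.
llm = ChatOCIGenAI(
    model_id="openai.gpt-4o",  # assumed model ID for illustration
    service_endpoint="MY_SERVICE_ENDPOINT",
    compartment_id="MY_COMPARTMENT_ID",
    model_kwargs={"max_completion_tokens": 1024},
    auth_profile="MY_AUTH_PROFILE",
    auth_type="SECURITY_TOKEN",
)
llm.invoke("Sing a ballad of LangChain.")
```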

libs/oci/langchain_oci/chat_models/oci_generative_ai.py (9 changes: 9 additions & 0 deletions)

@@ -1058,6 +1058,15 @@ def _prepare_request(
if stop is not None:
_model_kwargs[self._provider.stop_sequence_key] = stop

# Warn if using max_tokens with OpenAI models
if self.model_id and self.model_id.startswith("openai.") and "max_tokens" in _model_kwargs:
    import warnings

    warnings.warn(
        "OpenAI models require 'max_completion_tokens' instead of 'max_tokens'.",
        UserWarning,
        stacklevel=2,
    )

chat_params = {**_model_kwargs, **kwargs, **oci_params}

if not self.model_id:
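To illustrate the new check (not part of the diff): with an `openai.`-prefixed model ID and `max_tokens` in `model_kwargs`, `_prepare_request` should now emit a `UserWarning`. The sketch below uses placeholder IDs, assumes valid OCI credentials and a reachable endpoint, and captures the warning with the standard `warnings` machinery:

```python
import warnings

from langchain_oci import ChatOCIGenAI

# Placeholder IDs; a real call needs working OCI auth and an endpoint.
llm = ChatOCIGenAI(
    model_id="openai.gpt-4o",  # any "openai."-prefixed ID triggers the check
    service_endpoint="MY_SERVICE_ENDPOINT",
    compartment_id="MY_COMPARTMENT_ID",
    model_kwargs={"max_tokens": 1024},  # should be max_completion_tokens
)

with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter("always")
    llm.invoke("Hello")  # runs _prepare_request, which emits the warning
    assert any("max_completion_tokens" in str(w.message) for w in caught)
```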
libs/oci/pyproject.toml (2 changes: 1 addition & 1 deletion)

@@ -14,7 +14,7 @@ license = "UPL"
python = ">=3.9,<4.0"
langchain-core = ">=0.3.20,<1.0.0"
langchain = ">=0.3.20,<1.0.0"
oci = ">=2.161.0"
pydantic = ">=2,<3"
aiohttp = ">=3.12.14"
