#!/usr/bin/env python3
# Copyright (c) 2025 Oracle and/or its affiliates.
# Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl/

"""Integration tests for OpenAI models on OCI GenAI.

These tests verify that LangChain 1.x compatibility works correctly with
OpenAI models available on the OCI Generative AI service.

## Prerequisites

1. **OCI Authentication**: Set up OCI authentication with a security token:
   ```bash
   oci session authenticate
   ```

2. **Environment Variables**: Export the following:
   ```bash
   export OCI_REGION="us-chicago-1"  # or your region
   export OCI_COMP="ocid1.compartment.oc1..your-compartment-id"
   ```

3. **OCI Config**: Ensure `~/.oci/config` exists with a DEFAULT profile;
   a sketch of a typical profile is shown below.
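   For reference, a session-token profile written by `oci session authenticate`
   typically looks like the following sketch (the fingerprint, OCIDs, and
   paths are illustrative placeholders; your fields may differ):
   ```ini
   [DEFAULT]
   fingerprint=aa:bb:cc:dd:ee:ff:...
   key_file=~/.oci/sessions/DEFAULT/oci_api_key.pem
   tenancy=ocid1.tenancy.oc1..your-tenancy-id
   region=us-chicago-1
   security_token_file=~/.oci/sessions/DEFAULT/token
   ```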

## Running the Tests

Run all OpenAI integration tests:
```bash
cd libs/oci
pytest tests/integration_tests/chat_models/test_openai_models.py -v
```

Run a specific test:
```bash
pytest tests/integration_tests/chat_models/test_openai_models.py::test_openai_basic_completion -v
```
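
Because the model matrix is driven by `pytest.mark.parametrize`, you can also
filter to a single model with pytest's standard `-k` option, which matches
against the parametrized test id (the id shown here is the 20b model used in
this file):
```bash
pytest tests/integration_tests/chat_models/test_openai_models.py -k "gpt-oss-20b" -v
```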
| 37 | +""" |
| 38 | + |
| 39 | +import os |
| 40 | + |
| 41 | +import pytest |
| 42 | +from langchain_core.messages import AIMessage, HumanMessage, SystemMessage |
| 43 | + |
| 44 | +from langchain_oci.chat_models import ChatOCIGenAI |
| 45 | + |
| 46 | + |
@pytest.fixture
def openai_config():
    """Get OpenAI model configuration."""
    compartment_id = os.environ.get("OCI_COMP")
    if not compartment_id:
        pytest.skip("OCI_COMP environment variable not set")

    region = os.environ.get("OCI_REGION", "us-chicago-1")
    return {
        # Regional inference endpoint for the OCI Generative AI service
        "service_endpoint": f"https://inference.generativeai.{region}.oci.oraclecloud.com",
        "compartment_id": compartment_id,
        "auth_profile": os.environ.get("OCI_CONFIG_PROFILE", "DEFAULT"),
        "auth_type": os.environ.get("OCI_AUTH_TYPE", "SECURITY_TOKEN"),
    }


@pytest.mark.requires("oci")
@pytest.mark.parametrize(
    "model_id",
    [
        "openai.gpt-oss-20b",
        "openai.gpt-oss-120b",
    ],
)
def test_openai_basic_completion(model_id: str, openai_config: dict):
    """Test basic completion with OpenAI models.

    This test verifies that:
    1. The model can be instantiated correctly
    2. Basic message completion works
    3. The response is properly formatted as an AIMessage
    4. LangChain 1.x compatibility is maintained
    """
    chat = ChatOCIGenAI(
        model_id=model_id,
        service_endpoint=openai_config["service_endpoint"],
        compartment_id=openai_config["compartment_id"],
        auth_type=openai_config["auth_type"],
        auth_profile=openai_config["auth_profile"],
        model_kwargs={"temperature": 0.7, "max_completion_tokens": 100},
    )

    # Test basic completion
    response = chat.invoke([HumanMessage(content="What is 2+2?")])

    # Verify response structure (LangChain 1.x)
    assert isinstance(response, AIMessage), "Response should be AIMessage"
    # OpenAI models may return empty content when max_completion_tokens is too
    # low or generation stops at the length limit, so only the response
    # structure is verified here, not the exact text.
    assert isinstance(response.content, str), "Response content should be string"
    assert hasattr(response, "response_metadata"), "Should have response_metadata"


@pytest.mark.requires("oci")
def test_openai_with_system_message(openai_config: dict):
    """Test OpenAI model with a system message.

    Verifies that system messages are properly handled and influence
    the model's behavior.
    """
    chat = ChatOCIGenAI(
        model_id="openai.gpt-oss-20b",
        service_endpoint=openai_config["service_endpoint"],
        compartment_id=openai_config["compartment_id"],
        auth_type=openai_config["auth_type"],
        auth_profile=openai_config["auth_profile"],
        model_kwargs={"temperature": 0.1, "max_completion_tokens": 50},
    )

    response = chat.invoke(
        [
            SystemMessage(content="You are a helpful math tutor."),
            HumanMessage(content="What is 12 * 8?"),
        ]
    )

    assert isinstance(response, AIMessage)
    assert response.content
    # Should contain the answer 96
    assert "96" in response.content


@pytest.mark.requires("oci")
def test_openai_streaming(openai_config: dict):
    """Test streaming with OpenAI models.

    Verifies that:
    1. Streaming works correctly
    2. Chunks are properly formatted
    3. Streaming completes without errors
    """
    chat = ChatOCIGenAI(
        model_id="openai.gpt-oss-20b",
        service_endpoint=openai_config["service_endpoint"],
        compartment_id=openai_config["compartment_id"],
        auth_type=openai_config["auth_type"],
        auth_profile=openai_config["auth_profile"],
        model_kwargs={"temperature": 0.7, "max_completion_tokens": 100},
    )

    chunks = []
    # stream() yields AIMessageChunk objects, which subclass AIMessage
    for chunk in chat.stream([HumanMessage(content="Say hello")]):
        assert isinstance(chunk, AIMessage), "Chunk should be an AIMessage"
        chunks.append(chunk)

    # Verify we got at least one chunk (streaming worked)
    assert len(chunks) > 0, "Should receive at least one chunk"

    # Verify chunks are properly formatted
    for chunk in chunks:
        assert isinstance(chunk.content, str), "Chunk content should be string"


@pytest.mark.requires("oci")
def test_openai_multiple_rounds(openai_config: dict):
    """Test multiple conversation rounds with an OpenAI model.

    The chat model is stateless, so prior turns must be passed back in the
    message list; this verifies that the history is used correctly.
    """
    chat = ChatOCIGenAI(
        model_id="openai.gpt-oss-20b",
        service_endpoint=openai_config["service_endpoint"],
        compartment_id=openai_config["compartment_id"],
        auth_type=openai_config["auth_type"],
        auth_profile=openai_config["auth_profile"],
        model_kwargs={"temperature": 0.7, "max_completion_tokens": 100},
    )

    # First message
    response1 = chat.invoke([HumanMessage(content="My favorite number is 7")])
    assert isinstance(response1, AIMessage)

    # Second message with the full conversation history as context
    response2 = chat.invoke(
        [
            HumanMessage(content="My favorite number is 7"),
            response1,
            HumanMessage(content="What is my favorite number plus 3?"),
        ]
    )
    assert isinstance(response2, AIMessage)
    assert response2.content
    # Should reference the number 10 (7 + 3)
    assert "10" in response2.content


@pytest.mark.requires("oci")
@pytest.mark.parametrize("model_id", ["openai.gpt-oss-20b", "openai.gpt-oss-120b"])
def test_openai_langchain_1x_compatibility(model_id: str, openai_config: dict):
    """Test LangChain 1.x specific compatibility.

    This test specifically verifies features that are part of
    LangChain 1.x to ensure the integration works correctly
    after rebasing onto main.
    """
    chat = ChatOCIGenAI(
        model_id=model_id,
        service_endpoint=openai_config["service_endpoint"],
        compartment_id=openai_config["compartment_id"],
        auth_type=openai_config["auth_type"],
        auth_profile=openai_config["auth_profile"],
        model_kwargs={"temperature": 0.7, "max_completion_tokens": 50},
    )

    # Test that invoke returns AIMessage (LangChain 1.x behavior)
    response = chat.invoke([HumanMessage(content="Hello")])
    assert isinstance(response, AIMessage)

    # Verify AIMessage has expected attributes
    assert hasattr(response, "content")
    assert hasattr(response, "response_metadata")
    assert hasattr(response, "id")

    # Verify content is populated
    assert response.content is not None
    assert isinstance(response.content, str)