diff --git a/docs/docs/integrations/adapters/openai.ipynb b/docs/docs/integrations/adapters/openai.ipynb
index 8fd2c5214c767f0..3d57534e3f3ea57 100644
--- a/docs/docs/integrations/adapters/openai.ipynb
+++ b/docs/docs/integrations/adapters/openai.ipynb
@@ -33,7 +33,7 @@
   },
   {
    "cell_type": "code",
-   "execution_count": 29,
+   "execution_count": 2,
    "id": "1d22eb61",
    "metadata": {},
    "outputs": [],
@@ -51,26 +51,29 @@
   },
   {
    "cell_type": "code",
-   "execution_count": 15,
+   "execution_count": 3,
    "id": "012d81ae",
    "metadata": {},
    "outputs": [
     {
      "data": {
       "text/plain": [
-       "{'role': 'assistant', 'content': 'Hello! How can I assist you today?'}"
+       "{'content': 'Hello! How can I assist you today?',\n",
+       " 'role': 'assistant',\n",
+       " 'function_call': None,\n",
+       " 'tool_calls': None}"
       ]
      },
-     "execution_count": 15,
+     "execution_count": 3,
      "metadata": {},
      "output_type": "execute_result"
     }
    ],
    "source": [
-    "result = openai.ChatCompletion.create(\n",
+    "result = openai.chat.completions.create(\n",
     "    messages=messages, model=\"gpt-3.5-turbo\", temperature=0\n",
     ")\n",
-    "result[\"choices\"][0][\"message\"].to_dict_recursive()"
+    "result.choices[0].message.model_dump()"
    ]
   },
   {
@@ -83,26 +86,48 @@
   },
   {
    "cell_type": "code",
-   "execution_count": 17,
+   "execution_count": 4,
    "id": "c67a5ac8",
    "metadata": {},
    "outputs": [
     {
      "data": {
       "text/plain": [
-       "{'role': 'assistant', 'content': 'Hello! How can I assist you today?'}"
+       "{'role': 'assistant', 'content': 'Hello! How can I help you today?'}"
       ]
      },
-     "execution_count": 17,
+     "execution_count": 4,
      "metadata": {},
      "output_type": "execute_result"
     }
    ],
    "source": [
-    "lc_result = lc_openai.ChatCompletion.create(\n",
+    "lc_result = lc_openai.chat.completions.create(\n",
     "    messages=messages, model=\"gpt-3.5-turbo\", temperature=0\n",
     ")\n",
-    "lc_result[\"choices\"][0][\"message\"]"
+    "\n",
+    "lc_result.choices[0].message  # Attribute access"
    ]
   },
+  {
+   "cell_type": "code",
+   "execution_count": 5,
+   "id": "37a6e461-8608-47f6-ac45-12ad753c062a",
+   "metadata": {},
+   "outputs": [
+    {
+     "data": {
+      "text/plain": [
+       "{'role': 'assistant', 'content': 'Hello! How can I help you today?'}"
+      ]
+     },
+     "execution_count": 5,
+     "metadata": {},
+     "output_type": "execute_result"
+    }
+   ],
+   "source": [
+    "lc_result[\"choices\"][0][\"message\"]  # Also compatible with index access"
+   ]
+  },
   {
@@ -115,26 +140,26 @@
   },
   {
    "cell_type": "code",
-   "execution_count": 19,
+   "execution_count": 6,
    "id": "f7c94827",
    "metadata": {},
    "outputs": [
     {
      "data": {
       "text/plain": [
-       "{'role': 'assistant', 'content': ' Hello!'}"
+       "{'role': 'assistant', 'content': 'Hello! How can I assist you today?'}"
       ]
      },
-     "execution_count": 19,
+     "execution_count": 6,
      "metadata": {},
      "output_type": "execute_result"
     }
    ],
    "source": [
-    "lc_result = lc_openai.ChatCompletion.create(\n",
+    "lc_result = lc_openai.chat.completions.create(\n",
     "    messages=messages, model=\"claude-2\", temperature=0, provider=\"ChatAnthropic\"\n",
     ")\n",
-    "lc_result[\"choices\"][0][\"message\"]"
+    "lc_result.choices[0].message"
    ]
   },
   {
@@ -155,7 +180,7 @@
   },
   {
    "cell_type": "code",
-   "execution_count": 24,
+   "execution_count": 7,
    "id": "fd8cb1ea",
    "metadata": {},
    "outputs": [
@@ -163,25 +188,25 @@
      "name": "stdout",
      "output_type": "stream",
      "text": [
-      "{'role': 'assistant', 'content': ''}\n",
-      "{'content': 'Hello'}\n",
-      "{'content': '!'}\n",
-      "{'content': ' How'}\n",
-      "{'content': ' can'}\n",
-      "{'content': ' I'}\n",
-      "{'content': ' assist'}\n",
-      "{'content': ' you'}\n",
-      "{'content': ' today'}\n",
-      "{'content': '?'}\n",
-      "{}\n"
+      "{'content': '', 'function_call': None, 'role': 'assistant', 'tool_calls': None}\n",
+      "{'content': 'Hello', 'function_call': None, 'role': None, 'tool_calls': None}\n",
+      "{'content': '!', 'function_call': None, 'role': None, 'tool_calls': None}\n",
+      "{'content': ' How', 'function_call': None, 'role': None, 'tool_calls': None}\n",
+      "{'content': ' can', 'function_call': None, 'role': None, 'tool_calls': None}\n",
+      "{'content': ' I', 'function_call': None, 'role': None, 'tool_calls': None}\n",
+      "{'content': ' assist', 'function_call': None, 'role': None, 'tool_calls': None}\n",
+      "{'content': ' you', 'function_call': None, 'role': None, 'tool_calls': None}\n",
+      "{'content': ' today', 'function_call': None, 'role': None, 'tool_calls': None}\n",
+      "{'content': '?', 'function_call': None, 'role': None, 'tool_calls': None}\n",
+      "{'content': None, 'function_call': None, 'role': None, 'tool_calls': None}\n"
      ]
     }
    ],
    "source": [
-    "for c in openai.ChatCompletion.create(\n",
+    "for c in openai.chat.completions.create(\n",
     "    messages=messages, model=\"gpt-3.5-turbo\", temperature=0, stream=True\n",
     "):\n",
-    "    print(c[\"choices\"][0][\"delta\"].to_dict_recursive())"
+    "    print(c.choices[0].delta.model_dump())"
    ]
   },
   {
@@ -194,7 +219,7 @@
   },
   {
    "cell_type": "code",
-   "execution_count": 30,
+   "execution_count": 8,
    "id": "9521218c",
    "metadata": {},
    "outputs": [
@@ -217,10 +242,10 @@
     }
    ],
    "source": [
-    "for c in lc_openai.ChatCompletion.create(\n",
+    "for c in lc_openai.chat.completions.create(\n",
     "    messages=messages, model=\"gpt-3.5-turbo\", temperature=0, stream=True\n",
     "):\n",
-    "    print(c[\"choices\"][0][\"delta\"])"
+    "    print(c.choices[0].delta)"
    ]
   },
   {
@@ -233,7 +258,7 @@
   },
   {
    "cell_type": "code",
-   "execution_count": 31,
+   "execution_count": 9,
    "id": "68f0214e",
    "metadata": {},
    "outputs": [
@@ -241,14 +266,22 @@
      "name": "stdout",
      "output_type": "stream",
      "text": [
-      "{'role': 'assistant', 'content': ' Hello'}\n",
+      "{'role': 'assistant', 'content': ''}\n",
+      "{'content': 'Hello'}\n",
       "{'content': '!'}\n",
+      "{'content': ' How'}\n",
+      "{'content': ' can'}\n",
+      "{'content': ' I'}\n",
+      "{'content': ' assist'}\n",
+      "{'content': ' you'}\n",
+      "{'content': ' today'}\n",
+      "{'content': '?'}\n",
       "{}\n"
      ]
     }
    ],
    "source": [
-    "for c in lc_openai.ChatCompletion.create(\n",
+    "for c in lc_openai.chat.completions.create(\n",
     "    messages=messages,\n",
     "    model=\"claude-2\",\n",
     "    temperature=0,\n",
@@ -275,7 +308,7 @@
    "name": "python",
    "nbconvert_exporter": "python",
    "pygments_lexer": "ipython3",
-   "version": "3.10.12"
+   "version": "3.11.5"
   }
  },
 "nbformat": 4,