From d0597cdc1813cddffacbaa50565e86d2420d1873 Mon Sep 17 00:00:00 2001 From: Stainless Bot <107565488+stainless-bot@users.noreply.github.com> Date: Wed, 1 May 2024 00:00:17 -0400 Subject: [PATCH] feat(api): delete messages (#1388) --- .github/workflows/ci.yml | 22 +++- .gitignore | 1 + .stats.yml | 4 +- Brewfile | 2 + api.md | 1 + bin/check-env-state.py | 40 ------- bin/check-test-server | 50 --------- bin/test | 3 - pyproject.toml | 3 +- scripts/bootstrap | 19 ++++ scripts/format | 8 ++ scripts/lint | 8 ++ scripts/mock | 41 +++++++ scripts/test | 57 ++++++++++ {bin => scripts/utils}/ruffen-docs.py | 0 src/openai/resources/batches.py | 12 +-- src/openai/resources/beta/threads/messages.py | 87 +++++++++++++++ src/openai/types/batch_create_params.py | 4 +- src/openai/types/beta/threads/__init__.py | 1 + .../types/beta/threads/message_deleted.py | 15 +++ .../types/fine_tuning/fine_tuning_job.py | 6 ++ .../beta/threads/test_messages.py | 101 +++++++++++++++++- 22 files changed, 379 insertions(+), 106 deletions(-) create mode 100644 Brewfile delete mode 100644 bin/check-env-state.py delete mode 100755 bin/check-test-server delete mode 100755 bin/test create mode 100755 scripts/bootstrap create mode 100755 scripts/format create mode 100755 scripts/lint create mode 100755 scripts/mock create mode 100755 scripts/test rename {bin => scripts/utils}/ruffen-docs.py (100%) create mode 100644 src/openai/types/beta/threads/message_deleted.py diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index c44028d96c..9cbc077a8c 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -39,5 +39,25 @@ jobs: - name: Ensure importable run: | rye run python -c 'import openai' + test: + name: test + runs-on: ubuntu-latest + if: github.repository == 'openai/openai-python' + + steps: + - uses: actions/checkout@v4 + + - name: Install Rye + run: | + curl -sSf https://rye-up.com/get | bash + echo "$HOME/.rye/shims" >> $GITHUB_PATH + env: + RYE_VERSION: 0.24.0 + RYE_INSTALL_OPTION: '--yes' + + - name: Bootstrap + run: ./scripts/bootstrap + + - name: Run tests + run: ./scripts/test - diff --git a/.gitignore b/.gitignore index a4b2f8c0bd..0f9a66a976 100644 --- a/.gitignore +++ b/.gitignore @@ -12,3 +12,4 @@ dist .env .envrc codegen.log +Brewfile.lock.json diff --git a/.stats.yml b/.stats.yml index e904583dae..9797002bf7 100644 --- a/.stats.yml +++ b/.stats.yml @@ -1,2 +1,2 @@ -configured_endpoints: 63 -openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai-0839c14b2b61dad4e830884410cfc3695546682ced009e50583c8bb5c44512d7.yml +configured_endpoints: 64 +openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai-97c9a5f089049dc9eb5cee9475558049003e37e42202cab39e59d75e08b4c613.yml diff --git a/Brewfile b/Brewfile new file mode 100644 index 0000000000..492ca37bb0 --- /dev/null +++ b/Brewfile @@ -0,0 +1,2 @@ +brew "rye" + diff --git a/api.md b/api.md index 30247e8f7f..9dc42f0f0f 100644 --- a/api.md +++ b/api.md @@ -392,6 +392,7 @@ Methods: - client.beta.threads.messages.retrieve(message_id, \*, thread_id) -> Message - client.beta.threads.messages.update(message_id, \*, thread_id, \*\*params) -> Message - client.beta.threads.messages.list(thread_id, \*\*params) -> SyncCursorPage[Message] +- client.beta.threads.messages.delete(message_id, \*, thread_id) -> MessageDeleted # Batches diff --git a/bin/check-env-state.py b/bin/check-env-state.py deleted file mode 100644 index e1b8b6cb39..0000000000 --- a/bin/check-env-state.py +++ /dev/null @@ -1,40 +0,0 @@ 
-"""Script that exits 1 if the current environment is not -in sync with the `requirements-dev.lock` file. -""" - -from pathlib import Path - -import importlib_metadata - - -def should_run_sync() -> bool: - dev_lock = Path(__file__).parent.parent.joinpath("requirements-dev.lock") - - for line in dev_lock.read_text().splitlines(): - if not line or line.startswith("#") or line.startswith("-e"): - continue - - dep, lock_version = line.split("==") - - try: - version = importlib_metadata.version(dep) - - if lock_version != version: - print(f"mismatch for {dep} current={version} lock={lock_version}") - return True - except Exception: - print(f"could not import {dep}") - return True - - return False - - -def main() -> None: - if should_run_sync(): - exit(1) - else: - exit(0) - - -if __name__ == "__main__": - main() diff --git a/bin/check-test-server b/bin/check-test-server deleted file mode 100755 index a6fa34950d..0000000000 --- a/bin/check-test-server +++ /dev/null @@ -1,50 +0,0 @@ -#!/usr/bin/env bash - -RED='\033[0;31m' -GREEN='\033[0;32m' -YELLOW='\033[0;33m' -NC='\033[0m' # No Color - -function prism_is_running() { - curl --silent "http://localhost:4010" >/dev/null 2>&1 -} - -function is_overriding_api_base_url() { - [ -n "$TEST_API_BASE_URL" ] -} - -if is_overriding_api_base_url ; then - # If someone is running the tests against the live API, we can trust they know - # what they're doing and exit early. - echo -e "${GREEN}✔ Running tests against ${TEST_API_BASE_URL}${NC}" - - exit 0 -elif prism_is_running ; then - echo -e "${GREEN}✔ Mock prism server is running with your OpenAPI spec${NC}" - echo - - exit 0 -else - echo -e "${RED}ERROR:${NC} The test suite will not run without a mock Prism server" - echo -e "running against your OpenAPI spec." - echo - echo -e "${YELLOW}To fix:${NC}" - echo - echo -e "1. Install Prism (requires Node 16+):" - echo - echo -e " With npm:" - echo -e " \$ ${YELLOW}npm install -g @stoplight/prism-cli${NC}" - echo - echo -e " With yarn:" - echo -e " \$ ${YELLOW}yarn global add @stoplight/prism-cli${NC}" - echo - echo -e "2. Run the mock server" - echo - echo -e " To run the server, pass in the path of your OpenAPI" - echo -e " spec to the prism command:" - echo - echo -e " \$ ${YELLOW}prism mock path/to/your.openapi.yml${NC}" - echo - - exit 1 -fi diff --git a/bin/test b/bin/test deleted file mode 100755 index 60ede7a842..0000000000 --- a/bin/test +++ /dev/null @@ -1,3 +0,0 @@ -#!/usr/bin/env bash - -bin/check-test-server && rye run pytest "$@" diff --git a/pyproject.toml b/pyproject.toml index 66fee29b6d..10a756a22b 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -74,7 +74,7 @@ format = { chain = [ "fix:ruff", ]} "format:black" = "black ." -"format:docs" = "python bin/ruffen-docs.py README.md api.md" +"format:docs" = "python scripts/utils/ruffen-docs.py README.md api.md" "format:ruff" = "ruff format" "format:isort" = "isort ." @@ -197,5 +197,6 @@ known-first-party = ["openai", "tests"] [tool.ruff.per-file-ignores] "bin/**.py" = ["T201", "T203"] +"scripts/**.py" = ["T201", "T203"] "tests/**.py" = ["T201", "T203"] "examples/**.py" = ["T201", "T203"] diff --git a/scripts/bootstrap b/scripts/bootstrap new file mode 100755 index 0000000000..29df07e77b --- /dev/null +++ b/scripts/bootstrap @@ -0,0 +1,19 @@ +#!/usr/bin/env bash + +set -e + +cd "$(dirname "$0")/.." 
+ +if [ -f "Brewfile" ] && [ "$(uname -s)" = "Darwin" ]; then + brew bundle check >/dev/null 2>&1 || { + echo "==> Installing Homebrew dependencies…" + brew bundle + } +fi + +echo "==> Installing Python dependencies…" + +# experimental uv support makes installations significantly faster +rye config --set-bool behavior.use-uv=true + +rye sync diff --git a/scripts/format b/scripts/format new file mode 100755 index 0000000000..2a9ea4664b --- /dev/null +++ b/scripts/format @@ -0,0 +1,8 @@ +#!/usr/bin/env bash + +set -e + +cd "$(dirname "$0")/.." + +rye run format + diff --git a/scripts/lint b/scripts/lint new file mode 100755 index 0000000000..0cc68b5157 --- /dev/null +++ b/scripts/lint @@ -0,0 +1,8 @@ +#!/usr/bin/env bash + +set -e + +cd "$(dirname "$0")/.." + +rye run lint + diff --git a/scripts/mock b/scripts/mock new file mode 100755 index 0000000000..5a8c35b725 --- /dev/null +++ b/scripts/mock @@ -0,0 +1,41 @@ +#!/usr/bin/env bash + +set -e + +cd "$(dirname "$0")/.." + +if [[ -n "$1" && "$1" != '--'* ]]; then + URL="$1" + shift +else + URL="$(grep 'openapi_spec_url' .stats.yml | cut -d' ' -f2)" +fi + +# Check if the URL is empty +if [ -z "$URL" ]; then + echo "Error: No OpenAPI spec path/url provided or found in .stats.yml" + exit 1 +fi + +echo "==> Starting mock server with URL ${URL}" + +# Run prism mock on the given spec +if [ "$1" == "--daemon" ]; then + npm exec --package=@stoplight/prism-cli@~5.3.2 -- prism mock "$URL" &> .prism.log & + + # Wait for server to come online + echo -n "Waiting for server" + while ! grep -q "✖ fatal\|Prism is listening" ".prism.log" ; do + echo -n "." + sleep 0.1 + done + + if grep -q "✖ fatal" ".prism.log"; then + cat .prism.log + exit 1 + fi + + echo +else + npm exec --package=@stoplight/prism-cli@~5.3.2 -- prism mock "$URL" +fi diff --git a/scripts/test b/scripts/test new file mode 100755 index 0000000000..be01d04473 --- /dev/null +++ b/scripts/test @@ -0,0 +1,57 @@ +#!/usr/bin/env bash + +set -e + +cd "$(dirname "$0")/.." + +RED='\033[0;31m' +GREEN='\033[0;32m' +YELLOW='\033[0;33m' +NC='\033[0m' # No Color + +function prism_is_running() { + curl --silent "http://localhost:4010" >/dev/null 2>&1 +} + +kill_server_on_port() { + pids=$(lsof -t -i tcp:"$1" || echo "") + if [ "$pids" != "" ]; then + kill "$pids" + echo "Stopped $pids." + fi +} + +function is_overriding_api_base_url() { + [ -n "$TEST_API_BASE_URL" ] +} + +if ! is_overriding_api_base_url && ! prism_is_running ; then + # When we exit this script, make sure to kill the background mock server process + trap 'kill_server_on_port 4010' EXIT + + # Start the dev server + ./scripts/mock --daemon +fi + +if is_overriding_api_base_url ; then + echo -e "${GREEN}✔ Running tests against ${TEST_API_BASE_URL}${NC}" + echo +elif ! prism_is_running ; then + echo -e "${RED}ERROR:${NC} The test suite will not run without a mock Prism server" + echo -e "running against your OpenAPI spec." 
+ echo + echo -e "To run the server, pass in the path or url of your OpenAPI" + echo -e "spec to the prism command:" + echo + echo -e " \$ ${YELLOW}npm exec --package=@stoplight/prism-cli@~5.3.2 -- prism mock path/to/your.openapi.yml${NC}" + echo + + exit 1 +else + echo -e "${GREEN}✔ Mock prism server is running with your OpenAPI spec${NC}" + echo +fi + +# Run tests +echo "==> Running tests" +rye run pytest "$@" diff --git a/bin/ruffen-docs.py b/scripts/utils/ruffen-docs.py similarity index 100% rename from bin/ruffen-docs.py rename to scripts/utils/ruffen-docs.py diff --git a/src/openai/resources/batches.py b/src/openai/resources/batches.py index a2a0272a7d..64a3014c37 100644 --- a/src/openai/resources/batches.py +++ b/src/openai/resources/batches.py @@ -40,7 +40,7 @@ def create( self, *, completion_window: Literal["24h"], - endpoint: Literal["/v1/chat/completions"], + endpoint: Literal["/v1/chat/completions", "/v1/embeddings"], input_file_id: str, metadata: Optional[Dict[str, str]] | NotGiven = NOT_GIVEN, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. @@ -57,8 +57,8 @@ def create( completion_window: The time frame within which the batch should be processed. Currently only `24h` is supported. - endpoint: The endpoint to be used for all requests in the batch. Currently only - `/v1/chat/completions` is supported. + endpoint: The endpoint to be used for all requests in the batch. Currently + `/v1/chat/completions` and `/v1/embeddings` are supported. input_file_id: The ID of an uploaded file that contains requests for the new batch. @@ -228,7 +228,7 @@ async def create( self, *, completion_window: Literal["24h"], - endpoint: Literal["/v1/chat/completions"], + endpoint: Literal["/v1/chat/completions", "/v1/embeddings"], input_file_id: str, metadata: Optional[Dict[str, str]] | NotGiven = NOT_GIVEN, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. @@ -245,8 +245,8 @@ async def create( completion_window: The time frame within which the batch should be processed. Currently only `24h` is supported. - endpoint: The endpoint to be used for all requests in the batch. Currently only - `/v1/chat/completions` is supported. + endpoint: The endpoint to be used for all requests in the batch. Currently + `/v1/chat/completions` and `/v1/embeddings` are supported. input_file_id: The ID of an uploaded file that contains requests for the new batch. diff --git a/src/openai/resources/beta/threads/messages.py b/src/openai/resources/beta/threads/messages.py index a938c5e15d..0799feed23 100644 --- a/src/openai/resources/beta/threads/messages.py +++ b/src/openai/resources/beta/threads/messages.py @@ -23,6 +23,7 @@ ) from ....types.beta.threads import message_list_params, message_create_params, message_update_params from ....types.beta.threads.message import Message +from ....types.beta.threads.message_deleted import MessageDeleted __all__ = ["Messages", "AsyncMessages"] @@ -252,6 +253,43 @@ def list( model=Message, ) + def delete( + self, + message_id: str, + *, + thread_id: str, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. 
+ extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> MessageDeleted: + """ + Deletes a message. + + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not thread_id: + raise ValueError(f"Expected a non-empty value for `thread_id` but received {thread_id!r}") + if not message_id: + raise ValueError(f"Expected a non-empty value for `message_id` but received {message_id!r}") + extra_headers = {"OpenAI-Beta": "assistants=v2", **(extra_headers or {})} + return self._delete( + f"/threads/{thread_id}/messages/{message_id}", + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=MessageDeleted, + ) + class AsyncMessages(AsyncAPIResource): @cached_property @@ -478,6 +516,43 @@ def list( model=Message, ) + async def delete( + self, + message_id: str, + *, + thread_id: str, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> MessageDeleted: + """ + Deletes a message. + + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not thread_id: + raise ValueError(f"Expected a non-empty value for `thread_id` but received {thread_id!r}") + if not message_id: + raise ValueError(f"Expected a non-empty value for `message_id` but received {message_id!r}") + extra_headers = {"OpenAI-Beta": "assistants=v2", **(extra_headers or {})} + return await self._delete( + f"/threads/{thread_id}/messages/{message_id}", + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=MessageDeleted, + ) + class MessagesWithRawResponse: def __init__(self, messages: Messages) -> None: @@ -495,6 +570,9 @@ def __init__(self, messages: Messages) -> None: self.list = _legacy_response.to_raw_response_wrapper( messages.list, ) + self.delete = _legacy_response.to_raw_response_wrapper( + messages.delete, + ) class AsyncMessagesWithRawResponse: @@ -513,6 +591,9 @@ def __init__(self, messages: AsyncMessages) -> None: self.list = _legacy_response.async_to_raw_response_wrapper( messages.list, ) + self.delete = _legacy_response.async_to_raw_response_wrapper( + messages.delete, + ) class MessagesWithStreamingResponse: @@ -531,6 +612,9 @@ def __init__(self, messages: Messages) -> None: self.list = to_streamed_response_wrapper( messages.list, ) + self.delete = to_streamed_response_wrapper( + messages.delete, + ) class AsyncMessagesWithStreamingResponse: @@ -549,3 +633,6 @@ def __init__(self, messages: AsyncMessages) -> None: self.list = async_to_streamed_response_wrapper( messages.list, ) + self.delete = async_to_streamed_response_wrapper( + messages.delete, + ) diff --git 
a/src/openai/types/batch_create_params.py b/src/openai/types/batch_create_params.py index a67aaa1e5e..63b4fae91b 100644 --- a/src/openai/types/batch_create_params.py +++ b/src/openai/types/batch_create_params.py @@ -15,10 +15,10 @@ class BatchCreateParams(TypedDict, total=False): Currently only `24h` is supported. """ - endpoint: Required[Literal["/v1/chat/completions"]] + endpoint: Required[Literal["/v1/chat/completions", "/v1/embeddings"]] """The endpoint to be used for all requests in the batch. - Currently only `/v1/chat/completions` is supported. + Currently `/v1/chat/completions` and `/v1/embeddings` are supported. """ input_file_id: Required[str] diff --git a/src/openai/types/beta/threads/__init__.py b/src/openai/types/beta/threads/__init__.py index b57ebccb3a..1e38d5eaa1 100644 --- a/src/openai/types/beta/threads/__init__.py +++ b/src/openai/types/beta/threads/__init__.py @@ -11,6 +11,7 @@ from .text_delta import TextDelta as TextDelta from .message_delta import MessageDelta as MessageDelta from .message_content import MessageContent as MessageContent +from .message_deleted import MessageDeleted as MessageDeleted from .run_list_params import RunListParams as RunListParams from .annotation_delta import AnnotationDelta as AnnotationDelta from .image_file_delta import ImageFileDelta as ImageFileDelta diff --git a/src/openai/types/beta/threads/message_deleted.py b/src/openai/types/beta/threads/message_deleted.py new file mode 100644 index 0000000000..48210777fa --- /dev/null +++ b/src/openai/types/beta/threads/message_deleted.py @@ -0,0 +1,15 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing_extensions import Literal + +from ...._models import BaseModel + +__all__ = ["MessageDeleted"] + + +class MessageDeleted(BaseModel): + id: str + + deleted: bool + + object: Literal["thread.message.deleted"] diff --git a/src/openai/types/fine_tuning/fine_tuning_job.py b/src/openai/types/fine_tuning/fine_tuning_job.py index 1593bf50c7..7ac8792787 100644 --- a/src/openai/types/fine_tuning/fine_tuning_job.py +++ b/src/openai/types/fine_tuning/fine_tuning_job.py @@ -110,5 +110,11 @@ class FineTuningJob(BaseModel): [Files API](https://platform.openai.com/docs/api-reference/files/retrieve-contents). """ + estimated_finish: Optional[int] = None + """ + The Unix timestamp (in seconds) for when the fine-tuning job is estimated to + finish. The value will be null if the fine-tuning job is not running. 
+ """ + integrations: Optional[List[FineTuningJobWandbIntegrationObject]] = None """A list of integrations to enable for this fine-tuning job.""" diff --git a/tests/api_resources/beta/threads/test_messages.py b/tests/api_resources/beta/threads/test_messages.py index 26eb09acdd..fb42d509a1 100644 --- a/tests/api_resources/beta/threads/test_messages.py +++ b/tests/api_resources/beta/threads/test_messages.py @@ -10,7 +10,10 @@ from openai import OpenAI, AsyncOpenAI from tests.utils import assert_matches_type from openai.pagination import SyncCursorPage, AsyncCursorPage -from openai.types.beta.threads import Message +from openai.types.beta.threads import ( + Message, + MessageDeleted, +) base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010") @@ -243,6 +246,54 @@ def test_path_params_list(self, client: OpenAI) -> None: "", ) + @parametrize + def test_method_delete(self, client: OpenAI) -> None: + message = client.beta.threads.messages.delete( + "string", + thread_id="string", + ) + assert_matches_type(MessageDeleted, message, path=["response"]) + + @parametrize + def test_raw_response_delete(self, client: OpenAI) -> None: + response = client.beta.threads.messages.with_raw_response.delete( + "string", + thread_id="string", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + message = response.parse() + assert_matches_type(MessageDeleted, message, path=["response"]) + + @parametrize + def test_streaming_response_delete(self, client: OpenAI) -> None: + with client.beta.threads.messages.with_streaming_response.delete( + "string", + thread_id="string", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + message = response.parse() + assert_matches_type(MessageDeleted, message, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @parametrize + def test_path_params_delete(self, client: OpenAI) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `thread_id` but received ''"): + client.beta.threads.messages.with_raw_response.delete( + "string", + thread_id="", + ) + + with pytest.raises(ValueError, match=r"Expected a non-empty value for `message_id` but received ''"): + client.beta.threads.messages.with_raw_response.delete( + "", + thread_id="string", + ) + class TestAsyncMessages: parametrize = pytest.mark.parametrize("async_client", [False, True], indirect=True, ids=["loose", "strict"]) @@ -471,3 +522,51 @@ async def test_path_params_list(self, async_client: AsyncOpenAI) -> None: await async_client.beta.threads.messages.with_raw_response.list( "", ) + + @parametrize + async def test_method_delete(self, async_client: AsyncOpenAI) -> None: + message = await async_client.beta.threads.messages.delete( + "string", + thread_id="string", + ) + assert_matches_type(MessageDeleted, message, path=["response"]) + + @parametrize + async def test_raw_response_delete(self, async_client: AsyncOpenAI) -> None: + response = await async_client.beta.threads.messages.with_raw_response.delete( + "string", + thread_id="string", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + message = response.parse() + assert_matches_type(MessageDeleted, message, path=["response"]) + + @parametrize + async def test_streaming_response_delete(self, async_client: AsyncOpenAI) -> None: + async with async_client.beta.threads.messages.with_streaming_response.delete( + 
"string", + thread_id="string", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + message = await response.parse() + assert_matches_type(MessageDeleted, message, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @parametrize + async def test_path_params_delete(self, async_client: AsyncOpenAI) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `thread_id` but received ''"): + await async_client.beta.threads.messages.with_raw_response.delete( + "string", + thread_id="", + ) + + with pytest.raises(ValueError, match=r"Expected a non-empty value for `message_id` but received ''"): + await async_client.beta.threads.messages.with_raw_response.delete( + "", + thread_id="string", + )