From 8d449cf557965d7992cfbc8f5208bd4f8ec32705 Mon Sep 17 00:00:00 2001
From: Vivek Nair
Date: Thu, 27 Apr 2023 10:28:15 -0400
Subject: [PATCH] fix: allow prompt, change to pipelineRunId (#68)

---
 .../create-chat-completion-async-stream.py | 4 +-
 .../openai/create-chat-completion-async.py | 2 +-
 .../openai/create-chat-completion-stream.py | 4 +-
 .../openai/create-completion-async-stream.py | 4 +-
 .../simple/openai/create-completion-async.py | 2 +-
 .../simple/openai/create-completion.py | 4 +-
 .../simple/openai/create-embedding-async.py | 2 +-
 .../simple/openai/create-embedding.py | 2 +-
 examples/examples/simple/pinecone/delete.py | 2 +-
 examples/examples/simple/pinecone/fetch.py | 2 +-
 examples/examples/simple/pinecone/query.py | 4 +-
 examples/examples/simple/pinecone/upsert.py | 2 +-
 examples/poetry.lock | 15 +-
 examples/pyproject.toml | 2 +-
 package/gentrace/providers/getters.py | 11 +-
 package/gentrace/providers/llms/openai.py | 70 +++---
 package/gentrace/providers/utils.py | 6 +-
 .../providers/vectorstores/pinecone.py | 10 +-
 package/tests/test_openai_completion.py | 225 +++++++++++++++++-
 package/tests/test_openai_embedding.py | 14 +-
 package/tests/test_pinecone.py | 6 +-
 package/tests/test_usage.py | 30 ++-
 22 files changed, 331 insertions(+), 92 deletions(-)

diff --git a/examples/examples/simple/openai/create-chat-completion-async-stream.py b/examples/examples/simple/openai/create-chat-completion-async-stream.py
index 1cffbd8..4aff9cc 100644
--- a/examples/examples/simple/openai/create-chat-completion-async-stream.py
+++ b/examples/examples/simple/openai/create-chat-completion-async-stream.py
@@ -25,8 +25,8 @@ async def main():

     pipeline_run_id = None
     async for value in result:
-        if value.get("pipeline_run_id"):
-            pipeline_run_id = value.get("pipeline_run_id")
+        if value.get("pipelineRunId"):
+            pipeline_run_id = value.get("pipelineRunId")

     gentrace.flush()

diff --git a/examples/examples/simple/openai/create-chat-completion-async.py b/examples/examples/simple/openai/create-chat-completion-async.py
index 33fa7d6..79b82e4 100644
--- a/examples/examples/simple/openai/create-chat-completion-async.py
+++ b/examples/examples/simple/openai/create-chat-completion-async.py
@@ -24,7 +24,7 @@ async def main():

     gentrace.flush()

-    print("Result: ", result.pipeline_run_id)
+    print("Result: ", result["pipelineRunId"])


 asyncio.run(main())
diff --git a/examples/examples/simple/openai/create-chat-completion-stream.py b/examples/examples/simple/openai/create-chat-completion-stream.py
index 1085ea5..2b493b1 100644
--- a/examples/examples/simple/openai/create-chat-completion-stream.py
+++ b/examples/examples/simple/openai/create-chat-completion-stream.py
@@ -23,8 +23,8 @@

 pipeline_run_id = None
 for value in result:
-    if value.get("pipeline_run_id"):
-        pipeline_run_id = value.get("pipeline_run_id")
+    if value.get("pipelineRunId"):
+        pipeline_run_id = value.get("pipelineRunId")

 print("Result: ", pipeline_run_id)
diff --git a/examples/examples/simple/openai/create-completion-async-stream.py b/examples/examples/simple/openai/create-completion-async-stream.py
index d84c3ce..cd4158e 100644
--- a/examples/examples/simple/openai/create-completion-async-stream.py
+++ b/examples/examples/simple/openai/create-completion-async-stream.py
@@ -26,8 +26,8 @@ async def main():

     pipeline_run_id = None
     async for value in result:
-        if value.get("pipeline_run_id"):
-            pipeline_run_id = value.get("pipeline_run_id")
+        if value.get("pipelineRunId"):
+            pipeline_run_id = value.get("pipelineRunId")

     gentrace.flush()

diff --git a/examples/examples/simple/openai/create-completion-async.py b/examples/examples/simple/openai/create-completion-async.py
index 8f02400..3fb2f54 100644
--- a/examples/examples/simple/openai/create-completion-async.py
+++ b/examples/examples/simple/openai/create-completion-async.py
@@ -25,7 +25,7 @@ async def main():

     gentrace.flush()

-    print("Result: ", result.pipeline_run_id)
+    print("Result: ", result["pipelineRunId"])


 asyncio.run(main())
diff --git a/examples/examples/simple/openai/create-completion.py b/examples/examples/simple/openai/create-completion.py
index 95cf132..5061a3d 100644
--- a/examples/examples/simple/openai/create-completion.py
+++ b/examples/examples/simple/openai/create-completion.py
@@ -14,7 +14,7 @@
 openai.api_key = os.getenv("OPENAI_KEY")

 result = openai.Completion.create(
-    pipeline_id="text-generation",
+    pipeline_id="text-generation-test",
     model="text-davinci-003",
     prompt_template="Hello world {{ name }}",
     prompt_inputs={"name": "test"},
@@ -22,4 +22,4 @@

 gentrace.flush()

-print("Result: ", result.pipeline_run_id)
+print("Result: ", result["pipelineRunId"])
diff --git a/examples/examples/simple/openai/create-embedding-async.py b/examples/examples/simple/openai/create-embedding-async.py
index d6d6c7e..65b97a8 100644
--- a/examples/examples/simple/openai/create-embedding-async.py
+++ b/examples/examples/simple/openai/create-embedding-async.py
@@ -24,7 +24,7 @@ async def main():

     gentrace.flush()

-    print("Result: ", result.pipeline_run_id)
+    print("Result: ", result["pipelineRunId"])


 asyncio.run(main())
diff --git a/examples/examples/simple/openai/create-embedding.py b/examples/examples/simple/openai/create-embedding.py
index 27578cb..c0161a6 100644
--- a/examples/examples/simple/openai/create-embedding.py
+++ b/examples/examples/simple/openai/create-embedding.py
@@ -22,4 +22,4 @@

 gentrace.flush()

-print("Result: ", result.pipeline_run_id)
+print("Result: ", result["pipelineRunId"])
diff --git a/examples/examples/simple/pinecone/delete.py b/examples/examples/simple/pinecone/delete.py
index 88c4837..d5c310c 100644
--- a/examples/examples/simple/pinecone/delete.py
+++ b/examples/examples/simple/pinecone/delete.py
@@ -21,4 +21,4 @@

 gentrace.flush()

-print("Result: ", result.pipeline_run_id)
+print("Result: ", result["pipelineRunId"])
diff --git a/examples/examples/simple/pinecone/fetch.py b/examples/examples/simple/pinecone/fetch.py
index 0bd7ab0..8d88554 100644
--- a/examples/examples/simple/pinecone/fetch.py
+++ b/examples/examples/simple/pinecone/fetch.py
@@ -21,4 +21,4 @@

 gentrace.flush()

-print("Result: ", result.pipeline_run_id)
+print("Result: ", result["pipelineRunId"])
diff --git a/examples/examples/simple/pinecone/query.py b/examples/examples/simple/pinecone/query.py
index 8208287..e0802e5 100644
--- a/examples/examples/simple/pinecone/query.py
+++ b/examples/examples/simple/pinecone/query.py
@@ -18,10 +18,10 @@
 )

 result = pinecone.Index("openai-trec").query(
-    top_k=10, vector=DEFAULT_VECTOR, pipline_id="self-contained-pinecone-query"
+    top_k=10, vector=DEFAULT_VECTOR, pipeline_id="self-contained-pinecone-query"
 )

 gentrace.flush()

-print("Result: ", result.pipeline_run_id)
+print("Result: ", result["pipelineRunId"])
diff --git a/examples/examples/simple/pinecone/upsert.py b/examples/examples/simple/pinecone/upsert.py
index af20893..36863c7 100644
--- a/examples/examples/simple/pinecone/upsert.py
+++ b/examples/examples/simple/pinecone/upsert.py
@@ -30,4 +30,4 @@

 gentrace.flush()

-print("Result: ", result.pipeline_run_id)
+print("Result: ", result["pipelineRunId"])
result["pipelineRunId"]) diff --git a/examples/poetry.lock b/examples/poetry.lock index 0b8332c..f776075 100644 --- a/examples/poetry.lock +++ b/examples/poetry.lock @@ -493,13 +493,13 @@ files = [ [[package]] name = "gentrace-py" -version = "0.5.1" +version = "0.5.5" description = "Python SDK for the Gentrace API" category = "main" optional = false python-versions = ">=3.8.1,<4.0" files = [ - {file = "gentrace_py-0.5.1.tar.gz", hash = "sha256:39c117c2b9b81a3789cd05e746e3cbde3c79d729fd0aa5e61b3d5f38d5e6e5d1"}, + {file = "gentrace_py-0.5.5.tar.gz", hash = "sha256:b1845e3821d1912ab0c1c5d817f60f3e5e3b8a275ef54c9f735a7126f36aa65a"}, ] [package.dependencies] @@ -508,6 +508,7 @@ frozendict = ">=2.3.7,<3.0.0" pydantic = ">=1.10.2" pystache = ">=0.6.0,<0.7.0" python_dateutil = ">=2.5.3" +python-dotenv = ">=1.0.0,<2.0.0" setuptools = ">=21.0.0" urllib3 = ">=1.25.3" @@ -518,7 +519,7 @@ vectorstores = ["pinecone-client (>=2.2.1,<3.0.0)"] [package.source] type = "file" -url = "../package/dist/gentrace_py-0.5.1.tar.gz" +url = "../package/dist/gentrace_py-0.5.5.tar.gz" [[package]] name = "idna" @@ -973,14 +974,14 @@ files = [ [[package]] name = "setuptools" -version = "67.7.1" +version = "67.7.2" description = "Easily download, build, install, upgrade, and uninstall Python packages" category = "main" optional = false python-versions = ">=3.7" files = [ - {file = "setuptools-67.7.1-py3-none-any.whl", hash = "sha256:6f0839fbdb7e3cfef1fc38d7954f5c1c26bf4eebb155a55c9bf8faf997b9fb67"}, - {file = "setuptools-67.7.1.tar.gz", hash = "sha256:bb16732e8eb928922eabaa022f881ae2b7cdcfaf9993ef1f5e841a96d32b8e0c"}, + {file = "setuptools-67.7.2-py3-none-any.whl", hash = "sha256:23aaf86b85ca52ceb801d32703f12d77517b2556af839621c641fca11287952b"}, + {file = "setuptools-67.7.2.tar.gz", hash = "sha256:f104fa03692a2602fa0fec6c6a9e63b6c8a968de13e17c026957dd1f53d80990"}, ] [package.extras] @@ -1156,4 +1157,4 @@ multidict = ">=4.0" [metadata] lock-version = "2.0" python-versions = "^3.11" -content-hash = "f425bbac049a939057b7bffa39d13f25004b5d3c37676f76dbbad2ff5545d96e" +content-hash = "8aaa92d2568aa9c19bb8dbff3dd1e34534107f854638859b4e769bb4f37d44c4" diff --git a/examples/pyproject.toml b/examples/pyproject.toml index 77f7f73..83d0cb3 100644 --- a/examples/pyproject.toml +++ b/examples/pyproject.toml @@ -7,11 +7,11 @@ repository = "https://github.com/gentrace/gentrace-python" version = "0.0.1" [tool.poetry.dependencies] -gentrace-py = {path = "../package/dist/gentrace_py-0.5.1.tar.gz", develop = true} openai = "^0.27.4" pinecone-client = "^2.2.1" python = "^3.11" python-dotenv = "^1.0.0" +gentrace-py = {path = "../package/dist/gentrace_py-0.5.5.tar.gz", develop = true} [tool.poetry.group.lint.dependencies] black = "^23.3.0" diff --git a/package/gentrace/providers/getters.py b/package/gentrace/providers/getters.py index 098f728..9d2a751 100644 --- a/package/gentrace/providers/getters.py +++ b/package/gentrace/providers/getters.py @@ -2,22 +2,22 @@ import re import openai +from urllib3.util import parse_url from gentrace.configuration import Configuration as GentraceConfiguration openai.api_key = os.getenv("OPENAI_KEY") -VALID_GENTRACE_HOST = r"^https?://[\w.-]+:\d{1,5}/api/v1/?$" - - def test_validity(): from gentrace import api_key, host if not api_key: raise ValueError("Gentrace API key not set") - if host and not re.match(VALID_GENTRACE_HOST, host): + path = parse_url(host).path + + if host and path != "/api/v1" and path != "/api/v1/": raise ValueError("Gentrace host is invalid") @@ -28,9 +28,6 @@ def configure_openai(): 
     test_validity()

-    if host and not re.match(VALID_GENTRACE_HOST, host):
-        raise ValueError("Gentrace host is invalid")
-
     gentrace_config = GentraceConfiguration(host=host)
     gentrace_config.access_token = api_key

diff --git a/package/gentrace/providers/llms/openai.py b/package/gentrace/providers/llms/openai.py
index 804da83..1dd0ba0 100644
--- a/package/gentrace/providers/llms/openai.py
+++ b/package/gentrace/providers/llms/openai.py
@@ -91,7 +91,7 @@ def create_completion_step_run(
         submit_result = pipeline_run.submit()

         if not stream:
-            completion.pipeline_run_id = (
+            completion["pipelineRunId"] = (
                 submit_result["pipelineRunId"]
                 if "pipelineRunId" in submit_result
                 else None
@@ -136,6 +136,7 @@ def intercept_completion(original_fn, gentrace_config: Configuration):
     def wrapper(cls, *args, **kwargs):
         prompt_template = kwargs.get("prompt_template")
         prompt_inputs = kwargs.get("prompt_inputs")
+        prompt = kwargs.get("prompt")
         pipeline_id = kwargs.pop("pipeline_id", None)
         stream = kwargs.get("stream")
         base_completion_options = {
@@ -144,18 +145,11 @@ def wrapper(cls, *args, **kwargs):
             if k not in ["prompt_template", "prompt_inputs"]
         }

-        if "prompt" in base_completion_options:
-            raise ValueError(
-                "The prompt attribute cannot be provided when using the Gentrace SDK. Use prompt_template and prompt_inputs instead."
-            )
-
-        if not prompt_template:
-            raise ValueError(
-                "The prompt_template attribute must be provided when using the Gentrace SDK."
-            )
-
         if stream:
-            rendered_prompt = pystache.render(prompt_template, prompt_inputs)
+            rendered_prompt = prompt
+
+            if prompt_template and prompt_inputs:
+                rendered_prompt = pystache.render(prompt_template, prompt_inputs)

             new_completion_options = {
                 **base_completion_options,
@@ -173,7 +167,7 @@ def profiled_completion():

                 modified_response = []
                 for value in completion:
                     if value and is_self_contained:
-                        value["pipeline_run_id"] = pipeline_run_id
+                        value["pipelineRunId"] = pipeline_run_id
                     modified_response.append(value)
                     yield value
@@ -197,9 +191,15 @@ def profiled_completion():

             return profiled_completion()

-        rendered_prompt = pystache.render(prompt_template, prompt_inputs)
+        rendered_prompt = prompt
+
+        if prompt_template and prompt_inputs:
+            rendered_prompt = pystache.render(prompt_template, prompt_inputs)

-        new_completion_options = {**base_completion_options, "prompt": rendered_prompt}
+        new_completion_options = {
+            **base_completion_options,
+            "prompt": rendered_prompt,
+        }

         start_time = time.time()
         completion = original_fn(**new_completion_options)
@@ -230,6 +230,7 @@ def intercept_completion_async(original_fn, gentrace_config: Configuration):
     async def wrapper(cls, *args, **kwargs):
         prompt_template = kwargs.get("prompt_template")
         prompt_inputs = kwargs.get("prompt_inputs")
+        prompt = kwargs.get("prompt")
         pipeline_id = kwargs.pop("pipeline_id", None)
         stream = kwargs.get("stream")
         base_completion_options = {
@@ -238,18 +239,11 @@ async def wrapper(cls, *args, **kwargs):
             if k not in ["prompt_template", "prompt_inputs"]
         }

-        if "prompt" in base_completion_options:
-            raise ValueError(
-                "The prompt attribute cannot be provided when using the Gentrace SDK. Use prompt_template and prompt_inputs instead."
-            )
-
-        if not prompt_template:
-            raise ValueError(
-                "The prompt_template attribute must be provided when using the Gentrace SDK."
-            )
-
         if stream:
-            rendered_prompt = pystache.render(prompt_template, prompt_inputs)
+            rendered_prompt = prompt
+
+            if prompt_template and prompt_inputs:
+                rendered_prompt = pystache.render(prompt_template, prompt_inputs)

             new_completion_options = {
                 **base_completion_options,
@@ -267,7 +261,7 @@ async def profiled_completion():

                 modified_response = []
                 async for value in completion:
                     if value and is_self_contained:
-                        value["pipeline_run_id"] = pipeline_run_id
+                        value["pipelineRunId"] = pipeline_run_id
                     modified_response.append(value)
                     yield value
@@ -291,9 +285,15 @@ async def profiled_completion():

             return profiled_completion()

-        rendered_prompt = pystache.render(prompt_template, prompt_inputs)
+        rendered_prompt = prompt
+
+        if prompt_template and prompt_inputs:
+            rendered_prompt = pystache.render(prompt_template, prompt_inputs)

-        new_completion_options = {**base_completion_options, "prompt": rendered_prompt}
+        new_completion_options = {
+            **base_completion_options,
+            "prompt": rendered_prompt,
+        }

         start_time = time.time()
         completion = await original_fn(**new_completion_options)
@@ -341,7 +341,7 @@ def profiled_completion():

                 modified_response = []
                 for value in completion:
                     if value and is_self_contained:
-                        value["pipeline_run_id"] = pipeline_run_id
+                        value["pipelineRunId"] = pipeline_run_id
                     modified_response.append(value)
                     yield value
@@ -416,7 +416,7 @@ def profiled_completion():
         if is_self_contained:
             submit_result = pipeline_run.submit()

-            completion.pipeline_run_id = (
+            completion["pipelineRunId"] = (
                 submit_result["pipelineRunId"]
                 if "pipelineRunId" in submit_result
                 else None
@@ -450,7 +450,7 @@ async def profiled_completion():

                 modified_response = []
                 async for value in completion:
                     if value and is_self_contained:
-                        value["pipeline_run_id"] = pipeline_run_id
+                        value["pipelineRunId"] = pipeline_run_id
                     modified_response.append(value)
                     yield value
@@ -526,7 +526,7 @@ async def profiled_completion():
         if is_self_contained:
             submit_result = pipeline_run.submit()

-            completion.pipeline_run_id = (
+            completion["pipelineRunId"] = (
                 submit_result["pipelineRunId"]
                 if "pipelineRunId" in submit_result
                 else None
@@ -578,7 +578,7 @@ def wrapper(cls, *args, **kwargs):
         if is_self_contained:
             submit_result = pipeline_run.submit()

-            completion.pipeline_run_id = (
+            completion["pipelineRunId"] = (
                 submit_result["pipelineRunId"]
                 if "pipelineRunId" in submit_result
                 else None
@@ -630,7 +630,7 @@ async def wrapper(cls, *args, **kwargs):
         if is_self_contained:
             submit_result = pipeline_run.submit()

-            completion.pipeline_run_id = (
+            completion["pipelineRunId"] = (
                 submit_result["pipelineRunId"]
                 if "pipelineRunId" in submit_result
                 else None
diff --git a/package/gentrace/providers/utils.py b/package/gentrace/providers/utils.py
index e7c0fb8..b2e402b 100644
--- a/package/gentrace/providers/utils.py
+++ b/package/gentrace/providers/utils.py
@@ -9,9 +9,9 @@


 def to_date_string(time_value):
-    return (
-        datetime.fromtimestamp(time_value).strftime("%Y-%m-%dT%H:%M:%S.%fZ")[:-4] + "Z"
-    )
+    utc_time = datetime.utcfromtimestamp(time_value)
+    utc_time_str = utc_time.strftime("%Y-%m-%dT%H:%M:%S.%fZ")
+    return utc_time_str[:-4] + "Z"


 async def pipeline_run_post_background(
diff --git a/package/gentrace/providers/vectorstores/pinecone.py b/package/gentrace/providers/vectorstores/pinecone.py
index 4c386c5..ed1b825 100644
--- a/package/gentrace/providers/vectorstores/pinecone.py
+++ b/package/gentrace/providers/vectorstores/pinecone.py
@@ -86,7 +86,7 @@ def fetch(
         )

         if pipeline_run_id:
-            response.pipeline_run_id = pipeline_run_id
+            response["pipelineRunId"] = pipeline_run_id

response["pipelineRunId"] = pipeline_run_id return response @@ -129,7 +129,7 @@ def update( ) if pipeline_run_id: - response.pipeline_run_id = pipeline_run_id + response["pipelineRunId"] = pipeline_run_id return response @@ -189,7 +189,7 @@ def query( ) if pipeline_run_id: - response.pipeline_run_id = pipeline_run_id + response["pipelineRunId"] = pipeline_run_id return response @@ -226,7 +226,7 @@ def upsert( ) if pipeline_run_id: - response.pipeline_run_id = pipeline_run_id + response["pipelineRunId"] = pipeline_run_id return response @@ -262,7 +262,7 @@ def delete( ) if pipeline_run_id: - response.pipeline_run_id = pipeline_run_id + response["pipelineRunId"] = pipeline_run_id return response diff --git a/package/tests/test_openai_completion.py b/package/tests/test_openai_completion.py index 4c8b9bc..c002882 100644 --- a/package/tests/test_openai_completion.py +++ b/package/tests/test_openai_completion.py @@ -62,7 +62,7 @@ def test_openai_completion_self_contained_pipeline_id( prompt_inputs={"name": "test"}, ) - assert uuid.UUID(result.pipeline_run_id) is not None + assert uuid.UUID(result["pipelineRunId"]) is not None print(setup_teardown_openai) @@ -114,7 +114,7 @@ def test_openai_completion_self_contained_no_pipeline_id( prompt_inputs={"name": "test"}, ) - assert not hasattr(result, "pipeline_run_id") + assert not hasattr(result, "pipelineRunId") print(setup_teardown_openai) @@ -161,7 +161,7 @@ async def test_openai_completion_self_contained_no_pipeline_id_async( prompt_inputs={"name": "test"}, ) - assert not hasattr(result, "pipeline_run_id") + assert not hasattr(result, "pipelineRunId") print(setup_teardown_openai) @@ -210,7 +210,7 @@ async def test_openai_completion_self_contained_pipeline_id_async( pipeline_id="test_openai_completion_self_contained_no_pipeline_id_async", ) - assert uuid.UUID(result.pipeline_run_id) is not None + assert uuid.UUID(result["pipelineRunId"]) is not None print(setup_teardown_openai) @@ -264,7 +264,7 @@ def test_openai_completion_self_contained_pipeline_id_stream( pipeline_run_id = None for value in result: - pipeline_run_id = value["pipeline_run_id"] + pipeline_run_id = value["pipelineRunId"] assert uuid.UUID(pipeline_run_id) is not None @@ -289,8 +289,221 @@ async def test_openai_completion_self_contained_pipeline_id_stream_async( pipeline_run_id = None async for value in result: - pipeline_run_id = value["pipeline_run_id"] + pipeline_run_id = value["pipelineRunId"] assert uuid.UUID(pipeline_run_id) is not None print(setup_teardown_openai) + + +def test_openai_completion_self_contained_pipeline_id_prompt( + mocker, completion_response, gentrace_pipeline_run_response, setup_teardown_openai +): + openai.api_key = os.getenv("OPENAI_KEY") + + # Setup OpenAI mocked request + openai_api_key_getter = mocker.patch.object(openai.util, "default_api_key") + openai_api_key_getter.return_value = "test-key" + + openai_request = mocker.patch.object(requests.sessions.Session, "request") + + response = requests.Response() + response.status_code = 200 + response.headers["Content-Type"] = "application/json" + response._content = json.dumps(completion_response, ensure_ascii=False).encode( + "utf-8" + ) + + openai_request.return_value = response + + # Setup Gentrace mocked response + headers = http.client.HTTPMessage() + headers.add_header("Content-Type", "application/json") + + body = json.dumps(gentrace_pipeline_run_response, ensure_ascii=False).encode( + "utf-8" + ) + + gentrace_response = HTTPResponse( + body=body, + headers=headers, + status=200, + reason="OK", + 
+        preload_content=False,
+        decode_content=True,
+        enforce_content_length=True,
+    )
+
+    gentrace_request = mocker.patch.object(gentrace.api_client.ApiClient, "request")
+    gentrace_request.return_value = gentrace_response
+
+    result = openai.Completion.create(
+        pipeline_id="text-generation",
+        model="text-davinci-003",
+        prompt="Hello World",
+    )
+
+    assert uuid.UUID(result["pipelineRunId"]) is not None
+
+    print(setup_teardown_openai)
+
+
+def test_openai_completion_self_contained_no_pipeline_id_prompt(
+    mocker, completion_response, gentrace_pipeline_run_response, setup_teardown_openai
+):
+    openai.api_key = os.getenv("OPENAI_KEY")
+
+    # Setup OpenAI mocked request
+    openai_api_key_getter = mocker.patch.object(openai.util, "default_api_key")
+    openai_api_key_getter.return_value = "test-key"
+
+    openai_request = mocker.patch.object(requests.sessions.Session, "request")
+
+    response = requests.Response()
+    response.status_code = 200
+    response.headers["Content-Type"] = "application/json"
+    response._content = json.dumps(completion_response, ensure_ascii=False).encode(
+        "utf-8"
+    )
+
+    openai_request.return_value = response
+
+    # Setup Gentrace mocked response
+    headers = http.client.HTTPMessage()
+    headers.add_header("Content-Type", "application/json")
+
+    body = json.dumps(gentrace_pipeline_run_response, ensure_ascii=False).encode(
+        "utf-8"
+    )
+
+    gentrace_response = HTTPResponse(
+        body=body,
+        headers=headers,
+        status=200,
+        reason="OK",
+        preload_content=False,
+        decode_content=True,
+        enforce_content_length=True,
+    )
+
+    gentrace_request = mocker.patch.object(gentrace.api_client.ApiClient, "request")
+    gentrace_request.return_value = gentrace_response
+
+    result = openai.Completion.create(
+        model="text-davinci-003",
+        prompt="Hello World",
+    )
+
+    assert "pipelineRunId" not in result
+
+    print(setup_teardown_openai)
+
+
+@responses.activate
+def test_openai_completion_self_contained_pipeline_id_stream_prompt(
+    mocker, completion_response, gentrace_pipeline_run_response, setup_teardown_openai
+):
+    openai.api_key = os.getenv("OPENAI_KEY")
+
+    # Setup OpenAI mocked request
+    openai_api_key_getter = mocker.patch.object(openai.util, "default_api_key")
+    openai_api_key_getter.return_value = "test-key"
+
+    responses.add(
+        responses.POST,
+        "https://api.openai.com/v1/completions",
+        body="data: " + json.dumps(completion_response, ensure_ascii=False),
+        stream=True,
+        content_type="text/event-stream",
+    )
+
+    # Setup Gentrace mocked response
+    headers = http.client.HTTPMessage()
+    headers.add_header("Content-Type", "application/json")
+
+    body = json.dumps(gentrace_pipeline_run_response, ensure_ascii=False).encode(
+        "utf-8"
+    )
+
+    gentrace_response = HTTPResponse(
+        body=body,
+        headers=headers,
+        status=200,
+        reason="OK",
+        preload_content=False,
+        decode_content=True,
+        enforce_content_length=True,
+    )
+
+    gentrace_request = mocker.patch.object(gentrace.api_client.ApiClient, "request")
+    gentrace_request.return_value = gentrace_response
+
+    result = openai.Completion.create(
+        pipeline_id="text-generation",
+        model="text-davinci-003",
+        prompt="Hello world!",
+        stream=True,
+    )
+
+    pipeline_run_id = None
+    for value in result:
+        pipeline_run_id = value["pipelineRunId"]
+
+    assert uuid.UUID(pipeline_run_id) is not None
+
+    print(setup_teardown_openai)
+
+
+@responses.activate
+def test_openai_completion_self_contained_no_pipeline_id_stream_prompt(
+    mocker, completion_response, gentrace_pipeline_run_response, setup_teardown_openai
+):
+    openai.api_key = os.getenv("OPENAI_KEY")
+
+    # Setup OpenAI mocked request
+    openai_api_key_getter = mocker.patch.object(openai.util, "default_api_key")
+    openai_api_key_getter.return_value = "test-key"
+
+    responses.add(
+        responses.POST,
+        "https://api.openai.com/v1/completions",
+        body="data: " + json.dumps(completion_response, ensure_ascii=False),
+        stream=True,
+        content_type="text/event-stream",
+    )
+
+    # Setup Gentrace mocked response
+    headers = http.client.HTTPMessage()
+    headers.add_header("Content-Type", "application/json")
+
+    body = json.dumps(gentrace_pipeline_run_response, ensure_ascii=False).encode(
+        "utf-8"
+    )
+
+    gentrace_response = HTTPResponse(
+        body=body,
+        headers=headers,
+        status=200,
+        reason="OK",
+        preload_content=False,
+        decode_content=True,
+        enforce_content_length=True,
+    )
+
+    gentrace_request = mocker.patch.object(gentrace.api_client.ApiClient, "request")
+    gentrace_request.return_value = gentrace_response
+
+    result = openai.Completion.create(
+        model="text-davinci-003",
+        prompt="Hello world!",
+        stream=True,
+    )
+
+    pipeline_run_id = None
+    for value in result:
+        if "pipelineRunId" in value:
+            pipeline_run_id = value["pipelineRunId"]
+
+    assert pipeline_run_id is None
+
+    print(setup_teardown_openai)
diff --git a/package/tests/test_openai_embedding.py b/package/tests/test_openai_embedding.py
index 71c3628..1d3f142 100644
--- a/package/tests/test_openai_embedding.py
+++ b/package/tests/test_openai_embedding.py
@@ -59,7 +59,7 @@ def test_openai_embedding_self_contained_pipeline_id(
         pipeline_id="testing-value",
     )

-    assert uuid.UUID(result.pipeline_run_id) is not None
+    assert uuid.UUID(result["pipelineRunId"]) is not None

     print(setup_teardown_openai)
@@ -110,7 +110,7 @@ def test_openai_embedding_self_contained_no_pipeline_id(
         model="text-similarity-davinci-001",
     )

-    assert not hasattr(result, "pipeline_run_id")
+    assert not hasattr(result, "pipelineRunId")

     print(setup_teardown_openai)
@@ -127,9 +127,9 @@ def test_openai_embedding_self_contained_pipeline_id_server(
         pipeline_id="testing-value-vivek",
     )

-    assert uuid.UUID(result.pipeline_run_id) is not None
+    assert uuid.UUID(result["pipelineRunId"]) is not None

-    print("pipeline_id: ", result.pipeline_run_id)
+    print("pipeline_id: ", result["pipelineRunId"])

     print(setup_teardown_openai)
@@ -143,7 +143,7 @@ def test_openai_embedding_self_contained_no_pipeline_id_server(setup_teardown_op
         model="text-similarity-davinci-001",
     )

-    assert not hasattr(result, "pipeline_run_id")
+    assert not hasattr(result, "pipelineRunId")

     print(setup_teardown_openai)
@@ -244,7 +244,7 @@ async def test_openai_embedding_self_contained_no_pipeline_id_server_async(
         model="text-similarity-davinci-001",
     )

-    assert not hasattr(result, "pipeline_run_id")
+    assert not hasattr(result, "pipelineRunId")

     print(setup_teardown_openai)
@@ -260,7 +260,7 @@ async def test_openai_embedding_self_contained_pipeline_id_server_async(
         pipeline_id="testing-value",
     )

-    assert uuid.UUID(result.pipeline_run_id) is not None
+    assert uuid.UUID(result["pipelineRunId"]) is not None

     print(setup_teardown_openai)
diff --git a/package/tests/test_pinecone.py b/package/tests/test_pinecone.py
index f2dee44..4881395 100644
--- a/package/tests/test_pinecone.py
+++ b/package/tests/test_pinecone.py
@@ -143,7 +143,7 @@ def test_pinecone_self_contained_fetch_server(setup_teardown_pinecone):
         ids=["3980"], pipeline_id="self-contained-pinecone-fetch"
     )

-    assert uuid.UUID(result.pipeline_run_id) is not None
+    assert uuid.UUID(result["pipelineRunId"]) is not None

     print(setup_teardown_pinecone)
@@ -159,7 +159,7 @@ def test_pinecone_self_contained_query_server(setup_teardown_pinecone, vector):
         top_k=10, vector=vector, pipeline_id="self-contained-pinecone-query"
     )

-    assert uuid.UUID(result.pipeline_run_id) is not None
+    assert uuid.UUID(result["pipelineRunId"]) is not None

     print(setup_teardown_pinecone)
@@ -175,5 +175,5 @@ def test_pinecone_self_contained_query_server_no_pipeline_id(
     index = pinecone.Index("openai-trec")
     result = index.query(top_k=10, vector=vector)

-    assert result.pipeline_run_id is None
+    assert "pipelineRunId" not in result
     print(setup_teardown_pinecone)
diff --git a/package/tests/test_usage.py b/package/tests/test_usage.py
index af5567d..e4525ae 100644
--- a/package/tests/test_usage.py
+++ b/package/tests/test_usage.py
@@ -6,7 +6,7 @@
 import gentrace


-def test_gentrace_host_valid():
+def test_gentrace_localhost_host_valid():
     gentrace.api_key = os.getenv("GENTRACE_API_KEY")
     gentrace.host = "http://localhost:3000/"

@@ -21,6 +21,34 @@
     gentrace.api_key = ""


+def test_gentrace_staging_host_valid():
+    gentrace.api_key = os.getenv("GENTRACE_API_KEY")
+    gentrace.host = "https://staging.gentrace.ai/api/v1/"
+
+    gentrace.configure_openai()
+
+    gentrace.host = "https://staging.gentrace.ai/api/v1/feedback"
+    with pytest.raises(ValueError):
+        gentrace.configure_openai()
+
+    gentrace.host = ""
+    gentrace.api_key = ""
+
+
+def test_gentrace_prod_host_valid():
+    gentrace.api_key = os.getenv("GENTRACE_API_KEY")
+    gentrace.host = "https://gentrace.ai/api/v1"
+
+    gentrace.configure_openai()
+
+    gentrace.host = "https://gentrace.ai/api/v1/feedback"
+    with pytest.raises(ValueError):
+        gentrace.configure_openai()
+
+    gentrace.host = ""
+    gentrace.api_key = ""
+
+
 def test_openai_configure_should_raise_error():
     with pytest.raises(ValueError):
         gentrace.configure_openai()
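
Usage sketch (not part of the patch): a minimal self-contained completion call after this change, pieced together from the example and test files in the diff above. It assumes gentrace-py 0.5.5 is installed and that the GENTRACE_API_KEY and OPENAI_KEY environment variables from the examples are set; after this patch a plain prompt is accepted alongside prompt_template/prompt_inputs, and the run id is read from the camelCase "pipelineRunId" key.

    import os

    import gentrace
    import openai

    # Gentrace setup, as exercised in package/tests/test_usage.py
    gentrace.api_key = os.getenv("GENTRACE_API_KEY")
    gentrace.configure_openai()

    openai.api_key = os.getenv("OPENAI_KEY")

    # A raw prompt is now allowed; prompt_template/prompt_inputs stay optional.
    result = openai.Completion.create(
        pipeline_id="text-generation-test",
        model="text-davinci-003",
        prompt="Hello world!",
    )

    gentrace.flush()

    # The run identifier is exposed under the camelCase key after this patch.
    print("Result: ", result["pipelineRunId"])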