diff --git a/.devcontainer/Dockerfile b/.devcontainer/Dockerfile
index 6eb00725..dd939620 100644
--- a/.devcontainer/Dockerfile
+++ b/.devcontainer/Dockerfile
@@ -3,7 +3,7 @@ FROM mcr.microsoft.com/vscode/devcontainers/python:0-${VARIANT}
 
 USER vscode
 
-RUN curl -sSf https://rye-up.com/get | RYE_VERSION="0.15.2" RYE_INSTALL_OPTION="--yes" bash
+RUN curl -sSf https://rye-up.com/get | RYE_VERSION="0.24.0" RYE_INSTALL_OPTION="--yes" bash
 ENV PATH=/home/vscode/.rye/shims:$PATH
 
 RUN echo "[[ -d .venv ]] && source .venv/bin/activate" >> /home/vscode/.bashrc
diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml
index aea1868f..dfb911fb 100644
--- a/.github/workflows/ci.yml
+++ b/.github/workflows/ci.yml
@@ -21,7 +21,7 @@ jobs:
           curl -sSf https://rye-up.com/get | bash
           echo "$HOME/.rye/shims" >> $GITHUB_PATH
         env:
-          RYE_VERSION: 0.15.2
+          RYE_VERSION: 0.24.0
           RYE_INSTALL_OPTION: "--yes"
 
       - name: Install dependencies
diff --git a/.github/workflows/publish-pypi.yml b/.github/workflows/publish-pypi.yml
index 14ddce27..962b8c4f 100644
--- a/.github/workflows/publish-pypi.yml
+++ b/.github/workflows/publish-pypi.yml
@@ -21,7 +21,7 @@ jobs:
           curl -sSf https://rye-up.com/get | bash
           echo "$HOME/.rye/shims" >> $GITHUB_PATH
         env:
-          RYE_VERSION: 0.15.2
+          RYE_VERSION: 0.24.0
          RYE_INSTALL_OPTION: "--yes"
 
       - name: Publish to PyPI
diff --git a/.release-please-manifest.json b/.release-please-manifest.json
index da59f99e..3e2bf498 100644
--- a/.release-please-manifest.json
+++ b/.release-please-manifest.json
@@ -1,3 +1,3 @@
 {
-  ".": "0.4.0"
+  ".": "0.4.1"
 }
\ No newline at end of file
diff --git a/CHANGELOG.md b/CHANGELOG.md
index ced1d0d7..dff54ed4 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,21 +1,27 @@
 # Changelog
 
-## 0.4.0 (2024-02-15)
+## 0.4.1 (2024-02-21)
 
-Full Changelog: [v0.1.0...v0.4.0](https://github.com/groq/groq-python/compare/v0.1.0...v0.4.0)
+Full Changelog: [v0.1.0...v0.4.1](https://github.com/groq/groq-python/compare/v0.1.0...v0.4.1)
 
 ### Features
 
+* Add initial Stainless SDK ([4de80db](https://github.com/groq/groq-python/commit/4de80dbe4c27d6ecbcc2d2b6192d27ade5da3866))
 * Add initial Stainless SDK ([d5a8512](https://github.com/groq/groq-python/commit/d5a851262e04e625dde130367ed91d8f95683599))
 * Add initial Stainless SDK ([316de2c](https://github.com/groq/groq-python/commit/316de2ccfeb76e36fe34bb8656ea90a8d42a7d00))
 * create default branch ([7e00266](https://github.com/groq/groq-python/commit/7e00266e3c691d92d508e753e2c14c03297c09f9))
+* update via SDK Studio ([#10](https://github.com/groq/groq-python/issues/10)) ([0c0d204](https://github.com/groq/groq-python/commit/0c0d20405a96167f060a03a2b8a58a49d9a1c7c8))
+* update via SDK Studio ([#14](https://github.com/groq/groq-python/issues/14)) ([153744e](https://github.com/groq/groq-python/commit/153744e8ca331900adb0bd88a688afedf9a4bf00))
 * update via SDK Studio ([#3](https://github.com/groq/groq-python/issues/3)) ([8d92c08](https://github.com/groq/groq-python/commit/8d92c086e320c2715e02bc79807ff872e84c0b0f))
 
 
 ### Chores
 
 * go live ([#2](https://github.com/groq/groq-python/issues/2)) ([ba81c42](https://github.com/groq/groq-python/commit/ba81c42d6d0fd6d47819e0d58962235cb70ca4f1))
+* go live ([#5](https://github.com/groq/groq-python/issues/5)) ([75ea081](https://github.com/groq/groq-python/commit/75ea081a84bbcf15702dcb53fd6411c8de497c83))
 * go live ([#5](https://github.com/groq/groq-python/issues/5)) ([af9a838](https://github.com/groq/groq-python/commit/af9a838e240bb0f7385bc33fb18ce246427ca2f7))
+* update branch ([#12](https://github.com/groq/groq-python/issues/12)) ([28f34e1](https://github.com/groq/groq-python/commit/28f34e1cc08b974d31744fd94bf31eafc9e6e867))
+* update branch ([#8](https://github.com/groq/groq-python/issues/8)) ([b9b55b4](https://github.com/groq/groq-python/commit/b9b55b41cb158efd155f9cda829808c877493afd))
 
 
 ## 0.1.0 (2024-02-10)
diff --git a/examples/chat_completion.py b/examples/chat_completion.py
index 77511d0b..06e7664a 100644
--- a/examples/chat_completion.py
+++ b/examples/chat_completion.py
@@ -10,43 +10,33 @@
         # Set an optional system message. This sets the behavior of the
         # assistant and can be used to provide specific instructions for
         # how it should behave throughout the conversation.
-        {
-            "role": "system",
-            "content": "you are a helpful assistant."
-        },
+        {"role": "system", "content": "you are a helpful assistant."},
         # Set a user message for the assistant to respond to.
         {
             "role": "user",
             "content": "Explain the importance of low latency LLMs",
         },
     ],
-
     # The language model which will generate the completion.
     model="mixtral-8x7b-32768",
-
     #
     # Optional parameters
     #
-
     # Controls randomness: lowering results in less random completions.
     # As the temperature approaches zero, the model will become deterministic
     # and repetitive.
     temperature=0.5,
-
     # The maximum number of tokens to generate. Requests can use up to
     # 2048 tokens shared between prompt and completion.
     max_tokens=1024,
-
     # Controls diversity via nucleus sampling: 0.5 means half of all
     # likelihood-weighted options are considered.
     top_p=1,
-
     # A stop sequence is a predefined or user-specified text string that
     # signals an AI to stop generating content, ensuring its responses
     # remain focused and concise. Examples include punctuation marks and
     # markers like "[end]".
     stop=None,
-
     # If set, partial message deltas will be sent.
     stream=False,
 )
diff --git a/examples/chat_completion_async.py b/examples/chat_completion_async.py
index 0d485a26..8e5056d0 100644
--- a/examples/chat_completion_async.py
+++ b/examples/chat_completion_async.py
@@ -14,43 +14,33 @@ async def main() -> None:
             # Set an optional system message. This sets the behavior of the
             # assistant and can be used to provide specific instructions for
             # how it should behave throughout the conversation.
-            {
-                "role": "system",
-                "content": "you are a helpful assistant."
-            },
+            {"role": "system", "content": "you are a helpful assistant."},
             # Set a user message for the assistant to respond to.
             {
                 "role": "user",
                 "content": "Explain the importance of low latency LLMs",
             },
         ],
-
         # The language model which will generate the completion.
         model="mixtral-8x7b-32768",
-
         #
         # Optional parameters
         #
-
         # Controls randomness: lowering results in less random completions.
         # As the temperature approaches zero, the model will become
         # deterministic and repetitive.
         temperature=0.5,
-
         # The maximum number of tokens to generate. Requests can use up to
         # 2048 tokens shared between prompt and completion.
         max_tokens=1024,
-
         # Controls diversity via nucleus sampling: 0.5 means half of all
         # likelihood-weighted options are considered.
         top_p=1,
-
         # A stop sequence is a predefined or user-specified text string that
         # signals an AI to stop generating content, ensuring its responses
         # remain focused and concise. Examples include punctuation marks and
         # markers like "[end]".
         stop=None,
-
         # If set, partial message deltas will be sent.
         stream=False,
     )
diff --git a/examples/chat_completion_async_streaming.py b/examples/chat_completion_async_streaming.py
index 3ba5edd3..eac7ce9f 100644
--- a/examples/chat_completion_async_streaming.py
+++ b/examples/chat_completion_async_streaming.py
@@ -14,39 +14,30 @@ async def main() -> None:
             # Set an optional system message. This sets the behavior of the
             # assistant and can be used to provide specific instructions for
             # how it should behave throughout the conversation.
-            {
-                "role": "system",
-                "content": "you are a helpful assistant."
-            },
+            {"role": "system", "content": "you are a helpful assistant."},
             # Set a user message for the assistant to respond to.
             {
                 "role": "user",
                 "content": "Explain the importance of low latency LLMs",
             },
         ],
-
         # The language model which will generate the completion.
         model="mixtral-8x7b-32768",
-
         #
         # Optional parameters
         #
-
         # Controls randomness: lowering results in less random completions.
         # As the temperature approaches zero, the model will become
         # deterministic and repetitive.
         temperature=0.5,
-
         # The maximum number of tokens to generate. Requests can use up to
         # 2048 tokens shared between prompt and completion.
         max_tokens=1024,
-
         # A stop sequence is a predefined or user-specified text string that
         # signals an AI to stop generating content, ensuring its responses
         # remain focused and concise. Examples include punctuation marks and
         # markers like "[end]".
         stop=None,
-
         # Controls diversity via nucleus sampling: 0.5 means half of all
         # likelihood-weighted options are considered.
         stream=True,
diff --git a/examples/chat_completion_stop.py b/examples/chat_completion_stop.py
index 4abb63b7..d68a90df 100644
--- a/examples/chat_completion_stop.py
+++ b/examples/chat_completion_stop.py
@@ -10,37 +10,28 @@
         # Set an optional system message. This sets the behavior of the
         # assistant and can be used to provide specific instructions for
         # how it should behave throughout the conversation.
-        {
-            "role": "system",
-            "content": "you are a helpful assistant."
-        },
+        {"role": "system", "content": "you are a helpful assistant."},
         # Set a user message for the assistant to respond to.
         {
             "role": "user",
-            "content": "Count to 10. Your response must begin with \"1, \". example: 1, 2, 3, ...",
+            "content": 'Count to 10. Your response must begin with "1, ". example: 1, 2, 3, ...',
         },
     ],
-
     # The language model which will generate the completion.
     model="mixtral-8x7b-32768",
-
     #
     # Optional parameters
     #
-
     # Controls randomness: lowering results in less random completions.
     # As the temperature approaches zero, the model will become deterministic
     # and repetitive.
     temperature=0.5,
-
     # The maximum number of tokens to generate. Requests can use up to
     # 2048 tokens shared between prompt and completion.
     max_tokens=1024,
-
     # Controls diversity via nucleus sampling: 0.5 means half of all
     # likelihood-weighted options are considered.
     top_p=1,
-
     # A stop sequence is a predefined or user-specified text string that
     # signals an AI to stop generating content, ensuring its responses
     # remain focused and concise. Examples include punctuation marks and
@@ -49,7 +40,6 @@
     # If multiple stop values are needed, an array of string may be passed,
     # stop=[", 6", ", six", ", Six"]
     stop=", 6",
-
     # If set, partial message deltas will be sent.
     stream=False,
 )
diff --git a/examples/chat_completion_streaming.py b/examples/chat_completion_streaming.py
index 78d2607f..5bae3a79 100644
--- a/examples/chat_completion_streaming.py
+++ b/examples/chat_completion_streaming.py
@@ -10,43 +10,33 @@
         # Set an optional system message. This sets the behavior of the
         # assistant and can be used to provide specific instructions for
         # how it should behave throughout the conversation.
-        {
-            "role": "system",
-            "content": "you are a helpful assistant."
-        },
+        {"role": "system", "content": "you are a helpful assistant."},
         # Set a user message for the assistant to respond to.
         {
             "role": "user",
             "content": "Explain the importance of low latency LLMs",
         },
     ],
-
     # The language model which will generate the completion.
     model="mixtral-8x7b-32768",
-
     #
     # Optional parameters
     #
-
     # Controls randomness: lowering results in less random completions.
     # As the temperature approaches zero, the model will become deterministic
     # and repetitive.
     temperature=0.5,
-
     # The maximum number of tokens to generate. Requests can use up to
     # 2048 tokens shared between prompt and completion.
     max_tokens=1024,
-
     # Controls diversity via nucleus sampling: 0.5 means half of all
     # likelihood-weighted options are considered.
     top_p=1,
-
     # A stop sequence is a predefined or user-specified text string that
     # signals an AI to stop generating content, ensuring its responses
     # remain focused and concise. Examples include punctuation marks and
     # markers like "[end]".
     stop=None,
-
     # If set, partial message deltas will be sent.
     stream=True,
 )
diff --git a/pyproject.toml b/pyproject.toml
index 5eb419f4..8a910b6b 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -1,11 +1,11 @@
 [project]
 name = "groq"
-version = "0.4.0"
+version = "0.4.1"
 description = "The official Python library for the groq API"
 readme = "README.md"
 license = "Apache-2.0"
 authors = [
-{ name = "Groq", email = "grea@groq.com" },
+{ name = "Groq", email = "support@groq.com" },
 ]
 dependencies = [
     "httpx>=0.23.0, <1",
diff --git a/requirements-dev.lock b/requirements-dev.lock
index a0134d65..fb76b532 100644
--- a/requirements-dev.lock
+++ b/requirements-dev.lock
@@ -5,48 +5,92 @@
 #   pre: false
 #   features: []
 #   all-features: true
+#   with-sources: false
 
 -e file:.
 annotated-types==0.6.0
+    # via pydantic
 anyio==4.1.0
+    # via groq
+    # via httpx
 argcomplete==3.1.2
+    # via nox
 attrs==23.1.0
+    # via pytest
 certifi==2023.7.22
+    # via httpcore
+    # via httpx
 colorlog==6.7.0
+    # via nox
 dirty-equals==0.6.0
 distlib==0.3.7
+    # via virtualenv
 distro==1.8.0
+    # via groq
 exceptiongroup==1.1.3
+    # via anyio
 filelock==3.12.4
+    # via virtualenv
 h11==0.14.0
+    # via httpcore
 httpcore==1.0.2
+    # via httpx
 httpx==0.25.2
+    # via groq
+    # via respx
 idna==3.4
+    # via anyio
+    # via httpx
 importlib-metadata==7.0.0
 iniconfig==2.0.0
+    # via pytest
 mypy==1.7.1
 mypy-extensions==1.0.0
+    # via mypy
 nodeenv==1.8.0
+    # via pyright
 nox==2023.4.22
 packaging==23.2
+    # via nox
+    # via pytest
 platformdirs==3.11.0
+    # via virtualenv
 pluggy==1.3.0
+    # via pytest
 py==1.11.0
+    # via pytest
 pydantic==2.4.2
+    # via groq
 pydantic-core==2.10.1
+    # via pydantic
 pyright==1.1.332
 pytest==7.1.1
+    # via pytest-asyncio
 pytest-asyncio==0.21.1
 python-dateutil==2.8.2
+    # via time-machine
 pytz==2023.3.post1
+    # via dirty-equals
 respx==0.20.2
 ruff==0.1.9
+setuptools==68.2.2
+    # via nodeenv
 six==1.16.0
+    # via python-dateutil
 sniffio==1.3.0
+    # via anyio
+    # via groq
+    # via httpx
 time-machine==2.9.0
 tomli==2.0.1
+    # via mypy
+    # via pytest
 typing-extensions==4.8.0
+    # via groq
+    # via mypy
+    # via pydantic
+    # via pydantic-core
 virtualenv==20.24.5
+    # via nox
 zipp==3.17.0
-# The following packages are considered to be unsafe in a requirements file:
-setuptools==68.2.2
+    # via importlib-metadata
diff --git a/requirements.lock b/requirements.lock
index 2022a5c5..991544f0 100644
--- a/requirements.lock
+++ b/requirements.lock
@@ -5,18 +5,39 @@
 #   pre: false
 #   features: []
 #   all-features: true
+#   with-sources: false
 
 -e file:.
 annotated-types==0.6.0
+    # via pydantic
 anyio==4.1.0
+    # via groq
+    # via httpx
 certifi==2023.7.22
+    # via httpcore
+    # via httpx
 distro==1.8.0
+    # via groq
 exceptiongroup==1.1.3
+    # via anyio
 h11==0.14.0
+    # via httpcore
 httpcore==1.0.2
+    # via httpx
 httpx==0.25.2
+    # via groq
 idna==3.4
+    # via anyio
+    # via httpx
 pydantic==2.4.2
+    # via groq
 pydantic-core==2.10.1
+    # via pydantic
 sniffio==1.3.0
+    # via anyio
+    # via groq
+    # via httpx
 typing-extensions==4.8.0
+    # via groq
+    # via pydantic
+    # via pydantic-core
diff --git a/src/groq/_version.py b/src/groq/_version.py
index 1612b721..b44df410 100644
--- a/src/groq/_version.py
+++ b/src/groq/_version.py
@@ -1,4 +1,4 @@
 # File generated from our OpenAPI spec by Stainless.
 
 __title__ = "groq"
-__version__ = "0.4.0"  # x-release-please-version
+__version__ = "0.4.1"  # x-release-please-version