From 1df453b03f1fa390e29510c5204676bd4f7de68b Mon Sep 17 00:00:00 2001
From: Ian Macleod
Date: Tue, 8 Aug 2023 23:47:12 +0000
Subject: [PATCH 1/3] bumping version, modifying docs

---
 clients/python/llmengine/__init__.py | 2 +-
 clients/python/pyproject.toml        | 2 +-
 clients/python/setup.py              | 2 +-
 docs/guides/endpoint_creation.md     | 3 ++-
 4 files changed, 5 insertions(+), 4 deletions(-)

diff --git a/clients/python/llmengine/__init__.py b/clients/python/llmengine/__init__.py
index 66efafb4..f883b5b8 100644
--- a/clients/python/llmengine/__init__.py
+++ b/clients/python/llmengine/__init__.py
@@ -12,7 +12,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-__version__ = "0.0.0.beta10"
+__version__ = "0.0.0.beta11"
 
 from typing import Sequence
 
diff --git a/clients/python/pyproject.toml b/clients/python/pyproject.toml
index adf2ed20..4adfdb19 100644
--- a/clients/python/pyproject.toml
+++ b/clients/python/pyproject.toml
@@ -1,6 +1,6 @@
 [tool.poetry]
 name = "scale-llm-engine"
-version = "0.0.0.beta10"
+version = "0.0.0.beta11"
 description = "Scale LLM Engine Python client"
 license = "Apache-2.0"
 authors = ["Phil Chen "]
diff --git a/clients/python/setup.py b/clients/python/setup.py
index 1baaf486..b8559ba8 100644
--- a/clients/python/setup.py
+++ b/clients/python/setup.py
@@ -3,6 +3,6 @@
 setup(
     name="scale-llm-engine",
     python_requires=">=3.7",
-    version="0.0.0.beta10",
+    version="0.0.0.beta11",
     packages=find_packages(),
 )
diff --git a/docs/guides/endpoint_creation.md b/docs/guides/endpoint_creation.md
index 2a51d0bc..58c3f998 100644
--- a/docs/guides/endpoint_creation.md
+++ b/docs/guides/endpoint_creation.md
@@ -7,7 +7,8 @@ An example is provided below:
 ```
 model_name = "llama-2-7b.suffix.2023-07-18-12-00-00"
 response = Model.get(model_name)
-while response.status != "READY":
+while response.status.name != "READY":
+    print(response.status.name)
     time.sleep(60)
     response = Model.get(model_name)
 ```

From a1faaaf4635b0325b7d81d84fcd0e65dda6d4975 Mon Sep 17 00:00:00 2001
From: Ian Macleod
Date: Tue, 15 Aug 2023 21:06:23 +0000
Subject: [PATCH 2/3] updating docs for endpoint creation and finetune download

---
 clients/python/llmengine/model.py | 8 ++++----
 docs/guides/endpoint_creation.md  | 5 +++--
 2 files changed, 7 insertions(+), 6 deletions(-)

diff --git a/clients/python/llmengine/model.py b/clients/python/llmengine/model.py
index cd2191e3..593f714f 100644
--- a/clients/python/llmengine/model.py
+++ b/clients/python/llmengine/model.py
@@ -380,9 +380,9 @@ def download(
 
         This API can be used to download the resulting model from a fine-tuning job.
         It takes the `model_name` and `download_format` as parameter and returns a
-        response object which contains a list of urls associated with the fine-tuned model.
-        The user can then download these urls to obtain the fine-tuned model. If called
-        on a nonexistent model, an error will be thrown.
+        response object which contains a dictionary of filename, url pairs associated 
+        with the fine-tuned model. The user can then download these urls to obtain 
+        the fine-tuned model. If called on a nonexistent model, an error will be thrown.
 
         Args:
             model_name (`str`):
@@ -404,7 +404,7 @@ def download(
         === "Response in JSON"
             ```json
             {
-                "urls": {"my_model_file": 'https://url-to-my-model-weights'}
+                "urls": {"my_model_file": "https://url-to-my-model-weights"}
             }
             ```
         """
diff --git a/docs/guides/endpoint_creation.md b/docs/guides/endpoint_creation.md
index 58c3f998..e16602b7 100644
--- a/docs/guides/endpoint_creation.md
+++ b/docs/guides/endpoint_creation.md
@@ -3,9 +3,10 @@ track the status of your model endpoint. In general, you'll need to wait after t
 model creation step for the model endpoint to be ready and available for use.
 An example is provided below:
-*Assuming the user has created a model named "llama-2-7b.suffix.2023-07-18-12-00-00"*
+
 
 ```
-model_name = "llama-2-7b.suffix.2023-07-18-12-00-00"
+model_name = "test_deploy"
+model = Model.create(name=model_name, model="llama-2-7b", inference_framework_image_tag="0.9.4")
 response = Model.get(model_name)
 while response.status.name != "READY":
     print(response.status.name)

From 254cfc822b13555d5ee7109973ff955e7063cadb Mon Sep 17 00:00:00 2001
From: Ian Macleod
Date: Tue, 15 Aug 2023 22:11:55 +0000
Subject: [PATCH 3/3] updating to fix pre-commit issue

---
 clients/python/llmengine/model.py | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/clients/python/llmengine/model.py b/clients/python/llmengine/model.py
index 593f714f..26bbcf2d 100644
--- a/clients/python/llmengine/model.py
+++ b/clients/python/llmengine/model.py
@@ -380,8 +380,8 @@ def download(
 
         This API can be used to download the resulting model from a fine-tuning job.
         It takes the `model_name` and `download_format` as parameter and returns a
-        response object which contains a dictionary of filename, url pairs associated 
-        with the fine-tuned model. The user can then download these urls to obtain 
+        response object which contains a dictionary of filename, url pairs associated
+        with the fine-tuned model. The user can then download these urls to obtain
         the fine-tuned model. If called on a nonexistent model, an error will be thrown.
 
         Args:
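
The updated docstring says the user "can then download these urls to obtain the fine-tuned model" but stops short of showing that step. Below is a minimal sketch, not part of these patches: it assumes the `requests` package is available and that `Model.download` returns an object whose `urls` attribute is the filename-to-URL mapping shown in the JSON example above.

```python
import os

import requests  # assumed available; any HTTP client works

from llmengine import Model

# Hypothetical fine-tuned model name, for illustration only.
model_name = "llama-2-7b.suffix.2023-07-18-12-00-00"

# Request the download URLs; the response carries a {filename: url} mapping.
response = Model.download(model_name, download_format="hugging_face")

for filename, url in response.urls.items():
    # Stream each file to disk under its reported filename.
    with requests.get(url, stream=True) as r:
        r.raise_for_status()
        with open(os.path.basename(filename), "wb") as f:
            for chunk in r.iter_content(chunk_size=1 << 20):
                f.write(chunk)
```

The `download_format="hugging_face"` argument and the `urls` attribute name follow the docstring and JSON example above; adjust them if the installed client exposes different names.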