diff --git a/docs/cluster-management/install.md b/docs/cluster-management/install.md index 0525b52368..19eb6673e2 100644 --- a/docs/cluster-management/install.md +++ b/docs/cluster-management/install.md @@ -19,13 +19,16 @@ See [here](../miscellaneous/cli.md#install-cortex-cli-without-python-client) to ```bash # clone the Cortex repository git clone -b master https://github.com/cortexlabs/cortex.git + +# navigate to the PyTorch text generator example +cd cortex/examples/pytorch/text-generator ``` ### Using the CLI ```bash # deploy the model as a realtime api -cortex deploy cortex/examples/pytorch/text-generator/cortex.yaml +cortex deploy # view the status of the api cortex get --watch @@ -39,7 +42,7 @@ cortex get text-generator # generate text curl \ -X POST -H "Content-Type: application/json" \ - -d '{"text": "machine learning is"}' \ + -d '{"text": "machine learning is"}' # delete the api cortex delete text-generator @@ -54,7 +57,7 @@ import requests local_client = cortex.client("local") # deploy the model as a realtime api and wait for it to become active -deployments = local_client.deploy("cortex/examples/pytorch/text-generator/cortex.yaml", wait=True) +deployments = local_client.deploy("./cortex.yaml", wait=True) # get the api's endpoint url = deployments[0]["api"]["endpoint"] diff --git a/pkg/workloads/cortex/client/README.md b/pkg/workloads/cortex/client/README.md index d54fe029d5..38eb3abac4 100644 --- a/pkg/workloads/cortex/client/README.md +++ b/pkg/workloads/cortex/client/README.md @@ -38,6 +38,9 @@ You must have [Docker](https://docs.docker.com/install) installed to run Cortex ```bash # clone the Cortex repository git clone -b master https://github.com/cortexlabs/cortex.git + +# navigate to the PyTorch text generator example +cd cortex/examples/pytorch/text-generator ``` ### In Python ```python import cortex import requests local_client = cortex.client("local") # deploy the model as a realtime api and wait for it to become active -deployments = 
local_client.deploy("cortex/examples/pytorch/text-generator/cortex.yaml", wait=True) +deployments = local_client.deploy("./cortex.yaml", wait=True) # get the api's endpoint url = deployments[0]["api"]["endpoint"] @@ -63,7 +66,7 @@ local_client.delete_api("text-generator") ### Using the CLI ```bash # deploy the model as a realtime api -cortex deploy cortex/examples/pytorch/text-generator/cortex.yaml +cortex deploy # view the status of the api cortex get --watch @@ -77,7 +80,7 @@ cortex get text-generator # generate text curl \ -X POST -H "Content-Type: application/json" \ - -d '{"text": "machine learning is"}' \ + -d '{"text": "machine learning is"}' # delete the api cortex delete text-generator diff --git a/pkg/workloads/cortex/client/cortex/binary/__init__.py b/pkg/workloads/cortex/client/cortex/binary/__init__.py index 0c0b8541f6..f7cf580015 100644 --- a/pkg/workloads/cortex/client/cortex/binary/__init__.py +++ b/pkg/workloads/cortex/client/cortex/binary/__init__.py @@ -87,6 +87,7 @@ def run_cli( if not hide_output: if (not mixed_output) or (mixed_output and not result_found): sys.stdout.write(c) + sys.stdout.flush() process.wait()