diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 9885b3b51b..dddac1c00c 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -274,19 +274,12 @@ If you want to switch back to the in-cluster operator: 1. `` to stop your local operator 2. `make operator-start-aws` to restart the operator in your cluster -If you only want to test Cortex's local environment, here is the common workflow: - -1. `make cli-watch` (builds the CLI and re-builds it when files are changed) -2. Make your changes -3. `make images-dev-local` (only necessary if API images or the manager are modified) -4. Test your changes e.g. via `cortex deploy` (and repeat steps 2 and 3 as necessary) - ### Dev workflow optimizations If you are only modifying the CLI, `make cli-watch` will build the CLI and re-build it when files are changed. When doing this, you can leave the operator running in the cluster instead of running it locally. If you are only modifying the operator, `make operator-local-aws` will build and start the operator locally, and build/restart it when files are changed. -If you are modifying code in the API images (i.e. any of the Python serving code), `make images-dev-aws` may build more images than you need during testing. For example, if you are only testing using the `python-predictor-cpu` image, you can run `./dev/registry.sh update-single python-predictor-cpu --provider aws` (or use `--provider local` if testing locally). +If you are modifying code in the API images (i.e. any of the Python serving code), `make images-dev-aws` may build more images than you need during testing. For example, if you are only testing using the `python-predictor-cpu` image, you can run `./dev/registry.sh update-single python-predictor-cpu --provider aws`. See `Makefile` for additional dev commands. 
diff --git a/Makefile b/Makefile index 2fc7af2e5e..ba18de55fd 100644 --- a/Makefile +++ b/Makefile @@ -91,33 +91,33 @@ cluster-up-gcp-y: @$(MAKE) kubectl-gcp cluster-down-aws: - @$(MAKE) images-manager-local + @$(MAKE) images-manager-skip-push @$(MAKE) cli @kill $(shell pgrep -f rerun) >/dev/null 2>&1 || true @./bin/cortex cluster down --config=./dev/config/cluster-aws.yaml --aws-key="$$AWS_ACCESS_KEY_ID" --aws-secret=$$AWS_SECRET_ACCESS_KEY cluster-down-gcp: - @$(MAKE) images-manager-local + @$(MAKE) images-manager-skip-push @$(MAKE) cli @kill $(shell pgrep -f rerun) >/dev/null 2>&1 || true @./bin/cortex cluster-gcp down --config=./dev/config/cluster-gcp.yaml cluster-down-aws-y: - @$(MAKE) images-manager-local + @$(MAKE) images-manager-skip-push @$(MAKE) cli @kill $(shell pgrep -f rerun) >/dev/null 2>&1 || true @./bin/cortex cluster down --config=./dev/config/cluster-aws.yaml --aws-key="$$AWS_ACCESS_KEY_ID" --aws-secret="$$AWS_SECRET_ACCESS_KEY" --yes cluster-down-gcp-y: - @$(MAKE) images-manager-local + @$(MAKE) images-manager-skip-push @$(MAKE) cli @kill $(shell pgrep -f rerun) >/dev/null 2>&1 || true @./bin/cortex cluster-gcp down --config=./dev/config/cluster-gcp.yaml --yes cluster-info-aws: - @$(MAKE) images-manager-local + @$(MAKE) images-manager-skip-push @$(MAKE) cli @eval $$(python3 ./manager/cluster_config_env.py ./dev/config/cluster-aws.yaml) && ./bin/cortex cluster info --config=./dev/config/cluster-aws.yaml --configure-env="$$CORTEX_CLUSTER_NAME-aws" --aws-key="$$AWS_ACCESS_KEY_ID" --aws-secret="$$AWS_SECRET_ACCESS_KEY" --yes cluster-info-gcp: - @$(MAKE) images-manager-local + @$(MAKE) images-manager-skip-push @$(MAKE) cli @eval $$(python3 ./manager/cluster_config_env.py ./dev/config/cluster-gcp.yaml) && ./bin/cortex cluster-gcp info --config=./dev/config/cluster-gcp.yaml --configure-env="$$CORTEX_CLUSTER_NAME-gcp" --yes @@ -181,49 +181,47 @@ operator-update-gcp: # Docker images +images-all-skip-push: + @./dev/registry.sh update all images-all-aws: 
@./dev/registry.sh update all -p aws images-all-gcp: @./dev/registry.sh update all -p gcp -images-all-local: - @./dev/registry.sh update all -p local +images-all-slim-skip-push: + @./dev/registry.sh update all --include-slim images-all-slim-aws: @./dev/registry.sh update all -p aws --include-slim images-all-slim-gcp: @./dev/registry.sh update all -p gcp --include-slim -images-all-slim-local: - @./dev/registry.sh update all -p local --include-slim +images-dev-skip-push: + @./dev/registry.sh update dev images-dev-aws: @./dev/registry.sh update dev -p aws images-dev-gcp: @./dev/registry.sh update dev -p gcp -images-dev-local: - @./dev/registry.sh update dev -p local +images-dev-slim-skip-push: + @./dev/registry.sh update dev --include-slim images-dev-slim-aws: @./dev/registry.sh update dev -p aws --include-slim images-dev-slim-gcp: @./dev/registry.sh update dev -p gcp --include-slim -images-dev-slim-local: - @./dev/registry.sh update dev -p local --include-slim +images-api-skip-push: + @./dev/registry.sh update api images-api-aws: @./dev/registry.sh update api -p aws images-api-gcp: @./dev/registry.sh update api -p gcp -images-api-local: - @./dev/registry.sh update api -p local +images-api-slim-skip-push: + @./dev/registry.sh update api --include-slim images-api-slim-aws: @./dev/registry.sh update api -p aws --include-slim images-api-slim-gcp: @./dev/registry.sh update api -p gcp --include-slim -images-api-slim-local: - @./dev/registry.sh update api -p local --include-slim -images-manager-local: - @./dev/registry.sh update-single manager -p local -images-iris-local: - @./dev/registry.sh update-single python-predictor-cpu -p local +images-manager-skip-push: + @./dev/registry.sh update-single manager images-iris-aws: @./dev/registry.sh update-single python-predictor-cpu -p aws images-iris-gcp: @@ -234,8 +232,6 @@ registry-create-aws: registry-clean-aws: @./dev/registry.sh clean -p aws -registry-clean-local: - @./dev/registry.sh clean -p local # Misc diff --git 
a/build/images.sh b/build/images.sh index cca5997bb4..f7074f7b3c 100644 --- a/build/images.sh +++ b/build/images.sh @@ -19,69 +19,57 @@ set -euo pipefail -api_images_local=( +api_images_cluster=( "python-predictor-cpu" "python-predictor-gpu" "tensorflow-predictor" "onnx-predictor-cpu" "onnx-predictor-gpu" ) -api_images_cluster=( - # includes api_images_local -) api_images_aws=( - # includes api_images_local and api_images_cluster + # includes api_images_cluster "python-predictor-inf" ) api_images_gcp=( - # includes api_images_local and api_images_cluster + # includes api_images_cluster ) -api_slim_images_local=( +api_slim_images_cluster=( "python-predictor-cpu-slim" "python-predictor-gpu-slim" "tensorflow-predictor-slim" "onnx-predictor-cpu-slim" "onnx-predictor-gpu-slim" ) -api_slim_images_cluster=( - # includes api_slim_images_local -) api_slim_images_aws=( - # includes api_slim_images_local and api_slim_images_cluster + # includes api_slim_images_cluster "python-predictor-inf-slim" ) api_slim_images_gcp=( - # includes api_slim_images_local and api_slim_images_cluster + # includes api_slim_images_cluster ) -dev_images_local=( - "downloader" -) dev_images_cluster=( - # includes dev_images_local + "downloader" "manager" ) dev_images_aws=( - # includes dev_images_local and dev_images_cluster + # includes dev_images_cluster "request-monitor" ) dev_images_gcp=( - # includes dev_images_local and dev_images_cluster + # includes dev_images_cluster ) -non_dev_images_local=( +non_dev_images_cluster=( "tensorflow-serving-cpu" "tensorflow-serving-gpu" -) -non_dev_images_cluster=( - # includes non_dev_images_local "operator" "istio-proxy" "istio-pilot" ) non_dev_images_aws=( - # includes non_dev_images_local and non_dev_images_cluster + # includes non_dev_images_cluster "tensorflow-serving-inf" "cluster-autoscaler" "metrics-server" @@ -92,55 +80,43 @@ non_dev_images_aws=( "statsd" ) non_dev_images_gcp=( - # includes non_dev_images_local and non_dev_images_cluster + # includes 
non_dev_images_cluster "google-pause" ) all_images=( - "${api_images_local[@]}" "${api_images_cluster[@]}" "${api_images_aws[@]}" "${api_images_gcp[@]}" - "${api_slim_images_local[@]}" "${api_slim_images_cluster[@]}" "${api_slim_images_aws[@]}" "${api_slim_images_gcp[@]}" - "${dev_images_local[@]}" "${dev_images_cluster[@]}" "${dev_images_aws[@]}" "${dev_images_gcp[@]}" - "${non_dev_images_local[@]}" "${non_dev_images_cluster[@]}" "${non_dev_images_aws[@]}" "${non_dev_images_gcp[@]}" ) aws_images=( - "${api_images_local[@]}" "${api_images_cluster[@]}" "${api_images_aws[@]}" - "${api_slim_images_local[@]}" "${api_slim_images_cluster[@]}" "${api_slim_images_aws[@]}" - "${dev_images_local[@]}" "${dev_images_cluster[@]}" "${dev_images_aws[@]}" - "${non_dev_images_local[@]}" "${non_dev_images_cluster[@]}" "${non_dev_images_aws[@]}" ) gcp_images=( - "${api_images_local[@]}" "${api_images_cluster[@]}" "${api_images_gcp[@]}" - "${api_slim_images_local[@]}" "${api_slim_images_cluster[@]}" "${api_slim_images_gcp[@]}" - "${dev_images_local[@]}" "${dev_images_cluster[@]}" "${dev_images_gcp[@]}" - "${non_dev_images_local[@]}" "${non_dev_images_cluster[@]}" "${non_dev_images_gcp[@]}" ) diff --git a/cli/cmd/cluster.go b/cli/cmd/cluster.go index 1135007cf0..28a146c266 100644 --- a/cli/cmd/cluster.go +++ b/cli/cmd/cluster.go @@ -70,8 +70,7 @@ func clusterInit() { addClusterConfigFlag(_clusterUpCmd) addAWSCredentialsFlags(_clusterUpCmd) addClusterAWSCredentialsFlags(_clusterUpCmd) - defaultEnv := getDefaultEnv(_clusterCommandType) - _clusterUpCmd.Flags().StringVarP(&_flagClusterUpEnv, "configure-env", "e", defaultEnv, "name of environment to configure") + _clusterUpCmd.Flags().StringVarP(&_flagClusterUpEnv, "configure-env", "e", "aws", "name of environment to configure") _clusterUpCmd.Flags().BoolVarP(&_flagClusterDisallowPrompt, "yes", "y", false, "skip prompts") _clusterCmd.AddCommand(_clusterUpCmd) @@ -144,10 +143,6 @@ var _clusterUpCmd = &cobra.Command{ Run: func(cmd 
*cobra.Command, args []string) { telemetry.EventNotify("cli.cluster.up", map[string]interface{}{"provider": types.AWSProviderType}) - if _flagClusterUpEnv == "local" { - exit.Error(ErrorLocalEnvironmentCantUseClusterProvider(types.AWSProviderType)) - } - envExists, err := isEnvConfigured(_flagClusterUpEnv) if err != nil { exit.Error(err) @@ -322,10 +317,6 @@ var _clusterConfigureCmd = &cobra.Command{ Run: func(cmd *cobra.Command, args []string) { telemetry.Event("cli.cluster.configure", map[string]interface{}{"provider": types.AWSProviderType}) - if _flagClusterConfigureEnv == "local" { - exit.Error(ErrorLocalEnvironmentCantUseClusterProvider(types.AWSProviderType)) - } - if _, err := docker.GetDockerClient(); err != nil { exit.Error(err) } @@ -403,10 +394,6 @@ var _clusterInfoCmd = &cobra.Command{ Run: func(cmd *cobra.Command, args []string) { telemetry.Event("cli.cluster.info", map[string]interface{}{"provider": types.AWSProviderType}) - if _flagClusterInfoEnv == "local" { - exit.Error(ErrorLocalEnvironmentCantUseClusterProvider(types.AWSProviderType)) - } - if _, err := docker.GetDockerClient(); err != nil { exit.Error(err) } @@ -532,11 +519,20 @@ var _clusterDownCmd = &cobra.Command{ envNames, isDefaultEnv, _ := getEnvNamesByOperatorEndpoint(*loadBalancer.DNSName) if len(envNames) > 0 { for _, envName := range envNames { - removeEnvFromCLIConfig(envName) + err := removeEnvFromCLIConfig(envName) + if err != nil { + exit.Error(err) + } } fmt.Printf("✓ deleted the %s environment configuration%s\n", s.StrsAnd(envNames), s.SIfPlural(len(envNames))) if isDefaultEnv { - fmt.Println("✓ set the default environment to local") + newDefaultEnv, err := getDefaultEnv() + if err != nil { + exit.Error(err) + } + if newDefaultEnv != nil { + fmt.Println(fmt.Sprintf("✓ set the default environment to %s", *newDefaultEnv)) + } } } } diff --git a/cli/cmd/cluster_gcp.go b/cli/cmd/cluster_gcp.go index 397d626b6c..8531d48dd9 100644 --- a/cli/cmd/cluster_gcp.go +++ 
b/cli/cmd/cluster_gcp.go @@ -54,8 +54,7 @@ var ( func clusterGCPInit() { _clusterGCPUpCmd.Flags().SortFlags = false addClusterGCPConfigFlag(_clusterGCPUpCmd) - defaultEnv := getDefaultEnv(_clusterGCPCommandType) - _clusterGCPUpCmd.Flags().StringVarP(&_flagClusterGCPUpEnv, "configure-env", "e", defaultEnv, "name of environment to configure") + _clusterGCPUpCmd.Flags().StringVarP(&_flagClusterGCPUpEnv, "configure-env", "e", "gcp", "name of environment to configure") addClusterGCPDisallowPromptFlag(_clusterGCPUpCmd) _clusterGCPCmd.AddCommand(_clusterGCPUpCmd) @@ -111,10 +110,6 @@ var _clusterGCPUpCmd = &cobra.Command{ Run: func(cmd *cobra.Command, args []string) { telemetry.EventNotify("cli.cluster.up", map[string]interface{}{"provider": types.GCPProviderType}) - if _flagClusterGCPUpEnv == "local" { - exit.Error(ErrorLocalEnvironmentCantUseClusterProvider(types.GCPProviderType)) - } - envExists, err := isEnvConfigured(_flagClusterGCPUpEnv) if err != nil { exit.Error(err) @@ -198,10 +193,6 @@ var _clusterGCPInfoCmd = &cobra.Command{ Run: func(cmd *cobra.Command, args []string) { telemetry.Event("cli.cluster.info", map[string]interface{}{"provider": types.GCPProviderType}) - if _flagClusterGCPInfoEnv == "local" { - exit.Error(ErrorLocalEnvironmentCantUseClusterProvider(types.GCPProviderType)) - } - if _, err := docker.GetDockerClient(); err != nil { exit.Error(err) } @@ -211,6 +202,12 @@ var _clusterGCPInfoCmd = &cobra.Command{ exit.Error(err) } + // need to ensure that the google creds are configured for the manager + _, err = gcp.NewFromEnvCheckProjectID(*accessConfig.Project) + if err != nil { + exit.Error(err) + } + if _flagClusterGCPInfoDebug { cmdDebugGCP(accessConfig) } else { @@ -226,10 +223,6 @@ var _clusterGCPDownCmd = &cobra.Command{ Run: func(cmd *cobra.Command, args []string) { telemetry.Event("cli.cluster.down", map[string]interface{}{"provider": types.GCPProviderType}) - if _flagClusterGCPUpEnv == "local" { - 
exit.Error(ErrorLocalEnvironmentCantUseClusterProvider(types.GCPProviderType)) - } - if _, err := docker.GetDockerClient(); err != nil { exit.Error(err) } @@ -281,11 +274,20 @@ var _clusterGCPDownCmd = &cobra.Command{ envNames, isDefaultEnv, _ := getEnvNamesByOperatorEndpoint(operatorLoadBalancerIP) if len(envNames) > 0 { for _, envName := range envNames { - removeEnvFromCLIConfig(envName) + err := removeEnvFromCLIConfig(envName) + if err != nil { + exit.Error(err) + } } fmt.Printf("✓ deleted the %s environment configuration%s\n", s.StrsAnd(envNames), s.SIfPlural(len(envNames))) if isDefaultEnv { - fmt.Println("✓ set the default environment to local") + newDefaultEnv, err := getDefaultEnv() + if err != nil { + exit.Error(err) + } + if newDefaultEnv != nil { + fmt.Println(fmt.Sprintf("✓ set the default environment to %s", *newDefaultEnv)) + } } } } diff --git a/cli/cmd/delete.go b/cli/cmd/delete.go index a4b9b15980..cbb4d58c2d 100644 --- a/cli/cmd/delete.go +++ b/cli/cmd/delete.go @@ -21,14 +21,12 @@ import ( "strings" "github.com/cortexlabs/cortex/cli/cluster" - "github.com/cortexlabs/cortex/cli/local" "github.com/cortexlabs/cortex/cli/types/flags" "github.com/cortexlabs/cortex/pkg/lib/exit" libjson "github.com/cortexlabs/cortex/pkg/lib/json" "github.com/cortexlabs/cortex/pkg/lib/print" "github.com/cortexlabs/cortex/pkg/lib/telemetry" "github.com/cortexlabs/cortex/pkg/operator/schema" - "github.com/cortexlabs/cortex/pkg/types" "github.com/spf13/cobra" ) @@ -40,9 +38,8 @@ var ( func deleteInit() { _deleteCmd.Flags().SortFlags = false - _deleteCmd.Flags().StringVarP(&_flagDeleteEnv, "env", "e", getDefaultEnv(_generalCommandType), "environment to use") + _deleteCmd.Flags().StringVarP(&_flagDeleteEnv, "env", "e", "", "environment to use") - // only applies to aws provider because local doesn't support multiple replicas _deleteCmd.Flags().BoolVarP(&_flagDeleteForce, "force", "f", false, "delete the api without confirmation") 
_deleteCmd.Flags().BoolVarP(&_flagDeleteKeepCache, "keep-cache", "c", false, "keep cached data for the api") _deleteCmd.Flags().VarP(&_flagOutput, "output", "o", fmt.Sprintf("output format: one of %s", strings.Join(flags.UserOutputTypeStrings(), "|"))) @@ -53,38 +50,32 @@ var _deleteCmd = &cobra.Command{ Short: "delete any kind of api or stop a batch job", Args: cobra.RangeArgs(1, 2), Run: func(cmd *cobra.Command, args []string) { - env, err := ReadOrConfigureEnv(_flagDeleteEnv) + envName, err := getEnvFromFlag(_flagDeleteEnv) + if err != nil { + telemetry.Event("cli.delete") + exit.Error(err) + } + + env, err := ReadOrConfigureEnv(envName) if err != nil { telemetry.Event("cli.delete") exit.Error(err) } telemetry.Event("cli.delete", map[string]interface{}{"provider": env.Provider.String(), "env_name": env.Name}) - err = printEnvIfNotSpecified(_flagDeleteEnv, cmd) + err = printEnvIfNotSpecified(env.Name, cmd) if err != nil { exit.Error(err) } var deleteResponse schema.DeleteResponse - if env.Provider != types.LocalProviderType { - if len(args) == 2 { - deleteResponse, err = cluster.StopJob(MustGetOperatorConfig(env.Name), args[0], args[1]) - if err != nil { - exit.Error(err) - } - } else { - deleteResponse, err = cluster.Delete(MustGetOperatorConfig(env.Name), args[0], _flagDeleteKeepCache, _flagDeleteForce) - if err != nil { - exit.Error(err) - } + if len(args) == 2 { + deleteResponse, err = cluster.StopJob(MustGetOperatorConfig(env.Name), args[0], args[1]) + if err != nil { + exit.Error(err) } } else { - if len(args) == 2 { - exit.Error(ErrorNotSupportedInLocalEnvironment(), fmt.Sprintf("cannot delete job %s for api %s", args[1], args[0])) - } - - // local only supports deploying 1 replica at a time, so _flagDeleteForce is only useful when attempting to delete an API that has been deployed with different CLI version - deleteResponse, err = local.Delete(args[0], _flagDeleteKeepCache, _flagDeleteForce) + deleteResponse, err = 
cluster.Delete(MustGetOperatorConfig(env.Name), args[0], _flagDeleteKeepCache, _flagDeleteForce) if err != nil { exit.Error(err) } diff --git a/cli/cmd/deploy.go b/cli/cmd/deploy.go index 08fbe81b3f..c8db8fc51c 100644 --- a/cli/cmd/deploy.go +++ b/cli/cmd/deploy.go @@ -22,7 +22,6 @@ import ( "strings" "github.com/cortexlabs/cortex/cli/cluster" - "github.com/cortexlabs/cortex/cli/local" "github.com/cortexlabs/cortex/cli/types/flags" "github.com/cortexlabs/cortex/pkg/lib/archive" "github.com/cortexlabs/cortex/pkg/lib/errors" @@ -56,7 +55,7 @@ var ( func deployInit() { _deployCmd.Flags().SortFlags = false - _deployCmd.Flags().StringVarP(&_flagDeployEnv, "env", "e", getDefaultEnv(_generalCommandType), "environment to use") + _deployCmd.Flags().StringVarP(&_flagDeployEnv, "env", "e", "", "environment to use") _deployCmd.Flags().BoolVarP(&_flagDeployForce, "force", "f", false, "override the in-progress api update") _deployCmd.Flags().BoolVarP(&_flagDeployDisallowPrompt, "yes", "y", false, "skip prompts") _deployCmd.Flags().VarP(&_flagOutput, "output", "o", fmt.Sprintf("output format: one of %s", strings.Join(flags.UserOutputTypeStrings(), "|"))) @@ -67,14 +66,20 @@ var _deployCmd = &cobra.Command{ Short: "create or update apis", Args: cobra.RangeArgs(0, 1), Run: func(cmd *cobra.Command, args []string) { - env, err := ReadOrConfigureEnv(_flagDeployEnv) + envName, err := getEnvFromFlag(_flagDeployEnv) + if err != nil { + telemetry.Event("cli.deploy") + exit.Error(err) + } + + env, err := ReadOrConfigureEnv(envName) if err != nil { telemetry.Event("cli.deploy") exit.Error(err) } telemetry.Event("cli.deploy", map[string]interface{}{"provider": env.Provider.String(), "env_name": env.Name}) - err = printEnvIfNotSpecified(_flagDeployEnv, cmd) + err = printEnvIfNotSpecified(env.Name, cmd) if err != nil { exit.Error(err) } @@ -89,28 +94,14 @@ var _deployCmd = &cobra.Command{ exit.Error(ErrorDeployFromTopLevelDir("root", env.Provider)) } - var deployResults []schema.DeployResult - 
if env.Provider == types.LocalProviderType { - projectFiles, err := findProjectFiles(env.Provider, configPath) - if err != nil { - exit.Error(err) - } - - local.OutputType = _flagOutput // Set output type for the Local package - deployResults, err = local.Deploy(env, configPath, projectFiles, _flagDeployDisallowPrompt) - if err != nil { - exit.Error(err) - } - } else { - deploymentBytes, err := getDeploymentBytes(env.Provider, configPath) - if err != nil { - exit.Error(err) - } + deploymentBytes, err := getDeploymentBytes(env.Provider, configPath) + if err != nil { + exit.Error(err) + } - deployResults, err = cluster.Deploy(MustGetOperatorConfig(env.Name), configPath, deploymentBytes, _flagDeployForce) - if err != nil { - exit.Error(err) - } + deployResults, err := cluster.Deploy(MustGetOperatorConfig(env.Name), configPath, deploymentBytes, _flagDeployForce) + if err != nil { + exit.Error(err) } switch _flagOutput { @@ -126,7 +117,10 @@ var _deployCmd = &cobra.Command{ exit.Error(err) } case flags.PrettyOutputType: - message := deployMessage(deployResults, env.Name) + message, err := deployMessage(deployResults, env.Name) + if err != nil { + exit.Error(err) + } if didAnyResultsError(deployResults) { print.StderrBoldFirstBlock(message) } else { @@ -179,16 +173,14 @@ func findProjectFiles(provider types.ProviderType, configPath string) ([]string, ignoreFns = append(ignoreFns, cortexIgnore) } - if provider != types.LocalProviderType { - if !_flagDeployDisallowPrompt { - ignoreFns = append(ignoreFns, files.PromptForFilesAboveSize(_warningFileBytes, "do you want to upload %s (%s)?")) - } - ignoreFns = append(ignoreFns, - files.ErrorOnBigFilesFn(_maxFileSizeBytes), - // must be the last appended IgnoreFn - files.ErrorOnProjectSizeLimit(_maxProjectSizeBytes), - ) + if !_flagDeployDisallowPrompt { + ignoreFns = append(ignoreFns, files.PromptForFilesAboveSize(_warningFileBytes, "do you want to upload %s (%s)?")) } + ignoreFns = append(ignoreFns, + 
files.ErrorOnBigFilesFn(_maxFileSizeBytes), + // must be the last appended IgnoreFn + files.ErrorOnProjectSizeLimit(_maxProjectSizeBytes), + ) projectPaths, err := files.ListDirRecursive(projectRoot, false, ignoreFns...) if err != nil { @@ -255,16 +247,19 @@ func getDeploymentBytes(provider types.ProviderType, configPath string) (map[str return uploadBytes, nil } -func deployMessage(results []schema.DeployResult, envName string) string { +func deployMessage(results []schema.DeployResult, envName string) (string, error) { statusMessage := mergeResultMessages(results) if didAllResultsError(results) { - return statusMessage + return statusMessage, nil } - apiCommandsMessage := getAPICommandsMessage(results, envName) + apiCommandsMessage, err := getAPICommandsMessage(results, envName) + if err != nil { + return "", err + } - return statusMessage + "\n\n" + apiCommandsMessage + return statusMessage + "\n\n" + apiCommandsMessage, nil } func mergeResultMessages(results []schema.DeployResult) string { @@ -313,15 +308,18 @@ func didAnyResultsError(results []schema.DeployResult) bool { return false } -func getAPICommandsMessage(results []schema.DeployResult, envName string) string { +func getAPICommandsMessage(results []schema.DeployResult, envName string) (string, error) { apiName := "" if len(results) == 1 { apiName = results[0].API.Spec.Name } - defaultEnv := getDefaultEnv(_generalCommandType) + defaultEnv, err := getDefaultEnv() + if err != nil { + return "", err + } var envArg string - if envName != defaultEnv { + if defaultEnv == nil || envName != *defaultEnv { envArg = " --env " + envName } @@ -342,5 +340,5 @@ func getAPICommandsMessage(results []schema.DeployResult, envName string) string return strings.TrimSpace(items.String(&table.KeyValuePairOpts{ Delimiter: pointer.String(""), NumSpaces: pointer.Int(2), - })) + })), nil } diff --git a/cli/cmd/env.go b/cli/cmd/env.go index 46e4c8b260..6bcb771c9f 100644 --- a/cli/cmd/env.go +++ b/cli/cmd/env.go @@ -35,7 +35,6 @@ 
var ( _flagEnvOperatorEndpoint string _flagEnvAWSAccessKeyID string _flagEnvAWSSecretAccessKey string - _flagEnvAWSRegion string ) func envInit() { @@ -44,7 +43,6 @@ func envInit() { _envConfigureCmd.Flags().StringVarP(&_flagEnvOperatorEndpoint, "operator-endpoint", "o", "", "set the operator endpoint without prompting") _envConfigureCmd.Flags().StringVarP(&_flagEnvAWSAccessKeyID, "aws-access-key-id", "k", "", "set the aws access key id without prompting") _envConfigureCmd.Flags().StringVarP(&_flagEnvAWSSecretAccessKey, "aws-secret-access-key", "s", "", "set the aws secret access key without prompting") - _envConfigureCmd.Flags().StringVarP(&_flagEnvAWSRegion, "aws-region", "r", "", "set the aws region without prompting") _envCmd.AddCommand(_envConfigureCmd) _envListCmd.Flags().SortFlags = false @@ -98,17 +96,11 @@ var _envConfigureCmd = &cobra.Command{ skipAWSSecretAccessKey = &_flagEnvAWSSecretAccessKey } - var skipAWSRegion *string - if _flagEnvAWSRegion != "" { - skipAWSRegion = &_flagEnvAWSRegion - } - fieldsToSkipPrompt := cliconfig.Environment{ Provider: skipProvider, OperatorEndpoint: skipOperatorEndpoint, AWSAccessKeyID: skipAWSAccessKeyID, AWSSecretAccessKey: skipAWSSecretAccessKey, - AWSRegion: skipAWSRegion, } if _, err := configureEnv(envName, fieldsToSkipPrompt); err != nil { @@ -138,10 +130,13 @@ var _envListCmd = &cobra.Command{ return } - defaultEnv := getDefaultEnv(_generalCommandType) + defaultEnv, err := getDefaultEnv() + if err != nil { + exit.Error(err) + } for i, env := range cliConfig.Environments { - fmt.Print(env.String(defaultEnv == env.Name)) + fmt.Print(env.String(defaultEnv != nil && *defaultEnv == env.Name)) if i+1 < len(cliConfig.Environments) { fmt.Println() } @@ -156,17 +151,22 @@ var _envDefaultCmd = &cobra.Command{ Run: func(cmd *cobra.Command, args []string) { telemetry.Event("cli.env.default") - defaultEnv := getDefaultEnv(_generalCommandType) + defaultEnv, err := getDefaultEnv() + if err != nil { + exit.Error(err) + } var 
envName string if len(args) == 0 { - fmt.Printf("current default environment: %s\n\n", defaultEnv) + if defaultEnv != nil { + fmt.Printf("current default environment: %s\n\n", *defaultEnv) + } envName = promptForExistingEnvName("name of environment to set as default") } else { envName = args[0] } - if envName == defaultEnv { + if defaultEnv != nil && *defaultEnv == envName { print.BoldFirstLine(fmt.Sprintf("%s is already the default environment", envName)) exit.Ok() } @@ -193,21 +193,25 @@ var _envDeleteCmd = &cobra.Command{ envName = promptForExistingEnvName("name of environment to delete") } - prevDefault := getDefaultEnv(_generalCommandType) + prevDefault, err := getDefaultEnv() + if err != nil { + exit.Error(err) + } if err := removeEnvFromCLIConfig(envName); err != nil { exit.Error(err) } - newDefault := getDefaultEnv(_generalCommandType) - - if envName == types.LocalProviderType.String() { - print.BoldFirstLine(fmt.Sprintf("cleared the %s environment configuration", envName)) - } else { - print.BoldFirstLine(fmt.Sprintf("deleted the %s environment configuration", envName)) + newDefault, err := getDefaultEnv() + if err != nil { + exit.Error(err) } - if prevDefault != newDefault { - print.BoldFirstLine(fmt.Sprintf("set the default environment to %s", newDefault)) + + print.BoldFirstLine(fmt.Sprintf("deleted the %s environment configuration", envName)) + if prevDefault != nil && newDefault == nil { + print.BoldFirstLine("unset the default environment") + } else if newDefault != nil && (prevDefault == nil || *prevDefault != *newDefault) { + print.BoldFirstLine(fmt.Sprintf("set the default environment to %s", *newDefault)) } }, } diff --git a/cli/cmd/errors.go b/cli/cmd/errors.go index bd94fa8b11..14db81515a 100644 --- a/cli/cmd/errors.go +++ b/cli/cmd/errors.go @@ -42,12 +42,11 @@ func errStrFailedToConnect(u url.URL) string { const ( ErrInvalidProvider = "cli.invalid_provider" - ErrNotSupportedInLocalEnvironment = "cli.not_supported_in_local_environment" - 
ErrLocalEnvironmentCantUseClusterProvider = "cli.local_environment_cant_use_cluster_provider" + ErrInvalidLegacyProvider = "cli.invalid_legacy_provider" ErrCommandNotSupportedForKind = "cli.command_not_supported_for_kind" + ErrNoAvailableEnvironment = "cli.no_available_environment" + ErrEnvironmentNotSet = "cli.environment_not_set" ErrEnvironmentNotFound = "cli.environment_not_found" - ErrOperatorEndpointInLocalEnvironment = "cli.operator_endpoint_in_local_environment" - ErrOperatorConfigFromLocalEnvironment = "cli.operater_config_from_local_environment" ErrFieldNotFoundInEnvironment = "cli.field_not_found_in_environment" ErrInvalidOperatorEndpoint = "cli.invalid_operator_endpoint" ErrNoOperatorLoadBalancer = "cli.no_operator_load_balancer" @@ -85,17 +84,10 @@ func ErrorInvalidProvider(providerStr string) error { }) } -func ErrorNotSupportedInLocalEnvironment() error { +func ErrorInvalidLegacyProvider(providerStr, cliConfigPath string) error { return errors.WithStack(&errors.Error{ - Kind: ErrNotSupportedInLocalEnvironment, - Message: "this command is not supported in local environment", - }) -} - -func ErrorLocalEnvironmentCantUseClusterProvider(provider types.ProviderType) error { - return errors.WithStack(&errors.Error{ - Kind: ErrLocalEnvironmentCantUseClusterProvider, - Message: fmt.Sprintf("the environment named \"local\" cannot be configured to point to a cortex cluster in %s", provider), + Kind: ErrInvalidLegacyProvider, + Message: fmt.Sprintf("the %s provider is no longer supported on cortex v%s; remove the environment(s) which use the %s provider from %s or delete %s entirely (it will be recreated on subsequent CLI commands)", providerStr, consts.CortexVersionMinor, providerStr, cliConfigPath, cliConfigPath), }) } @@ -106,33 +98,31 @@ func ErrorCommandNotSupportedForKind(kind userconfig.Kind, command string) error }) } -func ErrorEnvironmentNotFound(envName string) error { +func ErrorNoAvailableEnvironment() error { return errors.WithStack(&errors.Error{ 
- Kind: ErrEnvironmentNotFound, - Message: fmt.Sprintf("unable to find environment named \"%s\"", envName), + Kind: ErrNoAvailableEnvironment, + Message: "no environments are configured; run `cortex cluster up` or `cortex cluster-gcp up` to create a cluster, or run `cortex env configure` to connect to an existing cluster", }) } -// unexpected error if code tries to create operator config from local environment -func ErrorOperatorConfigFromLocalEnvironment() error { +func ErrorEnvironmentNotSet() error { return errors.WithStack(&errors.Error{ - Kind: ErrOperatorConfigFromLocalEnvironment, - Message: "attempting to retrieve cluster operator config from local environment", + Kind: ErrEnvironmentNotSet, + Message: "no environment was provided and the default environment is not set; specify the environment to use via the `-e/--env` flag, or run `cortex env default` to set the default environment", }) } -// unexpected error if code tries to create operator config from local environment -func ErrorFieldNotFoundInEnvironment(fieldName string, envName string) error { +func ErrorEnvironmentNotFound(envName string) error { return errors.WithStack(&errors.Error{ - Kind: ErrFieldNotFoundInEnvironment, - Message: fmt.Sprintf("%s was not found in %s environment", fieldName, envName), + Kind: ErrEnvironmentNotFound, + Message: fmt.Sprintf("unable to find environment named \"%s\"", envName), }) } -func ErrorOperatorEndpointInLocalEnvironment() error { +func ErrorFieldNotFoundInEnvironment(fieldName string, envName string) error { return errors.WithStack(&errors.Error{ - Kind: ErrOperatorEndpointInLocalEnvironment, - Message: fmt.Sprintf("operator_endpoint should not be specified (it's not used in local environment)"), + Kind: ErrFieldNotFoundInEnvironment, + Message: fmt.Sprintf("%s was not found in %s environment", fieldName, envName), }) } @@ -337,12 +327,8 @@ func ErrorNoTerminalWidth() error { } func ErrorDeployFromTopLevelDir(genericDirName string, providerType 
types.ProviderType) error { - targetStr := "cluster" - if providerType == types.LocalProviderType { - targetStr = "API container" - } return errors.WithStack(&errors.Error{ Kind: ErrDeployFromTopLevelDir, - Message: fmt.Sprintf("cannot deploy from your %s directory - when deploying your API, cortex sends all files in your project directory (i.e. the directory which contains cortex.yaml) to your %s (see https://docs.cortex.dev/v/%s/); therefore it is recommended to create a subdirectory for your project files", genericDirName, targetStr, consts.CortexVersionMinor), + Message: fmt.Sprintf("cannot deploy from your %s directory - when deploying your API, cortex sends all files in your project directory (i.e. the directory which contains cortex.yaml) to your cluster (see https://docs.cortex.dev/v/%s/); therefore it is recommended to create a subdirectory for your project files", genericDirName, consts.CortexVersionMinor), }) } diff --git a/cli/cmd/get.go b/cli/cmd/get.go index cc905670c0..9f26a326df 100644 --- a/cli/cmd/get.go +++ b/cli/cmd/get.go @@ -22,7 +22,6 @@ import ( "time" "github.com/cortexlabs/cortex/cli/cluster" - "github.com/cortexlabs/cortex/cli/local" "github.com/cortexlabs/cortex/cli/types/cliconfig" "github.com/cortexlabs/cortex/cli/types/flags" "github.com/cortexlabs/cortex/pkg/lib/console" @@ -30,13 +29,11 @@ import ( "github.com/cortexlabs/cortex/pkg/lib/exit" libjson "github.com/cortexlabs/cortex/pkg/lib/json" "github.com/cortexlabs/cortex/pkg/lib/pointer" - "github.com/cortexlabs/cortex/pkg/lib/sets/strset" s "github.com/cortexlabs/cortex/pkg/lib/strings" "github.com/cortexlabs/cortex/pkg/lib/table" "github.com/cortexlabs/cortex/pkg/lib/telemetry" libtime "github.com/cortexlabs/cortex/pkg/lib/time" "github.com/cortexlabs/cortex/pkg/operator/schema" - "github.com/cortexlabs/cortex/pkg/types" "github.com/cortexlabs/cortex/pkg/types/userconfig" "github.com/spf13/cobra" ) @@ -63,7 +60,7 @@ var ( func getInit() { _getCmd.Flags().SortFlags = false - 
_getCmd.Flags().StringVarP(&_flagGetEnv, "env", "e", getDefaultEnv(_generalCommandType), "environment to use") + _getCmd.Flags().StringVarP(&_flagGetEnv, "env", "e", "", "environment to use") _getCmd.Flags().BoolVarP(&_flagWatch, "watch", "w", false, "re-run the command every 2 seconds") _getCmd.Flags().VarP(&_flagOutput, "output", "o", fmt.Sprintf("output format: one of %s", strings.Join(flags.UserOutputTypeStrings(), "|"))) addVerboseFlag(_getCmd) @@ -74,9 +71,21 @@ var _getCmd = &cobra.Command{ Short: "get information about apis or jobs", Args: cobra.RangeArgs(0, 2), Run: func(cmd *cobra.Command, args []string) { + var envName string + if wasEnvFlagProvided(cmd) { + envName = _flagGetEnv + } else if len(args) > 0 { + var err error + envName, err = getEnvFromFlag("") + if err != nil { + telemetry.Event("cli.get") + exit.Error(err) + } + } + // if API_NAME is specified or env name is provided then the provider is known, otherwise provider isn't because all apis from all environments will be fetched if len(args) == 1 || wasEnvFlagProvided(cmd) { - env, err := ReadOrConfigureEnv(_flagGetEnv) + env, err := ReadOrConfigureEnv(envName) if err != nil { telemetry.Event("cli.get") exit.Error(err) @@ -88,12 +97,12 @@ var _getCmd = &cobra.Command{ rerun(func() (string, error) { if len(args) == 1 { - env, err := ReadOrConfigureEnv(_flagGetEnv) + env, err := ReadOrConfigureEnv(envName) if err != nil { exit.Error(err) } - out, err := envStringIfNotSpecified(_flagGetEnv, cmd) + out, err := envStringIfNotSpecified(envName, cmd) if err != nil { return "", err } @@ -108,20 +117,16 @@ var _getCmd = &cobra.Command{ return out + apiTable, nil } else if len(args) == 2 { - env, err := ReadOrConfigureEnv(_flagGetEnv) + env, err := ReadOrConfigureEnv(envName) if err != nil { exit.Error(err) } - out, err := envStringIfNotSpecified(_flagGetEnv, cmd) + out, err := envStringIfNotSpecified(envName, cmd) if err != nil { return "", err } - if env.Provider == types.LocalProviderType { - return 
"", errors.Wrap(ErrorNotSupportedInLocalEnvironment(), fmt.Sprintf("cannot get status of job %s for api %s", args[1], args[0])) - } - jobTable, err := getJob(env, args[0], args[1]) if err != nil { return "", err @@ -133,12 +138,12 @@ var _getCmd = &cobra.Command{ return out + jobTable, nil } else { if wasEnvFlagProvided(cmd) { - env, err := ReadOrConfigureEnv(_flagGetEnv) + env, err := ReadOrConfigureEnv(envName) if err != nil { exit.Error(err) } - out, err := envStringIfNotSpecified(_flagGetEnv, cmd) + out, err := envStringIfNotSpecified(envName, cmd) if err != nil { return "", err } @@ -190,13 +195,7 @@ func getAPIsInAllEnvironments() (string, error) { errorsMap := map[string]error{} // get apis from both environments for _, env := range cliConfig.Environments { - var apisRes []schema.APIResponse - var err error - if env.Provider == types.LocalProviderType { - apisRes, err = local.GetAPIs() - } else { - apisRes, err = cluster.GetAPIs(MustGetOperatorConfig(env.Name)) - } + apisRes, err := cluster.GetAPIs(MustGetOperatorConfig(env.Name)) apisOutput := getAPIsOutput{ EnvName: env.Name, @@ -240,11 +239,6 @@ func getAPIsInAllEnvironments() (string, error) { // check if any environments errorred if len(errorsMap) != len(cliConfig.Environments) { if len(errorsMap) == 0 { - mismatchedAPIMessage, err := getLocalVersionMismatchedAPIsMessage() - if err == nil && len(mismatchedAPIMessage) > 0 { - return console.Bold("no apis are deployed") + "\n\n" + mismatchedAPIMessage, nil - } - return console.Bold("no apis are deployed"), nil } @@ -271,9 +265,6 @@ func getAPIsInAllEnvironments() (string, error) { if len(allRealtimeAPIs) > 0 { t := realtimeAPIsTable(allRealtimeAPIs, allRealtimeAPIEnvs) - if strset.New(allRealtimeAPIEnvs...).IsEqual(strset.New(types.LocalProviderType.String())) { - hideReplicaCountColumns(&t) - } if len(allBatchAPIs) > 0 { out += "\n" @@ -301,52 +292,21 @@ func getAPIsInAllEnvironments() (string, error) { out += fmt.Sprintf("unable to detect apis from the 
%s environments; run `cortex get --env ENV_NAME` if this is unexpected\n", s.StrsAnd(errors.NonNilErrorMapKeys(errorsMap))) } - mismatchedAPIMessage, err := getLocalVersionMismatchedAPIsMessage() - if err == nil { - out = s.EnsureBlankLineIfNotEmpty(out) - out += mismatchedAPIMessage - } - return out, nil } -func hideReplicaCountColumns(t *table.Table) { - t.FindHeaderByTitle(_titleUpToDate).Hidden = true - t.FindHeaderByTitle(_titleStale).Hidden = true - t.FindHeaderByTitle(_titleRequested).Hidden = true - t.FindHeaderByTitle(_titleFailed).Hidden = true -} - func getAPIsByEnv(env cliconfig.Environment, printEnv bool) (string, error) { - var apisRes []schema.APIResponse - var err error - - if env.Provider == types.LocalProviderType { - apisRes, err = local.GetAPIs() - if err != nil { - return "", err - } + apisRes, err := cluster.GetAPIs(MustGetOperatorConfig(env.Name)) + if err != nil { + return "", err + } - if _flagOutput == flags.JSONOutputType { - bytes, err := libjson.Marshal(apisRes) - if err != nil { - return "", err - } - return string(bytes), nil - } - } else { - apisRes, err = cluster.GetAPIs(MustGetOperatorConfig(env.Name)) + if _flagOutput == flags.JSONOutputType { + bytes, err := libjson.Marshal(apisRes) if err != nil { return "", err } - - if _flagOutput == flags.JSONOutputType { - bytes, err := libjson.Marshal(apisRes) - if err != nil { - return "", err - } - return string(bytes), nil - } + return string(bytes), nil } var allRealtimeAPIs []schema.APIResponse @@ -365,10 +325,6 @@ func getAPIsByEnv(env cliconfig.Environment, printEnv bool) (string, error) { } if len(allRealtimeAPIs) == 0 && len(allBatchAPIs) == 0 && len(allTrafficSplitters) == 0 { - mismatchedAPIMessage, err := getLocalVersionMismatchedAPIsMessage() - if err == nil && len(mismatchedAPIMessage) > 0 { - return console.Bold("no apis are deployed") + "\n\n" + mismatchedAPIMessage, nil - } return console.Bold("no apis are deployed"), nil } @@ -399,10 +355,6 @@ func getAPIsByEnv(env 
cliconfig.Environment, printEnv bool) (string, error) { out += "\n" } - if env.Provider == types.LocalProviderType { - hideReplicaCountColumns(&t) - } - out += t.MustFormat() } @@ -422,65 +374,11 @@ func getAPIsByEnv(env cliconfig.Environment, printEnv bool) (string, error) { out += t.MustFormat() } - if env.Provider == types.LocalProviderType { - mismatchedVersionAPIsErrorMessage, _ := getLocalVersionMismatchedAPIsMessage() - if len(mismatchedVersionAPIsErrorMessage) > 0 { - out += "\n" + mismatchedVersionAPIsErrorMessage - } - } - return out, nil } -func getLocalVersionMismatchedAPIsMessage() (string, error) { - mismatchedAPINames, err := local.ListVersionMismatchedAPIs() - if err != nil { - return "", err - } - if len(mismatchedAPINames) == 0 { - return "", nil - } - - if len(mismatchedAPINames) == 1 { - return fmt.Sprintf("an api named %s was deployed in your local environment using a different version of the cortex cli; please delete it using `cortex delete %s` and then redeploy it\n", s.UserStr(mismatchedAPINames[0]), mismatchedAPINames[0]), nil - } - return fmt.Sprintf("apis named %s were deployed in your local environment using a different version of the cortex cli; please delete them using `cortex delete API_NAME` and then redeploy them\n", s.UserStrsAnd(mismatchedAPINames)), nil -} - func getAPI(env cliconfig.Environment, apiName string) (string, error) { - if env.Provider != types.LocalProviderType { - apisRes, err := cluster.GetAPI(MustGetOperatorConfig(env.Name), apiName) - if err != nil { - return "", err - } - - if _flagOutput == flags.JSONOutputType { - bytes, err := libjson.Marshal(apisRes) - if err != nil { - return "", err - } - return string(bytes), nil - } - - if len(apisRes) == 0 { - exit.Error(errors.ErrorUnexpected(fmt.Sprintf("unable to find API %s", apiName))) - } - - apiRes := apisRes[0] - - switch apiRes.Spec.Kind { - case userconfig.RealtimeAPIKind: - return realtimeAPITable(apiRes, env) - case userconfig.TrafficSplitterKind: - return 
trafficSplitterTable(apiRes, env) - case userconfig.BatchAPIKind: - return batchAPITable(apiRes), nil - default: - return "", errors.ErrorUnexpected(fmt.Sprintf("encountered unexpected kind %s for api %s", apiRes.Spec.Kind, apiRes.Spec.Name)) - } - } - - apisRes, err := local.GetAPI(apiName) + apisRes, err := cluster.GetAPI(MustGetOperatorConfig(env.Name), apiName) if err != nil { return "", err } @@ -499,7 +397,16 @@ func getAPI(env cliconfig.Environment, apiName string) (string, error) { apiRes := apisRes[0] - return realtimeAPITable(apiRes, env) + switch apiRes.Spec.Kind { + case userconfig.RealtimeAPIKind: + return realtimeAPITable(apiRes, env) + case userconfig.TrafficSplitterKind: + return trafficSplitterTable(apiRes, env) + case userconfig.BatchAPIKind: + return batchAPITable(apiRes), nil + default: + return "", errors.ErrorUnexpected(fmt.Sprintf("encountered unexpected kind %s for api %s", apiRes.Spec.Kind, apiRes.Spec.Name)) + } } func apiHistoryTable(apiVersions []schema.APIVersion) string { diff --git a/cli/cmd/lib_cli_config.go b/cli/cmd/lib_cli_config.go index 391e047774..e89172cd5f 100644 --- a/cli/cmd/lib_cli_config.go +++ b/cli/cmd/lib_cli_config.go @@ -32,10 +32,10 @@ import ( "github.com/cortexlabs/cortex/pkg/lib/pointer" "github.com/cortexlabs/cortex/pkg/lib/print" "github.com/cortexlabs/cortex/pkg/lib/prompt" + "github.com/cortexlabs/cortex/pkg/lib/slices" s "github.com/cortexlabs/cortex/pkg/lib/strings" "github.com/cortexlabs/cortex/pkg/lib/urls" "github.com/cortexlabs/cortex/pkg/types" - "github.com/cortexlabs/cortex/pkg/types/clusterconfig" "github.com/cortexlabs/yaml" ) @@ -50,10 +50,9 @@ var _cliConfigValidation = &cr.StructValidation{ }, { StructField: "DefaultEnvironment", - StringValidation: &cr.StringValidation{ - Default: "local", - Required: false, - AllowEmpty: true, // will get set to "local" in validate() if empty + StringPtrValidation: &cr.StringPtrValidation{ + Required: false, + AllowExplicitNull: true, }, }, { @@ -72,8 +71,16 
@@ var _cliConfigValidation = &cr.StructValidation{ { StructField: "Provider", StringValidation: &cr.StringValidation{ - Required: true, - AllowedValues: types.ProviderTypeStrings(), + Required: true, + Validator: func(provider string) (string, error) { + if slices.HasString(types.ProviderTypeStrings(), provider) { + return provider, nil + } + if provider == "local" { + return "", ErrorInvalidLegacyProvider(provider, _cliConfigPath) + } + return "", ErrorInvalidProvider(provider) + }, }, Parser: func(str string) (interface{}, error) { return types.ProviderTypeFromString(str), nil @@ -98,13 +105,6 @@ var _cliConfigValidation = &cr.StructValidation{ Required: false, }, }, - { - StructField: "AWSRegion", - StringPtrValidation: &cr.StringPtrValidation{ - Required: false, - Validator: clusterconfig.RegionValidator, - }, - }, }, }, }, @@ -112,6 +112,31 @@ var _cliConfigValidation = &cr.StructValidation{ }, } +func getEnvFromFlag(envFlag string) (string, error) { + if envFlag != "" { + return envFlag, nil + } + + defaultEnv, err := getDefaultEnv() + if err != nil { + return "", err + } + + if defaultEnv != nil { + return *defaultEnv, nil + } + + envs, err := listConfiguredEnvs() + if err != nil { + return "", err + } + if len(envs) == 0 { + return "", ErrorNoAvailableEnvironment() + } + + return "", ErrorEnvironmentNotSet() +} + func promptForExistingEnvName(promptMsg string) string { configuredEnvNames, err := listConfiguredEnvNames() if err != nil { @@ -216,8 +241,6 @@ func promptGCPEnvName() string { func promptProvider(env *cliconfig.Environment) error { if env.Name != "" { switch env.Name { - case types.LocalProviderType.String(): - env.Provider = types.LocalProviderType case types.AWSProviderType.String(): env.Provider = types.AWSProviderType case types.GCPProviderType.String(): @@ -251,94 +274,6 @@ func promptProvider(env *cliconfig.Environment) error { }) } -func promptLocalEnv(env *cliconfig.Environment, defaults cliconfig.Environment) error { - accessKeyIDPrompt 
:= "aws access key id" - if defaults.AWSAccessKeyID == nil { - accessKeyIDPrompt += " [press ENTER to skip]" - fmt.Print("if you have an AWS account and wish to access resources in it when running locally (e.g. S3 files), you can provide AWS credentials now\n\n") - } - - for true { - err := cr.ReadPrompt(env, &cr.PromptValidation{ - SkipNonEmptyFields: true, - PromptItemValidations: []*cr.PromptItemValidation{ - { - StructField: "AWSAccessKeyID", - PromptOpts: &prompt.Options{ - Prompt: accessKeyIDPrompt, - }, - StringPtrValidation: &cr.StringPtrValidation{ - Required: false, - AllowEmpty: true, - Default: defaults.AWSAccessKeyID, - }, - }, - }, - }) - if err != nil { - return err - } - - // Don't prompt for secret access key if access key ID was not provided - if env.AWSAccessKeyID == nil { - env.AWSSecretAccessKey = nil - env.AWSRegion = nil - return nil - } - - err = cr.ReadPrompt(env, &cr.PromptValidation{ - SkipNonEmptyFields: true, - PromptItemValidations: []*cr.PromptItemValidation{ - { - StructField: "AWSSecretAccessKey", - PromptOpts: &prompt.Options{ - Prompt: "aws secret access key", - MaskDefault: true, - HideTyping: true, - }, - StringPtrValidation: &cr.StringPtrValidation{ - Required: true, - Default: defaults.AWSSecretAccessKey, - }, - }, - { - StructField: "AWSRegion", - PromptOpts: &prompt.Options{ - Prompt: "aws region", - }, - StringPtrValidation: &cr.StringPtrValidation{ - Required: true, - Default: defaults.AWSRegion, - Validator: clusterconfig.RegionValidator, - }, - }, - }, - }) - if err != nil { - return err - } - - if err := validateAWSCreds(*env); err != nil { - errors.PrintError(err) - fmt.Println() - - // reset fields so they get re-prompted - env.AWSAccessKeyID = nil - env.AWSSecretAccessKey = nil - if env.AWSRegion != nil { - defaults.AWSRegion = env.AWSRegion // update default since we know a valid region was provided - } - env.AWSRegion = nil - - continue - } - - return nil - } - - return nil -} - func promptAWSEnv(env 
*cliconfig.Environment, defaults cliconfig.Environment) error { if env.OperatorEndpoint == nil { fmt.Print("you can get your cortex operator endpoint using `cortex cluster info` if you already have a cortex cluster running, otherwise run `cortex cluster up` to create a cortex cluster\n\n") @@ -474,22 +409,26 @@ func validateOperatorEndpoint(endpoint string) (string, error) { return url, nil } -func getDefaultEnv(cmdType commandType) string { - defaultEnv := types.LocalProviderType.String() - - if cliConfig, err := readCLIConfig(); err == nil { - defaultEnv = cliConfig.DefaultEnvironment +func getDefaultEnv() (*string, error) { + cliConfig, err := readCLIConfig() + if err != nil { + return nil, err } - if cmdType == _clusterCommandType && defaultEnv == types.LocalProviderType.String() { - defaultEnv = types.AWSProviderType.String() + if cliConfig.DefaultEnvironment != nil { + return cliConfig.DefaultEnvironment, nil } - if cmdType == _clusterGCPCommandType && defaultEnv == types.LocalProviderType.String() { - defaultEnv = types.GCPProviderType.String() + if len(cliConfig.Environments) == 1 { + defaultEnv := cliConfig.Environments[0].Name + err := setDefaultEnv(defaultEnv) + if err != nil { + return nil, err + } + return &defaultEnv, nil } - return defaultEnv + return nil, nil } func setDefaultEnv(envName string) error { @@ -506,7 +445,7 @@ func setDefaultEnv(envName string) error { return cliconfig.ErrorEnvironmentNotConfigured(envName) } - cliConfig.DefaultEnvironment = envName + cliConfig.DefaultEnvironment = &envName if err := writeCLIConfig(cliConfig); err != nil { return err @@ -537,7 +476,7 @@ func isTelemetryEnabled() bool { return enabled } -// Will return nil if not configured, except for local +// Will return nil if not configured func readEnv(envName string) (*cliconfig.Environment, error) { cliConfig, err := readCLIConfig() if err != nil { @@ -550,13 +489,6 @@ func readEnv(envName string) (*cliconfig.Environment, error) { } } - if envName == 
types.LocalProviderType.String() { - return &cliconfig.Environment{ - Name: types.LocalProviderType.String(), - Provider: types.LocalProviderType, - }, nil - } - return nil, nil } @@ -572,7 +504,7 @@ func ReadOrConfigureEnv(envName string) (cliconfig.Environment, error) { promptStr := fmt.Sprintf("the %s environment is not configured; do you already have a Cortex cluster running?", envName) yesMsg := fmt.Sprintf("please configure the %s environment to point to your running cluster:\n", envName) - noMsg := "you can create a cluster on AWS by running the `cortex cluster up` or `cortex cluster-gcp up` command" + noMsg := "you can create a cluster on AWS or GCP by running the `cortex cluster up` or `cortex cluster-gcp up` command" prompt.YesOrExit(promptStr, yesMsg, noMsg) env, err := configureEnv(envName, cliconfig.Environment{}) @@ -603,32 +535,21 @@ func getEnvConfigDefaults(envName string) cliconfig.Environment { if defaults.AWSSecretAccessKey == nil && os.Getenv("AWS_SECRET_ACCESS_KEY") != "" { defaults.AWSSecretAccessKey = pointer.String(os.Getenv("AWS_SECRET_ACCESS_KEY")) } - if defaults.AWSRegion == nil && os.Getenv("AWS_REGION") != "" { - defaults.AWSRegion = pointer.String(os.Getenv("AWS_REGION")) - } if defaults.AWSAccessKeyID == nil && defaults.AWSSecretAccessKey == nil { // search other envs for credentials (favoring the env named "aws", or the last entry in the list) - regionWasNil := defaults.AWSRegion == nil cliConfig, _ := readCLIConfig() for _, env := range cliConfig.Environments { if env.AWSAccessKeyID != nil && env.AWSSecretAccessKey != nil { defaults.AWSAccessKeyID = env.AWSAccessKeyID defaults.AWSSecretAccessKey = env.AWSSecretAccessKey } - if regionWasNil && env.AWSRegion != nil { - defaults.AWSRegion = env.AWSRegion - } if env.Name == "aws" { break // favor the env named "aws" } } } - if defaults.AWSRegion == nil { - defaults.AWSRegion = pointer.String("us-east-1") - } - if defaults.OperatorEndpoint == nil && 
os.Getenv("CORTEX_OPERATOR_ENDPOINT") != "" { defaults.OperatorEndpoint = pointer.String(os.Getenv("CORTEX_OPERATOR_ENDPOINT")) } @@ -648,7 +569,6 @@ func configureEnv(envName string, fieldsToSkipPrompt cliconfig.Environment) (cli OperatorEndpoint: fieldsToSkipPrompt.OperatorEndpoint, AWSAccessKeyID: fieldsToSkipPrompt.AWSAccessKeyID, AWSSecretAccessKey: fieldsToSkipPrompt.AWSSecretAccessKey, - AWSRegion: fieldsToSkipPrompt.AWSRegion, } if env.Provider == types.UnknownProviderType { @@ -660,8 +580,6 @@ func configureEnv(envName string, fieldsToSkipPrompt cliconfig.Environment) (cli if envName == "" { switch env.Provider { - case types.LocalProviderType: - env.Name = types.LocalProviderType.String() case types.AWSProviderType: env.Name = promptAWSEnvName() case types.GCPProviderType: @@ -677,11 +595,6 @@ func configureEnv(envName string, fieldsToSkipPrompt cliconfig.Environment) (cli defaults := getEnvConfigDefaults(env.Name) switch env.Provider { - case types.LocalProviderType: - err := promptLocalEnv(&env, defaults) - if err != nil { - return cliconfig.Environment{}, err - } case types.AWSProviderType: err := promptAWSEnv(&env, defaults) if err != nil { @@ -712,18 +625,12 @@ func validateAWSCreds(env cliconfig.Environment) error { return nil } - // region is not applicable for the AWS provider, so we can use a default if it's missing - region := "us-east-1" - if env.AWSRegion != nil { - region = *env.AWSRegion - } - awsCreds := AWSCredentials{ AWSAccessKeyID: *env.AWSAccessKeyID, AWSSecretAccessKey: *env.AWSSecretAccessKey, } - if _, err := newAWSClient(region, awsCreds); err != nil { + if _, err := newAWSClient("us-east-1", awsCreds); err != nil { return err } @@ -741,10 +648,6 @@ func MustGetOperatorConfig(envName string) cluster.OperatorConfig { exit.Error(ErrorEnvironmentNotFound(envName)) } - if env.Provider == types.LocalProviderType { - exit.Error(ErrorOperatorConfigFromLocalEnvironment()) - } - operatorConfig := cluster.OperatorConfig{ Telemetry: 
isTelemetryEnabled(), ClientID: clientID, @@ -859,7 +762,7 @@ func addEnvToCLIConfig(newEnv cliconfig.Environment, setAsDefault bool) error { } if setAsDefault { - cliConfig.DefaultEnvironment = newEnv.Name + cliConfig.DefaultEnvironment = &newEnv.Name } if err := writeCLIConfig(cliConfig); err != nil { @@ -875,7 +778,10 @@ func removeEnvFromCLIConfig(envName string) error { return err } - prevDefault := getDefaultEnv(_generalCommandType) + prevDefault, err := getDefaultEnv() + if err != nil { + return err + } var updatedEnvs []*cliconfig.Environment deleted := false @@ -887,14 +793,17 @@ func removeEnvFromCLIConfig(envName string) error { updatedEnvs = append(updatedEnvs, env) } - if !deleted && envName != types.LocalProviderType.String() { + if !deleted { return cliconfig.ErrorEnvironmentNotConfigured(envName) } cliConfig.Environments = updatedEnvs - if envName == prevDefault { - cliConfig.DefaultEnvironment = types.LocalProviderType.String() + if prevDefault != nil && envName == *prevDefault { + cliConfig.DefaultEnvironment = nil + } + if len(cliConfig.Environments) == 1 { + cliConfig.DefaultEnvironment = &cliConfig.Environments[0].Name } if err := writeCLIConfig(cliConfig); err != nil { @@ -917,7 +826,7 @@ func getEnvNamesByOperatorEndpoint(operatorEndpoint string) ([]string, bool, err for _, env := range cliConfig.Environments { if env.OperatorEndpoint != nil && s.LastSplit(*env.OperatorEndpoint, "//") == s.LastSplit(operatorEndpoint, "//") { envNames = append(envNames, env.Name) - if env.Name == cliConfig.DefaultEnvironment { + if cliConfig.DefaultEnvironment != nil && env.Name == *cliConfig.DefaultEnvironment { isDefaultEnv = true } } @@ -928,15 +837,7 @@ func getEnvNamesByOperatorEndpoint(operatorEndpoint string) ([]string, bool, err func readCLIConfig() (cliconfig.CLIConfig, error) { if !files.IsFile(_cliConfigPath) { - cliConfig := cliconfig.CLIConfig{ - DefaultEnvironment: types.LocalProviderType.String(), - Environments: []*cliconfig.Environment{ - { - 
Name: types.LocalProviderType.String(), - Provider: types.LocalProviderType, - }, - }, - } + cliConfig := cliconfig.CLIConfig{} if err := cliConfig.Validate(); err != nil { return cliconfig.CLIConfig{}, err // unexpected diff --git a/cli/cmd/lib_realtime_apis.go b/cli/cmd/lib_realtime_apis.go index 85b1d0df5a..3908a851db 100644 --- a/cli/cmd/lib_realtime_apis.go +++ b/cli/cmd/lib_realtime_apis.go @@ -34,7 +34,6 @@ import ( "github.com/cortexlabs/cortex/pkg/lib/table" libtime "github.com/cortexlabs/cortex/pkg/lib/time" "github.com/cortexlabs/cortex/pkg/operator/schema" - "github.com/cortexlabs/cortex/pkg/types" "github.com/cortexlabs/cortex/pkg/types/metrics" "github.com/cortexlabs/cortex/pkg/types/status" "github.com/cortexlabs/cortex/pkg/types/userconfig" @@ -46,9 +45,6 @@ func realtimeAPITable(realtimeAPI schema.APIResponse, env cliconfig.Environment) t := realtimeAPIsTable([]schema.APIResponse{realtimeAPI}, []string{env.Name}) t.FindHeaderByTitle(_titleEnvironment).Hidden = true t.FindHeaderByTitle(_titleRealtimeAPI).Hidden = true - if env.Provider == types.LocalProviderType { - hideReplicaCountColumns(&t) - } out += t.MustFormat() @@ -62,9 +58,7 @@ func realtimeAPITable(realtimeAPI schema.APIResponse, env cliconfig.Environment) out += "\n" + describeModelInput(realtimeAPI.Status, realtimeAPI.Spec.Predictor, realtimeAPI.Endpoint) } - if env.Provider != types.LocalProviderType { - out += "\n" + apiHistoryTable(realtimeAPI.APIVersions) - } + out += "\n" + apiHistoryTable(realtimeAPI.APIVersions) if !_flagVerbose { return out, nil diff --git a/cli/cmd/logs.go b/cli/cmd/logs.go index ad86a890bd..47c1b72450 100644 --- a/cli/cmd/logs.go +++ b/cli/cmd/logs.go @@ -22,7 +22,6 @@ import ( "net/url" "github.com/cortexlabs/cortex/cli/cluster" - "github.com/cortexlabs/cortex/cli/local" "github.com/cortexlabs/cortex/pkg/lib/console" "github.com/cortexlabs/cortex/pkg/lib/exit" "github.com/cortexlabs/cortex/pkg/lib/telemetry" @@ -34,7 +33,7 @@ var _flagLogsEnv string func 
logsInit() { _logsCmd.Flags().SortFlags = false - _logsCmd.Flags().StringVarP(&_flagLogsEnv, "env", "e", getDefaultEnv(_generalCommandType), "environment to use") + _logsCmd.Flags().StringVarP(&_flagLogsEnv, "env", "e", "", "environment to use") } var _logsCmd = &cobra.Command{ @@ -42,14 +41,20 @@ var _logsCmd = &cobra.Command{ Short: "stream logs from an api", Args: cobra.RangeArgs(1, 2), Run: func(cmd *cobra.Command, args []string) { - env, err := ReadOrConfigureEnv(_flagLogsEnv) + envName, err := getEnvFromFlag(_flagLogsEnv) + if err != nil { + telemetry.Event("cli.logs") + exit.Error(err) + } + + env, err := ReadOrConfigureEnv(envName) if err != nil { telemetry.Event("cli.logs") exit.Error(err) } telemetry.Event("cli.logs", map[string]interface{}{"provider": env.Provider.String(), "env_name": env.Name}) - err = printEnvIfNotSpecified(_flagLogsEnv, cmd) + err = printEnvIfNotSpecified(env.Name, cmd) if err != nil { exit.Error(err) } @@ -92,15 +97,5 @@ var _logsCmd = &cobra.Command{ consoleOutput := console.Bold(fmt.Sprintf("visit the following link to view logs for api %s: ", apiName)) + gcpLogsURL fmt.Println(consoleOutput) } - - if env.Provider == types.LocalProviderType { - if len(args) == 2 { - exit.Error(ErrorNotSupportedInLocalEnvironment(), fmt.Sprintf("cannot stream logs for job %s for api %s", args[1], args[0])) - } - err := local.StreamLogs(apiName) - if err != nil { - exit.Error(err) - } - } }, } diff --git a/cli/cmd/patch.go b/cli/cmd/patch.go index dd94715bb8..17ccc2f08c 100644 --- a/cli/cmd/patch.go +++ b/cli/cmd/patch.go @@ -21,14 +21,11 @@ import ( "strings" "github.com/cortexlabs/cortex/cli/cluster" - "github.com/cortexlabs/cortex/cli/local" "github.com/cortexlabs/cortex/cli/types/flags" "github.com/cortexlabs/cortex/pkg/lib/exit" libjson "github.com/cortexlabs/cortex/pkg/lib/json" "github.com/cortexlabs/cortex/pkg/lib/print" "github.com/cortexlabs/cortex/pkg/lib/telemetry" - "github.com/cortexlabs/cortex/pkg/operator/schema" - 
"github.com/cortexlabs/cortex/pkg/types" "github.com/spf13/cobra" ) @@ -39,7 +36,7 @@ var ( func patchInit() { _patchCmd.Flags().SortFlags = false - _patchCmd.Flags().StringVarP(&_flagPatchEnv, "env", "e", getDefaultEnv(_generalCommandType), "environment to use") + _patchCmd.Flags().StringVarP(&_flagPatchEnv, "env", "e", "", "environment to use") _patchCmd.Flags().BoolVarP(&_flagPatchForce, "force", "f", false, "override the in-progress api update") _patchCmd.Flags().VarP(&_flagOutput, "output", "o", fmt.Sprintf("output format: one of %s", strings.Join(flags.UserOutputTypeStrings(), "|"))) } @@ -49,31 +46,29 @@ var _patchCmd = &cobra.Command{ Short: "update API configuration for a deployed API", Args: cobra.RangeArgs(0, 1), Run: func(cmd *cobra.Command, args []string) { - env, err := ReadOrConfigureEnv(_flagPatchEnv) + envName, err := getEnvFromFlag(_flagPatchEnv) + if err != nil { + telemetry.Event("cli.patch") + exit.Error(err) + } + + env, err := ReadOrConfigureEnv(envName) if err != nil { telemetry.Event("cli.patch") exit.Error(err) } telemetry.Event("cli.patch", map[string]interface{}{"provider": env.Provider.String(), "env_name": env.Name}) - err = printEnvIfNotSpecified(_flagPatchEnv, cmd) + err = printEnvIfNotSpecified(env.Name, cmd) if err != nil { exit.Error(err) } configPath := getConfigPath(args) - var deployResults []schema.DeployResult - if env.Provider == types.LocalProviderType { - deployResults, err = local.Patch(env, configPath) - if err != nil { - exit.Error(err) - } - } else { - deployResults, err = cluster.Patch(MustGetOperatorConfig(env.Name), configPath, _flagPatchForce) - if err != nil { - exit.Error(err) - } + deployResults, err := cluster.Patch(MustGetOperatorConfig(env.Name), configPath, _flagPatchForce) + if err != nil { + exit.Error(err) } switch _flagOutput { @@ -84,7 +79,10 @@ var _patchCmd = &cobra.Command{ } fmt.Print(string(bytes)) case flags.PrettyOutputType: - message := deployMessage(deployResults, env.Name) + message, err := 
deployMessage(deployResults, env.Name) + if err != nil { + exit.Error(err) + } if didAnyResultsError(deployResults) { print.StderrBoldFirstBlock(message) } else { diff --git a/cli/cmd/predict.go b/cli/cmd/predict.go index 2030f805ae..8d3a7e6e88 100644 --- a/cli/cmd/predict.go +++ b/cli/cmd/predict.go @@ -22,14 +22,11 @@ import ( "net/http" "github.com/cortexlabs/cortex/cli/cluster" - "github.com/cortexlabs/cortex/cli/local" "github.com/cortexlabs/cortex/pkg/lib/errors" "github.com/cortexlabs/cortex/pkg/lib/exit" "github.com/cortexlabs/cortex/pkg/lib/files" "github.com/cortexlabs/cortex/pkg/lib/json" "github.com/cortexlabs/cortex/pkg/lib/telemetry" - "github.com/cortexlabs/cortex/pkg/operator/schema" - "github.com/cortexlabs/cortex/pkg/types" "github.com/cortexlabs/cortex/pkg/types/userconfig" "github.com/spf13/cobra" ) @@ -40,7 +37,7 @@ var ( func predictInit() { _predictCmd.Flags().SortFlags = false - _predictCmd.Flags().StringVarP(&_flagPredictEnv, "env", "e", getDefaultEnv(_generalCommandType), "environment to use") + _predictCmd.Flags().StringVarP(&_flagPredictEnv, "env", "e", "", "environment to use") } var _predictCmd = &cobra.Command{ @@ -48,14 +45,20 @@ var _predictCmd = &cobra.Command{ Short: "make a prediction request using a json file", Args: cobra.ExactArgs(2), Run: func(cmd *cobra.Command, args []string) { - env, err := ReadOrConfigureEnv(_flagPredictEnv) + envName, err := getEnvFromFlag(_flagPredictEnv) + if err != nil { + telemetry.Event("cli.predict") + exit.Error(err) + } + + env, err := ReadOrConfigureEnv(envName) if err != nil { telemetry.Event("cli.predict") exit.Error(err) } telemetry.Event("cli.predict", map[string]interface{}{"provider": env.Provider.String(), "env_name": env.Name}) - err = printEnvIfNotSpecified(_flagPredictEnv, cmd) + err = printEnvIfNotSpecified(env.Name, cmd) if err != nil { exit.Error(err) } @@ -63,17 +66,9 @@ var _predictCmd = &cobra.Command{ apiName := args[0] jsonPath := args[1] - var apisRes []schema.APIResponse - if 
env.Provider == types.LocalProviderType { - apisRes, err = local.GetAPI(apiName) - if err != nil { - exit.Error(err) - } - } else { - apisRes, err = cluster.GetAPI(MustGetOperatorConfig(env.Name), apiName) - if err != nil { - exit.Error(err) - } + apisRes, err := cluster.GetAPI(MustGetOperatorConfig(env.Name), apiName) + if err != nil { + exit.Error(err) } if len(apisRes) == 0 { diff --git a/cli/cmd/refresh.go b/cli/cmd/refresh.go index 51d54514d5..ba0007e823 100644 --- a/cli/cmd/refresh.go +++ b/cli/cmd/refresh.go @@ -22,12 +22,10 @@ import ( "github.com/cortexlabs/cortex/cli/cluster" "github.com/cortexlabs/cortex/cli/types/flags" - "github.com/cortexlabs/cortex/pkg/lib/errors" "github.com/cortexlabs/cortex/pkg/lib/exit" libjson "github.com/cortexlabs/cortex/pkg/lib/json" "github.com/cortexlabs/cortex/pkg/lib/print" "github.com/cortexlabs/cortex/pkg/lib/telemetry" - "github.com/cortexlabs/cortex/pkg/types" "github.com/spf13/cobra" ) @@ -38,7 +36,7 @@ var ( func refreshInit() { _refreshCmd.Flags().SortFlags = false - _refreshCmd.Flags().StringVarP(&_flagRefreshEnv, "env", "e", getDefaultEnv(_generalCommandType), "environment to use") + _refreshCmd.Flags().StringVarP(&_flagRefreshEnv, "env", "e", "", "environment to use") _refreshCmd.Flags().BoolVarP(&_flagRefreshForce, "force", "f", false, "override the in-progress api update") _refreshCmd.Flags().VarP(&_flagOutput, "output", "o", fmt.Sprintf("output format: one of %s", strings.Join(flags.UserOutputTypeStrings(), "|"))) } @@ -48,21 +46,24 @@ var _refreshCmd = &cobra.Command{ Short: "restart all replicas for an api (without downtime)", Args: cobra.ExactArgs(1), Run: func(cmd *cobra.Command, args []string) { - env, err := ReadOrConfigureEnv(_flagRefreshEnv) + envName, err := getEnvFromFlag(_flagRefreshEnv) if err != nil { telemetry.Event("cli.refresh") exit.Error(err) } - telemetry.Event("cli.refresh", map[string]interface{}{"provider": env.Provider.String(), "env_name": env.Name}) - err = 
printEnvIfNotSpecified(_flagRefreshEnv, cmd) + env, err := ReadOrConfigureEnv(envName) if err != nil { + telemetry.Event("cli.refresh") exit.Error(err) } + telemetry.Event("cli.refresh", map[string]interface{}{"provider": env.Provider.String(), "env_name": env.Name}) - if env.Provider == types.LocalProviderType { - exit.Error(errors.Append(ErrorNotSupportedInLocalEnvironment(), "; use `cortex deploy` instead")) + err = printEnvIfNotSpecified(env.Name, cmd) + if err != nil { + exit.Error(err) } + refreshResponse, err := cluster.Refresh(MustGetOperatorConfig(env.Name), args[0], _flagRefreshForce) if err != nil { exit.Error(err) diff --git a/cli/cmd/root.go b/cli/cmd/root.go index 061f052061..c7abc34371 100644 --- a/cli/cmd/root.go +++ b/cli/cmd/root.go @@ -52,14 +52,6 @@ var ( _homeDir string ) -type commandType int - -const ( - _generalCommandType commandType = iota - _clusterCommandType - _clusterGCPCommandType -) - func init() { cwd, err := os.Getwd() if err != nil { @@ -215,7 +207,7 @@ func addVerboseFlag(cmd *cobra.Command) { func wasEnvFlagProvided(cmd *cobra.Command) bool { envFlagProvided := false cmd.Flags().VisitAll(func(flag *pflag.Flag) { - if flag.Shorthand == "e" && flag.Changed { + if flag.Shorthand == "e" && flag.Changed && flag.Value.String() != "" { envFlagProvided = true } }) diff --git a/cli/cmd/version.go b/cli/cmd/version.go index 96e7f277c5..80f322a3b2 100644 --- a/cli/cmd/version.go +++ b/cli/cmd/version.go @@ -23,7 +23,6 @@ import ( "github.com/cortexlabs/cortex/pkg/consts" "github.com/cortexlabs/cortex/pkg/lib/exit" "github.com/cortexlabs/cortex/pkg/lib/telemetry" - "github.com/cortexlabs/cortex/pkg/types" "github.com/spf13/cobra" ) @@ -31,7 +30,7 @@ var _flagVersionEnv string func versionInit() { _versionCmd.Flags().SortFlags = false - _versionCmd.Flags().StringVarP(&_flagVersionEnv, "env", "e", getDefaultEnv(_generalCommandType), "environment to use") + _versionCmd.Flags().StringVarP(&_flagVersionEnv, "env", "e", "", "environment to use") 
} var _versionCmd = &cobra.Command{ @@ -39,24 +38,26 @@ var _versionCmd = &cobra.Command{ Short: "print the cli and cluster versions", Args: cobra.NoArgs, Run: func(cmd *cobra.Command, args []string) { - env, err := ReadOrConfigureEnv(_flagVersionEnv) + envName, err := getEnvFromFlag(_flagVersionEnv) + if err != nil { + telemetry.Event("cli.version") + exit.Error(err) + } + + env, err := ReadOrConfigureEnv(envName) if err != nil { telemetry.Event("cli.version") exit.Error(err) } telemetry.Event("cli.version", map[string]interface{}{"provider": env.Provider.String(), "env_name": env.Name}) - err = printEnvIfNotSpecified(_flagVersionEnv, cmd) + err = printEnvIfNotSpecified(env.Name, cmd) if err != nil { exit.Error(err) } fmt.Println("cli version: " + consts.CortexVersion) - if env.Provider == types.LocalProviderType { - return - } - infoResponse, err := cluster.Info(MustGetOperatorConfig(env.Name)) if err != nil { exit.Error(err) diff --git a/cli/local/api.go b/cli/local/api.go deleted file mode 100644 index 58b3305657..0000000000 --- a/cli/local/api.go +++ /dev/null @@ -1,295 +0,0 @@ -/* -Copyright 2020 Cortex Labs, Inc. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package local - -import ( - "encoding/json" - "fmt" - "path/filepath" - "strings" - - "github.com/cortexlabs/cortex/pkg/consts" - "github.com/cortexlabs/cortex/pkg/lib/aws" - "github.com/cortexlabs/cortex/pkg/lib/errors" - "github.com/cortexlabs/cortex/pkg/lib/files" - "github.com/cortexlabs/cortex/pkg/lib/gcp" - "github.com/cortexlabs/cortex/pkg/lib/prompt" - "github.com/cortexlabs/cortex/pkg/lib/sets/strset" - "github.com/cortexlabs/cortex/pkg/lib/telemetry" - "github.com/cortexlabs/cortex/pkg/operator/schema" - "github.com/cortexlabs/cortex/pkg/types" - "github.com/cortexlabs/cortex/pkg/types/spec" - "github.com/cortexlabs/cortex/pkg/types/userconfig" -) - -var _deploymentID = "local" - -func UpdateAPI(apiConfig *userconfig.API, models []spec.CuratedModelResource, projectRoot string, projectID string, deployDisallowPrompt bool, awsClient *aws.Client, gcpClient *gcp.Client) (*schema.APIResponse, string, error) { - telemetry.Event("operator.deploy", apiConfig.TelemetryEvent(types.LocalProviderType)) - - var incompatibleVersion string - encounteredVersionMismatch := false - prevAPISpec, err := FindAPISpec(apiConfig.Name) - if err != nil { - if errors.GetKind(err) == ErrCortexVersionMismatch { - encounteredVersionMismatch = true - if incompatibleVersion, err = GetVersionFromAPISpec(apiConfig.Name); err != nil { - return nil, "", err - } - - incompatibleMinorVersion := incompatibleVersion - split := strings.Split(incompatibleVersion, ".") - if len(split) == 3 { - incompatibleMinorVersion = strings.Join(strings.Split(incompatibleVersion, ".")[:2], ".") - } - - if consts.CortexVersionMinor != incompatibleMinorVersion && !deployDisallowPrompt { - prompt.YesOrExit( - fmt.Sprintf( - "api %s was deployed using CLI version %s but the current CLI version is %s; "+ - "re-deploying %s with current CLI version %s might yield an unexpected outcome; any cached models won't be deleted\n\n"+ - "it is recommended to install version %s of the CLI (pip install cortex==%s), 
delete the API using version %s of the CLI, and then re-deploy the API using the latest version of the CLI\n\n"+ - "do you still want to re-deploy?", - apiConfig.Name, incompatibleMinorVersion, consts.CortexVersionMinor, apiConfig.Name, consts.CortexVersionMinor, incompatibleMinorVersion, incompatibleVersion, incompatibleMinorVersion), - "", "", - ) - } - if err := DeleteAPI(apiConfig.Name); err != nil { - return nil, "", err - } - } else if errors.GetKind(err) != ErrAPINotDeployed { - return nil, "", err - } - } - - prevAPIContainers, err := GetContainersByAPI(apiConfig.Name) - if err != nil { - return nil, "", err - } - - newAPISpec := spec.GetAPISpec(apiConfig, projectID, _deploymentID, "") - - if newAPISpec != nil && TotalLocalModelVersions(models) > 0 { - if err := CacheLocalModels(newAPISpec, models); err != nil { - return nil, "", err - } - } - - newAPISpec.LocalProjectDir = projectRoot - - if areAPIsEqual(newAPISpec, prevAPISpec) { - return toAPIResponse(newAPISpec), fmt.Sprintf("%s is up to date", newAPISpec.Resource.UserString()), nil - } - - if prevAPISpec != nil || len(prevAPIContainers) != 0 { - err = errors.FirstError( - DeleteAPI(newAPISpec.Name), - deleteCachedModels(newAPISpec.Name, prevAPISpec.SubtractModelIDs(newAPISpec)), - ) - if err != nil { - return nil, "", err - } - } - - err = writeAPISpec(newAPISpec) - if err != nil { - DeleteAPI(newAPISpec.Name) - deleteCachedModels(newAPISpec.Name, newAPISpec.ModelIDs()) - return nil, "", err - } - - if err := DeployContainers(newAPISpec, awsClient, gcpClient); err != nil { - DeleteAPI(newAPISpec.Name) - deleteCachedModels(newAPISpec.Name, newAPISpec.ModelIDs()) - return nil, "", err - } - - if prevAPISpec == nil && len(prevAPIContainers) == 0 { - if encounteredVersionMismatch { - return toAPIResponse(newAPISpec), fmt.Sprintf( - "creating api %s with current CLI version %s", - newAPISpec.Name, - consts.CortexVersion, - ), nil - } - - return toAPIResponse(newAPISpec), fmt.Sprintf("creating %s", 
newAPISpec.Resource.UserString()), nil - } - - return toAPIResponse(newAPISpec), fmt.Sprintf("updating %s", newAPISpec.Resource.UserString()), nil -} - -func toAPIResponse(api *spec.API) *schema.APIResponse { - return &schema.APIResponse{ - Spec: *api, - Endpoint: fmt.Sprintf("http://localhost:%d", *api.Networking.LocalPort), - } -} - -func writeAPISpec(apiSpec *spec.API) error { - apiBytes, err := json.Marshal(apiSpec) - if err != nil { - return err - } - - err = files.CreateDir(files.ParentDir(filepath.Join(_localWorkspaceDir, apiSpec.Key))) - if err != nil { - return err - } - - err = files.WriteFile(apiBytes, filepath.Join(_localWorkspaceDir, apiSpec.Key)) - if err != nil { - return err - } - - return nil -} - -func areAPIsEqual(a1, a2 *spec.API) bool { - if a1 == nil && a2 == nil { - return true - } - if a1 == nil || a2 == nil { - return false - } - if a1.SpecID != a2.SpecID { - return false - } - if !strset.FromSlice(a1.ModelIDs()).IsEqual(strset.FromSlice(a2.ModelIDs())) { - return false - } - return true -} - -func DeleteAPI(apiName string) error { - errList := []error{} - - containers, err := GetContainersByAPI(apiName) - if err == nil { - if len(containers) > 0 { - err = DeleteContainers(apiName) - if err != nil { - errList = append(errList, err) - } - } - } else { - errList = append(errList, err) - } - - if ContainersHaveAPINameVolume(containers) { - err = DeleteVolume(apiName) - if err != nil { - errList = append(errList, err) - } - } - - _, err = FindAPISpec(apiName) - if err == nil { - _, err := files.DeleteDirIfPresent(filepath.Join(_localWorkspaceDir, "apis", apiName)) - if err != nil { - errList = append(errList, ErrorFailedToDeleteAPISpec(filepath.Join(_localWorkspaceDir, "apis", apiName), err)) - } - } else if errors.GetKind(err) == ErrCortexVersionMismatch { - _, err := files.DeleteDirIfPresent(filepath.Join(_localWorkspaceDir, "apis", apiName)) - if err != nil { - errList = append(errList, 
ErrorFailedToDeleteAPISpec(filepath.Join(_localWorkspaceDir, "apis", apiName), err)) - } - } else { - // only add error if it isn't ErrCortexVersionMismatch - errList = append(errList, err) - } - - return errors.FirstError(errList...) -} - -func FindAPISpec(apiName string) (*spec.API, error) { - apiWorkspace := filepath.Join(_localWorkspaceDir, "apis", apiName) - if !files.IsDir(apiWorkspace) { - return nil, ErrorAPINotDeployed(apiName) - } - - filepaths, err := files.ListDirRecursive(apiWorkspace, false) - if err != nil { - return nil, errors.Wrap(err, "api", apiName) - } - - var apiSpec spec.API - for _, specPath := range filepaths { - if strings.HasSuffix(filepath.Base(specPath), "-spec.msgpack") { - apiSpecVersion := GetVersionFromAPISpecFilePath(specPath) - if apiSpecVersion != consts.CortexVersion { - return nil, ErrorCortexVersionMismatch(apiName, apiSpecVersion) - } - } - if strings.HasSuffix(filepath.Base(specPath), "-spec.json") { - apiSpecVersion := GetVersionFromAPISpecFilePath(specPath) - if apiSpecVersion != consts.CortexVersion { - return nil, ErrorCortexVersionMismatch(apiName, apiSpecVersion) - } - - bytes, err := files.ReadFileBytes(specPath) - if err != nil { - return nil, errors.Wrap(err, "api", apiName) - } - err = json.Unmarshal(bytes, &apiSpec) - if err != nil { - return nil, errors.Wrap(err, "api", apiName) - } - return &apiSpec, nil - } - } - return nil, ErrorAPINotDeployed(apiName) -} - -func GetVersionFromAPISpec(apiName string) (string, error) { - apiWorkspace := filepath.Join(_localWorkspaceDir, "apis", apiName) - if !files.IsDir(apiWorkspace) { - return "", ErrorAPINotDeployed(apiName) - } - - filepaths, err := files.ListDirRecursive(apiWorkspace, false) - if err != nil { - return "", errors.Wrap(err, "api", apiName) - } - - for _, specPath := range filepaths { - if strings.HasSuffix(filepath.Base(specPath), "-spec.json") || strings.HasSuffix(filepath.Base(specPath), "-spec.msgpack") { - return GetVersionFromAPISpecFilePath(specPath), 
nil - } - } - return "", ErrorAPINotDeployed(apiName) -} - -func GetVersionFromAPISpecFilePath(path string) string { - fileName := filepath.Base(path) - return strings.Split(fileName, "-")[0] -} - -func TotalLocalModelVersions(models []spec.CuratedModelResource) int { - totalLocalModelVersions := 0 - for _, model := range models { - if !model.LocalPath { - continue - } - if len(model.Versions) > 0 { - totalLocalModelVersions += len(model.Versions) - } else { - totalLocalModelVersions++ - } - } - return totalLocalModelVersions -} diff --git a/cli/local/config.go b/cli/local/config.go deleted file mode 100644 index 4b56d85170..0000000000 --- a/cli/local/config.go +++ /dev/null @@ -1,82 +0,0 @@ -/* -Copyright 2020 Cortex Labs, Inc. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package local - -import ( - "os" - "path/filepath" - - "github.com/cortexlabs/cortex/pkg/lib/errors" - "github.com/cortexlabs/cortex/pkg/lib/exit" - "github.com/cortexlabs/cortex/pkg/lib/files" - s "github.com/cortexlabs/cortex/pkg/lib/strings" - "github.com/mitchellh/go-homedir" -) - -var _cwd string -var _localDir string -var _localWorkspaceDir string -var _modelCacheDir string - -func init() { - cwd, err := os.Getwd() - if err != nil { - err := errors.Wrap(err, "unable to determine current working directory") - exit.Error(err) - } - _cwd = s.EnsureSuffix(cwd, "/") - - homeDir, err := homedir.Dir() - if err != nil { - err := errors.Wrap(err, "unable to determine home directory") - exit.Error(err) - } - - _localDir = os.Getenv("CORTEX_CLI_CONFIG_DIR") - if _localDir != "" { - _localDir = files.UserRelToAbsPath(_localDir) - } else { - _localDir = filepath.Join(homeDir, ".cortex") - } - - err = os.MkdirAll(_localDir, os.ModePerm) - if err != nil { - err := errors.Wrap(err, "unable to write to home directory", _localDir) - exit.Error(err) - } - - _localWorkspaceDir = filepath.Join(_localDir, "workspace") - err = os.MkdirAll(_localWorkspaceDir, os.ModePerm) - if err != nil { - err := errors.Wrap(err, "unable to write to home directory", _localWorkspaceDir) - exit.Error(err) - } - - localAPISWorkspaceDir := filepath.Join(_localWorkspaceDir, "apis") - err = os.MkdirAll(localAPISWorkspaceDir, os.ModePerm) - if err != nil { - err := errors.Wrap(err, "unable to write to home directory", localAPISWorkspaceDir) - exit.Error(err) - } - - _modelCacheDir = filepath.Join(_localDir, "model_cache") - err = os.MkdirAll(_modelCacheDir, os.ModePerm) - if err != nil { - err := errors.Wrap(err, "unable to write to home directory", _modelCacheDir) - exit.Error(err) - } -} diff --git a/cli/local/delete.go b/cli/local/delete.go deleted file mode 100644 index ba397c9b50..0000000000 --- a/cli/local/delete.go +++ /dev/null @@ -1,93 +0,0 @@ -/* -Copyright 2020 Cortex Labs, Inc. 
- -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package local - -import ( - "fmt" - "strings" - - "github.com/cortexlabs/cortex/pkg/consts" - "github.com/cortexlabs/cortex/pkg/lib/docker" - "github.com/cortexlabs/cortex/pkg/lib/errors" - "github.com/cortexlabs/cortex/pkg/lib/prompt" - "github.com/cortexlabs/cortex/pkg/operator/schema" - "github.com/cortexlabs/cortex/pkg/types/spec" -) - -func Delete(apiName string, keepCache, deleteForce bool) (schema.DeleteResponse, error) { - _, err := docker.GetDockerClient() - if err != nil { - return schema.DeleteResponse{}, err - } - - var apiSpec *spec.API = nil - if apiSpec, err = FindAPISpec(apiName); err != nil { - if errors.GetKind(err) == ErrCortexVersionMismatch { - var incompatibleVersion string - if incompatibleVersion, err = GetVersionFromAPISpec(apiName); err != nil { - return schema.DeleteResponse{}, err - } - - incompatibleMinorVersion := incompatibleVersion - split := strings.Split(incompatibleVersion, ".") - if len(split) == 3 { - incompatibleMinorVersion = strings.Join(strings.Split(incompatibleVersion, ".")[:2], ".") - } - - if consts.CortexVersionMinor != incompatibleMinorVersion && !deleteForce { - prompt.YesOrExit( - fmt.Sprintf( - "api %s was deployed using CLI version %s but the current CLI version is %s; "+ - "deleting %s with current CLI version %s might lead to an unexpected state; any cached models won't be deleted\n\n"+ - "it is recommended to download version %s of the CLI (pip install cortex==%s), delete the API 
using version %s of the CLI, and then re-deploy the API using the latest version of the CLI\n\n"+ - "do you still want to delete?", - apiName, incompatibleMinorVersion, consts.CortexVersionMinor, apiName, consts.CortexVersionMinor, incompatibleMinorVersion, incompatibleVersion, incompatibleMinorVersion), - "", "", - ) - } - - if err = DeleteAPI(apiName); err != nil { - return schema.DeleteResponse{}, err - } - return schema.DeleteResponse{ - Message: fmt.Sprintf("deleting api %s with current CLI version %s", apiName, consts.CortexVersion), - }, nil - } - - return schema.DeleteResponse{}, DeleteAPI(apiName) - } - - if keepCache { - err = DeleteAPI(apiName) - } else { - if apiSpec != nil { - err = errors.FirstError( - DeleteAPI(apiName), - deleteCachedModels(apiName, apiSpec.ModelIDs()), - ) - } else { - err = DeleteAPI(apiName) - } - } - if err != nil { - return schema.DeleteResponse{}, err - } - - return schema.DeleteResponse{ - Message: fmt.Sprintf("deleting %s", apiName), - }, nil -} diff --git a/cli/local/deploy.go b/cli/local/deploy.go deleted file mode 100644 index fb4df6014c..0000000000 --- a/cli/local/deploy.go +++ /dev/null @@ -1,117 +0,0 @@ -/* -Copyright 2020 Cortex Labs, Inc. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package local - -import ( - "fmt" - "os" - "path/filepath" - - "github.com/cortexlabs/cortex/cli/types/cliconfig" - "github.com/cortexlabs/cortex/pkg/consts" - "github.com/cortexlabs/cortex/pkg/lib/aws" - "github.com/cortexlabs/cortex/pkg/lib/docker" - "github.com/cortexlabs/cortex/pkg/lib/errors" - "github.com/cortexlabs/cortex/pkg/lib/files" - "github.com/cortexlabs/cortex/pkg/lib/gcp" - "github.com/cortexlabs/cortex/pkg/operator/schema" - "github.com/cortexlabs/cortex/pkg/types" - "github.com/cortexlabs/cortex/pkg/types/spec" - "github.com/cortexlabs/cortex/pkg/types/userconfig" -) - -func Deploy(env cliconfig.Environment, configPath string, projectFileList []string, disallowPrompt bool) ([]schema.DeployResult, error) { - configFileName := filepath.Base(configPath) - - _, err := docker.GetDockerClient() - if err != nil { - return nil, err - } - - configBytes, err := files.ReadFileBytes(configPath) - if err != nil { - return nil, err - } - - if !files.IsAbsOrTildePrefixed(configPath) { - return nil, errors.ErrorUnexpected(fmt.Sprintf("%s is not an absolute path", configPath)) - } - projectRoot := files.Dir(configPath) - - projectFiles, err := newProjectFiles(projectFileList, projectRoot) - if err != nil { - return nil, err - } - - apiConfigs, err := spec.ExtractAPIConfigs(configBytes, types.LocalProviderType, configFileName, nil, nil) - if err != nil { - return nil, err - } - - return deploy(env, apiConfigs, projectFiles, disallowPrompt) -} - -func deploy(env cliconfig.Environment, apiConfigs []userconfig.API, projectFiles ProjectFiles, disallowPrompt bool) ([]schema.DeployResult, error) { - var awsClient *aws.Client - var gcpClient *gcp.Client - - var err error - if env.AWSAccessKeyID != nil { - awsClient, err = aws.NewFromCreds(*env.AWSRegion, *env.AWSAccessKeyID, *env.AWSSecretAccessKey) - } else { - awsClient, err = aws.NewAnonymousClient() - } - if err != nil { - return nil, err - } - - if os.Getenv("GOOGLE_APPLICATION_CREDENTIALS") != "" { - 
gcpClient, err = gcp.NewFromEnv() - if err != nil { - return nil, err - } - } else { - gcpClient = gcp.NewAnonymousClient() - } - - models := []spec.CuratedModelResource{} - err = ValidateLocalAPIs(apiConfigs, &models, projectFiles, awsClient, gcpClient) - if err != nil { - err = errors.Append(err, fmt.Sprintf("\n\napi configuration schema for Realtime API can be found at https://docs.cortex.dev/v/%s/", consts.CortexVersionMinor)) - return nil, err - } - - projectRelFilePaths := projectFiles.AllAbsPaths() - projectID, err := files.HashFile(projectRelFilePaths[0], projectRelFilePaths[1:]...) - if err != nil { - return nil, errors.Wrap(err, "failed to hash directory", projectFiles.projectRoot) - } - - results := make([]schema.DeployResult, len(apiConfigs)) - for i := range apiConfigs { - apiConfig := apiConfigs[i] - api, msg, err := UpdateAPI(&apiConfig, models, projectFiles.projectRoot, projectID, disallowPrompt, awsClient, gcpClient) - results[i].Message = msg - if err != nil { - results[i].Error = errors.Message(err) - } else { - results[i].API = api - } - } - - return results, nil -} diff --git a/cli/local/docker_spec.go b/cli/local/docker_spec.go deleted file mode 100644 index 8b22d8163f..0000000000 --- a/cli/local/docker_spec.go +++ /dev/null @@ -1,632 +0,0 @@ -/* -Copyright 2020 Cortex Labs, Inc. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package local - -import ( - "context" - "fmt" - "path/filepath" - "strings" - - "github.com/cortexlabs/cortex/pkg/consts" - "github.com/cortexlabs/cortex/pkg/lib/archive" - "github.com/cortexlabs/cortex/pkg/lib/aws" - "github.com/cortexlabs/cortex/pkg/lib/docker" - "github.com/cortexlabs/cortex/pkg/lib/errors" - "github.com/cortexlabs/cortex/pkg/lib/gcp" - s "github.com/cortexlabs/cortex/pkg/lib/strings" - "github.com/cortexlabs/cortex/pkg/types/spec" - "github.com/cortexlabs/cortex/pkg/types/userconfig" - "github.com/docker/docker/api/types" - dockertypes "github.com/docker/docker/api/types" - "github.com/docker/docker/api/types/container" - "github.com/docker/docker/api/types/filters" - "github.com/docker/docker/api/types/mount" - "github.com/docker/go-connections/nat" -) - -const ( - _apiContainerName = "api" - _tfServingContainerName = "serve" - _defaultPortStr = "8888" - _tfServingPortStr = "9000" - _tfServingEmptyModelConfig = "/etc/tfs/model_config_server.conf" - _tfServingMaxReloadTimes = "0" - _tfServingLoadTimeMicros = "30000000" // 30 seconds - _tfServingBatchConfig = "/etc/tfs/batch_config.conf" - _projectDir = "/mnt/project" - _cacheDir = "/mnt/cache" - _modelDir = "/mnt/model" - _workspaceDir = "/mnt/workspace" -) - -type ModelCaches []*spec.LocalModelCache - -func (modelCaches ModelCaches) IDs() string { - ids := make([]string, len(modelCaches)) - for i, modelCache := range modelCaches { - ids[i] = modelCache.ID - } - - return strings.Join(ids, ", ") -} - -func DeployContainers(api *spec.API, awsClient *aws.Client, gcpClient *gcp.Client) error { - switch api.Predictor.Type { - case userconfig.TensorFlowPredictorType: - return deployTensorFlowContainers(api, awsClient, gcpClient) - case userconfig.ONNXPredictorType: - return deployONNXContainer(api, awsClient, gcpClient) - default: - return deployPythonContainer(api, awsClient, gcpClient) - } -} - -func getAPIEnv(api *spec.API, awsClient *aws.Client, gcpClient *gcp.Client) []string { - envs := 
[]string{} - - for envName, envVal := range api.Predictor.Env { - envs = append(envs, fmt.Sprintf("%s=%s", envName, envVal)) - } - - envs = append(envs, - "CORTEX_KIND="+api.Kind.String(), - "CORTEX_VERSION="+consts.CortexVersion, - "CORTEX_SERVING_PORT="+_defaultPortStr, - "CORTEX_PROVIDER="+"local", - "CORTEX_CACHE_DIR="+_cacheDir, - "CORTEX_API_SPEC="+filepath.Join("/mnt/workspace", filepath.Base(api.Key)), - "CORTEX_PROJECT_DIR="+_projectDir, - "CORTEX_PROCESSES_PER_REPLICA="+s.Int32(api.Predictor.ProcessesPerReplica), - "CORTEX_THREADS_PER_PROCESS="+s.Int32(api.Predictor.ThreadsPerProcess), - "CORTEX_MAX_REPLICA_CONCURRENCY="+s.Int32(api.Predictor.ProcessesPerReplica*api.Predictor.ThreadsPerProcess+1024), // allow a queue of 1024 - "CORTEX_LOG_LEVEL="+strings.ToUpper(api.Predictor.LogLevel.String()), - ) - - if api.Predictor.Type != userconfig.PythonPredictorType || api.Predictor.MultiModelReloading != nil { - envs = append(envs, "CORTEX_MODEL_DIR="+_modelDir) - } - - cortexPythonPath := _projectDir - if api.Predictor.PythonPath != nil { - cortexPythonPath = filepath.Join(_projectDir, *api.Predictor.PythonPath) - } - envs = append(envs, "CORTEX_PYTHON_PATH="+cortexPythonPath) - - if awsClient != nil { - envs = append(envs, "AWS_REGION="+awsClient.Region) - - if awsAccessKeyID := awsClient.AccessKeyID(); awsAccessKeyID != nil { - envs = append(envs, "AWS_ACCESS_KEY_ID="+*awsAccessKeyID) - } - - if awsSecretAccessKey := awsClient.SecretAccessKey(); awsSecretAccessKey != nil { - envs = append(envs, "AWS_SECRET_ACCESS_KEY="+*awsSecretAccessKey) - } - - if _, ok := api.Predictor.Env["PYTHONDONTWRITEBYTECODE"]; !ok { - envs = append(envs, "PYTHONDONTWRITEBYTECODE=1") - } - } - if gcpClient != nil && !gcpClient.IsAnonymous { - envs = append(envs, "GOOGLE_APPLICATION_CREDENTIALS=/var/google_key.json") - } - - return envs -} - -func deployPythonContainer(api *spec.API, awsClient *aws.Client, gcpClient *gcp.Client) error { - portBinding := nat.PortBinding{} - if 
api.Networking.LocalPort != nil { - portBinding.HostPort = s.Int(*api.Networking.LocalPort) - } - - resources := container.Resources{} - if api.Compute != nil { - if api.Compute.CPU != nil { - resources.NanoCPUs = api.Compute.CPU.MilliValue() * 1000 * 1000 - } - if api.Compute.Mem != nil { - resources.Memory = api.Compute.Mem.Quantity.Value() - } - if api.Compute.GPU > 0 { - resources.DeviceRequests = []container.DeviceRequest{{ - Count: -1, - Capabilities: [][]string{ - {"gpu"}, - }, - }} - } - } - - mounts := []mount.Mount{ - { - Type: mount.TypeBind, - Source: api.LocalProjectDir, - Target: _projectDir, - }, - { - Type: mount.TypeBind, - Source: filepath.Join(_localWorkspaceDir, filepath.Dir(api.Key)), - Target: _workspaceDir, - }, - } - - for _, modelCache := range api.LocalModelCaches { - mounts = append(mounts, mount.Mount{ - Type: mount.TypeBind, - Source: modelCache.HostPath, - Target: filepath.Join(_modelDir, modelCache.TargetPath), - }) - } - - hostConfig := &container.HostConfig{ - PortBindings: nat.PortMap{ - _defaultPortStr + "/tcp": []nat.PortBinding{portBinding}, - }, - Resources: resources, - Mounts: mounts, - } - - containerConfig := &container.Config{ - Image: api.Predictor.Image, - Tty: true, - Env: append( - getAPIEnv(api, awsClient, gcpClient), - ), - ExposedPorts: nat.PortSet{ - _defaultPortStr + "/tcp": struct{}{}, - }, - Labels: map[string]string{ - "cortex": "true", - "type": _apiContainerName, - "apiID": api.ID, - "specID": api.SpecID, - "predictorID": api.PredictorID, - "apiName": api.Name, - }, - } - containerInfo, err := docker.MustDockerClient().ContainerCreate(context.Background(), containerConfig, hostConfig, nil, "") - if err != nil { - if strings.Contains(err.Error(), "bind source path does not exist") { - return errors.Wrap(ErrorBindDockerInDocker(err), api.Identify()) - } - return errors.Wrap(err, api.Identify()) - } - - if gcpClient != nil && !gcpClient.IsAnonymous { - docker.CopyToContainer(containerInfo.ID, &archive.Input{ - 
Bytes: []archive.BytesInput{ - { - Content: gcpClient.CredentialsJSON, - Dest: "/var/google_key.json", - }, - }, - }, "/") - } - - err = docker.MustDockerClient().ContainerStart(context.Background(), containerInfo.ID, dockertypes.ContainerStartOptions{}) - if err != nil { - if api.Compute.GPU == 0 { - return errors.Wrap(err, api.Identify()) - } - err := retryWithNvidiaRuntime(err, containerConfig, hostConfig, gcpClient) - if err != nil { - return errors.Wrap(err, api.Identify()) - } - } - - return nil -} - -func deployONNXContainer(api *spec.API, awsClient *aws.Client, gcpClient *gcp.Client) error { - portBinding := nat.PortBinding{} - if api.Networking.LocalPort != nil { - portBinding.HostPort = s.Int(*api.Networking.LocalPort) - } - - resources := container.Resources{} - if api.Compute != nil { - if api.Compute.CPU != nil { - resources.NanoCPUs = api.Compute.CPU.MilliValue() * 1000 * 1000 - } - if api.Compute.Mem != nil { - resources.Memory = api.Compute.Mem.Quantity.Value() - } - if api.Compute.GPU > 0 { - resources.DeviceRequests = []container.DeviceRequest{{ - Count: -1, - Capabilities: [][]string{ - {"gpu"}, - }, - }} - } - } - - mounts := []mount.Mount{ - { - Type: mount.TypeBind, - Source: api.LocalProjectDir, - Target: _projectDir, - }, - { - Type: mount.TypeBind, - Source: filepath.Join(_localWorkspaceDir, filepath.Dir(api.Key)), - Target: _workspaceDir, - }, - } - for _, modelCache := range api.LocalModelCaches { - mounts = append(mounts, mount.Mount{ - Type: mount.TypeBind, - Source: modelCache.HostPath, - Target: filepath.Join(_modelDir, modelCache.TargetPath), - }) - } - - hostConfig := &container.HostConfig{ - PortBindings: nat.PortMap{ - _defaultPortStr + "/tcp": []nat.PortBinding{portBinding}, - }, - Resources: resources, - Mounts: mounts, - } - - containerConfig := &container.Config{ - Image: api.Predictor.Image, - Tty: true, - Env: append( - getAPIEnv(api, awsClient, gcpClient), - ), - ExposedPorts: nat.PortSet{ - _defaultPortStr + "/tcp": 
struct{}{}, - }, - Labels: map[string]string{ - "cortex": "true", - "type": _apiContainerName, - "apiID": api.ID, - "specID": api.SpecID, - "predictorID": api.PredictorID, - "apiName": api.Name, - "localModelIDs": ModelCaches(api.LocalModelCaches).IDs(), - }, - } - containerInfo, err := docker.MustDockerClient().ContainerCreate(context.Background(), containerConfig, hostConfig, nil, "") - if err != nil { - if strings.Contains(err.Error(), "bind source path does not exist") { - return errors.Wrap(ErrorBindDockerInDocker(err), api.Identify()) - } - return errors.Wrap(err, api.Identify()) - } - - if gcpClient != nil && !gcpClient.IsAnonymous { - docker.CopyToContainer(containerInfo.ID, &archive.Input{ - Bytes: []archive.BytesInput{ - { - Content: gcpClient.CredentialsJSON, - Dest: "/var/google_key.json", - }, - }, - }, "/") - } - - err = docker.MustDockerClient().ContainerStart(context.Background(), containerInfo.ID, dockertypes.ContainerStartOptions{}) - if err != nil { - if api.Compute.GPU == 0 { - return errors.Wrap(err, api.Identify()) - } - err := retryWithNvidiaRuntime(err, containerConfig, hostConfig, gcpClient) - if err != nil { - return errors.Wrap(err, api.Identify()) - } - } - - return nil -} - -func deployTensorFlowContainers(api *spec.API, awsClient *aws.Client, gcpClient *gcp.Client) error { - serveResources := container.Resources{} - apiResources := container.Resources{} - - if api.Compute != nil { - if api.Compute.CPU != nil { - totalNanoCPUs := api.Compute.CPU.MilliValue() * 1000 * 1000 - apiResources.NanoCPUs = totalNanoCPUs / 2 - serveResources.NanoCPUs = totalNanoCPUs - apiResources.NanoCPUs - } - if api.Compute.Mem != nil { - totalMemory := api.Compute.Mem.Quantity.Value() - apiResources.Memory = totalMemory / 2 - serveResources.Memory = totalMemory - apiResources.Memory - } - if api.Compute.GPU > 0 { - serveResources.DeviceRequests = append(serveResources.DeviceRequests, container.DeviceRequest{ - Count: -1, - Capabilities: [][]string{ - {"gpu"}, 
- }, - }) - } - } - - modelVolume := api.Name - if err := DeleteVolume(modelVolume); err != nil { - return errors.Wrap(err, api.Identify()) - } - - mounts := []mount.Mount{} - for _, modelCache := range api.LocalModelCaches { - mounts = append(mounts, mount.Mount{ - Type: mount.TypeBind, - Source: modelCache.HostPath, - Target: filepath.Join(_modelDir, modelCache.TargetPath), - }) - } - mounts = append(mounts, mount.Mount{ - Type: mount.TypeVolume, - Source: modelVolume, - Target: _modelDir, - }) - - serveHostConfig := &container.HostConfig{ - Resources: serveResources, - Mounts: mounts, - } - - envVars := []string{ - "TF_CPP_MIN_LOG_LEVEL=" + s.Int(userconfig.TFNumericLogLevelFromLogLevel(api.Predictor.LogLevel)), - } - - cmdArgs := []string{ - "--port=" + _tfServingPortStr, - "--model_config_file=" + _tfServingEmptyModelConfig, - "--max_num_load_retries=" + _tfServingMaxReloadTimes, - "--load_retry_interval_micros=" + _tfServingLoadTimeMicros, - fmt.Sprintf(`--grpc_channel_arguments="grpc.max_concurrent_streams=%d"`, api.Predictor.ProcessesPerReplica*api.Predictor.ThreadsPerProcess+10), - } - if api.Predictor.ServerSideBatching != nil { - envVars = append(envVars, - "TF_MAX_BATCH_SIZE="+s.Int32(api.Predictor.ServerSideBatching.MaxBatchSize), - "TF_BATCH_TIMEOUT_MICROS="+s.Int64(api.Predictor.ServerSideBatching.BatchInterval.Microseconds()), - "TF_NUM_BATCHED_THREADS="+s.Int32(api.Predictor.ProcessesPerReplica), - ) - cmdArgs = append(cmdArgs, - "--enable_batching=true", - "--batching_parameters_file="+_tfServingBatchConfig, - ) - } - - serveContainerConfig := &container.Config{ - Image: api.Predictor.TensorFlowServingImage, - Tty: true, - Env: envVars, - Cmd: cmdArgs, - ExposedPorts: nat.PortSet{ - _tfServingPortStr + "/tcp": struct{}{}, - }, - Labels: map[string]string{ - "cortex": "true", - "type": _tfServingContainerName, - "apiID": api.ID, - "specID": api.SpecID, - "predictorID": api.PredictorID, - "apiName": api.Name, - "localModelIDs": 
ModelCaches(api.LocalModelCaches).IDs(), - }, - } - - containerCreateRequest, err := docker.MustDockerClient().ContainerCreate(context.Background(), serveContainerConfig, serveHostConfig, nil, "") - if err != nil { - if strings.Contains(err.Error(), "bind source path does not exist") { - return errors.Wrap(ErrorBindDockerInDocker(err), api.Identify()) - } - return errors.Wrap(err, api.Identify()) - } - err = docker.MustDockerClient().ContainerStart(context.Background(), containerCreateRequest.ID, dockertypes.ContainerStartOptions{}) - if err != nil { - if api.Compute.GPU == 0 { - return errors.Wrap(err, api.Identify()) - } - err := retryWithNvidiaRuntime(err, serveContainerConfig, serveHostConfig, nil) - if err != nil { - return errors.Wrap(err, api.Identify()) - } - } - - containerInfo, err := docker.MustDockerClient().ContainerInspect(context.Background(), containerCreateRequest.ID) - if err != nil { - return errors.Wrap(err, api.Identify()) - } - - tfContainerHost := containerInfo.NetworkSettings.Networks["bridge"].IPAddress - - portBinding := nat.PortBinding{} - if api.Networking.LocalPort != nil { - portBinding.HostPort = fmt.Sprintf("%d", *api.Networking.LocalPort) - } - apiHostConfig := &container.HostConfig{ - PortBindings: nat.PortMap{ - _defaultPortStr + "/tcp": []nat.PortBinding{portBinding}, - }, - Resources: apiResources, - Mounts: append([]mount.Mount{ - { - Type: mount.TypeBind, - Source: api.LocalProjectDir, - Target: _projectDir, - }, - { - Type: mount.TypeBind, - Source: filepath.Join(_localWorkspaceDir, filepath.Dir(api.Key)), - Target: _workspaceDir, - }, - }, mounts...), - } - - apiContainerConfig := &container.Config{ - Image: api.Predictor.Image, - Tty: true, - Env: append( - getAPIEnv(api, awsClient, gcpClient), - "CORTEX_TF_BASE_SERVING_PORT="+_tfServingPortStr, - "CORTEX_TF_SERVING_HOST="+tfContainerHost, - ), - ExposedPorts: nat.PortSet{ - _defaultPortStr + "/tcp": struct{}{}, - }, - Labels: map[string]string{ - "cortex": "true", - 
"type": _apiContainerName, - "apiID": api.ID, - "specID": api.SpecID, - "predictorID": api.PredictorID, - "apiName": api.Name, - "localModelIDs": ModelCaches(api.LocalModelCaches).IDs(), - }, - } - containerCreateRequest, err = docker.MustDockerClient().ContainerCreate(context.Background(), apiContainerConfig, apiHostConfig, nil, "") - if err != nil { - if strings.Contains(err.Error(), "bind source path does not exist") { - return errors.Wrap(ErrorBindDockerInDocker(err), api.Identify()) - } - return errors.Wrap(err, api.Identify()) - } - - if gcpClient != nil && !gcpClient.IsAnonymous { - docker.CopyToContainer(containerCreateRequest.ID, &archive.Input{ - Bytes: []archive.BytesInput{ - { - Content: gcpClient.CredentialsJSON, - Dest: "/var/google_key.json", - }, - }, - }, "/") - } - - err = docker.MustDockerClient().ContainerStart(context.Background(), containerCreateRequest.ID, dockertypes.ContainerStartOptions{}) - if err != nil { - return errors.Wrap(err, api.Identify()) - } - - return nil -} - -// Retries deploying a container requiring GPU using nvidia runtime, returns original error if isn't relevant, nil if successful and new error if a retry was attempted but failed -func retryWithNvidiaRuntime(err error, containerConfig *container.Config, hostConfig *container.HostConfig, gcpClient *gcp.Client) error { - // error message if device driver may look like 'could not select device driver "" with capabilities: [[gpu]]' - if !(strings.Contains(err.Error(), "could not select device driver") && strings.Contains(err.Error(), "gpu")) { - return err - } - - if _, ok := docker.MustDockerClient().Info.Runtimes["nvidia"]; ok { - localPrintln("retrying API deployment using nvidia runtime because device driver for GPU was not found") - hostConfig.Runtime = "nvidia" - hostConfig.Resources.DeviceRequests = nil - containerCreateRequest, err := docker.MustDockerClient().ContainerCreate(context.Background(), containerConfig, hostConfig, nil, "") - if err != nil { - return 
errors.Wrap(err, "failed to request a GPU") - } - if gcpClient != nil && !gcpClient.IsAnonymous { - docker.CopyToContainer(containerCreateRequest.ID, &archive.Input{ - Bytes: []archive.BytesInput{ - { - Content: gcpClient.CredentialsJSON, - Dest: "/var/google_key.json", - }, - }, - }, "/") - } - err = docker.MustDockerClient().ContainerStart(context.Background(), containerCreateRequest.ID, dockertypes.ContainerStartOptions{}) - if err != nil { - return errors.Wrap(err, "failed to run a container using nvidia runtime; it is recommended to use the latest Docker Engine (https://docs.docker.com/engine/install/) with nvidia-container-runtime or nvidia-container-toolkit (https://docs.docker.com/config/containers/resource_constraints/#gpu)") - } - return nil - } - return errors.Append(errors.Wrap(err, "failed to allocate GPU"), "\n\n* only NVIDIA gpus are supported\n* please make sure that you've set up nvidia-container-runtime or nvidia-container-toolkit for your Docker Engine correctly (https://docs.docker.com/config/containers/resource_constraints/#gpu)\n\nAlternatively, try deploying the API without requesting a GPU by updating `compute.gpu` in your API configuration yaml") -} - -func GetContainersByAPI(apiName string) ([]dockertypes.Container, error) { - dargs := filters.NewArgs() - dargs.Add("label", "cortex=true") - dargs.Add("label", "apiName="+apiName) - - containers, err := docker.MustDockerClient().ContainerList(context.Background(), types.ContainerListOptions{ - All: true, - Filters: dargs, - }) - if err != nil { - return nil, errors.Wrap(err, "api", apiName) - } - - return containers, nil -} - -func GetAllRunningContainers() ([]dockertypes.Container, error) { - dargs := filters.NewArgs() - dargs.Add("label", "cortex=true") - - containers, err := docker.MustDockerClient().ContainerList(context.Background(), types.ContainerListOptions{ - Filters: dargs, - }) - if err != nil { - return nil, errors.WithStack(err) - } - - return containers, nil -} - -func 
DeleteContainers(apiName string) error { - containers, err := GetContainersByAPI(apiName) - if err != nil { - return err - } - - for _, container := range containers { - attemptErr := docker.MustDockerClient().ContainerRemove(context.Background(), container.ID, dockertypes.ContainerRemoveOptions{ - RemoveVolumes: true, - Force: true, - }) - if attemptErr != nil { - err = attemptErr - } - } - if err != nil { - return errors.Wrap(err, "api", apiName) - } - return nil -} - -func ContainersHaveAPINameVolume(containers []dockertypes.Container) bool { - for _, container := range containers { - apiName := container.Labels["apiName"] - for _, mounted := range container.Mounts { - if mounted.Type == mount.TypeVolume && mounted.Name == apiName { - return true - } - } - } - - return false -} - -func DeleteVolume(volumeName string) error { - if _, err := docker.MustDockerClient().VolumeInspect(context.Background(), volumeName); err == nil { - if err := docker.MustDockerClient().VolumeRemove(context.Background(), volumeName, false); err != nil { - return err - } - } - return nil -} diff --git a/cli/local/errors.go b/cli/local/errors.go deleted file mode 100644 index 920f9cd22e..0000000000 --- a/cli/local/errors.go +++ /dev/null @@ -1,128 +0,0 @@ -/* -Copyright 2020 Cortex Labs, Inc. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package local - -import ( - "fmt" - - "github.com/cortexlabs/cortex/pkg/consts" - "github.com/cortexlabs/cortex/pkg/lib/errors" -) - -const ( - ErrAPINotDeployed = "local.api_not_deployed" - ErrAPISpecNotFound = "local.api_specification_not_found" - ErrCortexVersionMismatch = "local.cortex_version_mismatch" - ErrAPIContainersNotFound = "local.api_containers_not_found" - ErrFoundContainersWithoutAPISpec = "local.found_containers_without_api_spec" - ErrInvalidTensorFlowZip = "local.invalid_tensorflow_zip" - ErrFailedToDeleteAPISpec = "local.failed_to_delete_api_spec" - ErrDuplicateLocalPort = "local.duplicate_local_port" - ErrPortAlreadyInUse = "local.port_already_in_use" - ErrUnableToFindAvailablePorts = "local.unable_to_find_available_ports" - ErrBindDockerInDocker = "local.bind_docker_in_docker" - ErrMustSpecifyLocalAWSCreds = "local.must_specify_local_aws_creds" -) - -func ErrorAPINotDeployed(apiName string) error { - return errors.WithStack(&errors.Error{ - Kind: ErrAPINotDeployed, - Message: fmt.Sprintf("%s is not deployed", apiName), - }) -} - -func ErrorAPISpecNotFound(apiName string) error { - return errors.WithStack(&errors.Error{ - Kind: ErrAPISpecNotFound, - Message: fmt.Sprintf("unable to find configuration for %s api", apiName), - }) -} - -func ErrorCortexVersionMismatch(apiName string, apiVersion string) error { - return errors.WithStack(&errors.Error{ - Kind: ErrCortexVersionMismatch, - Message: fmt.Sprintf("api %s was deployed using CLI version %s but the current CLI version is %s; please run `cortex deploy` to redeploy the api or `cortex delete %s` to delete the api", apiName, apiVersion, consts.CortexVersion, apiName), - }) -} - -func ErrorFoundContainersWithoutAPISpec(apiName string) error { - return errors.WithStack(&errors.Error{ - Kind: ErrFoundContainersWithoutAPISpec, - Message: fmt.Sprintf("unable to find configuration for %s api; please run `cortex delete %s` to perform cleanup and try deploying again", apiName, apiName), - }) -} - 
-func ErrorAPIContainersNotFound(apiName string) error { - return errors.WithStack(&errors.Error{ - Kind: ErrAPIContainersNotFound, - Message: fmt.Sprintf("unable to find container(s) for %s api", apiName), - }) -} - -var _tfExpectedStructMessage = `For TensorFlow models, the zipped file must be a directory with the following structure: - 1523423423/ (Version prefix, usually a timestamp) - ├── saved_model.pb - └── variables/ - ├── variables.index - ├── variables.data-00000-of-00003 - ├── variables.data-00001-of-00003 - └── variables.data-00002-of-...` - -func ErrorInvalidTensorFlowZip() error { - message := "invalid TensorFlow zip.\n" - message += _tfExpectedStructMessage - return errors.WithStack(&errors.Error{ - Kind: ErrInvalidTensorFlowZip, - Message: message, - }) -} - -func ErrorFailedToDeleteAPISpec(path string, err error) error { - return errors.WithStack(&errors.Error{ - Kind: ErrFailedToDeleteAPISpec, - Message: errors.Message(err) + fmt.Sprintf("\n\nfailed to delete api specification; run `sudo rm -rf %s` to clean up", path), - }) -} - -func ErrorDuplicateLocalPort(apiName string) error { - return errors.WithStack(&errors.Error{ - Kind: ErrDuplicateLocalPort, - Message: fmt.Sprintf("port has already been assigned to api %s, please delete the api with `cortex delete %s --env local` or use another port", apiName, apiName), - }) -} - -func ErrorPortAlreadyInUse(port int) error { - return errors.WithStack(&errors.Error{ - Kind: ErrPortAlreadyInUse, - Message: fmt.Sprintf("port %d is being used by a non-cortex process; please specify a different port or make port %d available", port, port), - }) -} - -func ErrorUnableToFindAvailablePorts() error { - return errors.WithStack(&errors.Error{ - Kind: ErrUnableToFindAvailablePorts, - Message: "unable to find available ports", - }) -} - -func ErrorBindDockerInDocker(err error) error { - return errors.WithStack(&errors.Error{ - Kind: ErrBindDockerInDocker, - Message: err.Error() + "\n\nnote: deploying an API locally 
from within a docker container is not supported (although deploying to AWS from within a container is supported)", - Cause: err, - }) -} diff --git a/cli/local/get.go b/cli/local/get.go deleted file mode 100644 index 21f9875c2f..0000000000 --- a/cli/local/get.go +++ /dev/null @@ -1,172 +0,0 @@ -/* -Copyright 2020 Cortex Labs, Inc. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package local - -import ( - "encoding/json" - "fmt" - "path/filepath" - "strings" - - "github.com/cortexlabs/cortex/pkg/consts" - "github.com/cortexlabs/cortex/pkg/lib/docker" - "github.com/cortexlabs/cortex/pkg/lib/errors" - "github.com/cortexlabs/cortex/pkg/lib/files" - "github.com/cortexlabs/cortex/pkg/operator/schema" - "github.com/cortexlabs/cortex/pkg/types/spec" -) - -func GetAPIs() ([]schema.APIResponse, error) { - _, err := docker.GetDockerClient() - if err != nil { - return nil, err - } - - apiSpecList, err := ListAPISpecs() - if err != nil { - return nil, err - } - - apiResponses := make([]schema.APIResponse, len(apiSpecList)) - for i := range apiSpecList { - apiSpec := apiSpecList[i] - apiStatus, err := GetAPIStatus(&apiSpec) - if err != nil { - return nil, err - } - - metrics, err := GetAPIMetrics(&apiSpec) - if err != nil { - return nil, err - } - - apiResponses[i] = schema.APIResponse{ - Spec: apiSpec, - Status: &apiStatus, - Metrics: &metrics, - Endpoint: fmt.Sprintf("http://localhost:%d", *apiSpec.Networking.LocalPort), - } - } - - return apiResponses, nil -} - -func ListAPISpecs() 
([]spec.API, error) { - filepaths, err := files.ListDirRecursive(filepath.Join(_localWorkspaceDir, "apis"), false) - if err != nil { - return nil, err - } - - apiSpecList := []spec.API{} - for _, specPath := range filepaths { - if !strings.HasSuffix(filepath.Base(specPath), "-spec.json") { - continue - } - - apiSpecVersion := GetVersionFromAPISpecFilePath(specPath) - if apiSpecVersion != consts.CortexVersion { - continue - } - - var apiSpec spec.API - bytes, err := files.ReadFileBytes(specPath) - if err != nil { - return nil, errors.Wrap(err, "api", specPath) - } - err = json.Unmarshal(bytes, &apiSpec) - if err != nil { - return nil, errors.Wrap(err, "api", specPath) - } - apiSpecList = append(apiSpecList, apiSpec) - } - - return apiSpecList, nil -} - -func ListVersionMismatchedAPIs() ([]string, error) { - filepaths, err := files.ListDirRecursive(filepath.Join(_localWorkspaceDir, "apis"), false) - if err != nil { - return nil, err - } - - apiNames := []string{} - for _, specPath := range filepaths { - // Check msgpack for compatibility - if !strings.HasSuffix(filepath.Base(specPath), "-spec.json") && !strings.HasSuffix(filepath.Base(specPath), "-spec.msgpack") { - continue - } - apiSpecVersion := GetVersionFromAPISpecFilePath(specPath) - if apiSpecVersion == consts.CortexVersion { - continue - } - - key, err := filepath.Rel(filepath.Join(_localWorkspaceDir, "apis"), specPath) - if err != nil { - return nil, err - } - splitKey := strings.Split(key, "/") - if len(splitKey) == 0 { - continue - } - apiNames = append(apiNames, splitKey[0]) - } - return apiNames, nil -} - -func GetAPI(apiName string) ([]schema.APIResponse, error) { - _, err := docker.GetDockerClient() - if err != nil { - return nil, err - } - - apiSpec, err := FindAPISpec(apiName) - if err != nil { - return nil, err - } - - apiStatus, err := GetAPIStatus(apiSpec) - if err != nil { - return nil, err - } - - apiMetrics, err := GetAPIMetrics(apiSpec) - if err != nil { - return nil, err - } - - containers, 
err := GetContainersByAPI(apiName) - if err != nil { - return nil, err - } - - if len(containers) == 0 { - return nil, ErrorAPIContainersNotFound(apiName) - } - apiContainer := containers[0] - if len(containers) == 2 && apiContainer.Labels["type"] != "api" { - apiContainer = containers[1] - } - - return []schema.APIResponse{ - { - Spec: *apiSpec, - Status: &apiStatus, - Metrics: &apiMetrics, - Endpoint: fmt.Sprintf("http://localhost:%d", *apiSpec.Networking.LocalPort), - }, - }, nil -} diff --git a/cli/local/logs.go b/cli/local/logs.go deleted file mode 100644 index 5655ed324e..0000000000 --- a/cli/local/logs.go +++ /dev/null @@ -1,49 +0,0 @@ -/* -Copyright 2020 Cortex Labs, Inc. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package local - -import ( - "github.com/cortexlabs/cortex/pkg/lib/docker" -) - -func StreamLogs(apiName string) error { - _, err := docker.GetDockerClient() - if err != nil { - return err - } - - _, err = FindAPISpec(apiName) - if err != nil { - return err - } - - containers, err := GetContainersByAPI(apiName) - if err != nil { - return err - } - - if len(containers) == 0 { - return ErrorAPIContainersNotFound(apiName) - } - - var containerIDs []string - for _, container := range containers { - containerIDs = append(containerIDs, container.ID) - } - - return docker.StreamDockerLogs(containerIDs[0], containerIDs[1:]...) 
-} diff --git a/cli/local/metrics.go b/cli/local/metrics.go deleted file mode 100644 index d814e6f6f3..0000000000 --- a/cli/local/metrics.go +++ /dev/null @@ -1,172 +0,0 @@ -/* -Copyright 2020 Cortex Labs, Inc. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package local - -import ( - "context" - "path/filepath" - "strings" - "time" - - "github.com/cortexlabs/cortex/pkg/lib/docker" - "github.com/cortexlabs/cortex/pkg/lib/errors" - "github.com/cortexlabs/cortex/pkg/lib/files" - "github.com/cortexlabs/cortex/pkg/lib/pointer" - s "github.com/cortexlabs/cortex/pkg/lib/strings" - "github.com/cortexlabs/cortex/pkg/types/metrics" - "github.com/cortexlabs/cortex/pkg/types/spec" - "github.com/cortexlabs/cortex/pkg/types/status" - "github.com/cortexlabs/cortex/pkg/types/userconfig" -) - -func GetAPIMetrics(api *spec.API) (metrics.Metrics, error) { - apiWorkspace := filepath.Join(_localWorkspaceDir, filepath.Dir(api.Key)) - - networkStats := metrics.NetworkStats{} - - filepaths, err := files.ListDir(apiWorkspace, false) - if err != nil { - return metrics.Metrics{}, errors.Wrap(err, "api", api.Name) - } - - totalRequestTime := 0.0 - for _, filepath := range filepaths { - if strings.HasSuffix(filepath, ".2XX") { - fileContent, err := files.ReadFile(filepath) - if err != nil { - return metrics.Metrics{}, errors.Wrap(err, "api", api.Name) - } - count, ok := s.ParseInt(fileContent) - if !ok { - count = 0 - } - networkStats.Code2XX += int(count) - } - - if strings.HasSuffix(filepath, ".4XX") { - 
fileContent, err := files.ReadFile(filepath) - if err != nil { - return metrics.Metrics{}, errors.Wrap(err, "api", api.Name) - } - - count, ok := s.ParseInt(fileContent) - if !ok { - count = 0 - } - networkStats.Code4XX += int(count) - } - - if strings.HasSuffix(filepath, ".5XX") { - fileContent, err := files.ReadFile(filepath) - if err != nil { - return metrics.Metrics{}, errors.Wrap(err, "api", api.Name) - } - - count, ok := s.ParseInt(fileContent) - if !ok { - count = 0 - } - networkStats.Code5XX += int(count) - } - - if strings.HasSuffix(filepath, ".request_time") { - fileContent, err := files.ReadFile(filepath) - if err != nil { - return metrics.Metrics{}, errors.Wrap(err, "api", api.Name) - } - - requestTime, ok := s.ParseFloat64(fileContent) - if !ok { - requestTime = 0 - } - totalRequestTime += requestTime - } - } - - totalRequests := networkStats.Code2XX + networkStats.Code4XX + networkStats.Code5XX - networkStats.Total = totalRequests - if totalRequests != 0 && totalRequestTime != 0 { - networkStats.Latency = pointer.Float64(totalRequestTime / float64(totalRequests)) - } - return metrics.Metrics{ - APIName: api.Name, - NetworkStats: &networkStats, - }, nil -} - -func GetAPIStatus(api *spec.API) (status.Status, error) { - apiStatus := status.Status{ - APIID: api.ID, - APIName: api.Name, - ReplicaCounts: status.ReplicaCounts{ - Requested: 1, - }, - } - - // 10 second grace period between creating api spec file and looking for containers - if api.LastUpdated+10 > time.Now().Unix() { - apiStatus.ReplicaCounts.Updated.Initializing = 1 - apiStatus.Code = status.Updating - return apiStatus, nil - } - - containers, err := GetContainersByAPI(api.Name) - if err != nil { - return status.Status{}, err - } - - if len(containers) == 0 { - apiStatus.ReplicaCounts.Updated.Failed = 1 - apiStatus.Code = status.Error - return apiStatus, nil - } - - if api.Predictor.Type == userconfig.TensorFlowPredictorType && len(containers) != 2 { - apiStatus.ReplicaCounts.Updated.Failed 
= 1 - apiStatus.Code = status.Error - return apiStatus, nil - } - - for _, container := range containers { - if container.State != "running" { - dockerClient := docker.MustDockerClient() - containerInfo, err := dockerClient.ContainerInspect(context.Background(), container.ID) - if err != nil { - return status.Status{}, errors.Wrap(err, api.Identify()) - } - if containerInfo.State.OOMKilled || containerInfo.State.ExitCode == 137 { - apiStatus.ReplicaCounts.Updated.Failed = 1 - apiStatus.Code = status.OOM - return apiStatus, nil - } - - apiStatus.ReplicaCounts.Updated.Failed = 1 - apiStatus.Code = status.Error - return apiStatus, nil - } - } - - if !files.IsFile(filepath.Join(_localWorkspaceDir, filepath.Dir(api.Key), "api_readiness.txt")) { - apiStatus.ReplicaCounts.Updated.Initializing = 1 - apiStatus.Code = status.Updating - return apiStatus, nil - } - - apiStatus.ReplicaCounts.Updated.Ready = 1 - apiStatus.Code = status.Live - return apiStatus, nil -} diff --git a/cli/local/model_cache.go b/cli/local/model_cache.go deleted file mode 100644 index 8de2139ab7..0000000000 --- a/cli/local/model_cache.go +++ /dev/null @@ -1,228 +0,0 @@ -/* -Copyright 2020 Cortex Labs, Inc. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package local - -import ( - "fmt" - "path/filepath" - "strings" - - "github.com/cortexlabs/cortex/pkg/consts" - "github.com/cortexlabs/cortex/pkg/lib/errors" - "github.com/cortexlabs/cortex/pkg/lib/files" - "github.com/cortexlabs/cortex/pkg/lib/sets/strset" - s "github.com/cortexlabs/cortex/pkg/lib/strings" - "github.com/cortexlabs/cortex/pkg/types/spec" - "github.com/cortexlabs/cortex/pkg/types/userconfig" -) - -func CacheLocalModels(apiSpec *spec.API, models []spec.CuratedModelResource) error { - var err error - var wasAlreadyCached bool - var localModelCache *spec.LocalModelCache - localModelCaches := make([]*spec.LocalModelCache, 0) - - var predictorModels *userconfig.MultiModels - var predictorModelsKey string - if apiSpec.Predictor.Models != nil { - predictorModels = apiSpec.Predictor.Models - predictorModelsKey = userconfig.ModelsKey - } else if apiSpec.Predictor.MultiModelReloading != nil { - predictorModels = apiSpec.Predictor.MultiModelReloading - predictorModelsKey = userconfig.MultiModelReloadingKey - } - - modelsThatWereCachedAlready := 0 - for _, model := range models { - if !model.LocalPath { - continue - } - - localModelCache, wasAlreadyCached, err = cacheLocalModel(model) - if err != nil { - if predictorModels.Path != nil { - return errors.Wrap(err, apiSpec.Identify(), userconfig.PredictorKey, predictorModelsKey, userconfig.ModelsPathKey) - } else if predictorModels.Dir != nil { - return errors.Wrap(err, apiSpec.Identify(), userconfig.PredictorKey, predictorModelsKey, userconfig.ModelsDirKey, model.Name, *apiSpec.Predictor.Models.Dir) - } - return errors.Wrap(err, apiSpec.Identify(), userconfig.PredictorKey, predictorModelsKey, userconfig.ModelsPathsKey, model.Name, userconfig.ModelsPathKey) - } - if wasAlreadyCached { - modelsThatWereCachedAlready++ - } - if model.IsFilePath || len(model.Versions) == 0 { - localModelCache.TargetPath = filepath.Join(model.Name, "1") - } else { - localModelCache.TargetPath = model.Name - } - - 
localModelCaches = append(localModelCaches, localModelCache) - } - apiSpec.LocalModelCaches = localModelCaches - - if len(localModelCaches) > modelsThatWereCachedAlready { - fmt.Println("") // Newline to group all of the model information - } - - return nil -} - -func cacheLocalModel(model spec.CuratedModelResource) (*spec.LocalModelCache, bool, error) { - localModelCache := spec.LocalModelCache{} - var err error - - if !model.LocalPath { - return nil, false, nil - } - - hash, err := localModelHash(model.Path) - if err != nil { - return nil, false, err - } - localModelCache.ID = hash - - destModelDir := filepath.Join(_modelCacheDir, localModelCache.ID) - - if files.IsDir(destModelDir) { - if model.IsFilePath || len(model.Versions) == 0 { - localModelCache.HostPath = filepath.Join(destModelDir, "1") - } else { - localModelCache.HostPath = destModelDir - } - return &localModelCache, true, nil - } - - err = resetModelCacheDir(destModelDir) - if err != nil { - return nil, false, err - } - if model.IsFilePath || len(model.Versions) == 0 { - if _, err := files.CreateDirIfMissing(filepath.Join(destModelDir, "1")); err != nil { - return nil, false, err - } - } - - if model.Name == consts.SingleModelName { - switch len(model.Versions) { - case 0: - fmt.Println("○ caching model ...") - case 1: - fmt.Println(fmt.Sprintf("○ caching model (version %d) ...", model.Versions[0])) - default: - fmt.Println(fmt.Sprintf("○ caching model (versions %s) ...", s.UserStrsAnd(model.Versions))) - } - - } else { - switch len(model.Versions) { - case 0: - fmt.Println(fmt.Sprintf("○ caching model %s ...", model.Name)) - case 1: - fmt.Println(fmt.Sprintf("○ caching model %s (version %d) ...", model.Name, model.Versions[0])) - default: - fmt.Println(fmt.Sprintf("○ caching model %s (versions %s) ...", model.Name, s.UserStrsAnd(model.Versions))) - } - } - - if model.IsFilePath || len(model.Versions) == 0 { - destModelDir = filepath.Join(destModelDir, "1") - } - - if model.IsFilePath { - err = 
files.CopyFileOverwrite(model.Path, filepath.Join(destModelDir, filepath.Base(model.Path))) - } else { - err = files.CopyDirOverwrite(strings.TrimSuffix(model.Path, "/"), s.EnsureSuffix(destModelDir, "/")) - } - if err != nil { - return nil, false, err - } - - localModelCache.HostPath = destModelDir - return &localModelCache, false, nil -} - -func deleteCachedModels(apiName string, modelsToDelete []string) error { - var errList []error - modelsInUse := strset.New() - apiSpecList, err := ListAPISpecs() - errList = append(errList, err) - - if err == nil { - for _, apiSpec := range apiSpecList { - if len(apiSpec.LocalModelCaches) > 0 && apiSpec.Name != apiName { - for _, modelCache := range apiSpec.LocalModelCaches { - modelsInUse.Add(modelCache.ID) - } - } - } - } - - toDeleteModels := strset.Difference( - strset.FromSlice(modelsToDelete), - modelsInUse, - ) - err = deleteCachedModelsByID(toDeleteModels.Slice()) - - errList = append(errList, err) - return errors.FirstError(errList...) -} - -func deleteCachedModelsByID(modelIDs []string) error { - errList := []error{} - for _, modelID := range modelIDs { - err := files.DeleteDir(filepath.Join(_modelCacheDir, modelID)) - if err != nil { - errList = append(errList, err) - } - } - - return errors.FirstError(errList...) 
-} - -func localModelHash(modelPath string) (string, error) { - var err error - modelHash := "" - if files.IsDir(modelPath) { - modelHash, err = files.HashDirectory(modelPath, files.IgnoreHiddenFiles, files.IgnoreHiddenFolders) - if err != nil { - return "", err - } - } else { - if err := files.CheckFile(modelPath); err != nil { - return "", err - } - modelHash, err = files.HashFile(modelPath) - if err != nil { - return "", err - } - } - - return modelHash, nil -} - -func resetModelCacheDir(modelDir string) error { - _, err := files.DeleteDirIfPresent(modelDir) - if err != nil { - return err - } - - _, err = files.CreateDirIfMissing(modelDir) - if err != nil { - return err - } - - return nil -} diff --git a/cli/local/patch.go b/cli/local/patch.go deleted file mode 100644 index eec6f2132e..0000000000 --- a/cli/local/patch.go +++ /dev/null @@ -1,104 +0,0 @@ -/* -Copyright 2020 Cortex Labs, Inc. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package local - -import ( - "path" - "path/filepath" - - "github.com/cortexlabs/cortex/cli/types/cliconfig" - "github.com/cortexlabs/cortex/pkg/lib/files" - "github.com/cortexlabs/cortex/pkg/operator/schema" - "github.com/cortexlabs/cortex/pkg/types" - "github.com/cortexlabs/cortex/pkg/types/spec" - "github.com/cortexlabs/cortex/pkg/types/userconfig" -) - -func Patch(env cliconfig.Environment, configPath string) ([]schema.DeployResult, error) { - configFileName := filepath.Base(configPath) - - configBytes, err := files.ReadFileBytes(configPath) - if err != nil { - return nil, err - } - - apiConfigs, err := spec.ExtractAPIConfigs(configBytes, types.LocalProviderType, configFileName, nil, nil) - if err != nil { - return nil, err - } - - deployResults := []schema.DeployResult{} - for i := range apiConfigs { - apiConfig := &apiConfigs[i] - apiResponse, err := GetAPI(apiConfig.Name) - if err != nil { - return nil, err - } - - localProjectDir := apiResponse[0].Spec.LocalProjectDir - - projectFileList, err := findProjectFiles(localProjectDir) - if err != nil { - return nil, err - } - - projectFiles, err := newProjectFiles(projectFileList, localProjectDir) - if err != nil { - return nil, err - } - - deployResult, err := deploy(env, []userconfig.API{*apiConfig}, projectFiles, true) - if err != nil { - return nil, err - } - - deployResults = append(deployResults, deployResult...) - } - - return deployResults, nil -} - -func findProjectFiles(projectRoot string) ([]string, error) { - ignoreFns := []files.IgnoreFn{ - files.IgnoreCortexDebug, - files.IgnoreHiddenFiles, - files.IgnoreHiddenFolders, - files.IgnorePythonGeneratedFiles, - } - - cortexIgnorePath := path.Join(projectRoot, ".cortexignore") - if files.IsFile(cortexIgnorePath) { - cortexIgnore, err := files.GitIgnoreFn(cortexIgnorePath) - if err != nil { - return nil, err - } - ignoreFns = append(ignoreFns, cortexIgnore) - } - - projectPaths, err := files.ListDirRecursive(projectRoot, false, ignoreFns...) 
- if err != nil { - return nil, err - } - - // Include .env file containing environment variables - dotEnvPath := path.Join(projectRoot, ".env") - if files.IsFile(dotEnvPath) { - projectPaths = append(projectPaths, dotEnvPath) - } - - return projectPaths, nil -} diff --git a/cli/local/print.go b/cli/local/print.go deleted file mode 100644 index f386afa2f0..0000000000 --- a/cli/local/print.go +++ /dev/null @@ -1,43 +0,0 @@ -/* -Copyright 2020 Cortex Labs, Inc. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ -package local - -import ( - "fmt" - - "github.com/cortexlabs/cortex/cli/types/flags" -) - -// Can be overwritten by CLI commands -var OutputType flags.OutputType = flags.PrettyOutputType - -func localPrintln(a ...interface{}) { - if OutputType != flags.JSONOutputType { - fmt.Println(a...) - } -} - -func localPrint(a ...interface{}) { - if OutputType != flags.JSONOutputType { - fmt.Print(a...) - } -} - -func localPrintf(format string, a ...interface{}) { - if OutputType != flags.JSONOutputType { - fmt.Printf(format, a...) - } -} diff --git a/cli/local/validations.go b/cli/local/validations.go deleted file mode 100644 index 6f63cfab7d..0000000000 --- a/cli/local/validations.go +++ /dev/null @@ -1,316 +0,0 @@ -/* -Copyright 2020 Cortex Labs, Inc. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package local - -import ( - "fmt" - "math" - "net" - "path" - "path/filepath" - "strings" - - "github.com/cortexlabs/cortex/cli/types/flags" - "github.com/cortexlabs/cortex/pkg/lib/aws" - "github.com/cortexlabs/cortex/pkg/lib/docker" - "github.com/cortexlabs/cortex/pkg/lib/errors" - "github.com/cortexlabs/cortex/pkg/lib/files" - "github.com/cortexlabs/cortex/pkg/lib/gcp" - "github.com/cortexlabs/cortex/pkg/lib/k8s" - "github.com/cortexlabs/cortex/pkg/lib/pointer" - "github.com/cortexlabs/cortex/pkg/lib/regex" - "github.com/cortexlabs/cortex/pkg/lib/sets/strset" - "github.com/cortexlabs/cortex/pkg/lib/slices" - s "github.com/cortexlabs/cortex/pkg/lib/strings" - "github.com/cortexlabs/cortex/pkg/types" - "github.com/cortexlabs/cortex/pkg/types/spec" - "github.com/cortexlabs/cortex/pkg/types/userconfig" -) - -var _startingPort = 8889 - -type ProjectFiles struct { - relFilePaths []string - projectRoot string -} - -func newProjectFiles(projectFileList []string, projectRoot string) (ProjectFiles, error) { - relFilePaths := make([]string, len(projectFileList)) - for i, projectFilePath := range projectFileList { - if !files.IsAbsOrTildePrefixed(projectFilePath) { - return ProjectFiles{}, errors.ErrorUnexpected(fmt.Sprintf("%s is not an absolute path", projectFilePath)) - } - if !strings.HasPrefix(projectFilePath, projectRoot) { - return ProjectFiles{}, errors.ErrorUnexpected(fmt.Sprintf("%s is not located within in the project", projectFilePath)) - } - relFilePaths[i] = strings.TrimPrefix(projectFilePath, projectRoot) - } - - return ProjectFiles{ - relFilePaths: 
relFilePaths, - projectRoot: projectRoot, - }, nil -} - -func (projectFiles ProjectFiles) AllPaths() []string { - return projectFiles.relFilePaths -} - -func (projectFiles ProjectFiles) AllAbsPaths() []string { - absPaths := make([]string, 0, len(projectFiles.relFilePaths)) - for _, relPath := range projectFiles.relFilePaths { - absPaths = append(absPaths, path.Join(projectFiles.projectRoot, relPath)) - } - - return absPaths -} - -func (projectFiles ProjectFiles) GetFile(path string) ([]byte, error) { - for _, projectFilePath := range projectFiles.relFilePaths { - if path == projectFilePath { - bytes, err := files.ReadFileBytes(filepath.Join(projectFiles.projectRoot, path)) - if err != nil { - return nil, err - } - return bytes, nil - } - } - - return nil, files.ErrorFileDoesNotExist(path) -} - -func (projectFiles ProjectFiles) HasFile(path string) bool { - return slices.HasString(projectFiles.relFilePaths, path) -} - -func (projectFiles ProjectFiles) HasDir(path string) bool { - path = s.EnsureSuffix(path, "/") - for _, projectFilePath := range projectFiles.relFilePaths { - if strings.HasPrefix(projectFilePath, path) { - return true - } - } - return false -} - -// Get the absolute path to the project directory -func (projectFiles ProjectFiles) ProjectDir() string { - return projectFiles.projectRoot -} - -func ValidateLocalAPIs(apis []userconfig.API, models *[]spec.CuratedModelResource, projectFiles ProjectFiles, awsClient *aws.Client, gcpClient *gcp.Client) error { - if len(apis) == 0 { - return spec.ErrorNoAPIs() - } - - dockerClient, err := docker.GetDockerClient() - if err != nil { - return err - } - - for i := range apis { - api := &apis[i] - - if err := spec.ValidateAPI(api, models, projectFiles, types.LocalProviderType, awsClient, gcpClient, nil); err != nil { - return errors.Wrap(err, api.Identify()) - } - - if api.Compute.CPU != nil && (api.Compute.CPU.MilliValue() > int64(dockerClient.Info.NCPU)*1000) { - api.Compute.CPU = 
k8s.NewQuantity(int64(dockerClient.Info.NCPU)) - } - } - - dups := spec.FindDuplicateNames(apis) - if len(dups) > 0 { - return spec.ErrorDuplicateName(dups) - } - - imageSet := strset.New() - for _, api := range apis { - imageSet.Add(api.Predictor.Image) - if api.Predictor.Type == userconfig.TensorFlowPredictorType { - imageSet.Add(api.Predictor.TensorFlowServingImage) - } - } - - pulledImage := false - for image := range imageSet { - var err error - dockerAuth := docker.NoAuth - if regex.IsValidECRURL(image) && !awsClient.IsAnonymous { - dockerAuth, err = docker.AWSAuthConfig(awsClient) - if err != nil { - return err - } - } - - pullVerbosity := docker.PrintDots - if OutputType == flags.JSONOutputType { - pullVerbosity = docker.NoPrint - } - - pulledThisImage, err := docker.PullImage(image, dockerAuth, pullVerbosity) - if err != nil { - return errors.Wrap(err, "failed to pull image", image) - } - - if pulledThisImage { - pulledImage = true - } - } - - if pulledImage { - localPrintln() - } - - portToRunningAPIsMap, err := getPortToRunningAPIsMap() - if err != nil { - return err - } - - var usedPorts []int - - runningAPIsToPortMap := map[string]int{} - for port, apiName := range portToRunningAPIsMap { - runningAPIsToPortMap[apiName] = port - usedPorts = append(usedPorts, port) - } - - portToUpdatingAPIMap := map[int]string{} - updatingAPIToPortMap := map[string]*int{} - - for i := range apis { - api := &apis[i] - - updatingAPIToPortMap[api.Name] = api.Networking.LocalPort - if api.Networking.LocalPort != nil { - if collidingAPIName, ok := portToUpdatingAPIMap[*api.Networking.LocalPort]; ok { - return errors.Wrap(ErrorDuplicateLocalPort(collidingAPIName), api.Identify(), userconfig.LocalPortKey, s.Int(*api.Networking.LocalPort)) - } - usedPorts = append(usedPorts, *api.Networking.LocalPort) - portToUpdatingAPIMap[*api.Networking.LocalPort] = api.Name - } - } - - for i := range apis { - api := &apis[i] - if api.Networking.LocalPort != nil { - // same port as previous 
deployment of this API - if *api.Networking.LocalPort == runningAPIsToPortMap[api.Name] { - continue - } - - // port is being used by another API - if apiName, ok := portToRunningAPIsMap[*api.Networking.LocalPort]; ok { - return errors.Wrap(ErrorDuplicateLocalPort(apiName), api.Identify(), userconfig.LocalPortKey, s.Int(*api.Networking.LocalPort)) - } - isPortAvailable, err := checkPortAvailability(*api.Networking.LocalPort) - if err != nil { - return err - } - - if !isPortAvailable { - return errors.Wrap(ErrorPortAlreadyInUse(*api.Networking.LocalPort), api.Identify(), userconfig.LocalPortKey) - } - } else { - // get previous api deployment port - if port, ok := runningAPIsToPortMap[api.Name]; ok { - - // check that the previous api deployment port has not been claimed in new deployment - if _, ok := portToUpdatingAPIMap[port]; !ok { - api.Networking.LocalPort = pointer.Int(port) - } - } - } - } - - for i := range apis { - api := &apis[i] - if api.Networking.LocalPort == nil { - availablePort, err := findTheNextAvailablePort(usedPorts) - if err != nil { - errors.Wrap(err, api.Identify()) - } - - api.Networking.LocalPort = pointer.Int(availablePort) - } - } - - return nil -} - -func checkPortAvailability(port int) (bool, error) { - ln, err := net.Listen("tcp", ":"+s.Int(port)) - if err != nil { - return false, nil - } - err = ln.Close() - if err != nil { - return false, errors.WithStack(err) - } - - return true, nil -} - -func findTheNextAvailablePort(blackListedPorts []int) (int, error) { - defer func() { _startingPort++ }() - blackListedSet := map[int]struct{}{} - for _, port := range blackListedPorts { - blackListedSet[port] = struct{}{} - } - - for _startingPort <= math.MaxUint16 { - if _, ok := blackListedSet[_startingPort]; ok { - _startingPort++ - continue - } - - isAvailable, err := checkPortAvailability(_startingPort) - if err != nil { - return 0, err - } - - if isAvailable { - return _startingPort, nil - } - - _startingPort++ - } - - return 0, 
ErrorUnableToFindAvailablePorts() -} - -func getPortToRunningAPIsMap() (map[int]string, error) { - allContainers, err := GetAllRunningContainers() - if err != nil { - return nil, err - } - - portMap := map[int]string{} - - for _, container := range allContainers { - if container.Labels["type"] == _apiContainerName { - for _, port := range container.Ports { - if port.PrivatePort == 8888 { - portMap[int(port.PublicPort)] = container.Labels["apiName"] - } - } - } - } - - return portMap, nil -} diff --git a/cli/types/cliconfig/cli_config.go b/cli/types/cliconfig/cli_config.go index 923ded7279..cbcd598215 100644 --- a/cli/types/cliconfig/cli_config.go +++ b/cli/types/cliconfig/cli_config.go @@ -19,12 +19,11 @@ package cliconfig import ( "github.com/cortexlabs/cortex/pkg/lib/errors" "github.com/cortexlabs/cortex/pkg/lib/sets/strset" - "github.com/cortexlabs/cortex/pkg/types" ) type CLIConfig struct { Telemetry *bool `json:"telemetry,omitempty" yaml:"telemetry,omitempty"` - DefaultEnvironment string `json:"default_environment" yaml:"default_environment"` + DefaultEnvironment *string `json:"default_environment" yaml:"default_environment"` Environments []*Environment `json:"environments" yaml:"environments"` } @@ -43,20 +42,5 @@ func (cliConfig *CLIConfig) Validate() error { } } - // Ensure the local env is always present - if !envNames.Has(types.LocalProviderType.String()) { - localEnv := &Environment{ - Name: types.LocalProviderType.String(), - Provider: types.LocalProviderType, - } - - cliConfig.Environments = append([]*Environment{localEnv}, cliConfig.Environments...) 
- envNames.Add("local") - } - - if cliConfig.DefaultEnvironment == "" { - cliConfig.DefaultEnvironment = types.LocalProviderType.String() - } - return nil } diff --git a/cli/types/cliconfig/config_key.go b/cli/types/cliconfig/config_key.go index a60762c108..8d910dce08 100644 --- a/cli/types/cliconfig/config_key.go +++ b/cli/types/cliconfig/config_key.go @@ -24,5 +24,4 @@ const ( OperatorEndpointKey = "operator_endpoint" AWSAccessKeyIDKey = "aws_access_key_id" AWSSecretAccessKeyKey = "aws_secret_access_key" - AWSRegionKey = "aws_region" ) diff --git a/cli/types/cliconfig/environment.go b/cli/types/cliconfig/environment.go index d3718bcd43..31892b6452 100644 --- a/cli/types/cliconfig/environment.go +++ b/cli/types/cliconfig/environment.go @@ -31,7 +31,6 @@ type Environment struct { OperatorEndpoint *string `json:"operator_endpoint,omitempty" yaml:"operator_endpoint,omitempty"` AWSAccessKeyID *string `json:"aws_access_key_id,omitempty" yaml:"aws_access_key_id,omitempty"` AWSSecretAccessKey *string `json:"aws_secret_access_key,omitempty" yaml:"aws_secret_access_key,omitempty"` - AWSRegion *string `json:"aws_region,omitempty" yaml:"aws_region,omitempty"` } func (env Environment) String(isDefault bool) string { @@ -54,9 +53,6 @@ func (env Environment) String(isDefault bool) string { if env.AWSSecretAccessKey != nil { items.Add("aws secret access key", s.MaskString(*env.AWSSecretAccessKey, 4)) } - if env.AWSRegion != nil { - items.Add("aws region", *env.AWSRegion) - } return items.String(&table.KeyValuePairOpts{ BoldFirstLine: pointer.Bool(true), @@ -64,10 +60,6 @@ func (env Environment) String(isDefault bool) string { } func CheckProviderEnvironmentNameCompatibility(envName string, provider types.ProviderType) error { - if provider == types.LocalProviderType && envName != types.LocalProviderType.String() { - return ErrorLocalEnvironmentMustBeNamedLocal(envName) - } - envNameProvider := types.ProviderTypeFromString(envName) if envNameProvider == types.UnknownProviderType 
{ return nil @@ -93,12 +85,6 @@ func (env *Environment) Validate() error { return err } - if env.Provider == types.LocalProviderType { - if env.OperatorEndpoint != nil { - return errors.Wrap(ErrorOperatorEndpointInLocalEnvironment(), env.Name) - } - } - if env.Provider == types.AWSProviderType { if env.OperatorEndpoint == nil { return errors.Wrap(cr.ErrorMustBeDefined(), env.Name, OperatorEndpointKey) @@ -109,10 +95,6 @@ func (env *Environment) Validate() error { if env.AWSSecretAccessKey == nil { return errors.Wrap(cr.ErrorMustBeDefined(), env.Name, AWSSecretAccessKeyKey) } - if env.AWSRegion != nil { - err := errors.Append(cr.ErrorMustBeEmpty(), " (it's only used for the local environment, since it can be inferred in aws)") - return errors.Wrap(err, env.Name, AWSRegionKey) - } } if env.Provider == types.GCPProviderType { @@ -125,9 +107,6 @@ func (env *Environment) Validate() error { if env.AWSSecretAccessKey != nil { return errors.Wrap(cr.ErrorMustBeEmpty(), env.Name, AWSSecretAccessKeyKey) } - if env.AWSRegion != nil { - return errors.Wrap(cr.ErrorMustBeEmpty(), env.Name, AWSRegionKey) - } } return nil diff --git a/cli/types/cliconfig/errors.go b/cli/types/cliconfig/errors.go index 441771f100..7a449a6fca 100644 --- a/cli/types/cliconfig/errors.go +++ b/cli/types/cliconfig/errors.go @@ -25,11 +25,9 @@ import ( ) const ( - ErrEnvironmentNotConfigured = "cliconfig.environment_not_configured" - ErrLocalEnvironmentMustBeNamedLocal = "cliconfig.local_environment_must_be_named_local" - ErrEnvironmentProviderNameConflict = "cliconfig.environment_provider_name_conflict" - ErrDuplicateEnvironmentNames = "cliconfig.duplicate_environment_names" - ErrOperatorEndpointInLocalEnvironment = "cliconfig.operator_endpoint_in_local_environment" + ErrEnvironmentNotConfigured = "cliconfig.environment_not_configured" + ErrEnvironmentProviderNameConflict = "cliconfig.environment_provider_name_conflict" + ErrDuplicateEnvironmentNames = "cliconfig.duplicate_environment_names" ) func 
ErrorEnvironmentNotConfigured(envName string) error { @@ -39,13 +37,6 @@ func ErrorEnvironmentNotConfigured(envName string) error { }) } -func ErrorLocalEnvironmentMustBeNamedLocal(envName string) error { - return errors.WithStack(&errors.Error{ - Kind: ErrLocalEnvironmentMustBeNamedLocal, - Message: fmt.Sprintf("local environment cannot be named \"%s\"; local environment is reserved and must be named \"local\"", envName), - }) -} - func ErrorEnvironmentProviderNameConflict(envName string, provider types.ProviderType) error { return errors.WithStack(&errors.Error{ Kind: ErrEnvironmentProviderNameConflict, @@ -59,10 +50,3 @@ func ErrorDuplicateEnvironmentNames(envName string) error { Message: fmt.Sprintf("duplicate environment names (%s is defined more than once)", s.UserStr(envName)), }) } - -func ErrorOperatorEndpointInLocalEnvironment() error { - return errors.WithStack(&errors.Error{ - Kind: ErrOperatorEndpointInLocalEnvironment, - Message: fmt.Sprintf("operator_endpoint should not be specified (it's not used in the local environment)"), - }) -} diff --git a/dev/registry.sh b/dev/registry.sh index 8fbe14f55e..05f7278549 100755 --- a/dev/registry.sh +++ b/dev/registry.sh @@ -31,7 +31,7 @@ GCP_PROJECT_ID=${GCP_PROJECT_ID:-} AWS_ACCOUNT_ID=${AWS_ACCOUNT_ID:-} AWS_REGION=${AWS_REGION:-} -provider="local" +provider="undefined" include_slim="false" positional_args=() while [[ $# -gt 0 ]]; do @@ -142,7 +142,7 @@ function cache_builder() { } function push() { - if [ "$provider" = "local" ]; then + if [ "$provider" == "undefined" ]; then return fi @@ -215,21 +215,19 @@ export -f registry_login # validate environment is correctly set on env.sh validate_env "$provider" -# usage: registry.sh clean --provider aws|gcp|local +# usage: registry.sh clean --provider aws|gcp if [ "$cmd" = "clean" ]; then if [ "$provider" = "aws" ]; then cleanup_ecr - elif [ "$provider" = "local" ]; then - cleanup_local fi -# usage: registry.sh create --provider/-p aws|gcp|local +# usage: 
registry.sh create --provider/-p aws|gcp elif [ "$cmd" = "create" ]; then if [ "$provider" = "aws" ]; then create_aws_registry fi -# usage: registry.sh update-single IMAGE --provider/-p aws|gcp|local +# usage: registry.sh update-single IMAGE --provider/-p aws|gcp elif [ "$cmd" = "update-single" ]; then image=$sub_cmd if [ "$image" = "operator" ] || [ "$image" = "request-monitor" ]; then @@ -237,50 +235,50 @@ elif [ "$cmd" = "update-single" ]; then fi build_and_push $image latest -# usage: registry.sh update all|dev|api --provider/-p aws|gcp|local [--include-slim] +# usage: registry.sh update all|dev|api --provider/-p aws|gcp [--include-slim] # if parallel utility is installed, the docker build commands will be parallelized elif [ "$cmd" = "update" ]; then images_to_build=() if [ "$sub_cmd" == "all" ]; then - images_to_build+=( "${non_dev_images_local[@]}" ) + images_to_build+=( "${non_dev_images_cluster[@]}" ) if [ "$provider" == "aws" ]; then - images_to_build+=( "${non_dev_images_cluster[@]}" ) images_to_build+=( "${non_dev_images_aws[@]}" ) elif [ "$provider" == "gcp" ]; then - images_to_build+=( "${non_dev_images_cluster[@]}" ) images_to_build+=( "${non_dev_images_gcp[@]}" ) + elif [ "$provider" == "undefined" ]; then + images_to_build+=( "${non_dev_images_aws[@]}" "${non_dev_images_gcp[@]}" ) fi fi if [[ "$sub_cmd" == "all" || "$sub_cmd" == "dev" ]]; then - images_to_build+=( "${dev_images_local[@]}" ) + images_to_build+=( "${dev_images_cluster[@]}" ) if [ "$provider" == "aws" ]; then - images_to_build+=( "${dev_images_cluster[@]}" ) images_to_build+=( "${dev_images_aws[@]}" ) elif [ "$provider" == "gcp" ]; then - images_to_build+=( "${dev_images_cluster[@]}" ) images_to_build+=( "${dev_images_gcp[@]}" ) + elif [ "$provider" == "undefined" ]; then + images_to_build+=( "${dev_images_aws[@]}" "${dev_images_gcp[@]}" ) fi fi - images_to_build+=( "${api_images_local[@]}" ) + images_to_build+=( "${api_images_cluster[@]}" ) if [ "$provider" == "aws" ]; then - 
images_to_build+=( "${api_images_cluster[@]}" ) images_to_build+=( "${api_images_aws[@]}" ) elif [ "$provider" == "gcp" ]; then - images_to_build+=( "${api_images_cluster[@]}" ) images_to_build+=( "${api_images_gcp[@]}" ) + elif [ "$provider" == "undefined" ]; then + images_to_build+=( "${api_images_aws[@]}" "${api_images_gcp[@]}" ) fi if [ "$include_slim" == "true" ]; then - images_to_build+=( "${api_slim_images_local[@]}" ) + images_to_build+=( "${api_slim_images_cluster[@]}" ) if [ "$provider" == "aws" ]; then - images_to_build+=( "${api_slim_images_cluster[@]}" ) images_to_build+=( "${api_slim_images_aws[@]}" ) elif [ "$provider" == "gcp" ]; then - images_to_build+=( "${api_slim_images_cluster[@]}" ) images_to_build+=( "${api_slim_images_gcp[@]}" ) + elif [ "$provider" == "undefined" ]; then + images_to_build+=( "${api_slim_images_aws[@]}" "${api_slim_images_gcp[@]}" ) fi fi diff --git a/docs/clients/cli.md b/docs/clients/cli.md index b39271ee27..1859409d32 100644 --- a/docs/clients/cli.md +++ b/docs/clients/cli.md @@ -9,7 +9,7 @@ Usage: cortex deploy [CONFIG_FILE] [flags] Flags: - -e, --env string environment to use (default "local") + -e, --env string environment to use -f, --force override the in-progress api update -y, --yes skip prompts -o, --output string output format: one of pretty|json (default "pretty") @@ -25,7 +25,7 @@ Usage: cortex get [API_NAME] [JOB_ID] [flags] Flags: - -e, --env string environment to use (default "local") + -e, --env string environment to use -w, --watch re-run the command every 2 seconds -o, --output string output format: one of pretty|json (default "pretty") -v, --verbose show additional information (only applies to pretty output format) @@ -41,7 +41,7 @@ Usage: cortex logs API_NAME [JOB_ID] [flags] Flags: - -e, --env string environment to use (default "local") + -e, --env string environment to use -h, --help help for logs ``` @@ -54,7 +54,7 @@ Usage: cortex patch [CONFIG_FILE] [flags] Flags: - -e, --env string environment to 
use (default "local") + -e, --env string environment to use -f, --force override the in-progress api update -o, --output string output format: one of pretty|json (default "pretty") -h, --help help for patch @@ -69,7 +69,7 @@ Usage: cortex refresh API_NAME [flags] Flags: - -e, --env string environment to use (default "local") + -e, --env string environment to use -f, --force override the in-progress api update -o, --output string output format: one of pretty|json (default "pretty") -h, --help help for refresh @@ -84,7 +84,7 @@ Usage: cortex predict API_NAME JSON_FILE [flags] Flags: - -e, --env string environment to use (default "local") + -e, --env string environment to use -h, --help help for predict ``` @@ -97,7 +97,7 @@ Usage: cortex delete API_NAME [JOB_ID] [flags] Flags: - -e, --env string environment to use (default "local") + -e, --env string environment to use -f, --force delete the api without confirmation -c, --keep-cache keep cached data for the api -o, --output string output format: one of pretty|json (default "pretty") @@ -261,7 +261,6 @@ Flags: -o, --operator-endpoint string set the operator endpoint without prompting -k, --aws-access-key-id string set the aws access key id without prompting -s, --aws-secret-access-key string set the aws secret access key without prompting - -r, --aws-region string set the aws region without prompting -h, --help help for configure ``` @@ -311,7 +310,7 @@ Usage: cortex version [flags] Flags: - -e, --env string environment to use (default "local") + -e, --env string environment to use -h, --help help for version ``` diff --git a/docs/clients/python.md b/docs/clients/python.md index 7390c6604d..534ab6643b 100644 --- a/docs/clients/python.md +++ b/docs/clients/python.md @@ -2,7 +2,6 @@ * [cortex](#cortex) * [client](#client) - * [local\_client](#local_client) * [cluster\_client](#cluster_client) * [env\_list](#env_list) * [env\_delete](#env_delete) @@ -37,28 +36,6 @@ Initialize a client based on the specified environment. 
Cortex client that can be used to deploy and manage APIs in the specified environment. -## local\_client - -```python -local_client(aws_access_key_id: str, aws_secret_access_key: str, aws_region: str) -> Client -``` - -Initialize a client to deploy and manage APIs locally. - -The specified AWS credentials will be used by the CLI to download models -from S3 and authenticate to ECR, and will be set in your Predictor. - -**Arguments**: - -- `aws_access_key_id` - AWS access key ID. -- `aws_secret_access_key` - AWS secret access key. -- `aws_region` - AWS region. - - -**Returns**: - - Cortex client that can be used to deploy and manage APIs locally. - ## cluster\_client ```python diff --git a/go.mod b/go.mod index 26b76581e3..ce2d5f3f7d 100644 --- a/go.mod +++ b/go.mod @@ -18,7 +18,7 @@ require ( github.com/denormal/go-gitignore v0.0.0-20180930084346-ae8ad1d07817 github.com/docker/distribution v2.7.1+incompatible // indirect github.com/docker/docker v0.0.0-00010101000000-000000000000 - github.com/docker/go-connections v0.4.0 + github.com/docker/go-connections v0.4.0 // indirect github.com/docker/go-units v0.4.0 // indirect github.com/fatih/color v1.10.0 github.com/getsentry/sentry-go v0.8.0 diff --git a/pkg/cortex/client/cortex/__init__.py b/pkg/cortex/client/cortex/__init__.py index a6da59f5f0..9f56d71156 100644 --- a/pkg/cortex/client/cortex/__init__.py +++ b/pkg/cortex/client/cortex/__init__.py @@ -47,43 +47,6 @@ def client(env: str): return Client(environment) -def local_client( - aws_access_key_id: str, - aws_secret_access_key: str, - aws_region: str, -) -> Client: - """ - Initialize a client to deploy and manage APIs locally. - - The specified AWS credentials will be used by the CLI to download models - from S3 and authenticate to ECR, and will be set in your Predictor. - - Args: - aws_access_key_id: AWS access key ID. - aws_secret_access_key: AWS secret access key. - aws_region: AWS region. 
- - Returns: - Cortex client that can be used to deploy and manage APIs locally. - """ - args = [ - "env", - "configure", - "--provider", - "local", - "--aws-region", - aws_region, - "--aws-access-key-id", - aws_access_key_id, - "--aws-secret-access-key", - aws_secret_access_key, - ] - - run_cli(args, hide_output=True) - - return Client("local") - - def cluster_client( name: str, provider: str, diff --git a/pkg/cortex/serve/cortex_internal/lib/api/api.py b/pkg/cortex/serve/cortex_internal/lib/api/api.py index 77b8d62804..8ce41339e5 100644 --- a/pkg/cortex/serve/cortex_internal/lib/api/api.py +++ b/pkg/cortex/serve/cortex_internal/lib/api/api.py @@ -22,7 +22,7 @@ import datadog from cortex_internal.lib.api import Predictor from cortex_internal.lib.exceptions import CortexException -from cortex_internal.lib.storage import LocalStorage, S3, GCS +from cortex_internal.lib.storage import S3, GCS from cortex_internal.lib.log import logger @@ -30,7 +30,7 @@ class API: def __init__( self, provider: str, - storage: Union[LocalStorage, S3, GCS], + storage: Union[S3, GCS], api_spec: Dict[str, Any], model_dir: str, cache_dir: str = ".", @@ -49,13 +49,9 @@ def __init__( self.name = api_spec["name"] self.predictor = Predictor(provider, api_spec, model_dir) - if provider != "local": - host_ip = os.environ["HOST_IP"] - datadog.initialize(statsd_host=host_ip, statsd_port="8125") - self.statsd = datadog.statsd - - if provider == "local": - self.metrics_file_lock = threading.Lock() + host_ip = os.environ["HOST_IP"] + datadog.initialize(statsd_host=host_ip, statsd_port="8125") + self.statsd = datadog.statsd @property def server_side_batching_enabled(self): @@ -73,16 +69,13 @@ def metric_dimensions(self): def post_request_metrics(self, status_code, total_time): total_time_ms = total_time * 1000 - if self.provider == "local": - self.store_metrics_locally(status_code, total_time_ms) - else: - metrics = [ - self.status_code_metric(self.metric_dimensions(), status_code), - 
self.status_code_metric(self.metric_dimensions_with_id(), status_code), - self.latency_metric(self.metric_dimensions(), total_time_ms), - self.latency_metric(self.metric_dimensions_with_id(), total_time_ms), - ] - self.post_metrics(metrics) + metrics = [ + self.status_code_metric(self.metric_dimensions(), status_code), + self.status_code_metric(self.metric_dimensions_with_id(), status_code), + self.latency_metric(self.metric_dimensions(), total_time_ms), + self.latency_metric(self.metric_dimensions_with_id(), total_time_ms), + ] + self.post_metrics(metrics) def post_metrics(self, metrics): try: @@ -98,27 +91,6 @@ def post_metrics(self, metrics): except: logger.warn("failure encountered while publishing metrics", exc_info=True) - def store_metrics_locally(self, status_code, total_time): - status_code_series = int(status_code / 100) - status_code_file_name = f"/mnt/workspace/{os.getpid()}.{status_code_series}XX" - request_time_file = f"/mnt/workspace/{os.getpid()}.request_time" - - self.metrics_file_lock.acquire() - try: - self.increment_counter_file(status_code_file_name, 1) - self.increment_counter_file(request_time_file, total_time) - finally: - self.metrics_file_lock.release() - - def increment_counter_file(self, file_name, value): - previous_val = 0 - if Path(file_name).is_file(): - with open(file_name, "r") as f: - previous_val = json.load(f) # values are either of type int or float - - with open(file_name, "w") as f: - json.dump(previous_val + value, f) - def status_code_metric(self, dimensions, status_code): status_code_series = int(status_code / 100) status_code_dimensions = dimensions + [ @@ -164,18 +136,16 @@ def get_spec( spec_path: str, cache_dir: str, region: Optional[str] = None, -) -> Tuple[Union[LocalStorage, S3, GCS], dict]: +) -> Tuple[Union[S3, GCS], dict]: """ Args: - provider: "local", "aws" or "gcp". + provider: "aws" or "gcp". spec_path: Path to API spec (i.e. 
"s3://cortex-dev-0/apis/iris-classifier/api/69b93378fa5c0218-jy1fjtyihu-9fcc10739e7fc8050cefa8ca27ece1ee/master-spec.json"). cache_dir: Local directory where the API spec gets saved to. region: Region of the bucket. Only required for "S3" provider. """ - if provider == "local": - storage = LocalStorage(cache_dir) - elif provider == "aws": + if provider == "aws": bucket, key = S3.deconstruct_s3_path(spec_path) storage = S3(bucket=bucket, region=region) elif provider == "gcp": @@ -184,9 +154,6 @@ def get_spec( else: raise ValueError('invalid "provider" argument') - if provider == "local": - return storage, read_json(spec_path) - local_spec_path = os.path.join(cache_dir, "api_spec.json") if not os.path.isfile(local_spec_path): storage.download_file(key, local_spec_path) diff --git a/pkg/cortex/serve/cortex_internal/lib/api/predictor.py b/pkg/cortex/serve/cortex_internal/lib/api/predictor.py index 8717d906d4..e4cf5c369e 100644 --- a/pkg/cortex/serve/cortex_internal/lib/api/predictor.py +++ b/pkg/cortex/serve/cortex_internal/lib/api/predictor.py @@ -72,7 +72,7 @@ class Predictor: def __init__(self, provider: str, api_spec: dict, model_dir: str): """ Args: - provider: "local" or "aws". + provider: "aws" or "gcp". api_spec: API configuration. model_dir: Where the models are stored on disk. 
""" diff --git a/pkg/cortex/serve/cortex_internal/lib/client/onnx.py b/pkg/cortex/serve/cortex_internal/lib/client/onnx.py index 382d0d9eeb..50c908492e 100644 --- a/pkg/cortex/serve/cortex_internal/lib/client/onnx.py +++ b/pkg/cortex/serve/cortex_internal/lib/client/onnx.py @@ -88,7 +88,7 @@ def __init__( self._models_dir = False self._spec_model_names = self._spec_models.get_field("name") - # for when local models are used + # only applicable for ONNX file paths (for ONNX filepaths, it must look as if the models are available locally) self._spec_local_model_names = self._spec_models.get_local_model_names() self._local_model_ts = int(datetime.datetime.now(datetime.timezone.utc).timestamp()) diff --git a/pkg/cortex/serve/cortex_internal/lib/client/python.py b/pkg/cortex/serve/cortex_internal/lib/client/python.py index 1bb387954a..2aa696b0bc 100644 --- a/pkg/cortex/serve/cortex_internal/lib/client/python.py +++ b/pkg/cortex/serve/cortex_internal/lib/client/python.py @@ -79,10 +79,6 @@ def __init__( self._models_dir = False self._spec_model_names = self._spec_models.get_field("name") - # for when local models are used - self._spec_local_model_names = self._spec_models.get_local_model_names() - self._local_model_ts = int(datetime.datetime.now(datetime.timezone.utc).timestamp()) - self._multiple_processes = self._api_spec["predictor"]["processes_per_replica"] > 1 self._caching_enabled = self._is_model_caching_enabled() @@ -267,9 +263,7 @@ def _get_model(self, model_name: str, model_version: str) -> Any: with LockedModel(self._models, "r", model_name, model_version): status, local_ts = self._models.has_model(model_name, model_version) if status in ["not-available", "on-disk"] or ( - status != "not-available" - and local_ts != current_upstream_ts - and not (status == "in-memory" and model_name in self._spec_local_model_names) + status != "not-available" and local_ts != current_upstream_ts ): update_model = True raise WithBreak @@ -284,9 +278,8 @@ def _get_model(self, 
model_name: str, model_version: str) -> Any: status, local_ts = self._models.has_model(model_name, model_version) # refresh disk model - if model_name not in self._spec_local_model_names and ( - status == "not-available" - or (status in ["on-disk", "in-memory"] and local_ts != current_upstream_ts) + if status == "not-available" or ( + status in ["on-disk", "in-memory"] and local_ts != current_upstream_ts ): if status == "not-available": logger.info( @@ -328,10 +321,6 @@ def _get_model(self, model_name: str, model_version: str) -> Any: raise WithBreak current_upstream_ts = int(date.timestamp()) - # give the local model a timestamp initialized at start time - if model_name in self._spec_local_model_names: - current_upstream_ts = self._local_model_ts - # load model try: logger.info( diff --git a/pkg/cortex/serve/cortex_internal/lib/client/tensorflow.py b/pkg/cortex/serve/cortex_internal/lib/client/tensorflow.py index afdbbb4e56..d309622d4f 100644 --- a/pkg/cortex/serve/cortex_internal/lib/client/tensorflow.py +++ b/pkg/cortex/serve/cortex_internal/lib/client/tensorflow.py @@ -280,20 +280,19 @@ def _run_inference(self, model_input: Any, model_name: str, model_version: str) self._models.remove_model(model_name, model_version) # download model - if model_name not in self._spec_models.get_local_model_names(): - logger.info( - f"downloading model {model_name} of version {model_version} from the {upstream_model['provider']} upstream" - ) - date = self._models.download_model( - upstream_model["provider"], - upstream_model["bucket"], - model_name, - model_version, - upstream_model["path"], - ) - if not date: - raise WithBreak - current_upstream_ts = int(date.timestamp()) + logger.info( + f"downloading model {model_name} of version {model_version} from the {upstream_model['provider']} upstream" + ) + date = self._models.download_model( + upstream_model["provider"], + upstream_model["bucket"], + model_name, + model_version, + upstream_model["path"], + ) + if not date: + raise 
WithBreak + current_upstream_ts = int(date.timestamp()) # load model try: diff --git a/pkg/cortex/serve/cortex_internal/lib/model/cron.py b/pkg/cortex/serve/cortex_internal/lib/model/cron.py index 3fafdfe748..b8c0677615 100644 --- a/pkg/cortex/serve/cortex_internal/lib/model/cron.py +++ b/pkg/cortex/serve/cortex_internal/lib/model/cron.py @@ -28,7 +28,7 @@ from cortex_internal.lib import util from cortex_internal.lib.log import logger from cortex_internal.lib.concurrency import LockedFile, get_locked_files -from cortex_internal.lib.storage import S3, GCS, LocalStorage +from cortex_internal.lib.storage import S3, GCS from cortex_internal.lib.exceptions import CortexException, WithBreak from cortex_internal.lib.type import ( predictor_type_from_api_spec, @@ -257,14 +257,6 @@ def _make_local_models_available(self) -> None: logger.info(message) def _update_models_tree(self) -> None: - # don't update when the models:dir is a local path - if ( - self._is_dir_used - and not self._models_dir.startswith("s3://") - and not self._models_dir.startswith("gs://") - ): - return - # get updated/validated paths/versions of the cloud models ( model_names, @@ -738,7 +730,6 @@ def __init__( self._cloud_paths = [] self._spec_models = get_models_from_api_spec(self._api_spec) - self._local_model_names = self._spec_models.get_local_model_names() self._cloud_model_names = self._spec_models.get_cloud_model_names() for model_name in self._cloud_model_names: self._cloud_paths.append(self._spec_models[model_name]["path"]) @@ -785,8 +776,6 @@ def run(self): self._reset_when_tfs_unresponsive() time.sleep(1.0) - self._load_local_models() - while not self._event_stopper.is_set(): success = self._update_models() if success and not self._ran_once.is_set(): @@ -823,14 +812,6 @@ def ran_once(self) -> bool: return self._ran_once.is_set() def _update_models(self) -> bool: - # don't update when the models:dir is a local path - if ( - self._is_dir_used - and not self._models_dir.startswith("s3://") - and 
not self._models_dir.startswith("gs://") - ): - return True - # get updated/validated paths/versions of the cloud models (S3 or GS) ( model_names, @@ -872,7 +853,7 @@ def _update_models(self) -> bool: # remove models that no longer appear in model_names for model_name, model_versions in find_ondisk_models(self._download_dir).items(): - if model_name in model_names or model_name in self._local_model_names: + if model_name in model_names: continue for ondisk_version in model_versions: ondisk_model_version_path = os.path.join( @@ -917,8 +898,6 @@ def _update_models(self) -> bool: # # update TFS models current_ts_state = {} for model_name, model_versions in ondisk_models.items(): - if model_name in self._local_model_names: - continue try: ts = self._update_tfs_model( model_name, model_versions, timestamps, model_names, versions @@ -1223,39 +1202,6 @@ def _update_tfs_model( return current_ts_state - def _load_local_models(self) -> None: - for model_name in self._local_model_names: - for model_version in self._spec_models[model_name]["versions"]: - model_disk_path = os.path.join(self._tfs_model_dir, model_name) - try: - self._client.add_single_model( - model_name, - model_version, - model_disk_path, - self._determine_model_signature_key(model_name), - timeout=30.0, - ) - except Exception as e: - try: - self._client.remove_single_model(model_name, model_version) - logger.warning( - "model '{}' of version '{}' couldn't be loaded: {}".format( - model_name, model_version, str(e) - ) - ) - except grpc.RpcError as error: - if error.code() == grpc.StatusCode.UNAVAILABLE: - logger.warning( - "TFS server unresponsive after trying to load model '{}' of version '{}': {}".format( - model_name, model_version, str(e) - ) - ) - self._reset_when_tfs_unresponsive() - return None - self._old_ts_state[f"{model_name}-{model_version}"] = int( - datetime.datetime.now(datetime.timezone.utc).timestamp() - ) - def _is_this_a_newer_model_id(self, model_id: str, timestamp: int) -> bool: return 
model_id in self._old_ts_state and self._old_ts_state[model_id] < timestamp @@ -1389,6 +1335,8 @@ def __init__( self._tree = tree self._spec_models = get_models_from_api_spec(self._api_spec) + + # only required for ONNX file paths self._local_model_names = self._spec_models.get_local_model_names() self._local_model_versions = [ self._spec_models.get_versions_for(model_name) for model_name in self._local_model_names @@ -1569,6 +1517,7 @@ def __init__(self, interval: int, api_spec: dict, tree: ModelsTree, ondisk_model self._is_dir_used = False self._models_dir = None + # only required for ONNX file paths self._make_local_models_available() def _make_local_models_available(self): @@ -1619,14 +1568,6 @@ def _make_local_models_available(self): ) def _update_models_tree(self) -> None: - # don't update when the models:dir is a local path - if ( - self._is_dir_used - and not self._models_dir.startswith("s3://") - and not self._models_dir.startswith("gs://") - ): - return True - # get updated/validated paths/versions of the cloud models (S3 or GS) ( model_names, diff --git a/pkg/cortex/serve/cortex_internal/lib/model/type.py b/pkg/cortex/serve/cortex_internal/lib/model/type.py index fa2e554649..11ea546030 100644 --- a/pkg/cortex/serve/cortex_internal/lib/model/type.py +++ b/pkg/cortex/serve/cortex_internal/lib/model/type.py @@ -49,6 +49,8 @@ def is_local(self, name: str) -> Optional[bool]: """ Checks if the model has been made available from the local disk. + Note: Only required for ONNX file paths. + Args: name: Name of the model as specified in predictor:models:paths:name or if a single model is specified, _cortex_default. @@ -98,6 +100,8 @@ def get_local_model_names(self) -> List[str]: """ Get locally-provided models as specified with predictor:models:path, predictor:models:paths or predictor:models:dir. + Note: Only required for ONNX file paths. + Returns: A list of names of all local models. 
""" diff --git a/pkg/cortex/serve/cortex_internal/lib/model/validation.py b/pkg/cortex/serve/cortex_internal/lib/model/validation.py index 492ba3d286..930b52c124 100644 --- a/pkg/cortex/serve/cortex_internal/lib/model/validation.py +++ b/pkg/cortex/serve/cortex_internal/lib/model/validation.py @@ -21,7 +21,6 @@ from fnmatch import fnmatchcase from cortex_internal.lib import util -from cortex_internal.lib.storage import S3, LocalStorage from cortex_internal.lib.log import logger from cortex_internal.lib.exceptions import CortexException from cortex_internal.lib.type import ( diff --git a/pkg/cortex/serve/cortex_internal/serve/serve.py b/pkg/cortex/serve/cortex_internal/serve/serve.py index e73ec0d181..afe5152ecc 100644 --- a/pkg/cortex/serve/cortex_internal/serve/serve.py +++ b/pkg/cortex/serve/cortex_internal/serve/serve.py @@ -327,9 +327,7 @@ def start_fn(): predictor_impl.post_predict ).args - predict_route = "/" - if provider != "local": - predict_route = "/predict" + predict_route = "/predict" local_cache["predict_route"] = predict_route except: logger.exception("failed to start api") diff --git a/pkg/cortex/serve/init/bootloader.sh b/pkg/cortex/serve/init/bootloader.sh index d3a80175cc..3f12a3da01 100755 --- a/pkg/cortex/serve/init/bootloader.sh +++ b/pkg/cortex/serve/init/bootloader.sh @@ -20,11 +20,7 @@ set -e export EXPECTED_CORTEX_VERSION=master if [ "$CORTEX_VERSION" != "$EXPECTED_CORTEX_VERSION" ]; then - if [ "$CORTEX_PROVIDER" == "local" ]; then - echo "error: your Cortex CLI version ($CORTEX_VERSION) doesn't match your predictor image version ($EXPECTED_CORTEX_VERSION); please update your predictor image by modifying the \`image\` field in your API configuration file (e.g. 
cortex.yaml) and re-running \`cortex deploy\`, or update your CLI by following the instructions at https://docs.cortex.dev/" - else - echo "error: your Cortex operator version ($CORTEX_VERSION) doesn't match your predictor image version ($EXPECTED_CORTEX_VERSION); please update your predictor image by modifying the \`image\` field in your API configuration file (e.g. cortex.yaml) and re-running \`cortex deploy\`, or update your cluster by following the instructions at https://docs.cortex.dev/" - fi + echo "error: your Cortex operator version ($CORTEX_VERSION) doesn't match your predictor image version ($EXPECTED_CORTEX_VERSION); please update your predictor image by modifying the \`image\` field in your API configuration file (e.g. cortex.yaml) and re-running \`cortex deploy\`, or update your cluster by following the instructions at https://docs.cortex.dev/" exit 1 fi @@ -44,12 +40,10 @@ rm -rf /mnt/workspace/proc-*-ready.txt # allow for the liveness check to pass until the API is running echo "9999999999" > /mnt/workspace/api_liveness.txt -if [ "$CORTEX_PROVIDER" != "local" ]; then - if [ "$CORTEX_KIND" == "RealtimeAPI" ]; then - sysctl -w net.core.somaxconn="65535" >/dev/null - sysctl -w net.ipv4.ip_local_port_range="15000 64000" >/dev/null - sysctl -w net.ipv4.tcp_fin_timeout=30 >/dev/null - fi +if [ "$CORTEX_KIND" == "RealtimeAPI" ]; then + sysctl -w net.core.somaxconn="65535" >/dev/null + sysctl -w net.ipv4.ip_local_port_range="15000 64000" >/dev/null + sysctl -w net.ipv4.tcp_fin_timeout=30 >/dev/null fi # to export user-specified environment files diff --git a/pkg/cortex/serve/init/script.py b/pkg/cortex/serve/init/script.py index 306e41986e..1b558040bc 100644 --- a/pkg/cortex/serve/init/script.py +++ b/pkg/cortex/serve/init/script.py @@ -121,7 +121,7 @@ def main(): # get API spec provider = os.environ["CORTEX_PROVIDER"] spec_path = os.environ["CORTEX_API_SPEC"] - cache_dir = os.getenv("CORTEX_CACHE_DIR") # when it's deployed locally + cache_dir = 
os.getenv("CORTEX_CACHE_DIR") region = os.getenv("AWS_REGION") # when it's deployed to AWS _, api_spec = get_spec(provider, spec_path, cache_dir, region) diff --git a/pkg/cortex/serve/start/batch.py b/pkg/cortex/serve/start/batch.py index c8c5675f60..7c7959ac6e 100644 --- a/pkg/cortex/serve/start/batch.py +++ b/pkg/cortex/serve/start/batch.py @@ -34,7 +34,7 @@ from cortex_internal.lib import util from cortex_internal.lib.api import API, get_spec, get_api from cortex_internal.lib.concurrency import LockedFile -from cortex_internal.lib.storage import S3, LocalStorage +from cortex_internal.lib.storage import S3 from cortex_internal.lib.exceptions import UserRuntimeException API_LIVENESS_UPDATE_PERIOD = 5 # seconds diff --git a/pkg/lib/docker/docker.go b/pkg/lib/docker/docker.go index 3b057a7545..4dbe8931f2 100644 --- a/pkg/lib/docker/docker.go +++ b/pkg/lib/docker/docker.go @@ -317,7 +317,7 @@ func EncodeAuthConfig(authConfig dockertypes.AuthConfig) (string, error) { func CheckImageAccessible(dockerClient *Client, dockerImage, authConfig string, providerType types.ProviderType) error { if _, err := dockerClient.DistributionInspect(context.Background(), dockerImage, authConfig); err != nil { - return ErrorImageInaccessible(dockerImage, providerType, err) + return ErrorImageInaccessible(dockerImage, err) } return nil } diff --git a/pkg/lib/docker/errors.go b/pkg/lib/docker/errors.go index baea0f6165..a0e8b8f2f3 100644 --- a/pkg/lib/docker/errors.go +++ b/pkg/lib/docker/errors.go @@ -23,7 +23,6 @@ import ( "github.com/cortexlabs/cortex/pkg/consts" "github.com/cortexlabs/cortex/pkg/lib/errors" - "github.com/cortexlabs/cortex/pkg/types" ) const ( @@ -66,23 +65,15 @@ func ErrorImageDoesntExistLocally(image string) error { }) } -func ErrorImageInaccessible(image string, providerType types.ProviderType, cause error) error { +func ErrorImageInaccessible(image string, cause error) error { message := fmt.Sprintf("%s is not accessible", image) if cause != nil { message += "\n" + 
errors.Message(cause) // add \n because docker client errors are verbose but useful } - switch providerType { - case types.LocalProviderType: - message += fmt.Sprintf("\n\nyou can download your image with `docker pull %s` and try this command again", image) - if strings.Contains(cause.Error(), "auth") { - message += " (if your registry is private, run `docker login` first)" - } - case types.AWSProviderType: - if strings.Contains(cause.Error(), "auth") { - message += fmt.Sprintf("\n\nif you would like to use a private docker registry, see https://docs.cortex.dev/v/%s/", consts.CortexVersionMinor) - } + if strings.Contains(cause.Error(), "auth") { + message += fmt.Sprintf("\n\nif you would like to use a private docker registry, see https://docs.cortex.dev/v/%s/", consts.CortexVersionMinor) } return errors.WithStack(&errors.Error{ diff --git a/pkg/operator/resources/validations.go b/pkg/operator/resources/validations.go index a078258187..1a301a33b5 100644 --- a/pkg/operator/resources/validations.go +++ b/pkg/operator/resources/validations.go @@ -70,11 +70,6 @@ func (projectFiles ProjectFiles) HasDir(path string) bool { return false } -// This should not be called, since it's only relevant for the local environment -func (projectFiles ProjectFiles) ProjectDir() string { - return "./" -} - func ValidateClusterAPIs(apis []userconfig.API, projectFiles spec.ProjectFiles) error { if len(apis) == 0 { return spec.ErrorNoAPIs() @@ -93,8 +88,6 @@ func ValidateClusterAPIs(apis []userconfig.API, projectFiles spec.ProjectFiles) } } - didPrintWarning := false - realtimeAPIs := InclusiveFilterAPIsByKind(apis, userconfig.RealtimeAPIKind) for i := range apis { @@ -106,11 +99,6 @@ func ValidateClusterAPIs(apis []userconfig.API, projectFiles spec.ProjectFiles) if err := validateK8s(api, virtualServices, maxMem); err != nil { return errors.Wrap(err, api.Identify()) } - - if !didPrintWarning && api.Networking.LocalPort != nil { - fmt.Println(fmt.Sprintf("warning: %s will be ignored 
because it is not supported in an environment using aws provider\n", userconfig.LocalPortKey)) - didPrintWarning = true - } } if api.Kind == userconfig.TrafficSplitterKind { diff --git a/pkg/types/provider.go b/pkg/types/provider.go index 521025d4bf..84f891e739 100644 --- a/pkg/types/provider.go +++ b/pkg/types/provider.go @@ -20,14 +20,12 @@ type ProviderType int const ( UnknownProviderType ProviderType = iota - LocalProviderType AWSProviderType GCPProviderType ) var _providerTypes = []string{ "unknown", - "local", "aws", "gcp", } @@ -48,7 +46,7 @@ func ProviderTypeStrings() []string { } func ClusterProviderTypeStrings() []string { - return _providerTypes[2:] + return _providerTypes[1:] } func (t ProviderType) String() string { diff --git a/pkg/types/spec/api.go b/pkg/types/spec/api.go index ee7d028edb..5e07c69b11 100644 --- a/pkg/types/spec/api.go +++ b/pkg/types/spec/api.go @@ -28,38 +28,33 @@ import ( "github.com/cortexlabs/cortex/pkg/consts" "github.com/cortexlabs/cortex/pkg/lib/errors" "github.com/cortexlabs/cortex/pkg/lib/hash" - "github.com/cortexlabs/cortex/pkg/lib/sets/strset" s "github.com/cortexlabs/cortex/pkg/lib/strings" "github.com/cortexlabs/cortex/pkg/types/userconfig" ) type API struct { *userconfig.API - ID string `json:"id"` - SpecID string `json:"spec_id"` - PredictorID string `json:"predictor_id"` - DeploymentID string `json:"deployment_id"` - Key string `json:"key"` - PredictorKey string `json:"predictor_key"` - LastUpdated int64 `json:"last_updated"` - MetadataRoot string `json:"metadata_root"` - ProjectID string `json:"project_id"` - ProjectKey string `json:"project_key"` - LocalModelCaches []*LocalModelCache `json:"local_model_cache"` // local only - LocalProjectDir string `json:"local_project_dir"` -} - -type LocalModelCache struct { - ID string `json:"id"` - HostPath string `json:"host_path"` - TargetPath string `json:"target_path"` + ID string `json:"id"` + SpecID string `json:"spec_id"` + PredictorID string `json:"predictor_id"` + 
DeploymentID string `json:"deployment_id"` + Key string `json:"key"` + PredictorKey string `json:"predictor_key"` + LastUpdated int64 `json:"last_updated"` + MetadataRoot string `json:"metadata_root"` + ProjectID string `json:"project_id"` + ProjectKey string `json:"project_key"` } type CuratedModelResource struct { *userconfig.ModelResource - S3Path bool `json:"s3_path"` - GCSPath bool `json:"gcs_path"` - LocalPath bool `json:"local_path"` + S3Path bool `json:"s3_path"` + GCSPath bool `json:"gcs_path"` + + // has no utility in the go stack, but in the python stack, this is required for + // single model paths (ONNX) because models are made available locally to the api pod + LocalPath bool `json:"local_path"` + IsFilePath bool `json:"file_path"` Versions []int64 `json:"versions"` } @@ -114,23 +109,6 @@ func GetAPISpec(apiConfig *userconfig.API, projectID string, deploymentID string } } -// Keep track of models in the model cache used by this API (local only) -func (api *API) ModelIDs() []string { - models := []string{} - for _, localModelCache := range api.LocalModelCaches { - models = append(models, localModelCache.ID) - } - return models -} - -func (api *API) SubtractModelIDs(apis ...*API) []string { - modelIDs := strset.FromSlice(api.ModelIDs()) - for _, a := range apis { - modelIDs.Remove(a.ModelIDs()...) 
- } - return modelIDs.Slice() -} - func PredictorKey(apiName string, predictorID string, clusterName string) string { return filepath.Join( clusterName, diff --git a/pkg/types/spec/errors.go b/pkg/types/spec/errors.go index fcb979e5d8..be42f29f50 100644 --- a/pkg/types/spec/errors.go +++ b/pkg/types/spec/errors.go @@ -77,11 +77,9 @@ const ( ErrFieldNotSupportedByPredictorType = "spec.field_not_supported_by_predictor_type" ErrNoAvailableNodeComputeLimit = "spec.no_available_node_compute_limit" ErrCortexPrefixedEnvVarNotAllowed = "spec.cortex_prefixed_env_var_not_allowed" - ErrLocalPathNotSupportedByAWSProvider = "spec.local_path_not_supported_by_aws_provider" - ErrUnsupportedLocalComputeResource = "spec.unsupported_local_compute_resource" + ErrUnsupportedComputeResourceForProvider = "spec.unsupported_compute_resource_for_provider" ErrRegistryInDifferentRegion = "spec.registry_in_different_region" ErrRegistryAccountIDMismatch = "spec.registry_account_id_mismatch" - ErrCannotAccessECRWithAnonymousAWSCreds = "spec.cannot_access_ecr_with_anonymous_aws_creds" ErrKindIsNotSupportedByProvider = "spec.kind_is_not_supported_by_provider" ErrKeyIsNotSupportedForKind = "spec.key_is_not_supported_for_kind" ErrComputeResourceConflict = "spec.compute_resource_conflict" @@ -495,16 +493,9 @@ func ErrorCortexPrefixedEnvVarNotAllowed() error { }) } -func ErrorLocalModelPathNotSupportedByAWSProvider() error { - return errors.WithStack(&errors.Error{ - Kind: ErrLocalPathNotSupportedByAWSProvider, - Message: fmt.Sprintf("local model paths are not supported for aws provider, please specify an S3 path"), - }) -} - func ErrorUnsupportedComputeResourceForProvider(resourceType string, provider types.ProviderType) error { return errors.WithStack(&errors.Error{ - Kind: ErrUnsupportedLocalComputeResource, + Kind: ErrUnsupportedComputeResourceForProvider, Message: fmt.Sprintf("%s compute resources cannot be used for the %s provider", resourceType, provider.String()), }) } @@ -523,13 +514,6 @@ 
func ErrorRegistryAccountIDMismatch(regID, opID string) error { }) } -func ErrorCannotAccessECRWithAnonymousAWSCreds() error { - return errors.WithStack(&errors.Error{ - Kind: ErrCannotAccessECRWithAnonymousAWSCreds, - Message: fmt.Sprintf("cannot access ECR with anonymous aws credentials; run `cortex env configure local` to specify AWS credentials with access to ECR"), - }) -} - func ErrorKindIsNotSupportedByProvider(kind userconfig.Kind, provider types.ProviderType) error { return errors.WithStack(&errors.Error{ Kind: ErrKindIsNotSupportedByProvider, diff --git a/pkg/types/spec/project_files.go b/pkg/types/spec/project_files.go index d8c6dfab01..e89b52f048 100644 --- a/pkg/types/spec/project_files.go +++ b/pkg/types/spec/project_files.go @@ -25,6 +25,4 @@ type ProjectFiles interface { HasFile(string) bool // Return whether the project contains a directory path (relative to the project root) HasDir(string) bool - // Return the absolute path to the root of the project directory (should only be used in local environment) - ProjectDir() string } diff --git a/pkg/types/spec/utils.go b/pkg/types/spec/utils.go index 3eaa1b5379..1dfb33c48c 100644 --- a/pkg/types/spec/utils.go +++ b/pkg/types/spec/utils.go @@ -104,7 +104,6 @@ func generateErrorForPredictorTypeFn(api *userconfig.API) errorForPredictorTypeF func validateDirModels( modelPath string, signatureKey *string, - projectDir string, awsClient *aws.Client, gcpClient *gcp.Client, errorForPredictorType errorForPredictorTypeFn, @@ -119,7 +118,6 @@ func validateDirModels( s3Path := strings.HasPrefix(modelPath, "s3://") gcsPath := strings.HasPrefix(modelPath, "gs://") - localPath := !s3Path && !gcsPath if s3Path { awsClientForBucket, err := aws.NewFromClientS3Path(modelPath, awsClient) @@ -149,20 +147,6 @@ func validateDirModels( return nil, err } } - if localPath { - expandedLocalPath := files.RelToAbsPath(modelPath, projectDir) - dirPrefix = s.EnsureSuffix(expandedLocalPath, "/") - - err := files.CheckDir(dirPrefix) - 
if err != nil { - return nil, err - } - - modelDirPaths, err = files.ListDirRecursive(dirPrefix, false, nil...) - if err != nil { - return nil, err - } - } if len(modelDirPaths) == 0 { return nil, errorForPredictorType(dirPrefix, modelDirPaths) } @@ -220,9 +204,6 @@ func validateDirModels( if gcsPath { fullModelPath = s.EnsureSuffix(gcp.GCSPath(bucket, modelPrefix), "/") } - if localPath { - fullModelPath = s.EnsureSuffix(modelPrefix, "/") - } modelResources[i] = CuratedModelResource{ ModelResource: &userconfig.ModelResource{ @@ -230,10 +211,9 @@ func validateDirModels( Path: fullModelPath, SignatureKey: signatureKey, }, - S3Path: s3Path, - GCSPath: gcsPath, - LocalPath: localPath, - Versions: intVersions, + S3Path: s3Path, + GCSPath: gcsPath, + Versions: intVersions, } } @@ -243,7 +223,6 @@ func validateDirModels( func validateModels( models []userconfig.ModelResource, defaultSignatureKey *string, - projectDir string, awsClient *aws.Client, gcpClient *gcp.Client, errorForPredictorType errorForPredictorTypeFn, @@ -260,7 +239,6 @@ func validateModels( s3Path := strings.HasPrefix(model.Path, "s3://") gcsPath := strings.HasPrefix(model.Path, "gs://") - localPath := !s3Path && !gcsPath if s3Path { awsClientForBucket, err := aws.NewFromClientS3Path(model.Path, awsClient) @@ -280,7 +258,6 @@ func validateModels( } modelPaths = aws.ConvertS3ObjectsToKeys(s3Objects...) } - if gcsPath { bucket, modelPrefix, err = gcp.SplitGCSPath(model.Path) if err != nil { @@ -293,21 +270,6 @@ func validateModels( return nil, errors.Wrap(err, model.Name) } } - - if localPath { - expandedLocalPath := files.RelToAbsPath(model.Path, projectDir) - modelPrefix = s.EnsureSuffix(expandedLocalPath, "/") - - err := files.CheckDir(modelPrefix) - if err != nil { - return nil, errors.Wrap(err, model.Name) - } - - modelPaths, err = files.ListDirRecursive(modelPrefix, false, nil...) 
- if err != nil { - return nil, errors.Wrap(err, model.Name) - } - } if len(modelPaths) == 0 { return nil, errors.Wrap(errorForPredictorType(modelPrefix, modelPaths), model.Name) } @@ -359,9 +321,6 @@ func validateModels( if gcsPath { fullModelPath = s.EnsureSuffix(gcp.GCSPath(bucket, modelPrefix), "/") } - if localPath { - fullModelPath = s.EnsureSuffix(modelPrefix, "/") - } modelResources[i] = CuratedModelResource{ ModelResource: &userconfig.ModelResource{ @@ -369,10 +328,9 @@ func validateModels( Path: fullModelPath, SignatureKey: signatureKey, }, - S3Path: s3Path, - GCSPath: gcsPath, - LocalPath: localPath, - Versions: intVersions, + S3Path: s3Path, + GCSPath: gcsPath, + Versions: intVersions, } } diff --git a/pkg/types/spec/validations.go b/pkg/types/spec/validations.go index 5ca1ba5ba0..af28c932c8 100644 --- a/pkg/types/spec/validations.go +++ b/pkg/types/spec/validations.go @@ -19,7 +19,6 @@ package spec import ( "context" "fmt" - "math" "strings" "time" @@ -54,8 +53,8 @@ const _dockerPullSecretName = "registry-credentials" func apiValidation( provider types.ProviderType, resource userconfig.Resource, - awsClusterConfig *clusterconfig.Config, // should be omitted if running locally - gcpClusterConfig *clusterconfig.GCPConfig, // should be omitted if running locally + awsClusterConfig *clusterconfig.Config, + gcpClusterConfig *clusterconfig.GCPConfig, ) *cr.StructValidation { structFieldValidations := []*cr.StructFieldValidation{} @@ -249,44 +248,26 @@ func predictorValidation() *cr.StructFieldValidation { func networkingValidation( kind userconfig.Kind, provider types.ProviderType, - awsClusterConfig *clusterconfig.Config, // should be omitted if running locally - gcpClusterConfig *clusterconfig.GCPConfig, // should be omitted if running locally + awsClusterConfig *clusterconfig.Config, + gcpClusterConfig *clusterconfig.GCPConfig, ) *cr.StructFieldValidation { - - structFieldValidation := []*cr.StructFieldValidation{ - { - StructField: "Endpoint", - 
StringPtrValidation: &cr.StringPtrValidation{ - Validator: urls.ValidateEndpoint, - MaxLength: 1000, // no particular reason other than it works - }, - }, - } - - if kind == userconfig.RealtimeAPIKind { - structFieldValidation = append(structFieldValidation, &cr.StructFieldValidation{ - StructField: "LocalPort", - IntPtrValidation: &cr.IntPtrValidation{ - GreaterThan: pointer.Int(0), - LessThanOrEqualTo: pointer.Int(math.MaxUint16), - }, - }) - } - return &cr.StructFieldValidation{ StructField: "Networking", StructValidation: &cr.StructValidation{ - StructFieldValidations: structFieldValidation, + StructFieldValidations: []*cr.StructFieldValidation{ + { + StructField: "Endpoint", + StringPtrValidation: &cr.StringPtrValidation{ + Validator: urls.ValidateEndpoint, + MaxLength: 1000, // no particular reason other than it works + }, + }, + }, }, } } func computeValidation(provider types.ProviderType) *cr.StructFieldValidation { - cpuDefault := pointer.String("200m") - if provider == types.LocalProviderType { - cpuDefault = nil - } - structFieldValidation := &cr.StructFieldValidation{ StructField: "Compute", StructValidation: &cr.StructValidation{ @@ -294,7 +275,7 @@ func computeValidation(provider types.ProviderType) *cr.StructFieldValidation { { StructField: "CPU", StringPtrValidation: &cr.StringPtrValidation{ - Default: cpuDefault, + Default: pointer.String("200m"), AllowExplicitNull: true, CastNumeric: true, }, @@ -348,14 +329,9 @@ func computeValidation(provider types.ProviderType) *cr.StructFieldValidation { } func autoscalingValidation(provider types.ProviderType) *cr.StructFieldValidation { - defaultNil := provider == types.LocalProviderType - allowExplicitNull := provider == types.LocalProviderType - structFieldValidation := &cr.StructFieldValidation{ StructField: "Autoscaling", StructValidation: &cr.StructValidation{ - DefaultNil: defaultNil, - AllowExplicitNull: allowExplicitNull, StructFieldValidations: []*cr.StructFieldValidation{ { StructField: 
"MinReplicas", @@ -392,7 +368,7 @@ func autoscalingValidation(provider types.ProviderType) *cr.StructFieldValidatio }, } - if provider == types.AWSProviderType || provider == types.LocalProviderType { + if provider == types.AWSProviderType { structFieldValidation.StructValidation.StructFieldValidations = append(structFieldValidation.StructValidation.StructFieldValidations, &cr.StructFieldValidation{ StructField: "TargetReplicaConcurrency", @@ -522,13 +498,9 @@ func autoscalingValidation(provider types.ProviderType) *cr.StructFieldValidatio } func updateStrategyValidation(provider types.ProviderType) *cr.StructFieldValidation { - defaultNil := provider == types.LocalProviderType - allowExplicitNull := provider == types.LocalProviderType return &cr.StructFieldValidation{ StructField: "UpdateStrategy", StructValidation: &cr.StructValidation{ - DefaultNil: defaultNil, - AllowExplicitNull: allowExplicitNull, StructFieldValidations: []*cr.StructFieldValidation{ { StructField: "MaxSurge", @@ -672,8 +644,8 @@ func ExtractAPIConfigs( configBytes []byte, provider types.ProviderType, configFileName string, - awsClusterConfig *clusterconfig.Config, // should be omitted if running locally - gcpClusterConfig *clusterconfig.GCPConfig, // should be omitted if running locally + awsClusterConfig *clusterconfig.Config, + gcpClusterConfig *clusterconfig.GCPConfig, ) ([]userconfig.API, error) { var err error @@ -702,7 +674,7 @@ func ExtractAPIConfigs( } if resourceStruct.Kind == userconfig.BatchAPIKind || resourceStruct.Kind == userconfig.TrafficSplitterKind { - if provider == types.LocalProviderType || provider == types.GCPProviderType { + if provider == types.GCPProviderType { return nil, errors.Wrap(ErrorKindIsNotSupportedByProvider(resourceStruct.Kind, provider), userconfig.IdentifyAPI(configFileName, resourceStruct.Name, resourceStruct.Kind, i)) } } @@ -742,7 +714,7 @@ func ValidateAPI( provider types.ProviderType, awsClient *aws.Client, gcpClient *gcp.Client, - k8sClient 
*k8s.Client, // will be nil for local provider + k8sClient *k8s.Client, ) error { // if models is nil, we need to set it to an empty slice to avoid nil pointer exceptions @@ -750,7 +722,7 @@ func ValidateAPI( models = &[]CuratedModelResource{} } - if provider != types.LocalProviderType && api.Networking.Endpoint == nil { + if api.Networking.Endpoint == nil { api.Networking.Endpoint = pointer.String("/" + api.Name) } @@ -758,7 +730,7 @@ func ValidateAPI( return errors.Wrap(err, userconfig.PredictorKey) } - if api.Autoscaling != nil { // should only be nil for local provider + if api.Autoscaling != nil { if err := validateAutoscaling(api); err != nil { return errors.Wrap(err, userconfig.AutoscalingKey) } @@ -768,7 +740,7 @@ func ValidateAPI( return errors.Wrap(err, userconfig.ComputeKey) } - if api.UpdateStrategy != nil { // should only be nil for local provider + if api.UpdateStrategy != nil { if err := validateUpdateStrategy(api.UpdateStrategy); err != nil { return errors.Wrap(err, userconfig.UpdateStrategyKey) } @@ -809,7 +781,7 @@ func validatePredictor( provider types.ProviderType, awsClient *aws.Client, gcpClient *gcp.Client, - k8sClient *k8s.Client, // will be nil for local provider + k8sClient *k8s.Client, ) error { predictor := api.Predictor @@ -1021,9 +993,9 @@ func validatePythonPredictor(api *userconfig.API, models *[]CuratedModelResource var err error if hasMultiModels && mmr.Dir != nil { - *models, err = validateDirModels(*mmr.Dir, nil, projectFiles.ProjectDir(), awsClient, gcpClient, generateErrorForPredictorTypeFn(api), nil) + *models, err = validateDirModels(*mmr.Dir, nil, awsClient, gcpClient, generateErrorForPredictorTypeFn(api), nil) } else { - *models, err = validateModels(modelResources, nil, projectFiles.ProjectDir(), awsClient, gcpClient, generateErrorForPredictorTypeFn(api), nil) + *models, err = validateModels(modelResources, nil, awsClient, gcpClient, generateErrorForPredictorTypeFn(api), nil) } if err != nil { return modelWrapError(err) @@ 
-1110,9 +1082,9 @@ func validateTensorFlowPredictor(api *userconfig.API, models *[]CuratedModelReso var err error if hasMultiModels && predictor.Models.Dir != nil { - *models, err = validateDirModels(*predictor.Models.Dir, predictor.Models.SignatureKey, projectFiles.ProjectDir(), awsClient, gcpClient, generateErrorForPredictorTypeFn(api), validators) + *models, err = validateDirModels(*predictor.Models.Dir, predictor.Models.SignatureKey, awsClient, gcpClient, generateErrorForPredictorTypeFn(api), validators) } else { - *models, err = validateModels(modelResources, predictor.Models.SignatureKey, projectFiles.ProjectDir(), awsClient, gcpClient, generateErrorForPredictorTypeFn(api), validators) + *models, err = validateModels(modelResources, predictor.Models.SignatureKey, awsClient, gcpClient, generateErrorForPredictorTypeFn(api), validators) } if err != nil { return modelWrapError(err) @@ -1166,8 +1138,8 @@ func validateONNXPredictor(api *userconfig.API, models *[]CuratedModelResource, Path: *predictor.Models.Path, } - if strings.HasSuffix(*predictor.Models.Path, ".onnx") && provider != types.LocalProviderType { - if err := validateONNXModelFilePath(*predictor.Models.Path, projectFiles.ProjectDir(), awsClient, gcpClient); err != nil { + if strings.HasSuffix(*predictor.Models.Path, ".onnx") { + if err := validateONNXModelFilePath(*predictor.Models.Path, awsClient, gcpClient); err != nil { return modelWrapError(err) } modelFileResources = append(modelFileResources, modelResource) @@ -1191,8 +1163,8 @@ func validateONNXPredictor(api *userconfig.API, models *[]CuratedModelResource, path.Name, ) } - if strings.HasSuffix((*path).Path, ".onnx") && provider != types.LocalProviderType { - if err := validateONNXModelFilePath((*path).Path, projectFiles.ProjectDir(), awsClient, gcpClient); err != nil { + if strings.HasSuffix((*path).Path, ".onnx") { + if err := validateONNXModelFilePath((*path).Path, awsClient, gcpClient); err != nil { return errors.Wrap(modelWrapError(err), 
path.Name) } modelFileResources = append(modelFileResources, *path) @@ -1214,9 +1186,9 @@ func validateONNXPredictor(api *userconfig.API, models *[]CuratedModelResource, var err error if hasMultiModels && predictor.Models.Dir != nil { - *models, err = validateDirModels(*predictor.Models.Dir, nil, projectFiles.ProjectDir(), awsClient, gcpClient, generateErrorForPredictorTypeFn(api), validators) + *models, err = validateDirModels(*predictor.Models.Dir, nil, awsClient, gcpClient, generateErrorForPredictorTypeFn(api), validators) } else { - *models, err = validateModels(modelResources, nil, projectFiles.ProjectDir(), awsClient, gcpClient, generateErrorForPredictorTypeFn(api), validators) + *models, err = validateModels(modelResources, nil, awsClient, gcpClient, generateErrorForPredictorTypeFn(api), validators) } if err != nil { return modelWrapError(err) @@ -1225,7 +1197,6 @@ func validateONNXPredictor(api *userconfig.API, models *[]CuratedModelResource, for _, modelFileResource := range modelFileResources { s3Path := strings.HasPrefix(modelFileResource.Path, "s3://") gcsPath := strings.HasPrefix(modelFileResource.Path, "gs://") - localPath := !s3Path && !gcsPath *models = append(*models, CuratedModelResource{ ModelResource: &userconfig.ModelResource{ @@ -1234,7 +1205,6 @@ func validateONNXPredictor(api *userconfig.API, models *[]CuratedModelResource, }, S3Path: s3Path, GCSPath: gcsPath, - LocalPath: localPath, IsFilePath: true, }) } @@ -1254,10 +1224,9 @@ func validateONNXPredictor(api *userconfig.API, models *[]CuratedModelResource, return nil } -func validateONNXModelFilePath(modelPath string, projectDir string, awsClient *aws.Client, gcpClient *gcp.Client) error { +func validateONNXModelFilePath(modelPath string, awsClient *aws.Client, gcpClient *gcp.Client) error { s3Path := strings.HasPrefix(modelPath, "s3://") gcsPath := strings.HasPrefix(modelPath, "gs://") - localPath := !s3Path && !gcsPath if s3Path { awsClientForBucket, err := 
aws.NewFromClientS3Path(modelPath, awsClient) @@ -1296,13 +1265,6 @@ func validateONNXModelFilePath(modelPath string, projectDir string, awsClient *a } } - if localPath { - expandedLocalPath := files.RelToAbsPath(modelPath, projectDir) - if err := files.CheckFile(expandedLocalPath); err != nil { - return err - } - } - return nil } @@ -1383,7 +1345,7 @@ func validateDockerImagePath( image string, provider types.ProviderType, awsClient *aws.Client, - k8sClient *k8s.Client, // will be nil for local provider) + k8sClient *k8s.Client, ) error { if consts.DefaultImagePathsSet.Has(image) { return nil @@ -1402,20 +1364,9 @@ func validateDockerImagePath( return err } - if provider == types.LocalProviderType { - // short circuit if the image is already available locally - if err := docker.CheckImageExistsLocally(dockerClient, image); err == nil { - return nil - } - } - dockerAuthStr := docker.NoAuth if regex.IsValidECRURL(image) { - if awsClient.IsAnonymous { - return errors.Wrap(ErrorCannotAccessECRWithAnonymousAWSCreds(), image) - } - ecrRegion := aws.GetRegionFromECRURL(image) if ecrRegion != awsClient.Region { return ErrorRegistryInDifferentRegion(ecrRegion, awsClient.Region) diff --git a/pkg/types/userconfig/api.go b/pkg/types/userconfig/api.go index 027c2726a8..527c142249 100644 --- a/pkg/types/userconfig/api.go +++ b/pkg/types/userconfig/api.go @@ -88,8 +88,7 @@ type ServerSideBatching struct { } type Networking struct { - Endpoint *string `json:"endpoint" yaml:"endpoint"` - LocalPort *int `json:"local_port" yaml:"local_port"` + Endpoint *string `json:"endpoint" yaml:"endpoint"` } type Compute struct { @@ -317,17 +316,16 @@ func (api *API) UserStr(provider types.ProviderType) string { sb.WriteString(s.Indent(api.Compute.UserStr(), " ")) } - if provider != types.LocalProviderType { - if api.Autoscaling != nil { - sb.WriteString(fmt.Sprintf("%s:\n", AutoscalingKey)) - sb.WriteString(s.Indent(api.Autoscaling.UserStr(provider), " ")) - } + if api.Autoscaling != nil { + 
sb.WriteString(fmt.Sprintf("%s:\n", AutoscalingKey)) + sb.WriteString(s.Indent(api.Autoscaling.UserStr(provider), " ")) + } - if api.UpdateStrategy != nil { - sb.WriteString(fmt.Sprintf("%s:\n", UpdateStrategyKey)) - sb.WriteString(s.Indent(api.UpdateStrategy.UserStr(), " ")) - } + if api.UpdateStrategy != nil { + sb.WriteString(fmt.Sprintf("%s:\n", UpdateStrategyKey)) + sb.WriteString(s.Indent(api.UpdateStrategy.UserStr(), " ")) } + return sb.String() } @@ -434,10 +432,7 @@ func (batch *ServerSideBatching) UserStr() string { func (networking *Networking) UserStr(provider types.ProviderType) string { var sb strings.Builder - if provider == types.LocalProviderType && networking.LocalPort != nil { - sb.WriteString(fmt.Sprintf("%s: %d\n", LocalPortKey, *networking.LocalPort)) - } - if provider != types.LocalProviderType && networking.Endpoint != nil { + if networking.Endpoint != nil { sb.WriteString(fmt.Sprintf("%s: %s\n", EndpointKey, *networking.Endpoint)) } return sb.String() @@ -569,10 +564,6 @@ func (api *API) TelemetryEvent(provider types.ProviderType) map[string]interface event["networking.endpoint._is_custom"] = true } } - if api.Networking.LocalPort != nil { - event["networking.local_port._is_defined"] = true - event["networking.local_port"] = *api.Networking.LocalPort - } } if api.Compute != nil { diff --git a/pkg/types/userconfig/config_key.go b/pkg/types/userconfig/config_key.go index 154c06cfa3..e334be1589 100644 --- a/pkg/types/userconfig/config_key.go +++ b/pkg/types/userconfig/config_key.go @@ -64,8 +64,7 @@ const ( ModelsNameKey = "name" // Networking - EndpointKey = "endpoint" - LocalPortKey = "local_port" + EndpointKey = "endpoint" // Compute CPUKey = "cpu" diff --git a/test/apis/live-reloading/python/mpg-estimator/predictor.py b/test/apis/live-reloading/python/mpg-estimator/predictor.py index 122fac51fc..2ac7f494e0 100644 --- a/test/apis/live-reloading/python/mpg-estimator/predictor.py +++ 
b/test/apis/live-reloading/python/mpg-estimator/predictor.py @@ -9,7 +9,7 @@ def load_model(self, model_path): return mlflow.sklearn.load_model(model_path) def predict(self, payload, query_params): - model_version = query_params.get("version") + model_version = query_params.get("version", "latest") model = self.client.get_model(model_version=model_version) model_input = [ diff --git a/test/apis/pytorch/text-generator/README.md b/test/apis/pytorch/text-generator/README.md index 98ad1ebcd6..a071964acb 100644 --- a/test/apis/pytorch/text-generator/README.md +++ b/test/apis/pytorch/text-generator/README.md @@ -39,67 +39,6 @@ torch transformers==3.0.* ``` -## Deploy your model locally - -You can create APIs from any Python runtime that has access to Docker (e.g. the Python shell or a Jupyter notebook): - -```python -import cortex - -cx_local = cortex.client("local") - -api_spec = { - "name": "text-generator", - "kind": "RealtimeAPI", - "predictor": { - "type": "python", - "path": "predictor.py" - } -} - -cx_local.create_api(api_spec, project_dir=".", wait=True) -``` - -## Consume your API - -```python -import requests - -endpoint = cx_local.get_api("text-generator")["endpoint"] -payload = {"text": "hello world"} -print(requests.post(endpoint, payload).text) -``` - -## Manage your APIs using the CLI - -Monitor the status of your API using `cortex get`: - -```bash -$ cortex get --watch - -env realtime api status last update avg request 2XX -local text-generator updating 8s - - -``` - -Show additional information for your API (e.g. its endpoint) using `cortex get `: - -```bash -$ cortex get text-generator - -status last update avg request 2XX -live 1m - - - -endpoint: http://localhost:8889 -``` - -You can also stream logs from your API: - -```bash -$ cortex logs text-generator - -... 
-``` - ## Deploy your model to AWS Cortex can automatically provision infrastructure on your AWS account and deploy your models as production-ready web services: @@ -134,7 +73,6 @@ $ cortex get --watch env realtime api status up-to-date requested last update avg request 2XX aws text-generator live 1 1 1m - - -local text-generator live 1 1 17m 3.1285 s 1 ``` The output above indicates that one replica of your API was requested and is available to serve predictions. Cortex will automatically launch more replicas if the load increases and will spin down replicas if there is unused capacity. @@ -152,7 +90,7 @@ endpoint: https://***.execute-api.us-west-2.amazonaws.com/text-generator ## Run on GPUs -If your cortex cluster is using GPU instances (configured during cluster creation) or if you are running locally with an nvidia GPU, you can run your text generator API on GPUs. Add the `compute` field to your API configuration and re-deploy: +If your cortex cluster is using GPU instances (configured during cluster creation), you can run your text generator API on GPUs. 
Add the `compute` field to your API configuration and re-deploy: ```python api_spec = { @@ -184,7 +122,5 @@ text-generator updating 0 1 1 29s - Deleting APIs will free up cluster resources and allow Cortex to scale down to the minimum number of instances you specified during cluster creation: ```python -cx_local.delete_api("text-generator") - cx_aws.delete_api("text-generator") ``` diff --git a/test/apis/pytorch/text-generator/deploy.ipynb b/test/apis/pytorch/text-generator/deploy.ipynb deleted file mode 100644 index 5bdc629577..0000000000 --- a/test/apis/pytorch/text-generator/deploy.ipynb +++ /dev/null @@ -1,77 +0,0 @@ -{ - "cells": [ - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "This example needs to run on a machine that supports Docker to deploy Cortex APIs locally (Colab users can still deploy to remote Cortex clusters)\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "!pip3 install cortex\n", - "!pip3 install requests" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "import cortex\n", - "\n", - "cx = cortex.client(\"local\")\n", - "\n", - "api_spec = {\n", - " \"name\": \"text-generator\",\n", - " \"kind\": \"RealtimeAPI\",\n", - " \"predictor\": {\n", - " \"type\": \"python\",\n", - " \"path\": \"predictor.py\"\n", - " }\n", - "}\n", - "\n", - "cx.create_api(api_spec, project_dir=\".\", wait=True)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "import requests\n", - "\n", - "endpoint = cx.get_api(\"text-generator\")[\"endpoint\"]\n", - "payload = {\"text\": \"hello world\"}\n", - "print(requests.post(endpoint, payload).text)" - ] - } - ], - "metadata": { - "kernelspec": { - "display_name": "Python 3", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - 
"file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.7.9" - } - }, - "nbformat": 4, - "nbformat_minor": 4 -}