diff --git a/cmd/enter.go b/cmd/enter.go new file mode 100644 index 0000000000..e376d56822 --- /dev/null +++ b/cmd/enter.go @@ -0,0 +1,114 @@ +package cmd + +import ( + helmClient "github.com/covexo/devspace/pkg/devspace/clients/helm" + "github.com/covexo/devspace/pkg/devspace/clients/kubectl" + "github.com/covexo/devspace/pkg/devspace/config/configutil" + "github.com/covexo/devspace/pkg/util/log" + "github.com/spf13/cobra" + k8sv1 "k8s.io/api/core/v1" + "k8s.io/client-go/kubernetes" + kubectlExec "k8s.io/client-go/util/exec" +) + +// EnterCmd is a struct that defines a command call for "enter" +type EnterCmd struct { + flags *EnterCmdFlags + helm *helmClient.HelmClientWrapper + kubectl *kubernetes.Clientset + pod *k8sv1.Pod +} + +// EnterCmdFlags are the flags available for the enter-command +type EnterCmdFlags struct { + container string +} + +func init() { + cmd := &EnterCmd{ + flags: &EnterCmdFlags{}, + } + + cobraCmd := &cobra.Command{ + Use: "enter", + Short: "Enter your DevSpace", + Long: ` +####################################################### +################## devspace enter ##################### +####################################################### +Execute a command or start a new terminal in your +devspace: + +devspace enter +devspace enter bash +devspace enter -c myContainer +#######################################################`, + Run: cmd.Run, + } + rootCmd.AddCommand(cobraCmd) + + cobraCmd.Flags().StringVarP(&cmd.flags.container, "container", "c", "", "Container name within pod where to execute command") +} + +// Run executes the command logic +func (cmd *EnterCmd) Run(cobraCmd *cobra.Command, args []string) { + var err error + log.StartFileLogging() + + cmd.kubectl, err = kubectl.NewClient() + if err != nil { + log.Fatalf("Unable to create new kubectl client: %v", err) + } + + log.StartWait("Initializing helm client") + cmd.helm, err = helmClient.NewClient(cmd.kubectl, false) + log.StopWait() + if err != nil { + log.Fatalf("Error 
initializing helm client: %s", err.Error()) + } + + // Check if we find a running release pod + log.StartWait("Find a running devspace pod") + pod, err := getRunningDevSpacePod(cmd.helm, cmd.kubectl) + log.StopWait() + if err != nil { + log.Fatal("Cannot find a running devspace pod") + } + + enterTerminal(cmd.kubectl, pod, cmd.flags.container, args) +} + +func enterTerminal(client *kubernetes.Clientset, pod *k8sv1.Pod, containerNameOverride string, args []string) { + var command []string + config := configutil.GetConfig(false) + + if len(args) == 0 && (config.DevSpace.Terminal.Command == nil || len(*config.DevSpace.Terminal.Command) == 0) { + command = []string{ + "sh", + "-c", + "command -v bash >/dev/null 2>&1 && exec bash || exec sh", + } + } else { + if len(args) > 0 { + command = args + } else { + for _, cmd := range *config.DevSpace.Terminal.Command { + command = append(command, *cmd) + } + } + } + + containerName := pod.Spec.Containers[0].Name + if containerNameOverride != "" { + containerName = containerNameOverride + } else if config.DevSpace.Terminal.ContainerName != nil { + containerName = *config.DevSpace.Terminal.ContainerName + } + + _, _, _, terminalErr := kubectl.Exec(client, pod, containerName, command, true, nil) + if terminalErr != nil { + if _, ok := terminalErr.(kubectlExec.CodeExitError); ok == false { + log.Fatalf("Unable to start terminal session: %v", terminalErr) + } + } +} diff --git a/cmd/reset.go b/cmd/reset.go index 04e82a5435..30e843cd31 100644 --- a/cmd/reset.go +++ b/cmd/reset.go @@ -73,13 +73,15 @@ func (cmd *ResetCmd) Run(cobraCmd *cobra.Command, args []string) { if cmd.kubectl == nil || cmd.helm == nil { cmd.kubectl, err = kubectl.NewClient() if err != nil { - log.Failf("Failed to initialize kubectl client: ", err.Error()) + log.Failf("Failed to initialize kubectl client: %v", err) } } cmd.determineResetExtent() if cmd.flags.deleteRelease { + log.StartWait("Deleting devspace release") err = cmd.deleteRelease() + log.StopWait() if 
err != nil { log.Failf("Error deleting release: %s", err.Error()) @@ -89,7 +91,9 @@ func (cmd *ResetCmd) Run(cobraCmd *cobra.Command, args []string) { } if cmd.flags.deleteRegistry { + log.StartWait("Deleting docker registry") err = cmd.deleteRegistry() + log.StopWait() if err != nil { log.Failf("Error deleting docker registry: %s", err.Error()) @@ -103,7 +107,9 @@ func (cmd *ResetCmd) Run(cobraCmd *cobra.Command, args []string) { } if cmd.flags.deleteTiller { + log.StartWait("Deleting tiller") err = cmd.deleteTiller() + log.StopWait() if err != nil { log.Failf("Error deleting tiller: %s", err.Error()) @@ -148,7 +154,7 @@ func (cmd *ResetCmd) Run(cobraCmd *cobra.Command, args []string) { err = cmd.deleteDevspaceFolder() if err != nil { - log.Failf("Error deleting .devspace folder: ", err.Error()) + log.Failf("Error deleting .devspace folder: %v", err) if cmd.shouldContinue() == false { return @@ -159,9 +165,12 @@ func (cmd *ResetCmd) Run(cobraCmd *cobra.Command, args []string) { } if cmd.flags.deleteClusterRoleBinding { + log.StartWait("Deleting cluster role bindings") err = cmd.kubectl.RbacV1beta1().ClusterRoleBindings().Delete(clusterRoleBindingName, &metav1.DeleteOptions{}) + log.StopWait() + if err != nil { - log.Failf("Failed to remove ClusterRoleBinding: ", err.Error()) + log.Failf("Failed to remove ClusterRoleBinding: %v", err) } else { log.Done("Successfully deleted ClusterRoleBinding '" + clusterRoleBindingName + "'") } diff --git a/cmd/up.go b/cmd/up.go index 03173a86da..215d12272d 100644 --- a/cmd/up.go +++ b/cmd/up.go @@ -38,7 +38,6 @@ import ( k8sv1 "k8s.io/api/core/v1" k8sv1beta1 "k8s.io/api/rbac/v1beta1" "k8s.io/client-go/kubernetes" - kubectlExec "k8s.io/client-go/util/exec" ) // UpCmd is a struct that defines a command call for "up" @@ -70,7 +69,7 @@ var UpFlagsDefault = &UpCmdFlags{ tiller: true, open: "cmd", initRegistries: true, - build: true, + build: false, sync: true, deploy: false, portforwarding: true, @@ -106,13 +105,13 @@ Starts and 
connects your DevSpace: cobraCmd.Flags().BoolVar(&cmd.flags.tiller, "tiller", cmd.flags.tiller, "Install/upgrade tiller") cobraCmd.Flags().BoolVar(&cmd.flags.initRegistries, "init-registries", cmd.flags.initRegistries, "Initialize registries (and install internal one)") - cobraCmd.Flags().BoolVarP(&cmd.flags.build, "build", "b", cmd.flags.build, "Build image if Dockerfile has been modified") + cobraCmd.Flags().BoolVarP(&cmd.flags.build, "build", "b", cmd.flags.build, "Force image build") cobraCmd.Flags().StringVarP(&cmd.flags.container, "container", "c", cmd.flags.container, "Container name where to open the shell") cobraCmd.Flags().BoolVar(&cmd.flags.sync, "sync", cmd.flags.sync, "Enable code synchronization") cobraCmd.Flags().BoolVar(&cmd.flags.verboseSync, "verbose-sync", cmd.flags.verboseSync, "When enabled the sync will log every file change") cobraCmd.Flags().BoolVar(&cmd.flags.portforwarding, "portforwarding", cmd.flags.portforwarding, "Enable port forwarding") - cobraCmd.Flags().BoolVarP(&cmd.flags.deploy, "deploy", "d", cmd.flags.deploy, "Deploy chart") - cobraCmd.Flags().BoolVar(&cmd.flags.noSleep, "no-sleep", cmd.flags.noSleep, "Enable no-sleep") + cobraCmd.Flags().BoolVarP(&cmd.flags.deploy, "deploy", "d", cmd.flags.deploy, "Force chart deployment") + cobraCmd.Flags().BoolVar(&cmd.flags.noSleep, "no-sleep", cmd.flags.noSleep, "Enable no-sleep (Override the containers.default.command and containers.default.args values with empty strings)") } // Run executes the command logic @@ -155,11 +154,9 @@ func (cmd *UpCmd) Run(cobraCmd *cobra.Command, args []string) { if cmd.flags.initRegistries { cmd.initRegistries() } - mustRedeploy := false - if cmd.flags.build { - mustRedeploy = cmd.buildImages(cobraCmd.Flags().Changed("build")) - } + // Build image if necessary + mustRedeploy := cmd.buildImages() // Check if we find a running release pod pod, err := getRunningDevSpacePod(cmd.helm, cmd.kubectl) @@ -183,7 +180,7 @@ func (cmd *UpCmd) Run(cobraCmd *cobra.Command, 
args []string) { }() } - cmd.enterTerminal(args) + enterTerminal(cmd.kubectl, cmd.pod, cmd.flags.container, args) } func (cmd *UpCmd) ensureNamespace() error { @@ -345,7 +342,7 @@ func (cmd *UpCmd) initRegistries() { } } -func (cmd *UpCmd) shouldRebuild(imageConf *v1.ImageConfig, dockerfilePath string, buildFlagChanged bool) bool { +func (cmd *UpCmd) shouldRebuild(imageConf *v1.ImageConfig, dockerfilePath string) bool { var dockerfileModTime time.Time mustRebuild := true @@ -361,7 +358,7 @@ func (cmd *UpCmd) shouldRebuild(imageConf *v1.ImageConfig, dockerfilePath string dockerfileModTime = dockerfileInfo.ModTime() // When user has not used -b or --build flags - if buildFlagChanged == false { + if cmd.flags.build == false { if imageConf.Build.LatestTimestamp != nil { latestBuildTime, _ := time.Parse(time.RFC3339Nano, *imageConf.Build.LatestTimestamp) @@ -370,13 +367,13 @@ func (cmd *UpCmd) shouldRebuild(imageConf *v1.ImageConfig, dockerfilePath string } } } - imageConf.Build.LatestTimestamp = configutil.String(dockerfileModTime.Format(time.RFC3339Nano)) + imageConf.Build.LatestTimestamp = configutil.String(dockerfileModTime.Format(time.RFC3339Nano)) return mustRebuild } // returns true when one of the images had to be rebuild -func (cmd *UpCmd) buildImages(buildFlagChanged bool) bool { +func (cmd *UpCmd) buildImages() bool { re := false config := configutil.GetConfig(false) @@ -391,10 +388,18 @@ func (cmd *UpCmd) buildImages(buildFlagChanged bool) bool { if imageConf.Build.ContextPath != nil { contextPath = *imageConf.Build.ContextPath } - dockerfilePath = filepath.Join(cmd.workdir, strings.TrimPrefix(dockerfilePath, ".")) - contextPath = filepath.Join(cmd.workdir, strings.TrimPrefix(contextPath, ".")) - if cmd.shouldRebuild(imageConf, dockerfilePath, buildFlagChanged) { + dockerfilePath, err := filepath.Abs(dockerfilePath) + if err != nil { + log.Fatalf("Couldn't determine absolute path for %s", *imageConf.Build.DockerfilePath) + } + + contextPath, err = 
filepath.Abs(contextPath) + if err != nil { + log.Fatalf("Couldn't determine absolute path for %s", *imageConf.Build.ContextPath) + } + + if cmd.shouldRebuild(imageConf, dockerfilePath) { re = true imageTag, randErr := randutil.GenerateRandomString(7) @@ -482,6 +487,12 @@ func (cmd *UpCmd) buildImages(buildFlagChanged bool) bool { if imageConf.Build.Options.BuildArgs != nil { buildOptions.BuildArgs = *imageConf.Build.Options.BuildArgs } + if imageConf.Build.Options.Target != nil { + buildOptions.Target = *imageConf.Build.Options.Target + } + if imageConf.Build.Options.Network != nil { + buildOptions.NetworkMode = *imageConf.Build.Options.Network + } } err = imageBuilder.BuildImage(contextPath, dockerfilePath, buildOptions) @@ -783,41 +794,6 @@ func (cmd *UpCmd) startPortForwarding() { } } -func (cmd *UpCmd) enterTerminal(args []string) { - var command []string - config := configutil.GetConfig(false) - - if len(args) == 0 && (config.DevSpace.Terminal.Command == nil || len(*config.DevSpace.Terminal.Command) == 0) { - command = []string{ - "sh", - "-c", - "command -v bash >/dev/null 2>&1 && exec bash || exec sh", - } - } else { - if len(args) > 0 { - command = args - } else { - for _, cmd := range *config.DevSpace.Terminal.Command { - command = append(command, *cmd) - } - } - } - - containerName := cmd.pod.Spec.Containers[0].Name - if cmd.flags.container != "" { - containerName = cmd.flags.container - } else if config.DevSpace.Terminal.ContainerName != nil { - containerName = *config.DevSpace.Terminal.ContainerName - } - - _, _, _, terminalErr := kubectl.Exec(cmd.kubectl, cmd.pod, containerName, command, true, nil) - if terminalErr != nil { - if _, ok := terminalErr.(kubectlExec.CodeExitError); ok == false { - log.Fatalf("Unable to start terminal session: %v", terminalErr) - } - } -} - func waitForPodReady(kubectl *kubernetes.Clientset, pod *k8sv1.Pod, maxWaitTime time.Duration, checkInterval time.Duration) error { for maxWaitTime > 0 { pod, err := 
kubectl.Core().Pods(pod.Namespace).Get(pod.Name, metav1.GetOptions{}) diff --git a/docs/docs/cli/enter.md b/docs/docs/cli/enter.md new file mode 100644 index 0000000000..fbd87256f1 --- /dev/null +++ b/docs/docs/cli/enter.md @@ -0,0 +1,20 @@ +--- +title: devspace enter +--- + +Execute a command or start a new terminal in your devspace. + +```bash +Usage: + devspace enter [flags] + +Flags: + -c, --container string Container name within pod where to execute command + -h, --help help for enter + +Examples: +devspace enter +devspace enter bash +devspace enter echo 123 +devspace enter -c myContainer +``` diff --git a/docs/docs/cli/up.md b/docs/docs/cli/up.md index 43fd42d76b..23d2816fc5 100644 --- a/docs/docs/cli/up.md +++ b/docs/docs/cli/up.md @@ -2,19 +2,29 @@ title: devspace up --- -With `devspace up`, you build your image, start your DevSpace and connect to it. +With `devspace up`, you build your image, start your DevSpace and connect to it. + +The command will do the following: + +1. Ensure that a tiller server is available (if not it will automatically deploy one to the specified namespace) +2. Optionally it will deploy a docker registry if desired +3. Build the docker image if changed or forced by -b + * Push the built image to the specified registry +4. Redeploy the chart if release was not found, image was rebuilt or -d option was specified +5. Establish port forwarding and sync +6. 
Execute the specified command in the container (default: open a terminal) ```bash Usage: devspace up [flags] Flags: - -b, --build Build image if Dockerfile has been modified (default true) + -b, --build Force image build -c, --container string Container name where to open the shell - -d, --deploy Deploy chart + -d, --deploy Force chart deployment -h, --help help for up --init-registries Initialize registries (and install internal one) (default true) - --no-sleep Enable no-sleep + --no-sleep Enable no-sleep (Override the containers.default.command and containers.default.args values with empty strings) --portforwarding Enable port forwarding (default true) --sync Enable code synchronization (default true) --tiller Install/upgrade tiller (default true) @@ -22,7 +32,7 @@ Flags: Examples: devspace up # Start the devspace -devspace up bash # Execute bash command +devspace up bash # Execute bash command after deploying ``` **Note**: Every time you run `devspace up`, your containers will be re-deployed. This way, you will always start with a clean state. 
diff --git a/docs/docs/configuration/config.yaml.md b/docs/docs/configuration/config.yaml.md index 9795bc73c4..15cb1125d6 100644 --- a/docs/docs/configuration/config.yaml.md +++ b/docs/docs/configuration/config.yaml.md @@ -7,6 +7,15 @@ This is an example of a [.devspace/config.yaml](#) # Devspace version, currently is always v1 version: v1 devSpace: + # terminal options for devspace up and devspace enter + terminal: + # the container name within the selected release pod to open a terminal connection to (is also a flag in `devspace up -c CONTAINER`) + containerName: default + # the command to execute within the container when using `devspace up` or `devspace enter` + command: + - sh + - -c + - bash release: # Name of helm release that is used for deploying # the devspace chart (contents of /chart) @@ -33,6 +42,8 @@ devSpace: - resourceType: pod labelSelector: release: my-app + # The container within the pod to sync to + containerName: default # Sync the complete local project path localSubPath: ./ # Into the remote container path /app @@ -49,7 +60,20 @@ images: tag: 9u5ye0G # Registry where the build image will be pushed to registry: default + # Specifies where the docker context path is + contextPath: ./ + # Specifies where the Dockerfile lies + dockerfilePath: ./Dockerfile + # Specifies how to build the image build: + options: + # Used for multi-stage builds + target: development + # buildArgs passed to docker during build + buildArgs: + myarg1: myvalue1 + # network mode (see [network](https://docs.docker.com/network/)) + network: bridge engine: docker: # Use docker for image building @@ -103,7 +127,12 @@ A [.devspace/config.yaml](#) contains any public/shared configuration for runnin **Note: You can easily re-configure your DevSpace by running `devspace init -r`.** ## devspace -Defines the DevSpace including everything related to portForwarding, sync, and the helm release config. 
+Defines the DevSpace including everything related to terminal, portForwarding, sync, and the helm release config. + +### devspace.terminal +This section defines what should happen when `devspace up` or `devspace enter` connects to the release pod. +- `containerName` *string* the name of the container to connect to within the selected pod (default is the first defined container) +- `command` *string array* the default command that is executed when entering a pod with devspace up or devspace enter (default is: ["sh", "-c", "command -v bash >/dev/null 2>&1 && exec bash || exec sh"]) ### devspace.release Defines how the DevSpace is deployed to your cluster. See [Type: Release](#type-release) for details. @@ -116,15 +145,16 @@ To access applications running inside a DevSpace, the DevSpace CLI allows to con ### devspace.portForwarding[].portMappings[] PortMapping: -- `localPort` *string* on localhost -- `remotePort` *string* remote pod port +- `localPort` *string* the local port on the machine +- `remotePort` *string* the remote pod port In the example above, you could open `localhost:8080` inside your browser to see the output of the application listening on port 80 within your DevSpace. ### devspace.sync To comfortably sync code to a DevSpace, the DevSpace CLI allows to configure real-time code synchronizations. 
A sync config consists of the following: - `resourceType` *string* kubernetes resource type that is selected (currently only `pod` is supported) -- `labelSelector` *map[string]string* usually the release/app name +- `labelSelector` *map[string]string* label selector to select the correct pod (usually the release/app name) +- `containerName` *string* the name of the container within the pod to sync to (default: the first specified container in the pod) - `localSubPath` *string* relative path to the folder that should be synced (default: path to your local project root) - `containerPath` *string* absolute path within the container - `excludePaths` *string array* paths to exclude files/folders from sync in .gitignore syntax @@ -138,20 +168,29 @@ This section of the config defines a map of images that can be used in the helm ### images[] An image is defined by: -- `name` *string* of the image that is being pushed to the registry -- `tag` *string* stating the latest tag pushed to the registry (auto-generated) -- `registry` *string* referencing one of the keys defined in the `registries` map +- `name` *string* name of the image that is being pushed to the registry +- `tag` *string* tag indicates the latest tag pushed to the registry (auto-generated) +- `registry` *string* registry references one of the keys defined in the `registries` map - `build` *BuildConfig* defines the build procedure for this image ### images[].build BuildConfig: -- `engine` *Engine* The engine that should be used for building the image +- `dockerfilePath` *string* specifies the path where the dockerfile lies (default: ./Dockerfile) +- `contextPath` *string* specifies the context path for docker (default: ./) +- `engine` *Engine* the engine that should be used for building the image +- `options` *BuildOptions* additional options used for building the image + +### images[].build.options +BuildOptions: +- `buildArgs` *map[string]string* key-value map used for specifying build arguments passed to 
docker +- `target` *string* the target used for multi-stage builds (see [multi-stage-build](https://docs.docker.com/develop/develop-images/multistage-build/)) +- `network` *string* the network mode used for building the image (see [network](https://docs.docker.com/network/)) ### images[].build.engine Engine: An image build is mainly defined by the build engine. There are 2 build engines currently supported (choose only one): -- `docker` *DockerConfig* uses the local Docker daemon or a Docker daemon running inside a Minikube cluster (if `preferMinikube` == true) -- `kaniko` *KanikoConfig* builds images in userspace within a build pod running inside the Kubernetes cluster +- `docker` *DockerConfig* use the local Docker daemon or a Docker daemon running inside a Minikube cluster (if `preferMinikube` == true) +- `kaniko` *KanikoConfig* build images in userspace within a build pod running inside the Kubernetes cluster ### images[].build.engine.docker DockerConfig: @@ -168,25 +207,25 @@ This section of the config defines a map of image registries. You can use any ex ### registries[] ImageRegistry: -- `url` *string* of the registry (format: myregistry.com:port) +- `url` *string* the url of the registry (format: myregistry.com:port) - `insecure` *bool* flag to allow pushing to registries without HTTPS - `user` *RegistryUser* credentials for pushing to / pulling from the registry ### registries[].user RegistryUser: -- `username` *string* that should be used for pushing and pulling from the registry -- `password` *string* that should be used for pushing and pulling from the registry +- `username` *string* the user that should be used for pushing and pulling from the registry +- `password` *string* the password that should be used for pushing and pulling from the registry ## services Defines cluster services that the DevSpace uses. 
### services.internalRegistry The `internalRegistry` is used to tell the DevSpace CLI to deploy a private registry inside the Kubernetes cluster: -- `release` *Release* for deploying the registry (see [Type: Release](#type-release)) +- `release` *Release* release options for deploying the registry (see [Type: Release](#type-release)) ### services.tiller The `tiller` service is defined by: -- `release` *Release* definition for tiller (see [Type: Release](#type-release)) +- `release` *Release* release definition for tiller (see [Type: Release](#type-release)) - `appNamespaces` *string array* defines a list of namespace that tiller may deploy applications to ## cluster @@ -206,6 +245,6 @@ ClusterUser: ## Type: Release A `release` is specified through: -- `name` *string* of the release -- `namespace` *string* to deploy the release to -- `values` *map[string] any* that are set during the deployment (contents of the values.yaml in helm) +- `name` *string* name of the release +- `namespace` *string* the namespace to deploy the release to +- `values` *map[string] any* override values that are set during the deployment (contents of the values.yaml in helm) diff --git a/docs/website/sidebars.json b/docs/website/sidebars.json index 7c71f85a09..5e06c4688e 100644 --- a/docs/website/sidebars.json +++ b/docs/website/sidebars.json @@ -11,6 +11,7 @@ "Commands": [ "cli/init", "cli/up", + "cli/enter", "cli/down", "cli/reset", "cli/add", diff --git a/pkg/devspace/builder/docker/docker.go b/pkg/devspace/builder/docker/docker.go index 5537f2633f..1cc6af2fd3 100644 --- a/pkg/devspace/builder/docker/docker.go +++ b/pkg/devspace/builder/docker/docker.go @@ -128,6 +128,8 @@ func (b *Builder) BuildImage(contextPath, dockerfilePath string, options *types. 
Tags: []string{b.imageURL}, Dockerfile: relDockerfile, BuildArgs: options.BuildArgs, + Target: options.Target, + NetworkMode: options.NetworkMode, AuthConfigs: authConfigs, }) if err != nil { diff --git a/pkg/devspace/config/v1/image.go b/pkg/devspace/config/v1/image.go index be5c3f1704..69fd66ae63 100644 --- a/pkg/devspace/config/v1/image.go +++ b/pkg/devspace/config/v1/image.go @@ -38,4 +38,6 @@ type DockerBuildEngine struct { //BuildOptions defines options for building Docker images type BuildOptions struct { BuildArgs *map[string]*string `yaml:"buildArgs"` + Target *string `yaml:"target"` + Network *string `yaml:"network"` } diff --git a/pkg/devspace/config/v1/terminal.go b/pkg/devspace/config/v1/terminal.go index 2a048da065..1347864221 100644 --- a/pkg/devspace/config/v1/terminal.go +++ b/pkg/devspace/config/v1/terminal.go @@ -3,5 +3,5 @@ package v1 // Terminal describes the terminal options type Terminal struct { ContainerName *string `yaml:"containerName"` - Command *[]*string `yaml:"shell"` + Command *[]*string `yaml:"command"` }