From c3b012933efe805e5820a1d85c4fbfed446baeb1 Mon Sep 17 00:00:00 2001 From: Russell Centanni Date: Thu, 10 Mar 2022 18:54:44 -0500 Subject: [PATCH 1/9] refactor: docker-compose declarative configuration --- pkg/devspace/compose/loader.go | 722 +++++++++--------- pkg/devspace/compose/loader_test.go | 36 +- .../compose/testdata/basic/expected.yaml | 35 +- .../compose/testdata/build/expected.yaml | 19 +- .../build_args_list/docker-compose.yaml | 1 - .../testdata/build_args_list/expected.yaml | 32 +- .../build_args_map/docker-compose.yaml | 1 - .../testdata/build_args_map/expected.yaml | 32 +- .../testdata/build_context/expected.yaml | 20 +- .../testdata/build_dockerfile/expected.yaml | 21 +- .../testdata/build_entry_point/expected.yaml | 21 +- .../testdata/build_image/expected.yaml | 20 +- .../testdata/build_image_tag/expected.yaml | 20 +- .../testdata/build_network/expected.yaml | 26 +- .../testdata/build_target/expected.yaml | 26 +- .../compose/testdata/command/expected.yaml | 24 +- .../testdata/container_name/expected.yaml | 36 +- .../testdata/entry_point/expected.yaml | 24 +- .../testdata/env_file_multiple/expected.yaml | 34 +- .../testdata/env_file_single/expected.yaml | 26 +- .../testdata/environment/expected.yaml | 34 +- .../compose/testdata/expose/expected.yaml | 38 +- .../extra_hosts_multiple/expected.yaml | 32 +- .../testdata/extra_hosts_single/expected.yaml | 29 +- .../testdata/healthcheck/expected.yaml | 100 +-- .../compose/testdata/ports-long/expected.yaml | 45 +- .../testdata/ports-short/expected.yaml | 126 ++- .../testdata/restart-always/expected.yaml | 22 +- .../compose/testdata/restart-no/expected.yaml | 22 +- .../testdata/restart-on-failure/expected.yaml | 22 +- .../restart-unless-stopped/expected.yaml | 22 +- .../docker-compose.yaml | 0 .../expected.yaml | 0 .../docker-compose.yaml | 0 .../expected.yaml | 0 .../docker-compose.yaml | 0 .../expected.yaml | 0 .../docker-compose.yaml | 0 .../expected.yaml | 0 .../docker-compose.yaml | 0 
.../expected.yaml | 0 .../docker-compose.yaml | 0 .../expected.yaml | 0 43 files changed, 875 insertions(+), 793 deletions(-) rename pkg/devspace/compose/testdata/{depends_on => x_depends_on}/docker-compose.yaml (100%) rename pkg/devspace/compose/testdata/{depends_on => x_depends_on}/expected.yaml (100%) rename pkg/devspace/compose/testdata/{secret-long => x_secret-long}/docker-compose.yaml (100%) rename pkg/devspace/compose/testdata/{secret-long => x_secret-long}/expected.yaml (100%) rename pkg/devspace/compose/testdata/{secret-short => x_secret-short}/docker-compose.yaml (100%) rename pkg/devspace/compose/testdata/{secret-short => x_secret-short}/expected.yaml (100%) rename pkg/devspace/compose/testdata/{volumes-depends_on => x_volumes-depends_on}/docker-compose.yaml (100%) rename pkg/devspace/compose/testdata/{volumes-depends_on => x_volumes-depends_on}/expected.yaml (100%) rename pkg/devspace/compose/testdata/{volumes-long => x_volumes-long}/docker-compose.yaml (100%) rename pkg/devspace/compose/testdata/{volumes-long => x_volumes-long}/expected.yaml (100%) rename pkg/devspace/compose/testdata/{volumes-short => x_volumes-short}/docker-compose.yaml (100%) rename pkg/devspace/compose/testdata/{volumes-short => x_volumes-short}/expected.yaml (100%) diff --git a/pkg/devspace/compose/loader.go b/pkg/devspace/compose/loader.go index a0ef06dabd..7bb6e6fec7 100644 --- a/pkg/devspace/compose/loader.go +++ b/pkg/devspace/compose/loader.go @@ -1,6 +1,5 @@ package compose -/* import ( "fmt" "io/ioutil" @@ -14,13 +13,12 @@ import ( composeloader "github.com/compose-spec/compose-go/loader" composetypes "github.com/compose-spec/compose-go/types" - "gopkg.in/yaml.v3" - v1 "k8s.io/api/core/v1" - "github.com/loft-sh/devspace/pkg/devspace/config/constants" "github.com/loft-sh/devspace/pkg/devspace/config/versions/latest" + "github.com/loft-sh/devspace/pkg/devspace/deploy/deployer/helm" "github.com/loft-sh/devspace/pkg/util/log" - "github.com/loft-sh/devspace/pkg/util/ptr" + 
"gopkg.in/yaml.v2" + v1 "k8s.io/api/core/v1" ) var ( @@ -56,8 +54,8 @@ func NewDockerComposeLoader(composePath string) ConfigLoader { } } -func (d *configLoader) Load(log log.Logger) (*latest.Config, error) { - composeFile, err := ioutil.ReadFile(d.composePath) +func (cl *configLoader) Load(log log.Logger) (*latest.Config, error) { + composeFile, err := ioutil.ReadFile(cl.composePath) if err != nil { return nil, err } @@ -74,25 +72,29 @@ func (d *configLoader) Load(log log.Logger) (*latest.Config, error) { } config := latest.New().(*latest.Config) + config.Name = dockerCompose.Name + if config.Name == "" { + config.Name = "docker-compose" + } cwd, err := os.Getwd() if err != nil { return nil, err } - var hooks []*latest.HookConfig - var images map[string]*latest.ImageConfig - deployments := []*latest.DeploymentConfig{} - dev := latest.DevConfig{} - baseDir := filepath.Dir(d.composePath) + // var hooks []*latest.HookConfig + var images map[string]*latest.Image + var deployments map[string]*latest.DeploymentConfig + var dev map[string]*latest.DevPod + baseDir := filepath.Dir(cl.composePath) if len(dockerCompose.Networks) > 0 { log.Warn("networks are not supported") } - dependentsMap, err := calculateDependentsMap(dockerCompose) - if err != nil { - return nil, err - } + // dependentsMap, err := calculateDependentsMap(dockerCompose) + // if err != nil { + // return nil, err + // } err = dockerCompose.WithServices(nil, func(service composetypes.ServiceConfig) error { imageConfig, err := imageConfig(cwd, service) @@ -101,60 +103,70 @@ func (d *configLoader) Load(log log.Logger) (*latest.Config, error) { } if imageConfig != nil { if images == nil { - images = map[string]*latest.ImageConfig{} + images = map[string]*latest.Image{} } images[service.Name] = imageConfig } - deploymentConfig, err := deploymentConfig(service, dockerCompose.Volumes, log) + deploymentName := formatName(service.Name) + deploymentConfig, err := cl.deploymentConfig(service, dockerCompose.Volumes, 
log) if err != nil { return err } - deployments = append(deployments, deploymentConfig) + if deployments == nil { + deployments = map[string]*latest.DeploymentConfig{} + } + deployments[deploymentName] = deploymentConfig - err = addDevConfig(&dev, service, baseDir, log) + devConfig, err := addDevConfig(service, baseDir, log) if err != nil { return err } - - bindVolumeHooks := []*latest.HookConfig{} - for _, volume := range service.Volumes { - if volume.Type == composetypes.VolumeTypeBind { - bindVolumeHook := createUploadVolumeHook(service, volume) - bindVolumeHooks = append(bindVolumeHooks, bindVolumeHook) + if devConfig != nil { + if dev == nil { + dev = map[string]*latest.DevPod{} } + dev[service.Name] = devConfig } - if len(bindVolumeHooks) > 0 { - hooks = append(hooks, bindVolumeHooks...) - hooks = append(hooks, createUploadDoneHook(service)) - } - - _, isDependency := dependentsMap[service.Name] - if isDependency { - waitHook := createWaitHook(service) - hooks = append(hooks, waitHook) - } + // bindVolumeHooks := []*latest.HookConfig{} + // for _, volume := range service.Volumes { + // if volume.Type == composetypes.VolumeTypeBind { + // bindVolumeHook := createUploadVolumeHook(service, volume) + // bindVolumeHooks = append(bindVolumeHooks, bindVolumeHook) + // } + // } + + // if len(bindVolumeHooks) > 0 { + // hooks = append(hooks, bindVolumeHooks...) 
+ // hooks = append(hooks, createUploadDoneHook(service)) + // } + + // _, isDependency := dependentsMap[service.Name] + // if isDependency { + // waitHook := createWaitHook(service) + // hooks = append(hooks, waitHook) + // } - return err + return nil }) if err != nil { return nil, err } - for secretName, secret := range dockerCompose.Secrets { - createHook, err := createSecretHook(secretName, cwd, secret) - if err != nil { - return nil, err - } - hooks = append(hooks, createHook) - hooks = append(hooks, deleteSecretHook(secretName)) - } + // for secretName, secret := range dockerCompose.Secrets { + // createHook, err := createSecretHook(secretName, cwd, secret) + // if err != nil { + // return nil, err + // } + // hooks = append(hooks, createHook) + // hooks = append(hooks, deleteSecretHook(secretName)) + // } config.Images = images config.Deployments = deployments config.Dev = dev - config.Hooks = hooks + // config.Hooks = hooks return config, nil } @@ -175,16 +187,17 @@ func (d *configLoader) Save(config *latest.Config) error { return nil } -func addDevConfig(dev *latest.DevConfig, service composetypes.ServiceConfig, baseDir string, log log.Logger) error { - devPorts := dev.Ports - if devPorts == nil { - devPorts = []*latest.PortForwardingConfig{} - } +func addDevConfig(service composetypes.ServiceConfig, baseDir string, log log.Logger) (*latest.DevPod, error) { + var dev *latest.DevPod + + devPorts := []*latest.PortMapping{} if len(service.Ports) > 0 { - portForwarding := &latest.PortForwardingConfig{ - LabelSelector: labelSelector(service.Name), - PortMappings: []*latest.PortMapping{}, + if dev == nil { + dev = &latest.DevPod{ + LabelSelector: labelSelector(service.Name), + Ports: []*latest.PortMapping{}, + } } for _, port := range service.Ports { portMapping := &latest.PortMapping{} @@ -195,73 +208,68 @@ func addDevConfig(dev *latest.DevConfig, service composetypes.ServiceConfig, bas } if port.Published != port.Target { - portMapping.LocalPort = 
ptr.Int(int(port.Published)) - portMapping.RemotePort = ptr.Int(int(port.Target)) + portMapping.Port = fmt.Sprint(port.Published) + ":" + fmt.Sprint(port.Target) } else { - portMapping.LocalPort = ptr.Int(int(port.Published)) + portMapping.Port = fmt.Sprint(port.Published) } if port.HostIP != "" { portMapping.BindAddress = port.HostIP } - portForwarding.PortMappings = append(portForwarding.PortMappings, portMapping) + devPorts = append(devPorts, portMapping) } - devPorts = append(devPorts, portForwarding) } if len(service.Expose) > 0 { - portForwarding := &latest.PortForwardingConfig{ - LabelSelector: labelSelector(service.Name), - PortMappings: []*latest.PortMapping{}, + if dev == nil { + dev = &latest.DevPod{ + LabelSelector: labelSelector(service.Name), + } } + for _, expose := range service.Expose { - exposePort, err := strconv.Atoi(expose) - if err != nil { - return fmt.Errorf("expected integer for port number: %s", err.Error()) - } - portForwarding.PortMappings = append(portForwarding.PortMappings, &latest.PortMapping{ - LocalPort: ptr.Int(exposePort), + devPorts = append(devPorts, &latest.PortMapping{ + Port: expose, }) } - devPorts = append(devPorts, portForwarding) } - devSync := dev.Sync - if devSync == nil { - devSync = []*latest.SyncConfig{} - } + // devSync := dev.Sync + // if devSync == nil { + // devSync = []*latest.SyncConfig{} + // } - for _, volume := range service.Volumes { - if volume.Type == composetypes.VolumeTypeBind { - sync := &latest.SyncConfig{ - LabelSelector: labelSelector(service.Name), - ContainerName: resolveContainerName(service), - LocalSubPath: resolveLocalPath(volume), - ContainerPath: volume.Target, - } + // for _, volume := range service.Volumes { + // if volume.Type == composetypes.VolumeTypeBind { + // sync := &latest.SyncConfig{ + // LabelSelector: labelSelector(service.Name), + // ContainerName: resolveContainerName(service), + // LocalSubPath: resolveLocalPath(volume), + // ContainerPath: volume.Target, + // } - _, err := 
os.Stat(filepath.Join(baseDir, volume.Source, DockerIgnorePath)) - if err == nil { - sync.ExcludeFile = DockerIgnorePath - } + // _, err := os.Stat(filepath.Join(baseDir, volume.Source, DockerIgnorePath)) + // if err == nil { + // sync.ExcludeFile = DockerIgnorePath + // } - devSync = append(devSync, sync) - } - } + // devSync = append(devSync, sync) + // } + // } if len(devPorts) > 0 { dev.Ports = devPorts } - if len(devSync) > 0 { - dev.Sync = devSync - } + // if len(devSync) > 0 { + // dev.Sync = devSync + // } - return nil + return dev, nil } -func imageConfig(cwd string, service composetypes.ServiceConfig) (*latest.ImageConfig, error) { +func imageConfig(cwd string, service composetypes.ServiceConfig) (*latest.Image, error) { build := service.Build if build == nil { return nil, nil @@ -271,42 +279,41 @@ func imageConfig(cwd string, service composetypes.ServiceConfig) (*latest.ImageC if err != nil { return nil, err } + context = filepath.ToSlash(context) + if context == "." { + context = "" + } dockerfile, err := filepath.Rel(cwd, filepath.Join(cwd, build.Context, build.Dockerfile)) if err != nil { return nil, err } - image := &latest.ImageConfig{ + image := &latest.Image{ Image: resolveImage(service), - Context: filepath.ToSlash(context), + Context: context, Dockerfile: filepath.ToSlash(dockerfile), } - buildOptions := &latest.BuildOptions{} - hasBuildOptions := false if build.Args != nil { - buildOptions.BuildArgs = build.Args - hasBuildOptions = true + image.BuildArgs = build.Args } if build.Target != "" { - buildOptions.Target = build.Target - hasBuildOptions = true + image.Target = build.Target } if build.Network != "" { - buildOptions.Network = build.Network - hasBuildOptions = true + image.Network = build.Network } - if hasBuildOptions { - image.Build = &latest.BuildConfig{ - Docker: &latest.DockerConfig{ - Options: buildOptions, - }, - } - } + // if hasBuildOptions { + // image.Build = &latest.BuildConfig{ + // Docker: &latest.DockerConfig{ + // 
Options: buildOptions, + // }, + // } + // } if len(service.Entrypoint) > 0 { image.Entrypoint = service.Entrypoint @@ -315,38 +322,38 @@ func imageConfig(cwd string, service composetypes.ServiceConfig) (*latest.ImageC return image, nil } -func createSecretHook(name string, cwd string, secret composetypes.SecretConfig) (*latest.HookConfig, error) { - file, err := filepath.Rel(cwd, filepath.Join(cwd, secret.File)) - if err != nil { - return nil, err - } +// func createSecretHook(name string, cwd string, secret composetypes.SecretConfig) (*latest.HookConfig, error) { +// file, err := filepath.Rel(cwd, filepath.Join(cwd, secret.File)) +// if err != nil { +// return nil, err +// } - return &latest.HookConfig{ - Events: []string{"before:deploy"}, - Command: fmt.Sprintf("kubectl create secret generic %s --namespace=${devspace.namespace} --dry-run=client --from-file=%s=%s -o yaml | kubectl apply -f -", name, name, filepath.ToSlash(file)), - }, nil -} +// return &latest.HookConfig{ +// Events: []string{"before:deploy"}, +// Command: fmt.Sprintf("kubectl create secret generic %s --namespace=${devspace.namespace} --dry-run=client --from-file=%s=%s -o yaml | kubectl apply -f -", name, name, filepath.ToSlash(file)), +// }, nil +// } -func deleteSecretHook(name string) *latest.HookConfig { - return &latest.HookConfig{ - Events: []string{"after:purge"}, - Command: fmt.Sprintf("kubectl delete secret %s --namespace=${devspace.namespace} --ignore-not-found", name), - } -} +// func deleteSecretHook(name string) *latest.HookConfig { +// return &latest.HookConfig{ +// Events: []string{"after:purge"}, +// Command: fmt.Sprintf("kubectl delete secret %s --namespace=${devspace.namespace} --ignore-not-found", name), +// } +// } -func deploymentConfig(service composetypes.ServiceConfig, composeVolumes map[string]composetypes.VolumeConfig, log log.Logger) (*latest.DeploymentConfig, error) { +func (cl *configLoader) deploymentConfig(service composetypes.ServiceConfig, composeVolumes 
map[string]composetypes.VolumeConfig, log log.Logger) (*latest.DeploymentConfig, error) { values := map[string]interface{}{} - volumes, volumeMounts, bindVolumeMounts := volumesConfig(service, composeVolumes, log) - if len(volumes) > 0 { - values["volumes"] = volumes - } + // volumes, volumeMounts, bindVolumeMounts := volumesConfig(service, composeVolumes, log) + // if len(volumes) > 0 { + // values["volumes"] = volumes + // } - if hasLocalSync(service) { - values["initContainers"] = []interface{}{initContainerConfig(service, bindVolumeMounts)} - } + // if hasLocalSync(service) { + // values["initContainers"] = []interface{}{initContainerConfig(service, bindVolumeMounts)} + // } - container, err := containerConfig(service, volumeMounts) + container, err := containerConfig(service, []interface{}{}) if err != nil { return nil, err } @@ -428,81 +435,83 @@ func deploymentConfig(service composetypes.ServiceConfig, composeVolumes map[str } return &latest.DeploymentConfig{ - Name: formatName(service.Name), Helm: &latest.HelmConfig{ - ComponentChart: ptr.Bool(true), - Values: values, + Chart: &latest.ChartConfig{ + Name: helm.DevSpaceChartConfig.Name, + RepoURL: helm.DevSpaceChartConfig.RepoURL, + }, + Values: values, }, }, nil } -func volumesConfig( - service composetypes.ServiceConfig, - composeVolumes map[string]composetypes.VolumeConfig, - log log.Logger, -) (volumes []interface{}, volumeMounts []interface{}, bindVolumeMounts []interface{}) { - for _, secret := range service.Secrets { - volume := createSecretVolume(secret) - volumes = append(volumes, volume) - - volumeMount := createSecretVolumeMount(secret) - volumeMounts = append(volumeMounts, volumeMount) - } - - var volumeVolumes []composetypes.ServiceVolumeConfig - var bindVolumes []composetypes.ServiceVolumeConfig - var tmpfsVolumes []composetypes.ServiceVolumeConfig - for _, serviceVolume := range service.Volumes { - switch serviceVolume.Type { - case composetypes.VolumeTypeBind: - bindVolumes = 
append(bindVolumes, serviceVolume) - case composetypes.VolumeTypeTmpfs: - tmpfsVolumes = append(tmpfsVolumes, serviceVolume) - case composetypes.VolumeTypeVolume: - volumeVolumes = append(volumeVolumes, serviceVolume) - default: - log.Warnf("%s volumes are not supported", serviceVolume.Type) - } - } - - volumeMap := map[string]interface{}{} - for idx, volumeVolume := range volumeVolumes { - volumeName := resolveServiceVolumeName(service, volumeVolume, idx+1) - _, ok := volumeMap[volumeName] - if !ok { - volume := createVolume(volumeName, DefaultVolumeSize) - volumes = append(volumes, volume) - volumeMap[volumeName] = volume - } - - volumeMount := createServiceVolumeMount(volumeName, volumeVolume) - volumeMounts = append(volumeMounts, volumeMount) - } - - for _, tmpfsVolume := range tmpfsVolumes { - volumeName := resolveServiceVolumeName(service, tmpfsVolume, len(volumes)) - volume := createEmptyDirVolume(volumeName, tmpfsVolume) - volumes = append(volumes, volume) - - volumeMount := createServiceVolumeMount(volumeName, tmpfsVolume) - volumeMounts = append(volumeMounts, volumeMount) - } - - for idx, bindVolume := range bindVolumes { - volumeName := fmt.Sprintf("volume-%d", idx+1) - volume := createEmptyDirVolume(volumeName, bindVolume) - volumes = append(volumes, volume) - - volumeMount := createServiceVolumeMount(volumeName, bindVolume) - volumeMounts = append(volumeMounts, volumeMount) - - bindVolumeMount := createInitVolumeMount(volumeName, bindVolume) - bindVolumeMounts = append(bindVolumeMounts, bindVolumeMount) - } - - return volumes, volumeMounts, bindVolumeMounts - -} +// func volumesConfig( +// service composetypes.ServiceConfig, +// composeVolumes map[string]composetypes.VolumeConfig, +// log log.Logger, +// ) (volumes []interface{}, volumeMounts []interface{}, bindVolumeMounts []interface{}) { +// for _, secret := range service.Secrets { +// volume := createSecretVolume(secret) +// volumes = append(volumes, volume) + +// volumeMount := 
createSecretVolumeMount(secret) +// volumeMounts = append(volumeMounts, volumeMount) +// } + +// var volumeVolumes []composetypes.ServiceVolumeConfig +// var bindVolumes []composetypes.ServiceVolumeConfig +// var tmpfsVolumes []composetypes.ServiceVolumeConfig +// for _, serviceVolume := range service.Volumes { +// switch serviceVolume.Type { +// case composetypes.VolumeTypeBind: +// bindVolumes = append(bindVolumes, serviceVolume) +// case composetypes.VolumeTypeTmpfs: +// tmpfsVolumes = append(tmpfsVolumes, serviceVolume) +// case composetypes.VolumeTypeVolume: +// volumeVolumes = append(volumeVolumes, serviceVolume) +// default: +// log.Warnf("%s volumes are not supported", serviceVolume.Type) +// } +// } + +// volumeMap := map[string]interface{}{} +// for idx, volumeVolume := range volumeVolumes { +// volumeName := resolveServiceVolumeName(service, volumeVolume, idx+1) +// _, ok := volumeMap[volumeName] +// if !ok { +// volume := createVolume(volumeName, DefaultVolumeSize) +// volumes = append(volumes, volume) +// volumeMap[volumeName] = volume +// } + +// volumeMount := createServiceVolumeMount(volumeName, volumeVolume) +// volumeMounts = append(volumeMounts, volumeMount) +// } + +// for _, tmpfsVolume := range tmpfsVolumes { +// volumeName := resolveServiceVolumeName(service, tmpfsVolume, len(volumes)) +// volume := createEmptyDirVolume(volumeName, tmpfsVolume) +// volumes = append(volumes, volume) + +// volumeMount := createServiceVolumeMount(volumeName, tmpfsVolume) +// volumeMounts = append(volumeMounts, volumeMount) +// } + +// for idx, bindVolume := range bindVolumes { +// volumeName := fmt.Sprintf("volume-%d", idx+1) +// volume := createEmptyDirVolume(volumeName, bindVolume) +// volumes = append(volumes, volume) + +// volumeMount := createServiceVolumeMount(volumeName, bindVolume) +// volumeMounts = append(volumeMounts, volumeMount) + +// bindVolumeMount := createInitVolumeMount(volumeName, bindVolume) +// bindVolumeMounts = append(bindVolumeMounts, 
bindVolumeMount) +// } + +// return volumes, volumeMounts, bindVolumeMounts + +// } func containerConfig(service composetypes.ServiceConfig, volumeMounts []interface{}) (map[string]interface{}, error) { container := map[string]interface{}{ @@ -611,85 +620,85 @@ func containerLivenessProbe(health *composetypes.HealthCheckConfig) (map[string] return livenessProbe, nil } -func createEmptyDirVolume(volumeName string, volume composetypes.ServiceVolumeConfig) interface{} { - // create an emptyDir volume - emptyDir := map[string]interface{}{} - if volume.Tmpfs != nil { - emptyDir["sizeLimit"] = fmt.Sprintf("%d", volume.Tmpfs.Size) - } - return map[string]interface{}{ - "name": volumeName, - "emptyDir": emptyDir, - } -} - -func createSecretVolume(secret composetypes.ServiceSecretConfig) interface{} { - return map[string]interface{}{ - "name": secret.Source, - "secret": map[string]interface{}{ - "secretName": secret.Source, - }, - } -} - -func createSecretVolumeMount(secret composetypes.ServiceSecretConfig) interface{} { - target := secret.Source - if secret.Target != "" { - target = secret.Target - } - return map[string]interface{}{ - "containerPath": fmt.Sprintf("/run/secrets/%s", target), - "volume": map[string]interface{}{ - "name": secret.Source, - "subPath": target, - "readOnly": true, - }, - } -} - -func createServiceVolumeMount(volumeName string, volume composetypes.ServiceVolumeConfig) interface{} { - return map[string]interface{}{ - "containerPath": volume.Target, - "volume": map[string]interface{}{ - "name": volumeName, - "readOnly": volume.ReadOnly, - }, - } -} - -func createInitVolumeMount(volumeName string, volume composetypes.ServiceVolumeConfig) interface{} { - return map[string]interface{}{ - "containerPath": volume.Target, - "volume": map[string]interface{}{ - "name": volumeName, - "readOnly": false, - }, - } -} - -func createVolume(name string, size string) interface{} { - return map[string]interface{}{ - "name": name, - "size": size, - } -} +// func 
createEmptyDirVolume(volumeName string, volume composetypes.ServiceVolumeConfig) interface{} { +// // create an emptyDir volume +// emptyDir := map[string]interface{}{} +// if volume.Tmpfs != nil { +// emptyDir["sizeLimit"] = fmt.Sprintf("%d", volume.Tmpfs.Size) +// } +// return map[string]interface{}{ +// "name": volumeName, +// "emptyDir": emptyDir, +// } +// } + +// func createSecretVolume(secret composetypes.ServiceSecretConfig) interface{} { +// return map[string]interface{}{ +// "name": secret.Source, +// "secret": map[string]interface{}{ +// "secretName": secret.Source, +// }, +// } +// } + +// func createSecretVolumeMount(secret composetypes.ServiceSecretConfig) interface{} { +// target := secret.Source +// if secret.Target != "" { +// target = secret.Target +// } +// return map[string]interface{}{ +// "containerPath": fmt.Sprintf("/run/secrets/%s", target), +// "volume": map[string]interface{}{ +// "name": secret.Source, +// "subPath": target, +// "readOnly": true, +// }, +// } +// } + +// func createServiceVolumeMount(volumeName string, volume composetypes.ServiceVolumeConfig) interface{} { +// return map[string]interface{}{ +// "containerPath": volume.Target, +// "volume": map[string]interface{}{ +// "name": volumeName, +// "readOnly": volume.ReadOnly, +// }, +// } +// } + +// func createInitVolumeMount(volumeName string, volume composetypes.ServiceVolumeConfig) interface{} { +// return map[string]interface{}{ +// "containerPath": volume.Target, +// "volume": map[string]interface{}{ +// "name": volumeName, +// "readOnly": false, +// }, +// } +// } + +// func createVolume(name string, size string) interface{} { +// return map[string]interface{}{ +// "name": name, +// "size": size, +// } +// } func formatName(name string) string { return regexp.MustCompile(`[\._]`).ReplaceAllString(name, "-") } -func initContainerConfig(service composetypes.ServiceConfig, volumeMounts []interface{}) map[string]interface{} { - return map[string]interface{}{ - "name": 
UploadVolumesContainerName, - "image": "alpine", - "command": []interface{}{"sh"}, - "args": []interface{}{ - "-c", - "while [ ! -f /tmp/done ]; do sleep 2; done", - }, - "volumeMounts": volumeMounts, - } -} +// func initContainerConfig(service composetypes.ServiceConfig, volumeMounts []interface{}) map[string]interface{} { +// return map[string]interface{}{ +// "name": UploadVolumesContainerName, +// "image": "alpine", +// "command": []interface{}{"sh"}, +// "args": []interface{}{ +// "-c", +// "while [ ! -f /tmp/done ]; do sleep 2; done", +// }, +// "volumeMounts": volumeMounts, +// } +// } func resolveContainerName(service composetypes.ServiceConfig) string { if service.ContainerName != "" { @@ -706,75 +715,75 @@ func resolveImage(service composetypes.ServiceConfig) string { return image } -func resolveLocalPath(volume composetypes.ServiceVolumeConfig) string { - localSubPath := volume.Source - - if strings.HasPrefix(localSubPath, "~") { - localSubPath = fmt.Sprintf(`$!(echo "$HOME/%s")`, strings.TrimLeft(localSubPath, "~/")) - } - return localSubPath -} - -func resolveServiceVolumeName(service composetypes.ServiceConfig, volume composetypes.ServiceVolumeConfig, idx int) string { - volumeName := volume.Source - if volumeName == "" { - volumeName = fmt.Sprintf("%s-%d", formatName(service.Name), idx) - } - return volumeName -} - -func createWaitHook(service composetypes.ServiceConfig) *latest.HookConfig { - serviceName := formatName(service.Name) - return &latest.HookConfig{ - Events: []string{fmt.Sprintf("after:deploy:%s", serviceName)}, - Container: &latest.HookContainer{ - LabelSelector: labelSelector(serviceName), - ContainerName: resolveContainerName(service), - }, - Wait: &latest.HookWaitConfig{ - Running: true, - TerminatedWithCode: ptr.Int32(0), - }, - } -} - -func createUploadVolumeHook(service composetypes.ServiceConfig, volume composetypes.ServiceVolumeConfig) *latest.HookConfig { - serviceName := formatName(service.Name) - return &latest.HookConfig{ - 
Events: []string{"after:deploy:" + serviceName}, - Upload: &latest.HookSyncConfig{ - LocalPath: resolveLocalPath(volume), - ContainerPath: volume.Target, - }, - Container: &latest.HookContainer{ - LabelSelector: labelSelector(service.Name), - ContainerName: UploadVolumesContainerName, - }, - } -} - -func createUploadDoneHook(service composetypes.ServiceConfig) *latest.HookConfig { - serviceName := formatName(service.Name) - return &latest.HookConfig{ - Events: []string{"after:deploy:" + serviceName}, - Command: "touch /tmp/done", - Container: &latest.HookContainer{ - LabelSelector: labelSelector(service.Name), - ContainerName: UploadVolumesContainerName, - }, - } -} - -func calculateDependentsMap(dockerCompose *composetypes.Project) (map[string][]string, error) { - tree := map[string][]string{} - err := dockerCompose.WithServices(nil, func(service composetypes.ServiceConfig) error { - for _, name := range service.GetDependencies() { - tree[name] = append(tree[name], service.Name) - } - return nil - }) - return tree, err -} +// func resolveLocalPath(volume composetypes.ServiceVolumeConfig) string { +// localSubPath := volume.Source + +// if strings.HasPrefix(localSubPath, "~") { +// localSubPath = fmt.Sprintf(`$!(echo "$HOME/%s")`, strings.TrimLeft(localSubPath, "~/")) +// } +// return localSubPath +// } + +// func resolveServiceVolumeName(service composetypes.ServiceConfig, volume composetypes.ServiceVolumeConfig, idx int) string { +// volumeName := volume.Source +// if volumeName == "" { +// volumeName = fmt.Sprintf("%s-%d", formatName(service.Name), idx) +// } +// return volumeName +// } + +// func createWaitHook(service composetypes.ServiceConfig) *latest.HookConfig { +// serviceName := formatName(service.Name) +// return &latest.HookConfig{ +// Events: []string{fmt.Sprintf("after:deploy:%s", serviceName)}, +// Container: &latest.HookContainer{ +// LabelSelector: labelSelector(serviceName), +// ContainerName: resolveContainerName(service), +// }, +// Wait: 
&latest.HookWaitConfig{ +// Running: true, +// TerminatedWithCode: ptr.Int32(0), +// }, +// } +// } + +// func createUploadVolumeHook(service composetypes.ServiceConfig, volume composetypes.ServiceVolumeConfig) *latest.HookConfig { +// serviceName := formatName(service.Name) +// return &latest.HookConfig{ +// Events: []string{"after:deploy:" + serviceName}, +// Upload: &latest.HookSyncConfig{ +// LocalPath: resolveLocalPath(volume), +// ContainerPath: volume.Target, +// }, +// Container: &latest.HookContainer{ +// LabelSelector: labelSelector(service.Name), +// ContainerName: UploadVolumesContainerName, +// }, +// } +// } + +// func createUploadDoneHook(service composetypes.ServiceConfig) *latest.HookConfig { +// serviceName := formatName(service.Name) +// return &latest.HookConfig{ +// Events: []string{"after:deploy:" + serviceName}, +// Command: "touch /tmp/done", +// Container: &latest.HookContainer{ +// LabelSelector: labelSelector(service.Name), +// ContainerName: UploadVolumesContainerName, +// }, +// } +// } + +// func calculateDependentsMap(dockerCompose *composetypes.Project) (map[string][]string, error) { +// tree := map[string][]string{} +// err := dockerCompose.WithServices(nil, func(service composetypes.ServiceConfig) error { +// for _, name := range service.GetDependencies() { +// tree[name] = append(tree[name], service.Name) +// } +// return nil +// }) +// return tree, err +// } func shellCommandToSlice(command composetypes.ShellCommand) []interface{} { var slice []interface{} @@ -794,12 +803,11 @@ func hasBuild(service composetypes.ServiceConfig) bool { return service.Build != nil } -func hasLocalSync(service composetypes.ServiceConfig) bool { - for _, volume := range service.Volumes { - if volume.Type == composetypes.VolumeTypeBind { - return true - } - } - return false -} -*/ +// func hasLocalSync(service composetypes.ServiceConfig) bool { +// for _, volume := range service.Volumes { +// if volume.Type == composetypes.VolumeTypeBind { +// return 
true +// } +// } +// return false +// } diff --git a/pkg/devspace/compose/loader_test.go b/pkg/devspace/compose/loader_test.go index 4b224bdc85..b3bffb637f 100644 --- a/pkg/devspace/compose/loader_test.go +++ b/pkg/devspace/compose/loader_test.go @@ -5,6 +5,7 @@ import ( "io/ioutil" "os" "path/filepath" + "strings" "testing" composeloader "github.com/compose-spec/compose-go/loader" @@ -28,7 +29,9 @@ func TestLoad(t *testing.T) { } for _, dir := range dirs { - testLoad(dir.Name(), t) + if !strings.HasPrefix(dir.Name(), "x_") { + testLoad(dir.Name(), t) + } } } @@ -75,11 +78,11 @@ func testLoad(dir string, t *testing.T) { assert.Check( t, - cmp.DeepEqual(toDeploymentMap(expectedConfig.Deployments), toDeploymentMap(actualConfig.Deployments)), + cmp.DeepEqual(expectedConfig.Deployments, actualConfig.Deployments), "deployment properties did not match in test case %s", dir, ) - actualDeployments := actualConfig.Deployments + // actualDeployments := actualConfig.Deployments actualConfig.Deployments = nil expectedConfig.Deployments = nil @@ -132,16 +135,16 @@ func testLoad(dir string, t *testing.T) { err = dockerCompose.WithServices(nil, func(service composetypes.ServiceConfig) error { waitHookIdx := getWaitHookIndex(service.Name, actualHooks) - for _, dep := range service.GetDependencies() { - // Check deployments order - assert.Check(t, getDeploymentIndex(dep, actualDeployments) < getDeploymentIndex(service.Name, actualDeployments), "%s deployment should come after %s for test case %s", service.Name, dep, dir) + // for _, dep := range service.GetDependencies() { + // // Check deployments order + // assert.Check(t, getDeploymentIndex(dep, actualDeployments) < getDeploymentIndex(service.Name, actualDeployments), "%s deployment should come after %s for test case %s", service.Name, dep, dir) - // Check for wait hook order - _, ok := expectedWaitHooks[service.Name] - if ok { - assert.Check(t, getWaitHookIndex(dep, actualHooks) < waitHookIdx, "%s wait hook should come after 
%s", service.Name, dep) - } - } + // // Check for wait hook order + // _, ok := expectedWaitHooks[service.Name] + // if ok { + // assert.Check(t, getWaitHookIndex(dep, actualHooks) < waitHookIdx, "%s wait hook should come after %s", service.Name, dep) + // } + // } uploadDoneHookIdx := getUploadDoneHookIndex(service.Name, actualHooks) if uploadDoneHookIdx != -1 { @@ -183,15 +186,6 @@ func toWaitHookMap(hooks []*latest.HookConfig) map[string]latest.HookConfig { return hookMap } -func getDeploymentIndex(name string, deployments []*latest.DeploymentConfig) int { - for idx, deployment := range deployments { - if deployment.Name == name { - return idx - } - } - return -1 -} - func getWaitHookIndex(name string, hooks []*latest.HookConfig) int { for idx, hook := range hooks { if hook.Wait != nil && hook.Container != nil && hook.Container.LabelSelector != nil && hook.Container.LabelSelector["app.kubernetes.io/component"] == name { diff --git a/pkg/devspace/compose/testdata/basic/expected.yaml b/pkg/devspace/compose/testdata/basic/expected.yaml index 5d960862af..3f01613ed0 100644 --- a/pkg/devspace/compose/testdata/basic/expected.yaml +++ b/pkg/devspace/compose/testdata/basic/expected.yaml @@ -1,16 +1,21 @@ -version: v1beta11 +version: v2beta1 +name: docker-compose deployments: -- name: db - helm: - componentChart: true - values: - containers: - - name: db-container - image: mysql/mysql-server:8.0.19 -- name: backend-1-2 - helm: - componentChart: true - values: - containers: - - name: backend-1-2-container - image: mysql/mysql-server:8.0.19 + db: + helm: + chart: + name: component-chart + repo: https://charts.devspace.sh + values: + containers: + - name: db-container + image: mysql/mysql-server:8.0.19 + backend-1-2: + helm: + chart: + name: component-chart + repo: https://charts.devspace.sh + values: + containers: + - name: backend-1-2-container + image: mysql/mysql-server:8.0.19 diff --git a/pkg/devspace/compose/testdata/build/expected.yaml 
b/pkg/devspace/compose/testdata/build/expected.yaml index 00d5fc3750..89aa19a9e6 100644 --- a/pkg/devspace/compose/testdata/build/expected.yaml +++ b/pkg/devspace/compose/testdata/build/expected.yaml @@ -1,4 +1,5 @@ -version: v1beta11 +version: v2beta1 +name: docker-compose images: foo: image: foo @@ -6,10 +7,12 @@ images: dockerfile: foo/Dockerfile deployments: -- name: foo - helm: - componentChart: true - values: - containers: - - name: foo-container - image: foo + foo: + helm: + chart: + name: component-chart + repo: https://charts.devspace.sh + values: + containers: + - name: foo-container + image: foo diff --git a/pkg/devspace/compose/testdata/build_args_list/docker-compose.yaml b/pkg/devspace/compose/testdata/build_args_list/docker-compose.yaml index 89e57d9735..5b37d2c691 100644 --- a/pkg/devspace/compose/testdata/build_args_list/docker-compose.yaml +++ b/pkg/devspace/compose/testdata/build_args_list/docker-compose.yaml @@ -1,7 +1,6 @@ services: foo: build: - context: foo args: - buildno=1 - gitcommithash=cdc3b19 diff --git a/pkg/devspace/compose/testdata/build_args_list/expected.yaml b/pkg/devspace/compose/testdata/build_args_list/expected.yaml index 6a5135c50e..98d89c5c74 100644 --- a/pkg/devspace/compose/testdata/build_args_list/expected.yaml +++ b/pkg/devspace/compose/testdata/build_args_list/expected.yaml @@ -1,21 +1,21 @@ -version: v1beta11 +version: v2beta1 +name: docker-compose + images: foo: image: foo - context: foo - dockerfile: foo/Dockerfile - build: - docker: - options: - buildArgs: - buildno: 1 - gitcommithash: cdc3b19 + dockerfile: Dockerfile + buildArgs: + buildno: 1 + gitcommithash: cdc3b19 deployments: -- name: foo - helm: - componentChart: true - values: - containers: - - name: foo-container - image: foo + foo: + helm: + chart: + name: component-chart + repo: https://charts.devspace.sh + values: + containers: + - name: foo-container + image: foo diff --git a/pkg/devspace/compose/testdata/build_args_map/docker-compose.yaml 
b/pkg/devspace/compose/testdata/build_args_map/docker-compose.yaml index d1c59b5268..2c2fc7efe6 100644 --- a/pkg/devspace/compose/testdata/build_args_map/docker-compose.yaml +++ b/pkg/devspace/compose/testdata/build_args_map/docker-compose.yaml @@ -1,7 +1,6 @@ services: foo: build: - context: foo args: buildno: 1 gitcommithash: cdc3b19 diff --git a/pkg/devspace/compose/testdata/build_args_map/expected.yaml b/pkg/devspace/compose/testdata/build_args_map/expected.yaml index 6a5135c50e..98d89c5c74 100644 --- a/pkg/devspace/compose/testdata/build_args_map/expected.yaml +++ b/pkg/devspace/compose/testdata/build_args_map/expected.yaml @@ -1,21 +1,21 @@ -version: v1beta11 +version: v2beta1 +name: docker-compose + images: foo: image: foo - context: foo - dockerfile: foo/Dockerfile - build: - docker: - options: - buildArgs: - buildno: 1 - gitcommithash: cdc3b19 + dockerfile: Dockerfile + buildArgs: + buildno: 1 + gitcommithash: cdc3b19 deployments: -- name: foo - helm: - componentChart: true - values: - containers: - - name: foo-container - image: foo + foo: + helm: + chart: + name: component-chart + repo: https://charts.devspace.sh + values: + containers: + - name: foo-container + image: foo diff --git a/pkg/devspace/compose/testdata/build_context/expected.yaml b/pkg/devspace/compose/testdata/build_context/expected.yaml index 00d5fc3750..9185446668 100644 --- a/pkg/devspace/compose/testdata/build_context/expected.yaml +++ b/pkg/devspace/compose/testdata/build_context/expected.yaml @@ -1,4 +1,6 @@ -version: v1beta11 +version: v2beta1 +name: docker-compose + images: foo: image: foo @@ -6,10 +8,12 @@ images: dockerfile: foo/Dockerfile deployments: -- name: foo - helm: - componentChart: true - values: - containers: - - name: foo-container - image: foo + foo: + helm: + chart: + name: component-chart + repo: https://charts.devspace.sh + values: + containers: + - name: foo-container + image: foo diff --git a/pkg/devspace/compose/testdata/build_dockerfile/expected.yaml 
b/pkg/devspace/compose/testdata/build_dockerfile/expected.yaml index 81a22171a6..32e9fea355 100644 --- a/pkg/devspace/compose/testdata/build_dockerfile/expected.yaml +++ b/pkg/devspace/compose/testdata/build_dockerfile/expected.yaml @@ -1,15 +1,18 @@ -version: v1beta11 +version: v2beta1 +name: docker-compose + images: foo: image: foo - context: . dockerfile: Dockerfile.prod deployments: -- name: foo - helm: - componentChart: true - values: - containers: - - name: foo-container - image: foo + foo: + helm: + chart: + name: component-chart + repo: https://charts.devspace.sh + values: + containers: + - name: foo-container + image: foo diff --git a/pkg/devspace/compose/testdata/build_entry_point/expected.yaml b/pkg/devspace/compose/testdata/build_entry_point/expected.yaml index 1fd63133f8..12dac444b2 100644 --- a/pkg/devspace/compose/testdata/build_entry_point/expected.yaml +++ b/pkg/devspace/compose/testdata/build_entry_point/expected.yaml @@ -1,16 +1,19 @@ -version: v1beta11 +version: v2beta1 +name: docker-compose + images: foo: image: foo - context: . 
dockerfile: Dockerfile.prod entrypoint: ["nginx", "-g", "daemon off;"] deployments: -- name: foo - helm: - componentChart: true - values: - containers: - - name: foo-container - image: foo + foo: + helm: + chart: + name: component-chart + repo: https://charts.devspace.sh + values: + containers: + - name: foo-container + image: foo diff --git a/pkg/devspace/compose/testdata/build_image/expected.yaml b/pkg/devspace/compose/testdata/build_image/expected.yaml index e96dc7c938..ca26199c32 100644 --- a/pkg/devspace/compose/testdata/build_image/expected.yaml +++ b/pkg/devspace/compose/testdata/build_image/expected.yaml @@ -1,4 +1,6 @@ -version: v1beta11 +version: v2beta1 +name: docker-compose + images: foo: image: loft.sh/foo @@ -6,10 +8,12 @@ images: dockerfile: foo/Dockerfile deployments: -- name: foo - helm: - componentChart: true - values: - containers: - - name: foo-container - image: loft.sh/foo + foo: + helm: + chart: + name: component-chart + repo: https://charts.devspace.sh + values: + containers: + - name: foo-container + image: loft.sh/foo diff --git a/pkg/devspace/compose/testdata/build_image_tag/expected.yaml b/pkg/devspace/compose/testdata/build_image_tag/expected.yaml index 474655047e..f1030396ee 100644 --- a/pkg/devspace/compose/testdata/build_image_tag/expected.yaml +++ b/pkg/devspace/compose/testdata/build_image_tag/expected.yaml @@ -1,4 +1,6 @@ -version: v1beta11 +version: v2beta1 +name: docker-compose + images: foo: image: loft.sh/foo:latest @@ -6,10 +8,12 @@ images: dockerfile: foo/Dockerfile deployments: -- name: foo - helm: - componentChart: true - values: - containers: - - name: foo-container - image: loft.sh/foo:latest + foo: + helm: + chart: + name: component-chart + repo: https://charts.devspace.sh + values: + containers: + - name: foo-container + image: loft.sh/foo:latest diff --git a/pkg/devspace/compose/testdata/build_network/expected.yaml b/pkg/devspace/compose/testdata/build_network/expected.yaml index 9c14c4db62..c3310aac7f 100644 --- 
a/pkg/devspace/compose/testdata/build_network/expected.yaml +++ b/pkg/devspace/compose/testdata/build_network/expected.yaml @@ -1,19 +1,19 @@ -version: v1beta11 +version: v2beta1 +name: docker-compose + images: foo: image: foo - context: . dockerfile: Dockerfile - build: - docker: - options: - network: host + network: host deployments: -- name: foo - helm: - componentChart: true - values: - containers: - - name: foo-container - image: foo + foo: + helm: + chart: + name: component-chart + repo: https://charts.devspace.sh + values: + containers: + - name: foo-container + image: foo diff --git a/pkg/devspace/compose/testdata/build_target/expected.yaml b/pkg/devspace/compose/testdata/build_target/expected.yaml index 549572155d..275febb739 100644 --- a/pkg/devspace/compose/testdata/build_target/expected.yaml +++ b/pkg/devspace/compose/testdata/build_target/expected.yaml @@ -1,19 +1,19 @@ -version: v1beta11 +version: v2beta1 +name: docker-compose + images: foo: image: foo - context: . dockerfile: Dockerfile - build: - docker: - options: - target: dev + target: dev deployments: -- name: foo - helm: - componentChart: true - values: - containers: - - name: foo-container - image: foo + foo: + helm: + chart: + name: component-chart + repo: https://charts.devspace.sh + values: + containers: + - name: foo-container + image: foo diff --git a/pkg/devspace/compose/testdata/command/expected.yaml b/pkg/devspace/compose/testdata/command/expected.yaml index d5d7556480..ab960a884c 100644 --- a/pkg/devspace/compose/testdata/command/expected.yaml +++ b/pkg/devspace/compose/testdata/command/expected.yaml @@ -1,11 +1,15 @@ -version: v1beta11 +version: v2beta1 +name: docker-compose + deployments: -- name: db - helm: - componentChart: true - values: - containers: - - name: db-container - image: mysql/mysql-server:8.0.19 - args: - - /code/startDb.sh + db: + helm: + chart: + name: component-chart + repo: https://charts.devspace.sh + values: + containers: + - name: db-container + image: 
mysql/mysql-server:8.0.19 + args: + - /code/startDb.sh diff --git a/pkg/devspace/compose/testdata/container_name/expected.yaml b/pkg/devspace/compose/testdata/container_name/expected.yaml index b7d27b0c33..1a6faac829 100644 --- a/pkg/devspace/compose/testdata/container_name/expected.yaml +++ b/pkg/devspace/compose/testdata/container_name/expected.yaml @@ -1,16 +1,22 @@ -version: v1beta11 +version: v2beta1 +name: docker-compose + deployments: -- name: db - helm: - componentChart: true - values: - containers: - - name: my-db-1 - image: mysql/mysql-server:8.0.19 -- name: backend - helm: - componentChart: true - values: - containers: - - name: my-backend-1 - image: mysql/mysql-server:8.0.19 + db: + helm: + chart: + name: component-chart + repo: https://charts.devspace.sh + values: + containers: + - name: my-db-1 + image: mysql/mysql-server:8.0.19 + backend: + helm: + chart: + name: component-chart + repo: https://charts.devspace.sh + values: + containers: + - name: my-backend-1 + image: mysql/mysql-server:8.0.19 diff --git a/pkg/devspace/compose/testdata/entry_point/expected.yaml b/pkg/devspace/compose/testdata/entry_point/expected.yaml index 7777d5a144..fbd240cb93 100644 --- a/pkg/devspace/compose/testdata/entry_point/expected.yaml +++ b/pkg/devspace/compose/testdata/entry_point/expected.yaml @@ -1,11 +1,15 @@ -version: v1beta11 +version: v2beta1 +name: docker-compose + deployments: -- name: db - helm: - componentChart: true - values: - containers: - - name: db-container - image: mysql/mysql-server:8.0.19 - command: - - /code/startDb.sh + db: + helm: + chart: + name: component-chart + repo: https://charts.devspace.sh + values: + containers: + - name: db-container + image: mysql/mysql-server:8.0.19 + command: + - /code/startDb.sh diff --git a/pkg/devspace/compose/testdata/env_file_multiple/expected.yaml b/pkg/devspace/compose/testdata/env_file_multiple/expected.yaml index bc9dda3411..3689fc994d 100644 --- a/pkg/devspace/compose/testdata/env_file_multiple/expected.yaml 
+++ b/pkg/devspace/compose/testdata/env_file_multiple/expected.yaml @@ -1,16 +1,20 @@ -version: v1beta11 +version: v2beta1 +name: docker-compose + deployments: -- name: db - helm: - componentChart: true - values: - containers: - - name: db-container - image: mysql/mysql-server:8.0.19 - env: - - name: COMMON - value: multiple - - name: ENV_FILE - value: multiple - - name: WEB - value: multiple + db: + helm: + chart: + name: component-chart + repo: https://charts.devspace.sh + values: + containers: + - name: db-container + image: mysql/mysql-server:8.0.19 + env: + - name: COMMON + value: multiple + - name: ENV_FILE + value: multiple + - name: WEB + value: multiple diff --git a/pkg/devspace/compose/testdata/env_file_single/expected.yaml b/pkg/devspace/compose/testdata/env_file_single/expected.yaml index 1170acc9a5..1d3364ee29 100644 --- a/pkg/devspace/compose/testdata/env_file_single/expected.yaml +++ b/pkg/devspace/compose/testdata/env_file_single/expected.yaml @@ -1,12 +1,16 @@ -version: v1beta11 +version: v2beta1 +name: docker-compose + deployments: -- name: db - helm: - componentChart: true - values: - containers: - - name: db-container - image: mysql/mysql-server:8.0.19 - env: - - name: ENV_FILE - value: single + db: + helm: + chart: + name: component-chart + repo: https://charts.devspace.sh + values: + containers: + - name: db-container + image: mysql/mysql-server:8.0.19 + env: + - name: ENV_FILE + value: single diff --git a/pkg/devspace/compose/testdata/environment/expected.yaml b/pkg/devspace/compose/testdata/environment/expected.yaml index 6b52ac5432..ae5af6fcab 100644 --- a/pkg/devspace/compose/testdata/environment/expected.yaml +++ b/pkg/devspace/compose/testdata/environment/expected.yaml @@ -1,16 +1,20 @@ -version: v1beta11 +version: v2beta1 +name: docker-compose + deployments: -- name: db - helm: - componentChart: true - values: - containers: - - name: db-container - env: - - name: MYSQL_DATABASE - value: example - - name: MYSQL_ROOT_HOST - value: '%' - - 
name: MYSQL_ROOT_PASSWORD - value: /run/secrets/db-password - image: mysql/mysql-server:8.0.19 + db: + helm: + chart: + name: component-chart + repo: https://charts.devspace.sh + values: + containers: + - name: db-container + env: + - name: MYSQL_DATABASE + value: example + - name: MYSQL_ROOT_HOST + value: '%' + - name: MYSQL_ROOT_PASSWORD + value: /run/secrets/db-password + image: mysql/mysql-server:8.0.19 diff --git a/pkg/devspace/compose/testdata/expose/expected.yaml b/pkg/devspace/compose/testdata/expose/expected.yaml index d09c706990..2eec75f24c 100644 --- a/pkg/devspace/compose/testdata/expose/expected.yaml +++ b/pkg/devspace/compose/testdata/expose/expected.yaml @@ -1,20 +1,24 @@ -version: v1beta11 +version: v2beta1 +name: docker-compose + deployments: -- name: db - helm: - componentChart: true - values: - containers: - - name: db-container - image: mysql/mysql-server:8.0.19 - service: - ports: - - port: 3306 - - port: 33060 + db: + helm: + chart: + name: component-chart + repo: https://charts.devspace.sh + values: + containers: + - name: db-container + image: mysql/mysql-server:8.0.19 + service: + ports: + - port: 3306 + - port: 33060 dev: - ports: - - labelSelector: + db: + labelSelector: app.kubernetes.io/component: db - forward: - - port: 3306 - - port: 33060 + ports: + - port: 3306 + - port: 33060 diff --git a/pkg/devspace/compose/testdata/extra_hosts_multiple/expected.yaml b/pkg/devspace/compose/testdata/extra_hosts_multiple/expected.yaml index 2227eb9c3a..ec9ebfc866 100644 --- a/pkg/devspace/compose/testdata/extra_hosts_multiple/expected.yaml +++ b/pkg/devspace/compose/testdata/extra_hosts_multiple/expected.yaml @@ -1,15 +1,19 @@ -version: v1beta11 + +version: v2beta1 +name: docker-compose + deployments: -- name: db - helm: - componentChart: true - values: - containers: - - name: db-container - image: mysql/mysql-server:8.0.19 - hostAliases: - - ip: "162.242.195.82" - hostnames: - - "some1host" - - "some2host" - \ No newline at end of file + db: + 
helm: + chart: + name: component-chart + repo: https://charts.devspace.sh + values: + containers: + - name: db-container + image: mysql/mysql-server:8.0.19 + hostAliases: + - ip: "162.242.195.82" + hostnames: + - "some1host" + - "some2host" diff --git a/pkg/devspace/compose/testdata/extra_hosts_single/expected.yaml b/pkg/devspace/compose/testdata/extra_hosts_single/expected.yaml index 99456fdb5f..aab18d7e2a 100644 --- a/pkg/devspace/compose/testdata/extra_hosts_single/expected.yaml +++ b/pkg/devspace/compose/testdata/extra_hosts_single/expected.yaml @@ -1,13 +1,18 @@ -version: v1beta11 + +version: v2beta1 +name: docker-compose + deployments: -- name: db - helm: - componentChart: true - values: - containers: - - name: db-container - image: mysql/mysql-server:8.0.19 - hostAliases: - - ip: "50.31.209.229" - hostnames: - - "otherhost" + db: + helm: + chart: + name: component-chart + repo: https://charts.devspace.sh + values: + containers: + - name: db-container + image: mysql/mysql-server:8.0.19 + hostAliases: + - ip: "50.31.209.229" + hostnames: + - "otherhost" diff --git a/pkg/devspace/compose/testdata/healthcheck/expected.yaml b/pkg/devspace/compose/testdata/healthcheck/expected.yaml index faa22de565..19a0246bea 100644 --- a/pkg/devspace/compose/testdata/healthcheck/expected.yaml +++ b/pkg/devspace/compose/testdata/healthcheck/expected.yaml @@ -1,50 +1,54 @@ -version: v1beta11 + +version: v2beta1 +name: docker-compose + deployments: -- name: cmd - helm: - componentChart: true - values: - containers: - - name: cmd-container - image: mysql/mysql-server:8.0.19 - livenessProbe: - exec: - command: - - mysqladmin - - ping - - -h - - 127.0.0.1 - - --silent - failureThreshold: 5 - initialDelaySeconds: 3 - periodSeconds: 3 -- name: cmd-shell - helm: - componentChart: true - values: - containers: - - name: cmd-shell-container - image: mysql/mysql-server:8.0.19 - livenessProbe: - exec: - command: - - sh - - -c - - mysqladmin ping -h 127.0.0.1 --silent - failureThreshold: 5 - 
initialDelaySeconds: 3 - periodSeconds: 3 -- name: none - helm: - componentChart: true - values: - containers: - - name: none-container - image: mysql/mysql-server:8.0.19 -# - name: disable -# helm: -# componentChart: true -# values: -# containers: -# - image: mysql/mysql-server:8.0.19 + cmd: + helm: + chart: + name: component-chart + repo: https://charts.devspace.sh + values: + containers: + - name: cmd-container + image: mysql/mysql-server:8.0.19 + livenessProbe: + exec: + command: + - mysqladmin + - ping + - -h + - 127.0.0.1 + - --silent + failureThreshold: 5 + initialDelaySeconds: 3 + periodSeconds: 3 + cmd-shell: + helm: + chart: + name: component-chart + repo: https://charts.devspace.sh + values: + containers: + - name: cmd-shell-container + image: mysql/mysql-server:8.0.19 + livenessProbe: + exec: + command: + - sh + - -c + - mysqladmin ping -h 127.0.0.1 --silent + failureThreshold: 5 + initialDelaySeconds: 3 + periodSeconds: 3 + none: + helm: + chart: + name: component-chart + repo: https://charts.devspace.sh + values: + containers: + - name: none-container + image: mysql/mysql-server:8.0.19 + diff --git a/pkg/devspace/compose/testdata/ports-long/expected.yaml b/pkg/devspace/compose/testdata/ports-long/expected.yaml index debfa4345e..8fd1c9e012 100644 --- a/pkg/devspace/compose/testdata/ports-long/expected.yaml +++ b/pkg/devspace/compose/testdata/ports-long/expected.yaml @@ -1,25 +1,28 @@ -version: v1beta11 +version: v2beta1 +name: docker-compose + deployments: -- name: db - helm: - componentChart: true - values: - containers: - - name: db-container - image: mysql/mysql-server:8.0.19 - service: - ports: - - containerPort: 80 - port: 8080 - protocol: TCP - - containerPort: 9090 - port: 9090 - protocol: UDP + db: + helm: + chart: + name: component-chart + repo: https://charts.devspace.sh + values: + containers: + - name: db-container + image: mysql/mysql-server:8.0.19 + service: + ports: + - containerPort: 80 + port: 8080 + protocol: TCP + - containerPort: 
9090 + port: 9090 + protocol: UDP dev: - ports: - - labelSelector: + db: + labelSelector: app.kubernetes.io/component: db - forward: - - port: 8080 - remotePort: 80 + ports: + - port: 8080:80 - port: 9090 diff --git a/pkg/devspace/compose/testdata/ports-short/expected.yaml b/pkg/devspace/compose/testdata/ports-short/expected.yaml index d18d40c382..0f0d6975ea 100644 --- a/pkg/devspace/compose/testdata/ports-short/expected.yaml +++ b/pkg/devspace/compose/testdata/ports-short/expected.yaml @@ -1,70 +1,64 @@ -version: v1beta11 +version: v2beta1 +name: docker-compose + deployments: -- name: db - helm: - componentChart: true - values: - containers: - - name: db-container - image: mysql/mysql-server:8.0.19 - service: - ports: - - containerPort: 80 - port: 8080 - protocol: TCP - - containerPort: 81 - port: 8081 - protocol: TCP - - containerPort: 82 - port: 8082 - protocol: UDP - - containerPort: 83 - port: 8083 - protocol: TCP - - containerPort: 84 - port: 8084 - protocol: TCP - - containerPort: 85 - port: 8085 - protocol: UDP - - containerPort: 6003 - port: 5003 - protocol: TCP - - containerPort: 6004 - port: 5004 - protocol: TCP - - containerPort: 1240 - port: 5005 - protocol: TCP - - containerPort: 1240 - port: 5006 - protocol: TCP + db: + helm: + chart: + name: component-chart + repo: https://charts.devspace.sh + values: + containers: + - name: db-container + image: mysql/mysql-server:8.0.19 + service: + ports: + - containerPort: 80 + port: 8080 + protocol: TCP + - containerPort: 81 + port: 8081 + protocol: TCP + - containerPort: 82 + port: 8082 + protocol: UDP + - containerPort: 83 + port: 8083 + protocol: TCP + - containerPort: 84 + port: 8084 + protocol: TCP + - containerPort: 85 + port: 8085 + protocol: UDP + - containerPort: 6003 + port: 5003 + protocol: TCP + - containerPort: 6004 + port: 5004 + protocol: TCP + - containerPort: 1240 + port: 5005 + protocol: TCP + - containerPort: 1240 + port: 5006 + protocol: TCP dev: - ports: - - labelSelector: + db: + 
labelSelector: app.kubernetes.io/component: db - forward: - - port: 8080 - remotePort: 80 - - port: 8081 - remotePort: 81 - - port: 8082 - remotePort: 82 - - port: 8083 - remotePort: 83 - bindAddress: 127.0.0.1 - - port: 8084 - remotePort: 84 - bindAddress: 127.0.0.1 - - port: 8085 - remotePort: 85 - bindAddress: 127.0.0.1 - - port: 5003 - remotePort: 6003 - - port: 5004 - remotePort: 6004 - - port: 5005 - remotePort: 1240 - - port: 5006 - remotePort: 1240 + ports: + - port: 8080:80 + - port: 8081:81 + - port: 8082:82 + - port: 8083:83 + bindAddress: 127.0.0.1 + - port: 8084:84 + bindAddress: 127.0.0.1 + - port: 8085:85 + bindAddress: 127.0.0.1 + - port: 5003:6003 + - port: 5004:6004 + - port: 5005:1240 + - port: 5006:1240 diff --git a/pkg/devspace/compose/testdata/restart-always/expected.yaml b/pkg/devspace/compose/testdata/restart-always/expected.yaml index 591014355e..e5d3d3463f 100644 --- a/pkg/devspace/compose/testdata/restart-always/expected.yaml +++ b/pkg/devspace/compose/testdata/restart-always/expected.yaml @@ -1,10 +1,14 @@ -version: v1beta11 +version: v2beta1 +name: docker-compose + deployments: -- name: db - helm: - componentChart: true - values: - containers: - - name: db-container - image: mysql/mysql-server:8.0.19 - restartPolicy: Always + db: + helm: + chart: + name: component-chart + repo: https://charts.devspace.sh + values: + containers: + - name: db-container + image: mysql/mysql-server:8.0.19 + restartPolicy: Always diff --git a/pkg/devspace/compose/testdata/restart-no/expected.yaml b/pkg/devspace/compose/testdata/restart-no/expected.yaml index 4c33671df5..580b06ac38 100644 --- a/pkg/devspace/compose/testdata/restart-no/expected.yaml +++ b/pkg/devspace/compose/testdata/restart-no/expected.yaml @@ -1,10 +1,14 @@ -version: v1beta11 +version: v2beta1 +name: docker-compose + deployments: -- name: db - helm: - componentChart: true - values: - containers: - - name: db-container - image: mysql/mysql-server:8.0.19 - restartPolicy: Never + db: + helm: + 
chart: + name: component-chart + repo: https://charts.devspace.sh + values: + containers: + - name: db-container + image: mysql/mysql-server:8.0.19 + restartPolicy: Never diff --git a/pkg/devspace/compose/testdata/restart-on-failure/expected.yaml b/pkg/devspace/compose/testdata/restart-on-failure/expected.yaml index 473ce6869e..a06695d14c 100644 --- a/pkg/devspace/compose/testdata/restart-on-failure/expected.yaml +++ b/pkg/devspace/compose/testdata/restart-on-failure/expected.yaml @@ -1,10 +1,14 @@ -version: v1beta11 +version: v2beta1 +name: docker-compose + deployments: -- name: db - helm: - componentChart: true - values: - containers: - - name: db-container - image: mysql/mysql-server:8.0.19 - restartPolicy: OnFailure + db: + helm: + chart: + name: component-chart + repo: https://charts.devspace.sh + values: + containers: + - name: db-container + image: mysql/mysql-server:8.0.19 + restartPolicy: OnFailure diff --git a/pkg/devspace/compose/testdata/restart-unless-stopped/expected.yaml b/pkg/devspace/compose/testdata/restart-unless-stopped/expected.yaml index 4c33671df5..580b06ac38 100644 --- a/pkg/devspace/compose/testdata/restart-unless-stopped/expected.yaml +++ b/pkg/devspace/compose/testdata/restart-unless-stopped/expected.yaml @@ -1,10 +1,14 @@ -version: v1beta11 +version: v2beta1 +name: docker-compose + deployments: -- name: db - helm: - componentChart: true - values: - containers: - - name: db-container - image: mysql/mysql-server:8.0.19 - restartPolicy: Never + db: + helm: + chart: + name: component-chart + repo: https://charts.devspace.sh + values: + containers: + - name: db-container + image: mysql/mysql-server:8.0.19 + restartPolicy: Never diff --git a/pkg/devspace/compose/testdata/depends_on/docker-compose.yaml b/pkg/devspace/compose/testdata/x_depends_on/docker-compose.yaml similarity index 100% rename from pkg/devspace/compose/testdata/depends_on/docker-compose.yaml rename to pkg/devspace/compose/testdata/x_depends_on/docker-compose.yaml diff --git 
a/pkg/devspace/compose/testdata/depends_on/expected.yaml b/pkg/devspace/compose/testdata/x_depends_on/expected.yaml similarity index 100% rename from pkg/devspace/compose/testdata/depends_on/expected.yaml rename to pkg/devspace/compose/testdata/x_depends_on/expected.yaml diff --git a/pkg/devspace/compose/testdata/secret-long/docker-compose.yaml b/pkg/devspace/compose/testdata/x_secret-long/docker-compose.yaml similarity index 100% rename from pkg/devspace/compose/testdata/secret-long/docker-compose.yaml rename to pkg/devspace/compose/testdata/x_secret-long/docker-compose.yaml diff --git a/pkg/devspace/compose/testdata/secret-long/expected.yaml b/pkg/devspace/compose/testdata/x_secret-long/expected.yaml similarity index 100% rename from pkg/devspace/compose/testdata/secret-long/expected.yaml rename to pkg/devspace/compose/testdata/x_secret-long/expected.yaml diff --git a/pkg/devspace/compose/testdata/secret-short/docker-compose.yaml b/pkg/devspace/compose/testdata/x_secret-short/docker-compose.yaml similarity index 100% rename from pkg/devspace/compose/testdata/secret-short/docker-compose.yaml rename to pkg/devspace/compose/testdata/x_secret-short/docker-compose.yaml diff --git a/pkg/devspace/compose/testdata/secret-short/expected.yaml b/pkg/devspace/compose/testdata/x_secret-short/expected.yaml similarity index 100% rename from pkg/devspace/compose/testdata/secret-short/expected.yaml rename to pkg/devspace/compose/testdata/x_secret-short/expected.yaml diff --git a/pkg/devspace/compose/testdata/volumes-depends_on/docker-compose.yaml b/pkg/devspace/compose/testdata/x_volumes-depends_on/docker-compose.yaml similarity index 100% rename from pkg/devspace/compose/testdata/volumes-depends_on/docker-compose.yaml rename to pkg/devspace/compose/testdata/x_volumes-depends_on/docker-compose.yaml diff --git a/pkg/devspace/compose/testdata/volumes-depends_on/expected.yaml b/pkg/devspace/compose/testdata/x_volumes-depends_on/expected.yaml similarity index 100% rename from 
pkg/devspace/compose/testdata/volumes-depends_on/expected.yaml rename to pkg/devspace/compose/testdata/x_volumes-depends_on/expected.yaml diff --git a/pkg/devspace/compose/testdata/volumes-long/docker-compose.yaml b/pkg/devspace/compose/testdata/x_volumes-long/docker-compose.yaml similarity index 100% rename from pkg/devspace/compose/testdata/volumes-long/docker-compose.yaml rename to pkg/devspace/compose/testdata/x_volumes-long/docker-compose.yaml diff --git a/pkg/devspace/compose/testdata/volumes-long/expected.yaml b/pkg/devspace/compose/testdata/x_volumes-long/expected.yaml similarity index 100% rename from pkg/devspace/compose/testdata/volumes-long/expected.yaml rename to pkg/devspace/compose/testdata/x_volumes-long/expected.yaml diff --git a/pkg/devspace/compose/testdata/volumes-short/docker-compose.yaml b/pkg/devspace/compose/testdata/x_volumes-short/docker-compose.yaml similarity index 100% rename from pkg/devspace/compose/testdata/volumes-short/docker-compose.yaml rename to pkg/devspace/compose/testdata/x_volumes-short/docker-compose.yaml diff --git a/pkg/devspace/compose/testdata/volumes-short/expected.yaml b/pkg/devspace/compose/testdata/x_volumes-short/expected.yaml similarity index 100% rename from pkg/devspace/compose/testdata/volumes-short/expected.yaml rename to pkg/devspace/compose/testdata/x_volumes-short/expected.yaml From 2e8bf1ad32bca6e44d60da9cba54053940d620da Mon Sep 17 00:00:00 2001 From: Russell Centanni Date: Fri, 11 Mar 2022 10:44:01 -0500 Subject: [PATCH 2/9] fix: allow short names like db --- pkg/util/encoding/encoding.go | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/pkg/util/encoding/encoding.go b/pkg/util/encoding/encoding.go index 4f8d3b8633..1c0565e257 100644 --- a/pkg/util/encoding/encoding.go +++ b/pkg/util/encoding/encoding.go @@ -34,9 +34,8 @@ func Convert(ID string) string { return SafeConcatName(ID) } -// UnsafeNameRegEx checks for a valid name and needs to be url compatible -var UnsafeNameRegEx = 
regexp.MustCompile(`^[a-z0-9][a-z0-9\-]+[a-z0-9]$`) -var UnsafeUpperNameRegEx = regexp.MustCompile(`^[A-Za-z0-9][A-Za-z0-9\-_]+[A-Za-z0-9]$`) +var UnsafeNameRegEx = regexp.MustCompile(`^[a-z0-9]+(?:-?[a-z0-9]+)*$`) +var UnsafeUpperNameRegEx = regexp.MustCompile(`^[A-Za-z0-9]+(?:[\-_]?[A-Za-z0-9])*$`) func IsUnsafeUpperName(unsafeName string) bool { return !UnsafeUpperNameRegEx.MatchString(unsafeName) From 7ecd0221758f2e722134064315e61fb2c54f480f Mon Sep 17 00:00:00 2001 From: Russell Centanni Date: Fri, 11 Mar 2022 12:35:36 -0500 Subject: [PATCH 3/9] refactor: use pipelines for docker-compose secrets --- pkg/devspace/compose/loader.go | 316 +++++++++--------- .../docker-compose.yaml | 0 .../testdata/secret-long/expected.yaml | 33 ++ .../docker-compose.yaml | 0 .../testdata/secret-short/expected.yaml | 33 ++ .../testdata/x_secret-long/expected.yaml | 26 -- .../testdata/x_secret-short/expected.yaml | 26 -- 7 files changed, 227 insertions(+), 207 deletions(-) rename pkg/devspace/compose/testdata/{x_secret-long => secret-long}/docker-compose.yaml (100%) create mode 100644 pkg/devspace/compose/testdata/secret-long/expected.yaml rename pkg/devspace/compose/testdata/{x_secret-short => secret-short}/docker-compose.yaml (100%) create mode 100644 pkg/devspace/compose/testdata/secret-short/expected.yaml delete mode 100644 pkg/devspace/compose/testdata/x_secret-long/expected.yaml delete mode 100644 pkg/devspace/compose/testdata/x_secret-short/expected.yaml diff --git a/pkg/devspace/compose/loader.go b/pkg/devspace/compose/loader.go index 7bb6e6fec7..e99cb2442a 100644 --- a/pkg/devspace/compose/loader.go +++ b/pkg/devspace/compose/loader.go @@ -85,6 +85,7 @@ func (cl *configLoader) Load(log log.Logger) (*latest.Config, error) { var images map[string]*latest.Image var deployments map[string]*latest.DeploymentConfig var dev map[string]*latest.DevPod + var pipelines map[string]*latest.Pipeline baseDir := filepath.Dir(cl.composePath) if len(dockerCompose.Networks) > 0 { @@ -154,18 
+155,24 @@ func (cl *configLoader) Load(log log.Logger) (*latest.Config, error) { return nil, err } - // for secretName, secret := range dockerCompose.Secrets { - // createHook, err := createSecretHook(secretName, cwd, secret) - // if err != nil { - // return nil, err - // } - // hooks = append(hooks, createHook) - // hooks = append(hooks, deleteSecretHook(secretName)) - // } + for secretName, secret := range dockerCompose.Secrets { + if pipelines == nil { + pipelines = map[string]*latest.Pipeline{} + } + + devSecretStep, err := createSecretPipeline(secretName, cwd, secret) + if err != nil { + return nil, err + } + + pipelines["dev"] = devSecretStep + pipelines["purge"] = deleteSecretPipeline(secretName) + } config.Images = images config.Deployments = deployments config.Dev = dev + config.Pipelines = pipelines // config.Hooks = hooks return config, nil @@ -322,38 +329,38 @@ func imageConfig(cwd string, service composetypes.ServiceConfig) (*latest.Image, return image, nil } -// func createSecretHook(name string, cwd string, secret composetypes.SecretConfig) (*latest.HookConfig, error) { -// file, err := filepath.Rel(cwd, filepath.Join(cwd, secret.File)) -// if err != nil { -// return nil, err -// } +func createSecretPipeline(name string, cwd string, secret composetypes.SecretConfig) (*latest.Pipeline, error) { + file, err := filepath.Rel(cwd, filepath.Join(cwd, secret.File)) + if err != nil { + return nil, err + } -// return &latest.HookConfig{ -// Events: []string{"before:deploy"}, -// Command: fmt.Sprintf("kubectl create secret generic %s --namespace=${devspace.namespace} --dry-run=client --from-file=%s=%s -o yaml | kubectl apply -f -", name, name, filepath.ToSlash(file)), -// }, nil -// } + return &latest.Pipeline{ + Run: fmt.Sprintf(`kubectl create secret generic %s --namespace=${devspace.namespace} --dry-run=client --from-file=%s=%s -o yaml | kubectl apply -f - +run_default_pipeline dev`, name, name, filepath.ToSlash(file)), + }, nil +} -// func 
deleteSecretHook(name string) *latest.HookConfig { -// return &latest.HookConfig{ -// Events: []string{"after:purge"}, -// Command: fmt.Sprintf("kubectl delete secret %s --namespace=${devspace.namespace} --ignore-not-found", name), -// } -// } +func deleteSecretPipeline(name string) *latest.Pipeline { + return &latest.Pipeline{ + Run: fmt.Sprintf(`run_default_pipeline purge +kubectl delete secret %s --namespace=${devspace.namespace} --ignore-not-found`, name), + } +} func (cl *configLoader) deploymentConfig(service composetypes.ServiceConfig, composeVolumes map[string]composetypes.VolumeConfig, log log.Logger) (*latest.DeploymentConfig, error) { values := map[string]interface{}{} - // volumes, volumeMounts, bindVolumeMounts := volumesConfig(service, composeVolumes, log) - // if len(volumes) > 0 { - // values["volumes"] = volumes - // } + volumes, volumeMounts, _ := volumesConfig(service, composeVolumes, log) + if len(volumes) > 0 { + values["volumes"] = volumes + } - // if hasLocalSync(service) { - // values["initContainers"] = []interface{}{initContainerConfig(service, bindVolumeMounts)} - // } + // if hasLocalSync(service) { + // values["initContainers"] = []interface{}{initContainerConfig(service, bindVolumeMounts)} + // } - container, err := containerConfig(service, []interface{}{}) + container, err := containerConfig(service, volumeMounts) if err != nil { return nil, err } @@ -445,73 +452,72 @@ func (cl *configLoader) deploymentConfig(service composetypes.ServiceConfig, com }, nil } -// func volumesConfig( -// service composetypes.ServiceConfig, -// composeVolumes map[string]composetypes.VolumeConfig, -// log log.Logger, -// ) (volumes []interface{}, volumeMounts []interface{}, bindVolumeMounts []interface{}) { -// for _, secret := range service.Secrets { -// volume := createSecretVolume(secret) -// volumes = append(volumes, volume) - -// volumeMount := createSecretVolumeMount(secret) -// volumeMounts = append(volumeMounts, volumeMount) -// } - -// var 
volumeVolumes []composetypes.ServiceVolumeConfig -// var bindVolumes []composetypes.ServiceVolumeConfig -// var tmpfsVolumes []composetypes.ServiceVolumeConfig -// for _, serviceVolume := range service.Volumes { -// switch serviceVolume.Type { -// case composetypes.VolumeTypeBind: -// bindVolumes = append(bindVolumes, serviceVolume) -// case composetypes.VolumeTypeTmpfs: -// tmpfsVolumes = append(tmpfsVolumes, serviceVolume) -// case composetypes.VolumeTypeVolume: -// volumeVolumes = append(volumeVolumes, serviceVolume) -// default: -// log.Warnf("%s volumes are not supported", serviceVolume.Type) -// } -// } - -// volumeMap := map[string]interface{}{} -// for idx, volumeVolume := range volumeVolumes { -// volumeName := resolveServiceVolumeName(service, volumeVolume, idx+1) -// _, ok := volumeMap[volumeName] -// if !ok { -// volume := createVolume(volumeName, DefaultVolumeSize) -// volumes = append(volumes, volume) -// volumeMap[volumeName] = volume -// } +func volumesConfig( + service composetypes.ServiceConfig, + composeVolumes map[string]composetypes.VolumeConfig, + log log.Logger, +) (volumes []interface{}, volumeMounts []interface{}, bindVolumeMounts []interface{}) { + for _, secret := range service.Secrets { + volume := createSecretVolume(secret) + volumes = append(volumes, volume) + + volumeMount := createSecretVolumeMount(secret) + volumeMounts = append(volumeMounts, volumeMount) + } + + var volumeVolumes []composetypes.ServiceVolumeConfig + var bindVolumes []composetypes.ServiceVolumeConfig + var tmpfsVolumes []composetypes.ServiceVolumeConfig + for _, serviceVolume := range service.Volumes { + switch serviceVolume.Type { + case composetypes.VolumeTypeBind: + bindVolumes = append(bindVolumes, serviceVolume) + case composetypes.VolumeTypeTmpfs: + tmpfsVolumes = append(tmpfsVolumes, serviceVolume) + case composetypes.VolumeTypeVolume: + volumeVolumes = append(volumeVolumes, serviceVolume) + default: + log.Warnf("%s volumes are not supported", 
serviceVolume.Type) + } + } -// volumeMount := createServiceVolumeMount(volumeName, volumeVolume) -// volumeMounts = append(volumeMounts, volumeMount) -// } + volumeMap := map[string]interface{}{} + for idx, volumeVolume := range volumeVolumes { + volumeName := resolveServiceVolumeName(service, volumeVolume, idx+1) + _, ok := volumeMap[volumeName] + if !ok { + volume := createVolume(volumeName, DefaultVolumeSize) + volumes = append(volumes, volume) + volumeMap[volumeName] = volume + } -// for _, tmpfsVolume := range tmpfsVolumes { -// volumeName := resolveServiceVolumeName(service, tmpfsVolume, len(volumes)) -// volume := createEmptyDirVolume(volumeName, tmpfsVolume) -// volumes = append(volumes, volume) + volumeMount := createServiceVolumeMount(volumeName, volumeVolume) + volumeMounts = append(volumeMounts, volumeMount) + } -// volumeMount := createServiceVolumeMount(volumeName, tmpfsVolume) -// volumeMounts = append(volumeMounts, volumeMount) -// } + for _, tmpfsVolume := range tmpfsVolumes { + volumeName := resolveServiceVolumeName(service, tmpfsVolume, len(volumes)) + volume := createEmptyDirVolume(volumeName, tmpfsVolume) + volumes = append(volumes, volume) -// for idx, bindVolume := range bindVolumes { -// volumeName := fmt.Sprintf("volume-%d", idx+1) -// volume := createEmptyDirVolume(volumeName, bindVolume) -// volumes = append(volumes, volume) + volumeMount := createServiceVolumeMount(volumeName, tmpfsVolume) + volumeMounts = append(volumeMounts, volumeMount) + } -// volumeMount := createServiceVolumeMount(volumeName, bindVolume) -// volumeMounts = append(volumeMounts, volumeMount) + for idx, bindVolume := range bindVolumes { + volumeName := fmt.Sprintf("volume-%d", idx+1) + volume := createEmptyDirVolume(volumeName, bindVolume) + volumes = append(volumes, volume) -// bindVolumeMount := createInitVolumeMount(volumeName, bindVolume) -// bindVolumeMounts = append(bindVolumeMounts, bindVolumeMount) -// } + volumeMount := createServiceVolumeMount(volumeName, 
bindVolume) + volumeMounts = append(volumeMounts, volumeMount) -// return volumes, volumeMounts, bindVolumeMounts + bindVolumeMount := createInitVolumeMount(volumeName, bindVolume) + bindVolumeMounts = append(bindVolumeMounts, bindVolumeMount) + } -// } + return volumes, volumeMounts, bindVolumeMounts +} func containerConfig(service composetypes.ServiceConfig, volumeMounts []interface{}) (map[string]interface{}, error) { container := map[string]interface{}{ @@ -620,68 +626,68 @@ func containerLivenessProbe(health *composetypes.HealthCheckConfig) (map[string] return livenessProbe, nil } -// func createEmptyDirVolume(volumeName string, volume composetypes.ServiceVolumeConfig) interface{} { -// // create an emptyDir volume -// emptyDir := map[string]interface{}{} -// if volume.Tmpfs != nil { -// emptyDir["sizeLimit"] = fmt.Sprintf("%d", volume.Tmpfs.Size) -// } -// return map[string]interface{}{ -// "name": volumeName, -// "emptyDir": emptyDir, -// } -// } +func createEmptyDirVolume(volumeName string, volume composetypes.ServiceVolumeConfig) interface{} { + // create an emptyDir volume + emptyDir := map[string]interface{}{} + if volume.Tmpfs != nil { + emptyDir["sizeLimit"] = fmt.Sprintf("%d", volume.Tmpfs.Size) + } + return map[string]interface{}{ + "name": volumeName, + "emptyDir": emptyDir, + } +} -// func createSecretVolume(secret composetypes.ServiceSecretConfig) interface{} { -// return map[string]interface{}{ -// "name": secret.Source, -// "secret": map[string]interface{}{ -// "secretName": secret.Source, -// }, -// } -// } +func createSecretVolume(secret composetypes.ServiceSecretConfig) interface{} { + return map[string]interface{}{ + "name": secret.Source, + "secret": map[string]interface{}{ + "secretName": secret.Source, + }, + } +} -// func createSecretVolumeMount(secret composetypes.ServiceSecretConfig) interface{} { -// target := secret.Source -// if secret.Target != "" { -// target = secret.Target -// } -// return map[string]interface{}{ -// 
"containerPath": fmt.Sprintf("/run/secrets/%s", target), -// "volume": map[string]interface{}{ -// "name": secret.Source, -// "subPath": target, -// "readOnly": true, -// }, -// } -// } +func createSecretVolumeMount(secret composetypes.ServiceSecretConfig) interface{} { + target := secret.Source + if secret.Target != "" { + target = secret.Target + } + return map[string]interface{}{ + "containerPath": fmt.Sprintf("/run/secrets/%s", target), + "volume": map[string]interface{}{ + "name": secret.Source, + "subPath": target, + "readOnly": true, + }, + } +} -// func createServiceVolumeMount(volumeName string, volume composetypes.ServiceVolumeConfig) interface{} { -// return map[string]interface{}{ -// "containerPath": volume.Target, -// "volume": map[string]interface{}{ -// "name": volumeName, -// "readOnly": volume.ReadOnly, -// }, -// } -// } +func createServiceVolumeMount(volumeName string, volume composetypes.ServiceVolumeConfig) interface{} { + return map[string]interface{}{ + "containerPath": volume.Target, + "volume": map[string]interface{}{ + "name": volumeName, + "readOnly": volume.ReadOnly, + }, + } +} -// func createInitVolumeMount(volumeName string, volume composetypes.ServiceVolumeConfig) interface{} { -// return map[string]interface{}{ -// "containerPath": volume.Target, -// "volume": map[string]interface{}{ -// "name": volumeName, -// "readOnly": false, -// }, -// } -// } +func createInitVolumeMount(volumeName string, volume composetypes.ServiceVolumeConfig) interface{} { + return map[string]interface{}{ + "containerPath": volume.Target, + "volume": map[string]interface{}{ + "name": volumeName, + "readOnly": false, + }, + } +} -// func createVolume(name string, size string) interface{} { -// return map[string]interface{}{ -// "name": name, -// "size": size, -// } -// } +func createVolume(name string, size string) interface{} { + return map[string]interface{}{ + "name": name, + "size": size, + } +} func formatName(name string) string { return 
regexp.MustCompile(`[\._]`).ReplaceAllString(name, "-") @@ -724,13 +730,13 @@ func resolveImage(service composetypes.ServiceConfig) string { // return localSubPath // } -// func resolveServiceVolumeName(service composetypes.ServiceConfig, volume composetypes.ServiceVolumeConfig, idx int) string { -// volumeName := volume.Source -// if volumeName == "" { -// volumeName = fmt.Sprintf("%s-%d", formatName(service.Name), idx) -// } -// return volumeName -// } +func resolveServiceVolumeName(service composetypes.ServiceConfig, volume composetypes.ServiceVolumeConfig, idx int) string { + volumeName := volume.Source + if volumeName == "" { + volumeName = fmt.Sprintf("%s-%d", formatName(service.Name), idx) + } + return volumeName +} // func createWaitHook(service composetypes.ServiceConfig) *latest.HookConfig { // serviceName := formatName(service.Name) diff --git a/pkg/devspace/compose/testdata/x_secret-long/docker-compose.yaml b/pkg/devspace/compose/testdata/secret-long/docker-compose.yaml similarity index 100% rename from pkg/devspace/compose/testdata/x_secret-long/docker-compose.yaml rename to pkg/devspace/compose/testdata/secret-long/docker-compose.yaml diff --git a/pkg/devspace/compose/testdata/secret-long/expected.yaml b/pkg/devspace/compose/testdata/secret-long/expected.yaml new file mode 100644 index 0000000000..4e90f87b6f --- /dev/null +++ b/pkg/devspace/compose/testdata/secret-long/expected.yaml @@ -0,0 +1,33 @@ +version: v2beta1 +name: docker-compose + +deployments: + db: + helm: + chart: + name: component-chart + repo: https://charts.devspace.sh + values: + containers: + - name: db-container + image: mysql/mysql-server:8.0.19 + volumeMounts: + - containerPath: /run/secrets/db_secret + volume: + name: db-password + readOnly: true + subPath: db_secret + volumes: + - name: db-password + secret: + secretName: db-password + +pipelines: + dev: + run: |- + kubectl create secret generic db-password --namespace=${devspace.namespace} --dry-run=client 
--from-file=db-password=db/password.txt -o yaml | kubectl apply -f - + run_default_pipeline dev + purge: + run: |- + run_default_pipeline purge + kubectl delete secret db-password --namespace=${devspace.namespace} --ignore-not-found diff --git a/pkg/devspace/compose/testdata/x_secret-short/docker-compose.yaml b/pkg/devspace/compose/testdata/secret-short/docker-compose.yaml similarity index 100% rename from pkg/devspace/compose/testdata/x_secret-short/docker-compose.yaml rename to pkg/devspace/compose/testdata/secret-short/docker-compose.yaml diff --git a/pkg/devspace/compose/testdata/secret-short/expected.yaml b/pkg/devspace/compose/testdata/secret-short/expected.yaml new file mode 100644 index 0000000000..b19fc00fbc --- /dev/null +++ b/pkg/devspace/compose/testdata/secret-short/expected.yaml @@ -0,0 +1,33 @@ +version: v2beta1 +name: docker-compose + +deployments: + db: + helm: + chart: + name: component-chart + repo: https://charts.devspace.sh + values: + containers: + - name: db-container + image: mysql/mysql-server:8.0.19 + volumeMounts: + - containerPath: /run/secrets/db-password + volume: + name: db-password + readOnly: true + subPath: db-password + volumes: + - name: db-password + secret: + secretName: db-password + +pipelines: + dev: + run: |- + kubectl create secret generic db-password --namespace=${devspace.namespace} --dry-run=client --from-file=db-password=db/password.txt -o yaml | kubectl apply -f - + run_default_pipeline dev + purge: + run: |- + run_default_pipeline purge + kubectl delete secret db-password --namespace=${devspace.namespace} --ignore-not-found diff --git a/pkg/devspace/compose/testdata/x_secret-long/expected.yaml b/pkg/devspace/compose/testdata/x_secret-long/expected.yaml deleted file mode 100644 index f5d40788ea..0000000000 --- a/pkg/devspace/compose/testdata/x_secret-long/expected.yaml +++ /dev/null @@ -1,26 +0,0 @@ -version: v1beta11 -deployments: -- name: db - helm: - componentChart: true - values: - containers: - - name: 
db-container - image: mysql/mysql-server:8.0.19 - volumeMounts: - - containerPath: /run/secrets/db_secret - volume: - name: db-password - readOnly: true - subPath: db_secret - volumes: - - name: db-password - secret: - secretName: db-password -hooks: -- events: - - before:deploy - command: kubectl create secret generic db-password --namespace=${devspace.namespace} --dry-run=client --from-file=db-password=db/password.txt -o yaml | kubectl apply -f - -- events: - - after:purge - command: kubectl delete secret db-password --namespace=${devspace.namespace} --ignore-not-found diff --git a/pkg/devspace/compose/testdata/x_secret-short/expected.yaml b/pkg/devspace/compose/testdata/x_secret-short/expected.yaml deleted file mode 100644 index 72c2b2cee2..0000000000 --- a/pkg/devspace/compose/testdata/x_secret-short/expected.yaml +++ /dev/null @@ -1,26 +0,0 @@ -version: v1beta11 -deployments: -- name: db - helm: - componentChart: true - values: - containers: - - name: db-container - image: mysql/mysql-server:8.0.19 - volumeMounts: - - containerPath: /run/secrets/db-password - volume: - name: db-password - readOnly: true - subPath: db-password - volumes: - - name: db-password - secret: - secretName: db-password -hooks: -- events: - - before:deploy - command: kubectl create secret generic db-password --namespace=${devspace.namespace} --dry-run=client --from-file=db-password=db/password.txt -o yaml | kubectl apply -f - -- events: - - after:purge - command: kubectl delete secret db-password --namespace=${devspace.namespace} --ignore-not-found From 1e3c3f2986b398c5835e29729f4b33886ca881da Mon Sep 17 00:00:00 2001 From: Russell Centanni Date: Tue, 22 Mar 2022 11:55:46 -0400 Subject: [PATCH 4/9] refactor: docker-compose volume support --- pkg/devspace/compose/loader.go | 223 +++++------------- .../compose/testdata/basic/expected.yaml | 6 - .../compose/testdata/build/expected.yaml | 3 - .../testdata/build_args_list/expected.yaml | 3 - .../testdata/build_args_map/expected.yaml | 3 - 
.../testdata/build_context/expected.yaml | 3 - .../testdata/build_dockerfile/expected.yaml | 3 - .../testdata/build_entry_point/expected.yaml | 3 - .../testdata/build_image/expected.yaml | 3 - .../testdata/build_image_tag/expected.yaml | 3 - .../testdata/build_network/expected.yaml | 3 - .../testdata/build_target/expected.yaml | 3 - .../compose/testdata/command/expected.yaml | 3 - .../testdata/container_name/expected.yaml | 6 - .../testdata/entry_point/expected.yaml | 3 - .../testdata/env_file_multiple/expected.yaml | 3 - .../testdata/env_file_single/expected.yaml | 3 - .../testdata/environment/expected.yaml | 3 - .../compose/testdata/expose/expected.yaml | 3 - .../extra_hosts_multiple/expected.yaml | 3 - .../testdata/extra_hosts_single/expected.yaml | 3 - .../testdata/healthcheck/expected.yaml | 9 - .../compose/testdata/ports-long/expected.yaml | 3 - .../testdata/ports-short/expected.yaml | 3 - .../testdata/restart-always/expected.yaml | 3 - .../compose/testdata/restart-no/expected.yaml | 3 - .../testdata/restart-on-failure/expected.yaml | 3 - .../restart-unless-stopped/expected.yaml | 3 - .../testdata/secret-long/expected.yaml | 3 - .../testdata/secret-short/expected.yaml | 3 - .../compose/testdata/volumes-long/cache/hi | 1 + .../docker-compose.yaml | 7 +- .../testdata/volumes-long/expected.yaml | 118 +++++++++ .../compose/testdata/volumes-short/cache/hi | 1 + .../docker-compose.yaml | 4 +- .../testdata/volumes-short/expected.yaml | 61 +++++ .../testdata/x_volumes-long/expected.yaml | 151 ------------ .../testdata/x_volumes-short/expected.yaml | 118 --------- pkg/devspace/kubectl/selector/selector.go | 2 +- .../pipelinehandler/commands/select_pod.go | 3 +- 40 files changed, 255 insertions(+), 533 deletions(-) create mode 100644 pkg/devspace/compose/testdata/volumes-long/cache/hi rename pkg/devspace/compose/testdata/{x_volumes-long => volumes-long}/docker-compose.yaml (89%) create mode 100644 pkg/devspace/compose/testdata/volumes-long/expected.yaml create mode 
100644 pkg/devspace/compose/testdata/volumes-short/cache/hi rename pkg/devspace/compose/testdata/{x_volumes-short => volumes-short}/docker-compose.yaml (83%) create mode 100644 pkg/devspace/compose/testdata/volumes-short/expected.yaml delete mode 100644 pkg/devspace/compose/testdata/x_volumes-long/expected.yaml delete mode 100644 pkg/devspace/compose/testdata/x_volumes-short/expected.yaml diff --git a/pkg/devspace/compose/loader.go b/pkg/devspace/compose/loader.go index e99cb2442a..bfc5275da4 100644 --- a/pkg/devspace/compose/loader.go +++ b/pkg/devspace/compose/loader.go @@ -15,9 +15,8 @@ import ( composetypes "github.com/compose-spec/compose-go/types" "github.com/loft-sh/devspace/pkg/devspace/config/constants" "github.com/loft-sh/devspace/pkg/devspace/config/versions/latest" - "github.com/loft-sh/devspace/pkg/devspace/deploy/deployer/helm" "github.com/loft-sh/devspace/pkg/util/log" - "gopkg.in/yaml.v2" + "gopkg.in/yaml.v3" v1 "k8s.io/api/core/v1" ) @@ -81,7 +80,6 @@ func (cl *configLoader) Load(log log.Logger) (*latest.Config, error) { return nil, err } - // var hooks []*latest.HookConfig var images map[string]*latest.Image var deployments map[string]*latest.DeploymentConfig var dev map[string]*latest.DevPod @@ -130,25 +128,6 @@ func (cl *configLoader) Load(log log.Logger) (*latest.Config, error) { dev[service.Name] = devConfig } - // bindVolumeHooks := []*latest.HookConfig{} - // for _, volume := range service.Volumes { - // if volume.Type == composetypes.VolumeTypeBind { - // bindVolumeHook := createUploadVolumeHook(service, volume) - // bindVolumeHooks = append(bindVolumeHooks, bindVolumeHook) - // } - // } - - // if len(bindVolumeHooks) > 0 { - // hooks = append(hooks, bindVolumeHooks...) 
- // hooks = append(hooks, createUploadDoneHook(service)) - // } - - // _, isDependency := dependentsMap[service.Name] - // if isDependency { - // waitHook := createWaitHook(service) - // hooks = append(hooks, waitHook) - // } - return nil }) if err != nil { @@ -198,80 +177,64 @@ func addDevConfig(service composetypes.ServiceConfig, baseDir string, log log.Lo var dev *latest.DevPod devPorts := []*latest.PortMapping{} + for _, port := range service.Ports { + portMapping := &latest.PortMapping{} - if len(service.Ports) > 0 { - if dev == nil { - dev = &latest.DevPod{ - LabelSelector: labelSelector(service.Name), - Ports: []*latest.PortMapping{}, - } + if port.Published == 0 { + log.Warnf("Unassigned port ranges are not supported: %s", port.Target) + continue } - for _, port := range service.Ports { - portMapping := &latest.PortMapping{} - if port.Published == 0 { - log.Warnf("Unassigned port ranges are not supported: %s", port.Target) - continue - } + if port.Published != port.Target { + portMapping.Port = fmt.Sprint(port.Published) + ":" + fmt.Sprint(port.Target) + } else { + portMapping.Port = fmt.Sprint(port.Published) + } - if port.Published != port.Target { - portMapping.Port = fmt.Sprint(port.Published) + ":" + fmt.Sprint(port.Target) - } else { - portMapping.Port = fmt.Sprint(port.Published) - } + if port.HostIP != "" { + portMapping.BindAddress = port.HostIP + } - if port.HostIP != "" { - portMapping.BindAddress = port.HostIP - } + devPorts = append(devPorts, portMapping) + } - devPorts = append(devPorts, portMapping) - } + for _, expose := range service.Expose { + devPorts = append(devPorts, &latest.PortMapping{ + Port: expose, + }) } - if len(service.Expose) > 0 { - if dev == nil { - dev = &latest.DevPod{ - LabelSelector: labelSelector(service.Name), + syncConfigs := []*latest.SyncConfig{} + for _, volume := range service.Volumes { + if volume.Type == composetypes.VolumeTypeBind { + sync := &latest.SyncConfig{ + Path: 
strings.Join([]string{resolveLocalPath(volume), volume.Target}, ":"), + StartContainer: true, } - } - for _, expose := range service.Expose { - devPorts = append(devPorts, &latest.PortMapping{ - Port: expose, - }) + _, err := os.Stat(filepath.Join(baseDir, volume.Source, DockerIgnorePath)) + if err == nil { + sync.ExcludeFile = DockerIgnorePath + } + + syncConfigs = append(syncConfigs, sync) } } - // devSync := dev.Sync - // if devSync == nil { - // devSync = []*latest.SyncConfig{} - // } - - // for _, volume := range service.Volumes { - // if volume.Type == composetypes.VolumeTypeBind { - // sync := &latest.SyncConfig{ - // LabelSelector: labelSelector(service.Name), - // ContainerName: resolveContainerName(service), - // LocalSubPath: resolveLocalPath(volume), - // ContainerPath: volume.Target, - // } - - // _, err := os.Stat(filepath.Join(baseDir, volume.Source, DockerIgnorePath)) - // if err == nil { - // sync.ExcludeFile = DockerIgnorePath - // } - - // devSync = append(devSync, sync) - // } - // } + if len(devPorts) > 0 || len(syncConfigs) > 0 { + dev = &latest.DevPod{ + LabelSelector: labelSelector(service.Name), + } + } if len(devPorts) > 0 { dev.Ports = devPorts } - // if len(devSync) > 0 { - // dev.Sync = devSync - // } + if len(syncConfigs) > 0 { + dev.Sync = syncConfigs + dev.Command = service.Entrypoint + } return dev, nil } @@ -356,10 +319,6 @@ func (cl *configLoader) deploymentConfig(service composetypes.ServiceConfig, com values["volumes"] = volumes } - // if hasLocalSync(service) { - // values["initContainers"] = []interface{}{initContainerConfig(service, bindVolumeMounts)} - // } - container, err := containerConfig(service, volumeMounts) if err != nil { return nil, err @@ -443,10 +402,6 @@ func (cl *configLoader) deploymentConfig(service composetypes.ServiceConfig, com return &latest.DeploymentConfig{ Helm: &latest.HelmConfig{ - Chart: &latest.ChartConfig{ - Name: helm.DevSpaceChartConfig.Name, - RepoURL: helm.DevSpaceChartConfig.RepoURL, - }, 
Values: values, }, }, nil @@ -491,7 +446,7 @@ func volumesConfig( volumeMap[volumeName] = volume } - volumeMount := createServiceVolumeMount(volumeName, volumeVolume) + volumeMount := createSharedVolumeMount(volumeName, volumeVolume) volumeMounts = append(volumeMounts, volumeMount) } @@ -662,6 +617,22 @@ func createSecretVolumeMount(secret composetypes.ServiceSecretConfig) interface{ } } +func createSharedVolumeMount(volumeName string, volume composetypes.ServiceVolumeConfig) interface{} { + volumeConfig := map[string]interface{}{ + "name": volumeName, + "shared": true, + } + + if volume.ReadOnly { + volumeConfig["readOnly"] = true + } + + return map[string]interface{}{ + "containerPath": volume.Target, + "volume": volumeConfig, + } +} + func createServiceVolumeMount(volumeName string, volume composetypes.ServiceVolumeConfig) interface{} { return map[string]interface{}{ "containerPath": volume.Target, @@ -693,19 +664,6 @@ func formatName(name string) string { return regexp.MustCompile(`[\._]`).ReplaceAllString(name, "-") } -// func initContainerConfig(service composetypes.ServiceConfig, volumeMounts []interface{}) map[string]interface{} { -// return map[string]interface{}{ -// "name": UploadVolumesContainerName, -// "image": "alpine", -// "command": []interface{}{"sh"}, -// "args": []interface{}{ -// "-c", -// "while [ ! 
-f /tmp/done ]; do sleep 2; done", -// }, -// "volumeMounts": volumeMounts, -// } -// } - func resolveContainerName(service composetypes.ServiceConfig) string { if service.ContainerName != "" { return formatName(service.ContainerName) @@ -721,14 +679,14 @@ func resolveImage(service composetypes.ServiceConfig) string { return image } -// func resolveLocalPath(volume composetypes.ServiceVolumeConfig) string { -// localSubPath := volume.Source +func resolveLocalPath(volume composetypes.ServiceVolumeConfig) string { + localSubPath := volume.Source -// if strings.HasPrefix(localSubPath, "~") { -// localSubPath = fmt.Sprintf(`$!(echo "$HOME/%s")`, strings.TrimLeft(localSubPath, "~/")) -// } -// return localSubPath -// } + if strings.HasPrefix(localSubPath, "~") { + localSubPath = fmt.Sprintf(`${devspace.userHome}/%s`, strings.TrimLeft(localSubPath, "~/")) + } + return localSubPath +} func resolveServiceVolumeName(service composetypes.ServiceConfig, volume composetypes.ServiceVolumeConfig, idx int) string { volumeName := volume.Source @@ -738,48 +696,6 @@ func resolveServiceVolumeName(service composetypes.ServiceConfig, volume compose return volumeName } -// func createWaitHook(service composetypes.ServiceConfig) *latest.HookConfig { -// serviceName := formatName(service.Name) -// return &latest.HookConfig{ -// Events: []string{fmt.Sprintf("after:deploy:%s", serviceName)}, -// Container: &latest.HookContainer{ -// LabelSelector: labelSelector(serviceName), -// ContainerName: resolveContainerName(service), -// }, -// Wait: &latest.HookWaitConfig{ -// Running: true, -// TerminatedWithCode: ptr.Int32(0), -// }, -// } -// } - -// func createUploadVolumeHook(service composetypes.ServiceConfig, volume composetypes.ServiceVolumeConfig) *latest.HookConfig { -// serviceName := formatName(service.Name) -// return &latest.HookConfig{ -// Events: []string{"after:deploy:" + serviceName}, -// Upload: &latest.HookSyncConfig{ -// LocalPath: resolveLocalPath(volume), -// ContainerPath: 
volume.Target, -// }, -// Container: &latest.HookContainer{ -// LabelSelector: labelSelector(service.Name), -// ContainerName: UploadVolumesContainerName, -// }, -// } -// } - -// func createUploadDoneHook(service composetypes.ServiceConfig) *latest.HookConfig { -// serviceName := formatName(service.Name) -// return &latest.HookConfig{ -// Events: []string{"after:deploy:" + serviceName}, -// Command: "touch /tmp/done", -// Container: &latest.HookContainer{ -// LabelSelector: labelSelector(service.Name), -// ContainerName: UploadVolumesContainerName, -// }, -// } -// } - // func calculateDependentsMap(dockerCompose *composetypes.Project) (map[string][]string, error) { // tree := map[string][]string{} // err := dockerCompose.WithServices(nil, func(service composetypes.ServiceConfig) error { @@ -808,12 +724,3 @@ func labelSelector(serviceName string) map[string]string { func hasBuild(service composetypes.ServiceConfig) bool { return service.Build != nil } - -// func hasLocalSync(service composetypes.ServiceConfig) bool { -// for _, volume := range service.Volumes { -// if volume.Type == composetypes.VolumeTypeBind { -// return true -// } -// } -// return false -// } diff --git a/pkg/devspace/compose/testdata/basic/expected.yaml b/pkg/devspace/compose/testdata/basic/expected.yaml index 3f01613ed0..69417f193e 100644 --- a/pkg/devspace/compose/testdata/basic/expected.yaml +++ b/pkg/devspace/compose/testdata/basic/expected.yaml @@ -3,18 +3,12 @@ name: docker-compose deployments: db: helm: - chart: - name: component-chart - repo: https://charts.devspace.sh values: containers: - name: db-container image: mysql/mysql-server:8.0.19 backend-1-2: helm: - chart: - name: component-chart - repo: https://charts.devspace.sh values: containers: - name: backend-1-2-container diff --git a/pkg/devspace/compose/testdata/build/expected.yaml b/pkg/devspace/compose/testdata/build/expected.yaml index 89aa19a9e6..b2ac9d57b1 100644 --- a/pkg/devspace/compose/testdata/build/expected.yaml +++ 
b/pkg/devspace/compose/testdata/build/expected.yaml @@ -9,9 +9,6 @@ images: deployments: foo: helm: - chart: - name: component-chart - repo: https://charts.devspace.sh values: containers: - name: foo-container diff --git a/pkg/devspace/compose/testdata/build_args_list/expected.yaml b/pkg/devspace/compose/testdata/build_args_list/expected.yaml index 98d89c5c74..8c2a85369a 100644 --- a/pkg/devspace/compose/testdata/build_args_list/expected.yaml +++ b/pkg/devspace/compose/testdata/build_args_list/expected.yaml @@ -12,9 +12,6 @@ images: deployments: foo: helm: - chart: - name: component-chart - repo: https://charts.devspace.sh values: containers: - name: foo-container diff --git a/pkg/devspace/compose/testdata/build_args_map/expected.yaml b/pkg/devspace/compose/testdata/build_args_map/expected.yaml index 98d89c5c74..8c2a85369a 100644 --- a/pkg/devspace/compose/testdata/build_args_map/expected.yaml +++ b/pkg/devspace/compose/testdata/build_args_map/expected.yaml @@ -12,9 +12,6 @@ images: deployments: foo: helm: - chart: - name: component-chart - repo: https://charts.devspace.sh values: containers: - name: foo-container diff --git a/pkg/devspace/compose/testdata/build_context/expected.yaml b/pkg/devspace/compose/testdata/build_context/expected.yaml index 9185446668..a538909923 100644 --- a/pkg/devspace/compose/testdata/build_context/expected.yaml +++ b/pkg/devspace/compose/testdata/build_context/expected.yaml @@ -10,9 +10,6 @@ images: deployments: foo: helm: - chart: - name: component-chart - repo: https://charts.devspace.sh values: containers: - name: foo-container diff --git a/pkg/devspace/compose/testdata/build_dockerfile/expected.yaml b/pkg/devspace/compose/testdata/build_dockerfile/expected.yaml index 32e9fea355..79cfe0255c 100644 --- a/pkg/devspace/compose/testdata/build_dockerfile/expected.yaml +++ b/pkg/devspace/compose/testdata/build_dockerfile/expected.yaml @@ -9,9 +9,6 @@ images: deployments: foo: helm: - chart: - name: component-chart - repo: 
https://charts.devspace.sh values: containers: - name: foo-container diff --git a/pkg/devspace/compose/testdata/build_entry_point/expected.yaml b/pkg/devspace/compose/testdata/build_entry_point/expected.yaml index 12dac444b2..c4a33079bf 100644 --- a/pkg/devspace/compose/testdata/build_entry_point/expected.yaml +++ b/pkg/devspace/compose/testdata/build_entry_point/expected.yaml @@ -10,9 +10,6 @@ images: deployments: foo: helm: - chart: - name: component-chart - repo: https://charts.devspace.sh values: containers: - name: foo-container diff --git a/pkg/devspace/compose/testdata/build_image/expected.yaml b/pkg/devspace/compose/testdata/build_image/expected.yaml index ca26199c32..1f6d8de482 100644 --- a/pkg/devspace/compose/testdata/build_image/expected.yaml +++ b/pkg/devspace/compose/testdata/build_image/expected.yaml @@ -10,9 +10,6 @@ images: deployments: foo: helm: - chart: - name: component-chart - repo: https://charts.devspace.sh values: containers: - name: foo-container diff --git a/pkg/devspace/compose/testdata/build_image_tag/expected.yaml b/pkg/devspace/compose/testdata/build_image_tag/expected.yaml index f1030396ee..8b30ab54f7 100644 --- a/pkg/devspace/compose/testdata/build_image_tag/expected.yaml +++ b/pkg/devspace/compose/testdata/build_image_tag/expected.yaml @@ -10,9 +10,6 @@ images: deployments: foo: helm: - chart: - name: component-chart - repo: https://charts.devspace.sh values: containers: - name: foo-container diff --git a/pkg/devspace/compose/testdata/build_network/expected.yaml b/pkg/devspace/compose/testdata/build_network/expected.yaml index c3310aac7f..ed94ab25aa 100644 --- a/pkg/devspace/compose/testdata/build_network/expected.yaml +++ b/pkg/devspace/compose/testdata/build_network/expected.yaml @@ -10,9 +10,6 @@ images: deployments: foo: helm: - chart: - name: component-chart - repo: https://charts.devspace.sh values: containers: - name: foo-container diff --git a/pkg/devspace/compose/testdata/build_target/expected.yaml 
b/pkg/devspace/compose/testdata/build_target/expected.yaml index 275febb739..6f7dd357e2 100644 --- a/pkg/devspace/compose/testdata/build_target/expected.yaml +++ b/pkg/devspace/compose/testdata/build_target/expected.yaml @@ -10,9 +10,6 @@ images: deployments: foo: helm: - chart: - name: component-chart - repo: https://charts.devspace.sh values: containers: - name: foo-container diff --git a/pkg/devspace/compose/testdata/command/expected.yaml b/pkg/devspace/compose/testdata/command/expected.yaml index ab960a884c..fc7b404497 100644 --- a/pkg/devspace/compose/testdata/command/expected.yaml +++ b/pkg/devspace/compose/testdata/command/expected.yaml @@ -4,9 +4,6 @@ name: docker-compose deployments: db: helm: - chart: - name: component-chart - repo: https://charts.devspace.sh values: containers: - name: db-container diff --git a/pkg/devspace/compose/testdata/container_name/expected.yaml b/pkg/devspace/compose/testdata/container_name/expected.yaml index 1a6faac829..239f42ec2a 100644 --- a/pkg/devspace/compose/testdata/container_name/expected.yaml +++ b/pkg/devspace/compose/testdata/container_name/expected.yaml @@ -4,18 +4,12 @@ name: docker-compose deployments: db: helm: - chart: - name: component-chart - repo: https://charts.devspace.sh values: containers: - name: my-db-1 image: mysql/mysql-server:8.0.19 backend: helm: - chart: - name: component-chart - repo: https://charts.devspace.sh values: containers: - name: my-backend-1 diff --git a/pkg/devspace/compose/testdata/entry_point/expected.yaml b/pkg/devspace/compose/testdata/entry_point/expected.yaml index fbd240cb93..b3912d2faa 100644 --- a/pkg/devspace/compose/testdata/entry_point/expected.yaml +++ b/pkg/devspace/compose/testdata/entry_point/expected.yaml @@ -4,9 +4,6 @@ name: docker-compose deployments: db: helm: - chart: - name: component-chart - repo: https://charts.devspace.sh values: containers: - name: db-container diff --git a/pkg/devspace/compose/testdata/env_file_multiple/expected.yaml 
b/pkg/devspace/compose/testdata/env_file_multiple/expected.yaml index 3689fc994d..2d20739366 100644 --- a/pkg/devspace/compose/testdata/env_file_multiple/expected.yaml +++ b/pkg/devspace/compose/testdata/env_file_multiple/expected.yaml @@ -4,9 +4,6 @@ name: docker-compose deployments: db: helm: - chart: - name: component-chart - repo: https://charts.devspace.sh values: containers: - name: db-container diff --git a/pkg/devspace/compose/testdata/env_file_single/expected.yaml b/pkg/devspace/compose/testdata/env_file_single/expected.yaml index 1d3364ee29..d99ac3d187 100644 --- a/pkg/devspace/compose/testdata/env_file_single/expected.yaml +++ b/pkg/devspace/compose/testdata/env_file_single/expected.yaml @@ -4,9 +4,6 @@ name: docker-compose deployments: db: helm: - chart: - name: component-chart - repo: https://charts.devspace.sh values: containers: - name: db-container diff --git a/pkg/devspace/compose/testdata/environment/expected.yaml b/pkg/devspace/compose/testdata/environment/expected.yaml index ae5af6fcab..add96b2882 100644 --- a/pkg/devspace/compose/testdata/environment/expected.yaml +++ b/pkg/devspace/compose/testdata/environment/expected.yaml @@ -4,9 +4,6 @@ name: docker-compose deployments: db: helm: - chart: - name: component-chart - repo: https://charts.devspace.sh values: containers: - name: db-container diff --git a/pkg/devspace/compose/testdata/expose/expected.yaml b/pkg/devspace/compose/testdata/expose/expected.yaml index 2eec75f24c..a5963c8fd3 100644 --- a/pkg/devspace/compose/testdata/expose/expected.yaml +++ b/pkg/devspace/compose/testdata/expose/expected.yaml @@ -4,9 +4,6 @@ name: docker-compose deployments: db: helm: - chart: - name: component-chart - repo: https://charts.devspace.sh values: containers: - name: db-container diff --git a/pkg/devspace/compose/testdata/extra_hosts_multiple/expected.yaml b/pkg/devspace/compose/testdata/extra_hosts_multiple/expected.yaml index ec9ebfc866..a3d6b7b906 100644 --- 
a/pkg/devspace/compose/testdata/extra_hosts_multiple/expected.yaml +++ b/pkg/devspace/compose/testdata/extra_hosts_multiple/expected.yaml @@ -5,9 +5,6 @@ name: docker-compose deployments: db: helm: - chart: - name: component-chart - repo: https://charts.devspace.sh values: containers: - name: db-container diff --git a/pkg/devspace/compose/testdata/extra_hosts_single/expected.yaml b/pkg/devspace/compose/testdata/extra_hosts_single/expected.yaml index aab18d7e2a..3d7e5a91d5 100644 --- a/pkg/devspace/compose/testdata/extra_hosts_single/expected.yaml +++ b/pkg/devspace/compose/testdata/extra_hosts_single/expected.yaml @@ -5,9 +5,6 @@ name: docker-compose deployments: db: helm: - chart: - name: component-chart - repo: https://charts.devspace.sh values: containers: - name: db-container diff --git a/pkg/devspace/compose/testdata/healthcheck/expected.yaml b/pkg/devspace/compose/testdata/healthcheck/expected.yaml index 19a0246bea..3f0134edcf 100644 --- a/pkg/devspace/compose/testdata/healthcheck/expected.yaml +++ b/pkg/devspace/compose/testdata/healthcheck/expected.yaml @@ -5,9 +5,6 @@ name: docker-compose deployments: cmd: helm: - chart: - name: component-chart - repo: https://charts.devspace.sh values: containers: - name: cmd-container @@ -25,9 +22,6 @@ deployments: periodSeconds: 3 cmd-shell: helm: - chart: - name: component-chart - repo: https://charts.devspace.sh values: containers: - name: cmd-shell-container @@ -43,9 +37,6 @@ deployments: periodSeconds: 3 none: helm: - chart: - name: component-chart - repo: https://charts.devspace.sh values: containers: - name: none-container diff --git a/pkg/devspace/compose/testdata/ports-long/expected.yaml b/pkg/devspace/compose/testdata/ports-long/expected.yaml index 8fd1c9e012..6dfacd1624 100644 --- a/pkg/devspace/compose/testdata/ports-long/expected.yaml +++ b/pkg/devspace/compose/testdata/ports-long/expected.yaml @@ -4,9 +4,6 @@ name: docker-compose deployments: db: helm: - chart: - name: component-chart - repo: 
https://charts.devspace.sh values: containers: - name: db-container diff --git a/pkg/devspace/compose/testdata/ports-short/expected.yaml b/pkg/devspace/compose/testdata/ports-short/expected.yaml index 0f0d6975ea..502d332eac 100644 --- a/pkg/devspace/compose/testdata/ports-short/expected.yaml +++ b/pkg/devspace/compose/testdata/ports-short/expected.yaml @@ -4,9 +4,6 @@ name: docker-compose deployments: db: helm: - chart: - name: component-chart - repo: https://charts.devspace.sh values: containers: - name: db-container diff --git a/pkg/devspace/compose/testdata/restart-always/expected.yaml b/pkg/devspace/compose/testdata/restart-always/expected.yaml index e5d3d3463f..ceb31cbce7 100644 --- a/pkg/devspace/compose/testdata/restart-always/expected.yaml +++ b/pkg/devspace/compose/testdata/restart-always/expected.yaml @@ -4,9 +4,6 @@ name: docker-compose deployments: db: helm: - chart: - name: component-chart - repo: https://charts.devspace.sh values: containers: - name: db-container diff --git a/pkg/devspace/compose/testdata/restart-no/expected.yaml b/pkg/devspace/compose/testdata/restart-no/expected.yaml index 580b06ac38..b82228a91e 100644 --- a/pkg/devspace/compose/testdata/restart-no/expected.yaml +++ b/pkg/devspace/compose/testdata/restart-no/expected.yaml @@ -4,9 +4,6 @@ name: docker-compose deployments: db: helm: - chart: - name: component-chart - repo: https://charts.devspace.sh values: containers: - name: db-container diff --git a/pkg/devspace/compose/testdata/restart-on-failure/expected.yaml b/pkg/devspace/compose/testdata/restart-on-failure/expected.yaml index a06695d14c..000c250587 100644 --- a/pkg/devspace/compose/testdata/restart-on-failure/expected.yaml +++ b/pkg/devspace/compose/testdata/restart-on-failure/expected.yaml @@ -4,9 +4,6 @@ name: docker-compose deployments: db: helm: - chart: - name: component-chart - repo: https://charts.devspace.sh values: containers: - name: db-container diff --git 
a/pkg/devspace/compose/testdata/restart-unless-stopped/expected.yaml b/pkg/devspace/compose/testdata/restart-unless-stopped/expected.yaml index 580b06ac38..b82228a91e 100644 --- a/pkg/devspace/compose/testdata/restart-unless-stopped/expected.yaml +++ b/pkg/devspace/compose/testdata/restart-unless-stopped/expected.yaml @@ -4,9 +4,6 @@ name: docker-compose deployments: db: helm: - chart: - name: component-chart - repo: https://charts.devspace.sh values: containers: - name: db-container diff --git a/pkg/devspace/compose/testdata/secret-long/expected.yaml b/pkg/devspace/compose/testdata/secret-long/expected.yaml index 4e90f87b6f..a986f69253 100644 --- a/pkg/devspace/compose/testdata/secret-long/expected.yaml +++ b/pkg/devspace/compose/testdata/secret-long/expected.yaml @@ -4,9 +4,6 @@ name: docker-compose deployments: db: helm: - chart: - name: component-chart - repo: https://charts.devspace.sh values: containers: - name: db-container diff --git a/pkg/devspace/compose/testdata/secret-short/expected.yaml b/pkg/devspace/compose/testdata/secret-short/expected.yaml index b19fc00fbc..45833e4a55 100644 --- a/pkg/devspace/compose/testdata/secret-short/expected.yaml +++ b/pkg/devspace/compose/testdata/secret-short/expected.yaml @@ -4,9 +4,6 @@ name: docker-compose deployments: db: helm: - chart: - name: component-chart - repo: https://charts.devspace.sh values: containers: - name: db-container diff --git a/pkg/devspace/compose/testdata/volumes-long/cache/hi b/pkg/devspace/compose/testdata/volumes-long/cache/hi new file mode 100644 index 0000000000..8ab686eafe --- /dev/null +++ b/pkg/devspace/compose/testdata/volumes-long/cache/hi @@ -0,0 +1 @@ +Hello, World! 
diff --git a/pkg/devspace/compose/testdata/x_volumes-long/docker-compose.yaml b/pkg/devspace/compose/testdata/volumes-long/docker-compose.yaml similarity index 89% rename from pkg/devspace/compose/testdata/x_volumes-long/docker-compose.yaml rename to pkg/devspace/compose/testdata/volumes-long/docker-compose.yaml index bb4026a136..2451a12dab 100644 --- a/pkg/devspace/compose/testdata/x_volumes-long/docker-compose.yaml +++ b/pkg/devspace/compose/testdata/volumes-long/docker-compose.yaml @@ -1,7 +1,7 @@ services: db: - image: loft.sh/mysql-server:8.0.19 - build: db + image: alpine + entrypoint: sh -c 'cat /tmp/cache/hi && tail -f /dev/null' volumes: # Just specify a path and let the Engine create a volume - type: volume @@ -52,7 +52,8 @@ services: target: /var/lib/mysql backend_1.2: - image: mysql/mysql-server:8.0.19 + image: alpine + entrypoint: sh -c 'cat /tmp/cache/hi && tail -f /dev/null' volumes: # Just specify a path and let the Engine create a volume - type: volume diff --git a/pkg/devspace/compose/testdata/volumes-long/expected.yaml b/pkg/devspace/compose/testdata/volumes-long/expected.yaml new file mode 100644 index 0000000000..cbf7097fa0 --- /dev/null +++ b/pkg/devspace/compose/testdata/volumes-long/expected.yaml @@ -0,0 +1,118 @@ +version: v2beta1 + +name: docker-compose + +deployments: + db: + helm: + values: + containers: + - name: db-container + image: alpine + command: + - sh + - -c + - 'cat /tmp/cache/hi && tail -f /dev/null' + volumeMounts: + - containerPath: /var/lib/mydata + volume: + name: db-1 + shared: true + - containerPath: /var/lib/mysql + volume: + name: datavolume + shared: true + - containerPath: /var/lib/readonly + volume: + name: datavolume + shared: true + readOnly: true + - containerPath: /var/lib/tmpfs + volume: + name: db-2 + readOnly: false + - containerPath: /var/lib/tmpfs-1000 + volume: + name: db-3 + readOnly: false + - containerPath: /var/lib/data + volume: + name: volume-1 + readOnly: false + - containerPath: /tmp/cache + 
volume: + name: volume-2 + readOnly: false + - containerPath: /etc/configs/ + volume: + name: volume-3 + readOnly: false + volumes: + - name: db-1 + size: 5Gi + - name: datavolume + size: 5Gi + - name: db-2 + emptyDir: {} + - name: db-3 + emptyDir: + sizeLimit: "1000" + - name: volume-1 + emptyDir: {} + - name: volume-2 + emptyDir: {} + - name: volume-3 + emptyDir: {} + backend-1-2: + helm: + values: + containers: + - name: backend-1-2-container + image: alpine + command: + - sh + - -c + - 'cat /tmp/cache/hi && tail -f /dev/null' + volumeMounts: + - containerPath: /var/lib/mydata + volume: + name: backend-1-2-1 + shared: true + volumes: + - name: backend-1-2-1 + size: 5Gi + +dev: + db: + labelSelector: + app.kubernetes.io/component: db + command: + - sh + - -c + - 'cat /tmp/cache/hi && tail -f /dev/null' + sync: + - path: /opt/data:/var/lib/data + startContainer: true + - path: ./cache:/tmp/cache + startContainer: true + - path: ${devspace.userHome}/configs:/etc/configs/ + startContainer: true + + +# dev: +# sync: +# - containerName: db-container +# labelSelector: +# app.kubernetes.io/component: db +# localSubPath: /opt/data +# containerPath: /var/lib/data +# - containerName: db-container +# labelSelector: +# app.kubernetes.io/component: db +# localSubPath: ./cache +# containerPath: /tmp/cache +# - containerName: db-container +# labelSelector: +# app.kubernetes.io/component: db +# localSubPath: $!(echo "$HOME/configs") +# containerPath: /etc/configs/ diff --git a/pkg/devspace/compose/testdata/volumes-short/cache/hi b/pkg/devspace/compose/testdata/volumes-short/cache/hi new file mode 100644 index 0000000000..8ab686eafe --- /dev/null +++ b/pkg/devspace/compose/testdata/volumes-short/cache/hi @@ -0,0 +1 @@ +Hello, World! 
diff --git a/pkg/devspace/compose/testdata/x_volumes-short/docker-compose.yaml b/pkg/devspace/compose/testdata/volumes-short/docker-compose.yaml similarity index 83% rename from pkg/devspace/compose/testdata/x_volumes-short/docker-compose.yaml rename to pkg/devspace/compose/testdata/volumes-short/docker-compose.yaml index 2e4689eedd..3bbb8838eb 100644 --- a/pkg/devspace/compose/testdata/x_volumes-short/docker-compose.yaml +++ b/pkg/devspace/compose/testdata/volumes-short/docker-compose.yaml @@ -1,7 +1,7 @@ services: db: - image: loft.sh/mysql-server:8.0.19 - build: db + image: alpine + entrypoint: sh -c 'cat /tmp/cache/hi && tail -f /dev/null' volumes: # Just specify a path and let the Engine create a volume - /var/lib/mydata diff --git a/pkg/devspace/compose/testdata/volumes-short/expected.yaml b/pkg/devspace/compose/testdata/volumes-short/expected.yaml new file mode 100644 index 0000000000..4e2bc5cb2e --- /dev/null +++ b/pkg/devspace/compose/testdata/volumes-short/expected.yaml @@ -0,0 +1,61 @@ +version: v2beta1 +name: docker-compose +deployments: + db: + helm: + values: + containers: + - name: db-container + image: alpine + command: + - sh + - -c + - 'cat /tmp/cache/hi && tail -f /dev/null' + volumeMounts: + - containerPath: /var/lib/mydata + volume: + name: db-1 + shared: true + - containerPath: /var/lib/mysql + volume: + name: datavolume + shared: true + - containerPath: /var/lib/data + volume: + name: volume-1 + readOnly: false + - containerPath: /tmp/cache + volume: + name: volume-2 + readOnly: false + - containerPath: /etc/configs/ + volume: + name: volume-3 + readOnly: true + volumes: + - name: db-1 + size: 5Gi + - name: datavolume + size: 5Gi + - name: volume-1 + emptyDir: {} + - name: volume-2 + emptyDir: {} + - name: volume-3 + emptyDir: {} + +dev: + db: + labelSelector: + app.kubernetes.io/component: db + command: + - sh + - -c + - 'cat /tmp/cache/hi && tail -f /dev/null' + sync: + - path: /opt/data:/var/lib/data + startContainer: true + - path: 
./cache:/tmp/cache + startContainer: true + - path: ${devspace.userHome}/configs:/etc/configs/ + startContainer: true diff --git a/pkg/devspace/compose/testdata/x_volumes-long/expected.yaml b/pkg/devspace/compose/testdata/x_volumes-long/expected.yaml deleted file mode 100644 index 682476cb82..0000000000 --- a/pkg/devspace/compose/testdata/x_volumes-long/expected.yaml +++ /dev/null @@ -1,151 +0,0 @@ -version: v1beta11 - -images: - db: - context: db - dockerfile: db/Dockerfile - image: loft.sh/mysql-server:8.0.19 - -deployments: -- name: db - helm: - componentChart: true - values: - initContainers: - - name: upload-volumes - image: alpine - command: - - sh - args: - - -c - - while [ ! -f /tmp/done ]; do sleep 2; done - volumeMounts: - - containerPath: /var/lib/data - volume: - name: volume-1 - readOnly: false - - containerPath: /tmp/cache - volume: - name: volume-2 - readOnly: false - - containerPath: /etc/configs/ - volume: - name: volume-3 - readOnly: false - containers: - - name: db-container - image: loft.sh/mysql-server:8.0.19 - volumeMounts: - - containerPath: /var/lib/mydata - volume: - name: db-1 - readOnly: false - - containerPath: /var/lib/mysql - volume: - name: datavolume - readOnly: false - - containerPath: /var/lib/readonly - volume: - name: datavolume - readOnly: true - - containerPath: /var/lib/tmpfs - volume: - name: db-2 - readOnly: false - - containerPath: /var/lib/tmpfs-1000 - volume: - name: db-3 - readOnly: false - - containerPath: /var/lib/data - volume: - name: volume-1 - readOnly: false - - containerPath: /tmp/cache - volume: - name: volume-2 - readOnly: false - - containerPath: /etc/configs/ - volume: - name: volume-3 - readOnly: false - volumes: - - name: db-1 - size: 5Gi - - name: datavolume - size: 5Gi - - name: db-2 - emptyDir: {} - - name: db-3 - emptyDir: - sizeLimit: "1000" - - name: volume-1 - emptyDir: {} - - name: volume-2 - emptyDir: {} - - name: volume-3 - emptyDir: {} -- name: backend-1-2 - helm: - componentChart: true - values: 
- containers: - - name: backend-1-2-container - image: mysql/mysql-server:8.0.19 - volumeMounts: - - containerPath: /var/lib/mydata - volume: - name: backend-1-2-1 - readOnly: false - volumes: - - name: backend-1-2-1 - size: 5Gi - -dev: - sync: - - containerName: db-container - labelSelector: - app.kubernetes.io/component: db - localSubPath: /opt/data - containerPath: /var/lib/data - - containerName: db-container - labelSelector: - app.kubernetes.io/component: db - localSubPath: ./cache - containerPath: /tmp/cache - - containerName: db-container - labelSelector: - app.kubernetes.io/component: db - localSubPath: $!(echo "$HOME/configs") - containerPath: /etc/configs/ - -hooks: -- events: ["after:deploy:db"] - upload: - localPath: /opt/data - containerPath: /var/lib/data - container: - containerName: upload-volumes - labelSelector: - app.kubernetes.io/component: db -- events: ["after:deploy:db"] - upload: - localPath: ./cache - containerPath: /tmp/cache - container: - containerName: upload-volumes - labelSelector: - app.kubernetes.io/component: db -- events: ["after:deploy:db"] - upload: - localPath: $!(echo "$HOME/configs") - containerPath: /etc/configs/ - container: - containerName: upload-volumes - labelSelector: - app.kubernetes.io/component: db -- events: ["after:deploy:db"] - command: touch /tmp/done - container: - containerName: upload-volumes - labelSelector: - app.kubernetes.io/component: db diff --git a/pkg/devspace/compose/testdata/x_volumes-short/expected.yaml b/pkg/devspace/compose/testdata/x_volumes-short/expected.yaml deleted file mode 100644 index 5debd2afb3..0000000000 --- a/pkg/devspace/compose/testdata/x_volumes-short/expected.yaml +++ /dev/null @@ -1,118 +0,0 @@ -version: v1beta11 -images: - db: - context: db - dockerfile: db/Dockerfile - image: loft.sh/mysql-server:8.0.19 - -deployments: -- name: db - helm: - componentChart: true - values: - initContainers: - - name: upload-volumes - image: alpine - command: - - sh - args: - - -c - - while [ ! 
-f /tmp/done ]; do sleep 2; done - volumeMounts: - - containerPath: /var/lib/data - volume: - name: volume-1 - readOnly: false - - containerPath: /tmp/cache - volume: - name: volume-2 - readOnly: false - - containerPath: /etc/configs/ - volume: - name: volume-3 - readOnly: false - containers: - - name: db-container - image: loft.sh/mysql-server:8.0.19 - volumeMounts: - - containerPath: /var/lib/mydata - volume: - name: db-1 - readOnly: false - - containerPath: /var/lib/mysql - volume: - name: datavolume - readOnly: false - - containerPath: /var/lib/data - volume: - name: volume-1 - readOnly: false - - containerPath: /tmp/cache - volume: - name: volume-2 - readOnly: false - - containerPath: /etc/configs/ - volume: - name: volume-3 - readOnly: true - volumes: - - name: db-1 - size: 5Gi - - name: datavolume - size: 5Gi - - name: volume-1 - emptyDir: {} - - name: volume-2 - emptyDir: {} - - name: volume-3 - emptyDir: {} - -dev: - sync: - - containerName: db-container - labelSelector: - app.kubernetes.io/component: db - localSubPath: /opt/data - containerPath: /var/lib/data - - containerName: db-container - labelSelector: - app.kubernetes.io/component: db - localSubPath: ./cache - containerPath: /tmp/cache - - containerName: db-container - labelSelector: - app.kubernetes.io/component: db - localSubPath: $!(echo "$HOME/configs") - containerPath: /etc/configs/ - -hooks: -- events: ["after:deploy:db"] - upload: - localPath: /opt/data - containerPath: /var/lib/data - container: - containerName: upload-volumes - labelSelector: - app.kubernetes.io/component: db -- events: ["after:deploy:db"] - upload: - localPath: ./cache - containerPath: /tmp/cache - container: - containerName: upload-volumes - labelSelector: - app.kubernetes.io/component: db -- events: ["after:deploy:db"] - upload: - localPath: $!(echo "$HOME/configs") - containerPath: /etc/configs/ - container: - containerName: upload-volumes - labelSelector: - app.kubernetes.io/component: db -- events: ["after:deploy:db"] 
- command: touch /tmp/done - container: - containerName: upload-volumes - labelSelector: - app.kubernetes.io/component: db diff --git a/pkg/devspace/kubectl/selector/selector.go b/pkg/devspace/kubectl/selector/selector.go index 39744bbea3..427355b50f 100644 --- a/pkg/devspace/kubectl/selector/selector.go +++ b/pkg/devspace/kubectl/selector/selector.go @@ -52,7 +52,7 @@ var FilterNonRunningContainers = func(p *corev1.Pod, c *corev1.Container) bool { return true } for _, cs := range p.Status.InitContainerStatuses { - if cs.Name == c.Name && cs.Ready && cs.State.Running != nil { + if cs.Name == c.Name && cs.State.Running != nil { return false } } diff --git a/pkg/devspace/pipeline/engine/pipelinehandler/commands/select_pod.go b/pkg/devspace/pipeline/engine/pipelinehandler/commands/select_pod.go index bf598b16d6..964ce4c127 100644 --- a/pkg/devspace/pipeline/engine/pipelinehandler/commands/select_pod.go +++ b/pkg/devspace/pipeline/engine/pipelinehandler/commands/select_pod.go @@ -2,12 +2,13 @@ package commands import ( "fmt" + "time" + "github.com/jessevdk/go-flags" devspacecontext "github.com/loft-sh/devspace/pkg/devspace/context" "github.com/loft-sh/devspace/pkg/devspace/services/targetselector" "github.com/pkg/errors" "mvdan.cc/sh/v3/interp" - "time" ) type SelectPodOptions struct { From fea8c521e0ce3735dd7b26acb11afa351e615832 Mon Sep 17 00:00:00 2001 From: Russell Centanni Date: Wed, 30 Mar 2022 21:25:06 -0400 Subject: [PATCH 5/9] refactor: docker-compose dependency support --- pkg/devspace/compose/config_builder.go | 51 ++ pkg/devspace/compose/dependency.go | 22 + pkg/devspace/compose/deployment.go | 240 ++++++ pkg/devspace/compose/dev.go | 95 +++ pkg/devspace/compose/image.go | 70 ++ pkg/devspace/compose/loader.go | 701 ++---------------- pkg/devspace/compose/loader_test.go | 206 ++--- pkg/devspace/compose/secret.go | 48 ++ .../basic/{expected.yaml => devspace.yaml} | 0 .../build/{expected.yaml => devspace.yaml} | 0 .../{expected.yaml => devspace.yaml} | 0 
.../{expected.yaml => devspace.yaml} | 0 .../{expected.yaml => devspace.yaml} | 0 .../{expected.yaml => devspace.yaml} | 0 .../{expected.yaml => devspace.yaml} | 0 .../{expected.yaml => devspace.yaml} | 0 .../{expected.yaml => devspace.yaml} | 0 .../{expected.yaml => devspace.yaml} | 0 .../{expected.yaml => devspace.yaml} | 0 .../command/{expected.yaml => devspace.yaml} | 0 .../{expected.yaml => devspace.yaml} | 0 .../testdata/depends_on/devspace-backend.yaml | 104 +++ .../testdata/depends_on/devspace-cache.yaml | 101 +++ .../testdata/depends_on/devspace-db.yaml | 94 +++ .../depends_on/devspace-frontend.yaml | 100 +++ .../depends_on/devspace-messaging.yaml | 102 +++ .../compose/testdata/depends_on/devspace.yaml | 112 +++ .../docker-compose.yaml | 0 .../{expected.yaml => devspace.yaml} | 0 .../{expected.yaml => devspace.yaml} | 0 .../{expected.yaml => devspace.yaml} | 0 .../{expected.yaml => devspace.yaml} | 0 .../expose/{expected.yaml => devspace.yaml} | 0 .../{expected.yaml => devspace.yaml} | 0 .../{expected.yaml => devspace.yaml} | 0 .../{expected.yaml => devspace.yaml} | 0 .../{expected.yaml => devspace.yaml} | 0 .../{expected.yaml => devspace.yaml} | 0 .../{expected.yaml => devspace.yaml} | 0 .../{expected.yaml => devspace.yaml} | 0 .../{expected.yaml => devspace.yaml} | 0 .../{expected.yaml => devspace.yaml} | 0 .../{expected.yaml => devspace.yaml} | 0 .../{expected.yaml => devspace.yaml} | 0 .../{expected.yaml => devspace.yaml} | 0 .../{expected.yaml => devspace.yaml} | 0 .../testdata/x_depends_on/expected.yaml | 92 --- .../{expected.yaml => devspace.yaml} | 0 pkg/devspace/compose/volume.go | 149 ++++ 49 files changed, 1470 insertions(+), 817 deletions(-) create mode 100644 pkg/devspace/compose/config_builder.go create mode 100644 pkg/devspace/compose/dependency.go create mode 100644 pkg/devspace/compose/deployment.go create mode 100644 pkg/devspace/compose/dev.go create mode 100644 pkg/devspace/compose/image.go create mode 100644 
pkg/devspace/compose/secret.go rename pkg/devspace/compose/testdata/basic/{expected.yaml => devspace.yaml} (100%) rename pkg/devspace/compose/testdata/build/{expected.yaml => devspace.yaml} (100%) rename pkg/devspace/compose/testdata/build_args_list/{expected.yaml => devspace.yaml} (100%) rename pkg/devspace/compose/testdata/build_args_map/{expected.yaml => devspace.yaml} (100%) rename pkg/devspace/compose/testdata/build_context/{expected.yaml => devspace.yaml} (100%) rename pkg/devspace/compose/testdata/build_dockerfile/{expected.yaml => devspace.yaml} (100%) rename pkg/devspace/compose/testdata/build_entry_point/{expected.yaml => devspace.yaml} (100%) rename pkg/devspace/compose/testdata/build_image/{expected.yaml => devspace.yaml} (100%) rename pkg/devspace/compose/testdata/build_image_tag/{expected.yaml => devspace.yaml} (100%) rename pkg/devspace/compose/testdata/build_network/{expected.yaml => devspace.yaml} (100%) rename pkg/devspace/compose/testdata/build_target/{expected.yaml => devspace.yaml} (100%) rename pkg/devspace/compose/testdata/command/{expected.yaml => devspace.yaml} (100%) rename pkg/devspace/compose/testdata/container_name/{expected.yaml => devspace.yaml} (100%) create mode 100644 pkg/devspace/compose/testdata/depends_on/devspace-backend.yaml create mode 100644 pkg/devspace/compose/testdata/depends_on/devspace-cache.yaml create mode 100644 pkg/devspace/compose/testdata/depends_on/devspace-db.yaml create mode 100644 pkg/devspace/compose/testdata/depends_on/devspace-frontend.yaml create mode 100644 pkg/devspace/compose/testdata/depends_on/devspace-messaging.yaml create mode 100644 pkg/devspace/compose/testdata/depends_on/devspace.yaml rename pkg/devspace/compose/testdata/{x_depends_on => depends_on}/docker-compose.yaml (100%) rename pkg/devspace/compose/testdata/entry_point/{expected.yaml => devspace.yaml} (100%) rename pkg/devspace/compose/testdata/env_file_multiple/{expected.yaml => devspace.yaml} (100%) rename 
pkg/devspace/compose/testdata/env_file_single/{expected.yaml => devspace.yaml} (100%) rename pkg/devspace/compose/testdata/environment/{expected.yaml => devspace.yaml} (100%) rename pkg/devspace/compose/testdata/expose/{expected.yaml => devspace.yaml} (100%) rename pkg/devspace/compose/testdata/extra_hosts_multiple/{expected.yaml => devspace.yaml} (100%) rename pkg/devspace/compose/testdata/extra_hosts_single/{expected.yaml => devspace.yaml} (100%) rename pkg/devspace/compose/testdata/healthcheck/{expected.yaml => devspace.yaml} (100%) rename pkg/devspace/compose/testdata/ports-long/{expected.yaml => devspace.yaml} (100%) rename pkg/devspace/compose/testdata/ports-short/{expected.yaml => devspace.yaml} (100%) rename pkg/devspace/compose/testdata/restart-always/{expected.yaml => devspace.yaml} (100%) rename pkg/devspace/compose/testdata/restart-no/{expected.yaml => devspace.yaml} (100%) rename pkg/devspace/compose/testdata/restart-on-failure/{expected.yaml => devspace.yaml} (100%) rename pkg/devspace/compose/testdata/restart-unless-stopped/{expected.yaml => devspace.yaml} (100%) rename pkg/devspace/compose/testdata/secret-long/{expected.yaml => devspace.yaml} (100%) rename pkg/devspace/compose/testdata/secret-short/{expected.yaml => devspace.yaml} (100%) rename pkg/devspace/compose/testdata/volumes-long/{expected.yaml => devspace.yaml} (100%) rename pkg/devspace/compose/testdata/volumes-short/{expected.yaml => devspace.yaml} (100%) delete mode 100644 pkg/devspace/compose/testdata/x_depends_on/expected.yaml rename pkg/devspace/compose/testdata/x_volumes-depends_on/{expected.yaml => devspace.yaml} (100%) create mode 100644 pkg/devspace/compose/volume.go diff --git a/pkg/devspace/compose/config_builder.go b/pkg/devspace/compose/config_builder.go new file mode 100644 index 0000000000..6cf48d47db --- /dev/null +++ b/pkg/devspace/compose/config_builder.go @@ -0,0 +1,51 @@ +package compose + +import ( + "regexp" + + composetypes "github.com/compose-spec/compose-go/types" + 
"github.com/loft-sh/devspace/pkg/devspace/config/versions/latest" + "github.com/loft-sh/devspace/pkg/util/log" +) + +type ConfigBuilder interface { + AddDependencies(dockerCompose composetypes.Project, service composetypes.ServiceConfig) error + AddDeployment(dockerCompose composetypes.Project, service composetypes.ServiceConfig) error + AddDev(service composetypes.ServiceConfig) error + AddImage(service composetypes.ServiceConfig) error + AddSecret(dockerCompose composetypes.Project, service composetypes.ServiceConfig) error + Config() *latest.Config + SetName(name string) +} + +type configBuilder struct { + config *latest.Config + log log.Logger + workingDir string +} + +func NewConfigBuilder(workingDir string, log log.Logger) ConfigBuilder { + return &configBuilder{ + config: latest.New().(*latest.Config), + log: log, + workingDir: workingDir, + } +} + +func (cb *configBuilder) Config() *latest.Config { + return cb.config +} + +func (cb *configBuilder) SetName(name string) { + cb.config.Name = name +} + +func formatName(name string) string { + return regexp.MustCompile(`[\._]`).ReplaceAllString(name, "-") +} + +func labelSelector(serviceName string) map[string]string { + return map[string]string{ + "app.kubernetes.io/component": serviceName, + } +} diff --git a/pkg/devspace/compose/dependency.go b/pkg/devspace/compose/dependency.go new file mode 100644 index 0000000000..e646566584 --- /dev/null +++ b/pkg/devspace/compose/dependency.go @@ -0,0 +1,22 @@ +package compose + +import ( + composetypes "github.com/compose-spec/compose-go/types" + "github.com/loft-sh/devspace/pkg/devspace/config/versions/latest" +) + +func (cb *configBuilder) AddDependencies(dockerCompose composetypes.Project, service composetypes.ServiceConfig) error { + for _, dependency := range service.GetDependencies() { + if cb.config.Dependencies == nil { + cb.config.Dependencies = map[string]*latest.DependencyConfig{} + } + + depName := formatName(dependency) + cb.config.Dependencies[depName] = 
&latest.DependencyConfig{ + Source: &latest.SourceConfig{ + Path: "devspace-" + depName + ".yaml", + }, + } + } + return nil +} diff --git a/pkg/devspace/compose/deployment.go b/pkg/devspace/compose/deployment.go new file mode 100644 index 0000000000..21d469e88e --- /dev/null +++ b/pkg/devspace/compose/deployment.go @@ -0,0 +1,240 @@ +package compose + +import ( + "fmt" + "sort" + "strconv" + "strings" + "time" + + composetypes "github.com/compose-spec/compose-go/types" + "github.com/loft-sh/devspace/pkg/devspace/config/versions/latest" + v1 "k8s.io/api/core/v1" +) + +func (cb *configBuilder) AddDeployment(dockerCompose composetypes.Project, service composetypes.ServiceConfig) error { + values := map[string]interface{}{} + + volumes, volumeMounts, _ := volumesConfig(service, dockerCompose.Volumes, cb.log) + if len(volumes) > 0 { + values["volumes"] = volumes + } + + container, err := containerConfig(service, volumeMounts) + if err != nil { + return err + } + values["containers"] = []interface{}{container} + + if service.Restart != "" { + restartPolicy := string(v1.RestartPolicyNever) + switch service.Restart { + case "always": + restartPolicy = string(v1.RestartPolicyAlways) + case "on-failure": + restartPolicy = string(v1.RestartPolicyOnFailure) + } + values["restartPolicy"] = restartPolicy + } + + ports := []interface{}{} + if len(service.Ports) > 0 { + for _, port := range service.Ports { + var protocol string + switch port.Protocol { + case "tcp": + protocol = string(v1.ProtocolTCP) + case "udp": + protocol = string(v1.ProtocolUDP) + default: + return fmt.Errorf("invalid protocol %s", port.Protocol) + } + + if port.Published == 0 { + cb.log.Warnf("Unassigned port ranges are not supported: %s", port.Target) + continue + } + + ports = append(ports, map[string]interface{}{ + "port": int(port.Published), + "containerPort": int(port.Target), + "protocol": protocol, + }) + } + } + + if len(service.Expose) > 0 { + for _, port := range service.Expose { + intPort, err 
:= strconv.Atoi(port) + if err != nil { + return fmt.Errorf("expected integer for port number: %s", err.Error()) + } + ports = append(ports, map[string]interface{}{ + "port": intPort, + }) + } + } + + if len(ports) > 0 { + values["service"] = map[string]interface{}{ + "ports": ports, + } + } + + if len(service.ExtraHosts) > 0 { + hostsMap := map[string][]interface{}{} + for _, host := range service.ExtraHosts { + hostTokens := strings.Split(host, ":") + hostName := hostTokens[0] + hostIP := hostTokens[1] + hostsMap[hostIP] = append(hostsMap[hostIP], hostName) + } + + hostAliases := []interface{}{} + for ip, hosts := range hostsMap { + hostAliases = append(hostAliases, map[string]interface{}{ + "ip": ip, + "hostnames": hosts, + }) + } + + values["hostAliases"] = hostAliases + } + + deployment := &latest.DeploymentConfig{ + Helm: &latest.HelmConfig{ + Values: values, + }, + } + + if cb.config.Deployments == nil { + cb.config.Deployments = map[string]*latest.DeploymentConfig{} + } + + deploymentName := formatName(service.Name) + cb.config.Deployments[deploymentName] = deployment + + return nil +} + +func containerConfig(service composetypes.ServiceConfig, volumeMounts []interface{}) (map[string]interface{}, error) { + container := map[string]interface{}{ + "name": containerName(service), + "image": resolveImage(service), + } + + if len(service.Command) > 0 { + container["args"] = shellCommandToSlice(service.Command) + } + + if service.Build == nil && len(service.Entrypoint) > 0 { + container["command"] = shellCommandToSlice(service.Entrypoint) + } + + if service.Environment != nil { + env := containerEnv(service.Environment) + if len(env) > 0 { + container["env"] = env + } + } + + if service.HealthCheck != nil { + livenessProbe, err := containerLivenessProbe(service.HealthCheck) + if err != nil { + return nil, err + } + if livenessProbe != nil { + container["livenessProbe"] = livenessProbe + } + } + + if len(volumeMounts) > 0 { + container["volumeMounts"] = 
volumeMounts + } + + return container, nil +} + +func containerEnv(env composetypes.MappingWithEquals) []interface{} { + envs := []interface{}{} + keys := []string{} + for name := range env { + keys = append(keys, name) + } + sort.Strings(keys) + + for _, name := range keys { + value := env[name] + envs = append(envs, map[string]interface{}{ + "name": name, + "value": *value, + }) + } + return envs +} + +func containerName(service composetypes.ServiceConfig) string { + if service.ContainerName != "" { + return formatName(service.ContainerName) + } + return fmt.Sprintf("%s-container", formatName(service.Name)) +} + +func containerLivenessProbe(health *composetypes.HealthCheckConfig) (map[string]interface{}, error) { + if len(health.Test) == 0 { + return nil, nil + } + + var command []interface{} + testKind := health.Test[0] + switch testKind { + case "NONE": + return nil, nil + case "CMD": + for _, test := range health.Test[1:] { + command = append(command, test) + } + case "CMD-SHELL": + command = append(command, "sh") + command = append(command, "-c") + command = append(command, health.Test[1]) + default: + command = append(command, health.Test[0:]) + } + + livenessProbe := map[string]interface{}{ + "exec": map[string]interface{}{ + "command": command, + }, + } + + if health.Retries != nil { + livenessProbe["failureThreshold"] = int(*health.Retries) + } + + if health.Interval != nil { + period, err := time.ParseDuration(health.Interval.String()) + if err != nil { + return nil, err + } + livenessProbe["periodSeconds"] = int(period.Seconds()) + } + + if health.StartPeriod != nil { + initialDelay, err := time.ParseDuration(health.Interval.String()) + if err != nil { + return nil, err + } + livenessProbe["initialDelaySeconds"] = int(initialDelay.Seconds()) + } + + return livenessProbe, nil +} + +func shellCommandToSlice(command composetypes.ShellCommand) []interface{} { + var slice []interface{} + for _, item := range command { + slice = append(slice, item) + } + 
return slice +} diff --git a/pkg/devspace/compose/dev.go b/pkg/devspace/compose/dev.go new file mode 100644 index 0000000000..1c9ea1ecbc --- /dev/null +++ b/pkg/devspace/compose/dev.go @@ -0,0 +1,95 @@ +package compose + +import ( + "fmt" + "os" + "path/filepath" + "strings" + + composetypes "github.com/compose-spec/compose-go/types" + "github.com/loft-sh/devspace/pkg/devspace/config/versions/latest" +) + +func (cb *configBuilder) AddDev(service composetypes.ServiceConfig) error { + var dev *latest.DevPod + + devPorts := []*latest.PortMapping{} + for _, port := range service.Ports { + portMapping := &latest.PortMapping{} + + if port.Published == 0 { + cb.log.Warnf("Unassigned port ranges are not supported: %s", port.Target) + continue + } + + if port.Published != port.Target { + portMapping.Port = fmt.Sprint(port.Published) + ":" + fmt.Sprint(port.Target) + } else { + portMapping.Port = fmt.Sprint(port.Published) + } + + if port.HostIP != "" { + portMapping.BindAddress = port.HostIP + } + + devPorts = append(devPorts, portMapping) + } + + for _, expose := range service.Expose { + devPorts = append(devPorts, &latest.PortMapping{ + Port: expose, + }) + } + + syncConfigs := []*latest.SyncConfig{} + for _, volume := range service.Volumes { + if volume.Type == composetypes.VolumeTypeBind { + sync := &latest.SyncConfig{ + Path: strings.Join([]string{resolveLocalPath(volume), volume.Target}, ":"), + StartContainer: true, + } + + _, err := os.Stat(filepath.Join(cb.workingDir, volume.Source, DockerIgnorePath)) + if err == nil { + sync.ExcludeFile = DockerIgnorePath + } + + syncConfigs = append(syncConfigs, sync) + } + } + + if len(devPorts) > 0 || len(syncConfigs) > 0 { + dev = &latest.DevPod{ + LabelSelector: labelSelector(service.Name), + } + } + + if len(devPorts) > 0 { + dev.Ports = devPorts + } + + if len(syncConfigs) > 0 { + dev.Sync = syncConfigs + dev.Command = service.Entrypoint + } + + if dev != nil { + if cb.config.Dev == nil { + cb.config.Dev = 
map[string]*latest.DevPod{} + } + + devName := formatName(service.Name) + cb.config.Dev[devName] = dev + } + + return nil +} + +func resolveLocalPath(volume composetypes.ServiceVolumeConfig) string { + localSubPath := volume.Source + + if strings.HasPrefix(localSubPath, "~") { + localSubPath = fmt.Sprintf(`${devspace.userHome}/%s`, strings.TrimLeft(localSubPath, "~/")) + } + return localSubPath +} diff --git a/pkg/devspace/compose/image.go b/pkg/devspace/compose/image.go new file mode 100644 index 0000000000..98f26188b3 --- /dev/null +++ b/pkg/devspace/compose/image.go @@ -0,0 +1,70 @@ +package compose + +import ( + "path/filepath" + + composetypes "github.com/compose-spec/compose-go/types" + "github.com/loft-sh/devspace/pkg/devspace/config/versions/latest" +) + +func (cb *configBuilder) AddImage(service composetypes.ServiceConfig) error { + build := service.Build + if build == nil { + cb.config.Images = nil + return nil + } + + context, err := filepath.Rel(cb.workingDir, filepath.Join(cb.workingDir, build.Context)) + if err != nil { + return err + } + context = filepath.ToSlash(context) + if context == "." 
{ + context = "" + } + + dockerfile, err := filepath.Rel(cb.workingDir, filepath.Join(cb.workingDir, build.Context, build.Dockerfile)) + if err != nil { + return err + } + + image := &latest.Image{ + Image: resolveImage(service), + Context: context, + Dockerfile: filepath.ToSlash(dockerfile), + } + + if build.Args != nil { + image.BuildArgs = build.Args + } + + if build.Target != "" { + image.Target = build.Target + } + + if build.Network != "" { + image.Network = build.Network + } + + if len(service.Entrypoint) > 0 { + image.Entrypoint = service.Entrypoint + } + + if cb.config.Images == nil { + + cb.config.Images = map[string]*latest.Image{} + } + + imageName := formatName(service.Name) + cb.config.Images[imageName] = image + + return nil +} + +func resolveImage(service composetypes.ServiceConfig) string { + image := service.Name + if service.Image != "" { + image = service.Image + } + return image +} diff --git a/pkg/devspace/compose/loader.go b/pkg/devspace/compose/loader.go index bfc5275da4..6540e7ae2b 100644 --- a/pkg/devspace/compose/loader.go +++ b/pkg/devspace/compose/loader.go @@ -1,15 +1,9 @@ package compose import ( - "fmt" "io/ioutil" "os" "path/filepath" - "regexp" - "sort" - "strconv" - "strings" - "time" composeloader "github.com/compose-spec/compose-go/loader" composetypes "github.com/compose-spec/compose-go/types" @@ -17,7 +11,6 @@ import ( "github.com/loft-sh/devspace/pkg/devspace/config/versions/latest" "github.com/loft-sh/devspace/pkg/util/log" "gopkg.in/yaml.v3" - v1 "k8s.io/api/core/v1" ) var ( @@ -27,16 +20,6 @@ var ( UploadVolumesContainerName = "upload-volumes" ) -type ConfigLoader interface { - Load(log log.Logger) (*latest.Config, error) - - Save(config *latest.Config) error -} - -type configLoader struct { - composePath string -} - func GetDockerComposePath() string { for _, composePath := range DockerComposePaths { _, err := os.Stat(composePath) @@ -47,16 +30,29 @@ func GetDockerComposePath() string { return "" } -func 
NewDockerComposeLoader(composePath string) ConfigLoader { - return &configLoader{ +type ComposeManager interface { + Load(log log.Logger) error + Config(path string) *latest.Config + Configs() map[string]*latest.Config + Save() error +} + +type composeManager struct { + composePath string + configs map[string]*latest.Config +} + +func NewComposeManager(composePath string) ComposeManager { + return &composeManager{ composePath: composePath, + configs: map[string]*latest.Config{}, } } -func (cl *configLoader) Load(log log.Logger) (*latest.Config, error) { - composeFile, err := ioutil.ReadFile(cl.composePath) +func (cm *composeManager) Load(log log.Logger) error { + composeFile, err := ioutil.ReadFile(cm.composePath) if err != nil { - return nil, err + return err } dockerCompose, err := composeloader.Load(composetypes.ConfigDetails{ @@ -67,660 +63,103 @@ func (cl *configLoader) Load(log log.Logger) (*latest.Config, error) { }, }) if err != nil { - return nil, err + return err } - config := latest.New().(*latest.Config) - config.Name = dockerCompose.Name - if config.Name == "" { - config.Name = "docker-compose" - } - cwd, err := os.Getwd() + dependentsMap, err := calculateDependentsMap(dockerCompose) if err != nil { - return nil, err + return err } - var images map[string]*latest.Image - var deployments map[string]*latest.DeploymentConfig - var dev map[string]*latest.DevPod - var pipelines map[string]*latest.Pipeline - baseDir := filepath.Dir(cl.composePath) + builders := map[string]ConfigBuilder{} + err = dockerCompose.WithServices(nil, func(service composetypes.ServiceConfig) error { + configKey := constants.DefaultConfigPath + configName := "docker-compose" - if len(dockerCompose.Networks) > 0 { - log.Warn("networks are not supported") - } + isDependency := dependentsMap[service.Name] != nil + if isDependency { + configKey = "devspace-" + service.Name + ".yaml" + configName = service.Name + } + + builder := builders[configKey] + if builder == nil { + workingDir := 
filepath.Dir(cm.composePath) + builder = NewConfigBuilder(workingDir, log) + builders[configKey] = builder + } - // dependentsMap, err := calculateDependentsMap(dockerCompose) - // if err != nil { - // return nil, err - // } + builder.SetName(configName) - err = dockerCompose.WithServices(nil, func(service composetypes.ServiceConfig) error { - imageConfig, err := imageConfig(cwd, service) + err := builder.AddImage(service) if err != nil { return err } - if imageConfig != nil { - if images == nil { - images = map[string]*latest.Image{} - } - images[service.Name] = imageConfig - } - deploymentName := formatName(service.Name) - deploymentConfig, err := cl.deploymentConfig(service, dockerCompose.Volumes, log) + err = builder.AddDeployment(*dockerCompose, service) if err != nil { return err } - if deployments == nil { - deployments = map[string]*latest.DeploymentConfig{} - } - deployments[deploymentName] = deploymentConfig - devConfig, err := addDevConfig(service, baseDir, log) + err = builder.AddDev(service) if err != nil { return err } - if devConfig != nil { - if dev == nil { - dev = map[string]*latest.DevPod{} - } - dev[service.Name] = devConfig - } - return nil - }) - if err != nil { - return nil, err - } - - for secretName, secret := range dockerCompose.Secrets { - if pipelines == nil { - pipelines = map[string]*latest.Pipeline{} + err = builder.AddSecret(*dockerCompose, service) + if err != nil { + return err } - devSecretStep, err := createSecretPipeline(secretName, cwd, secret) + err = builder.AddDependencies(*dockerCompose, service) if err != nil { - return nil, err + return err } - pipelines["dev"] = devSecretStep - pipelines["purge"] = deleteSecretPipeline(secretName) - } - - config.Images = images - config.Deployments = deployments - config.Dev = dev - config.Pipelines = pipelines - // config.Hooks = hooks - - return config, nil -} - -func (d *configLoader) Save(config *latest.Config) error { - // Convert to string - configYaml, err := yaml.Marshal(config) 
+ return nil + }) if err != nil { return err } - // Path to save the configuration to - err = ioutil.WriteFile(constants.DefaultConfigPath, configYaml, os.ModePerm) - if err != nil { - return err + for path, builder := range builders { + cm.configs[path] = builder.Config() } return nil } -func addDevConfig(service composetypes.ServiceConfig, baseDir string, log log.Logger) (*latest.DevPod, error) { - var dev *latest.DevPod - - devPorts := []*latest.PortMapping{} - for _, port := range service.Ports { - portMapping := &latest.PortMapping{} - - if port.Published == 0 { - log.Warnf("Unassigned port ranges are not supported: %s", port.Target) - continue - } - - if port.Published != port.Target { - portMapping.Port = fmt.Sprint(port.Published) + ":" + fmt.Sprint(port.Target) - } else { - portMapping.Port = fmt.Sprint(port.Published) - } - - if port.HostIP != "" { - portMapping.BindAddress = port.HostIP - } - - devPorts = append(devPorts, portMapping) - } - - for _, expose := range service.Expose { - devPorts = append(devPorts, &latest.PortMapping{ - Port: expose, - }) - } - - syncConfigs := []*latest.SyncConfig{} - for _, volume := range service.Volumes { - if volume.Type == composetypes.VolumeTypeBind { - sync := &latest.SyncConfig{ - Path: strings.Join([]string{resolveLocalPath(volume), volume.Target}, ":"), - StartContainer: true, - } - - _, err := os.Stat(filepath.Join(baseDir, volume.Source, DockerIgnorePath)) - if err == nil { - sync.ExcludeFile = DockerIgnorePath - } - - syncConfigs = append(syncConfigs, sync) - } - } - - if len(devPorts) > 0 || len(syncConfigs) > 0 { - dev = &latest.DevPod{ - LabelSelector: labelSelector(service.Name), - } - } - - if len(devPorts) > 0 { - dev.Ports = devPorts - } - - if len(syncConfigs) > 0 { - dev.Sync = syncConfigs - dev.Command = service.Entrypoint - } - - return dev, nil -} - -func imageConfig(cwd string, service composetypes.ServiceConfig) (*latest.Image, error) { - build := service.Build - if build == nil { - return nil, 
nil - } - - context, err := filepath.Rel(cwd, filepath.Join(cwd, build.Context)) - if err != nil { - return nil, err - } - context = filepath.ToSlash(context) - if context == "." { - context = "" - } - - dockerfile, err := filepath.Rel(cwd, filepath.Join(cwd, build.Context, build.Dockerfile)) - if err != nil { - return nil, err - } - - image := &latest.Image{ - Image: resolveImage(service), - Context: context, - Dockerfile: filepath.ToSlash(dockerfile), - } - - if build.Args != nil { - image.BuildArgs = build.Args - } - - if build.Target != "" { - image.Target = build.Target - } - - if build.Network != "" { - image.Network = build.Network - } - - // if hasBuildOptions { - // image.Build = &latest.BuildConfig{ - // Docker: &latest.DockerConfig{ - // Options: buildOptions, - // }, - // } - // } - - if len(service.Entrypoint) > 0 { - image.Entrypoint = service.Entrypoint - } - - return image, nil -} - -func createSecretPipeline(name string, cwd string, secret composetypes.SecretConfig) (*latest.Pipeline, error) { - file, err := filepath.Rel(cwd, filepath.Join(cwd, secret.File)) - if err != nil { - return nil, err - } - - return &latest.Pipeline{ - Run: fmt.Sprintf(`kubectl create secret generic %s --namespace=${devspace.namespace} --dry-run=client --from-file=%s=%s -o yaml | kubectl apply -f - -run_default_pipeline dev`, name, name, filepath.ToSlash(file)), - }, nil -} - -func deleteSecretPipeline(name string) *latest.Pipeline { - return &latest.Pipeline{ - Run: fmt.Sprintf(`run_default_pipeline purge -kubectl delete secret %s --namespace=${devspace.namespace} --ignore-not-found`, name), - } -} - -func (cl *configLoader) deploymentConfig(service composetypes.ServiceConfig, composeVolumes map[string]composetypes.VolumeConfig, log log.Logger) (*latest.DeploymentConfig, error) { - values := map[string]interface{}{} - - volumes, volumeMounts, _ := volumesConfig(service, composeVolumes, log) - if len(volumes) > 0 { - values["volumes"] = volumes - } - - container, err := 
containerConfig(service, volumeMounts) - if err != nil { - return nil, err - } - values["containers"] = []interface{}{container} - - if service.Restart != "" { - restartPolicy := string(v1.RestartPolicyNever) - switch service.Restart { - case "always": - restartPolicy = string(v1.RestartPolicyAlways) - case "on-failure": - restartPolicy = string(v1.RestartPolicyOnFailure) - } - values["restartPolicy"] = restartPolicy - } - - ports := []interface{}{} - if len(service.Ports) > 0 { - for _, port := range service.Ports { - var protocol string - switch port.Protocol { - case "tcp": - protocol = string(v1.ProtocolTCP) - case "udp": - protocol = string(v1.ProtocolUDP) - default: - return nil, fmt.Errorf("invalid protocol %s", port.Protocol) - } - - if port.Published == 0 { - log.Warnf("Unassigned port ranges are not supported: %s", port.Target) - continue - } - - ports = append(ports, map[string]interface{}{ - "port": int(port.Published), - "containerPort": int(port.Target), - "protocol": protocol, - }) - } - } - - if len(service.Expose) > 0 { - for _, port := range service.Expose { - intPort, err := strconv.Atoi(port) - if err != nil { - return nil, fmt.Errorf("expected integer for port number: %s", err.Error()) - } - ports = append(ports, map[string]interface{}{ - "port": intPort, - }) - } - } - - if len(ports) > 0 { - values["service"] = map[string]interface{}{ - "ports": ports, - } - } - - if len(service.ExtraHosts) > 0 { - hostsMap := map[string][]interface{}{} - for _, host := range service.ExtraHosts { - hostTokens := strings.Split(host, ":") - hostName := hostTokens[0] - hostIP := hostTokens[1] - hostsMap[hostIP] = append(hostsMap[hostIP], hostName) - } - - hostAliases := []interface{}{} - for ip, hosts := range hostsMap { - hostAliases = append(hostAliases, map[string]interface{}{ - "ip": ip, - "hostnames": hosts, - }) - } - - values["hostAliases"] = hostAliases - } - - return &latest.DeploymentConfig{ - Helm: &latest.HelmConfig{ - Values: values, - }, - }, nil 
-} - -func volumesConfig( - service composetypes.ServiceConfig, - composeVolumes map[string]composetypes.VolumeConfig, - log log.Logger, -) (volumes []interface{}, volumeMounts []interface{}, bindVolumeMounts []interface{}) { - for _, secret := range service.Secrets { - volume := createSecretVolume(secret) - volumes = append(volumes, volume) - - volumeMount := createSecretVolumeMount(secret) - volumeMounts = append(volumeMounts, volumeMount) - } - - var volumeVolumes []composetypes.ServiceVolumeConfig - var bindVolumes []composetypes.ServiceVolumeConfig - var tmpfsVolumes []composetypes.ServiceVolumeConfig - for _, serviceVolume := range service.Volumes { - switch serviceVolume.Type { - case composetypes.VolumeTypeBind: - bindVolumes = append(bindVolumes, serviceVolume) - case composetypes.VolumeTypeTmpfs: - tmpfsVolumes = append(tmpfsVolumes, serviceVolume) - case composetypes.VolumeTypeVolume: - volumeVolumes = append(volumeVolumes, serviceVolume) - default: - log.Warnf("%s volumes are not supported", serviceVolume.Type) - } - } - - volumeMap := map[string]interface{}{} - for idx, volumeVolume := range volumeVolumes { - volumeName := resolveServiceVolumeName(service, volumeVolume, idx+1) - _, ok := volumeMap[volumeName] - if !ok { - volume := createVolume(volumeName, DefaultVolumeSize) - volumes = append(volumes, volume) - volumeMap[volumeName] = volume - } - - volumeMount := createSharedVolumeMount(volumeName, volumeVolume) - volumeMounts = append(volumeMounts, volumeMount) - } - - for _, tmpfsVolume := range tmpfsVolumes { - volumeName := resolveServiceVolumeName(service, tmpfsVolume, len(volumes)) - volume := createEmptyDirVolume(volumeName, tmpfsVolume) - volumes = append(volumes, volume) - - volumeMount := createServiceVolumeMount(volumeName, tmpfsVolume) - volumeMounts = append(volumeMounts, volumeMount) - } - - for idx, bindVolume := range bindVolumes { - volumeName := fmt.Sprintf("volume-%d", idx+1) - volume := createEmptyDirVolume(volumeName, bindVolume) 
- volumes = append(volumes, volume) - - volumeMount := createServiceVolumeMount(volumeName, bindVolume) - volumeMounts = append(volumeMounts, volumeMount) - - bindVolumeMount := createInitVolumeMount(volumeName, bindVolume) - bindVolumeMounts = append(bindVolumeMounts, bindVolumeMount) - } - - return volumes, volumeMounts, bindVolumeMounts +func (cm *composeManager) Config(path string) *latest.Config { + return cm.configs[path] } -func containerConfig(service composetypes.ServiceConfig, volumeMounts []interface{}) (map[string]interface{}, error) { - container := map[string]interface{}{ - "name": resolveContainerName(service), - "image": resolveImage(service), - } - - if len(service.Command) > 0 { - container["args"] = shellCommandToSlice(service.Command) - } - - if !hasBuild(service) && len(service.Entrypoint) > 0 { - container["command"] = shellCommandToSlice(service.Entrypoint) - } - - if service.Environment != nil { - env := containerEnv(service.Environment) - if len(env) > 0 { - container["env"] = env - } - } - - if service.HealthCheck != nil { - livenessProbe, err := containerLivenessProbe(service.HealthCheck) - if err != nil { - return nil, err - } - if livenessProbe != nil { - container["livenessProbe"] = livenessProbe - } - } - - if len(volumeMounts) > 0 { - container["volumeMounts"] = volumeMounts - } - - return container, nil +func (cm *composeManager) Configs() map[string]*latest.Config { + return cm.configs } -func containerEnv(env composetypes.MappingWithEquals) []interface{} { - envs := []interface{}{} - keys := []string{} - for name := range env { - keys = append(keys, name) - } - sort.Strings(keys) - - for _, name := range keys { - value := env[name] - envs = append(envs, map[string]interface{}{ - "name": name, - "value": *value, - }) - } - return envs -} - -func containerLivenessProbe(health *composetypes.HealthCheckConfig) (map[string]interface{}, error) { - if len(health.Test) == 0 { - return nil, nil - } - - var command []interface{} - testKind 
:= health.Test[0] - switch testKind { - case "NONE": - return nil, nil - case "CMD": - for _, test := range health.Test[1:] { - command = append(command, test) - } - case "CMD-SHELL": - command = append(command, "sh") - command = append(command, "-c") - command = append(command, health.Test[1]) - default: - command = append(command, health.Test[0:]) - } - - livenessProbe := map[string]interface{}{ - "exec": map[string]interface{}{ - "command": command, - }, - } - - if health.Retries != nil { - livenessProbe["failureThreshold"] = int(*health.Retries) - } - - if health.Interval != nil { - period, err := time.ParseDuration(health.Interval.String()) +func (cm *composeManager) Save() error { + for path, config := range cm.configs { + configYaml, err := yaml.Marshal(config) if err != nil { - return nil, err + return err } - livenessProbe["periodSeconds"] = int(period.Seconds()) - } - if health.StartPeriod != nil { - initialDelay, err := time.ParseDuration(health.Interval.String()) + err = ioutil.WriteFile(path, configYaml, os.ModePerm) if err != nil { - return nil, err + return err } - livenessProbe["initialDelaySeconds"] = int(initialDelay.Seconds()) - } - - return livenessProbe, nil -} - -func createEmptyDirVolume(volumeName string, volume composetypes.ServiceVolumeConfig) interface{} { - // create an emptyDir volume - emptyDir := map[string]interface{}{} - if volume.Tmpfs != nil { - emptyDir["sizeLimit"] = fmt.Sprintf("%d", volume.Tmpfs.Size) - } - return map[string]interface{}{ - "name": volumeName, - "emptyDir": emptyDir, - } -} - -func createSecretVolume(secret composetypes.ServiceSecretConfig) interface{} { - return map[string]interface{}{ - "name": secret.Source, - "secret": map[string]interface{}{ - "secretName": secret.Source, - }, - } -} - -func createSecretVolumeMount(secret composetypes.ServiceSecretConfig) interface{} { - target := secret.Source - if secret.Target != "" { - target = secret.Target - } - return map[string]interface{}{ - "containerPath": 
fmt.Sprintf("/run/secrets/%s", target), - "volume": map[string]interface{}{ - "name": secret.Source, - "subPath": target, - "readOnly": true, - }, - } -} - -func createSharedVolumeMount(volumeName string, volume composetypes.ServiceVolumeConfig) interface{} { - volumeConfig := map[string]interface{}{ - "name": volumeName, - "shared": true, - } - - if volume.ReadOnly { - volumeConfig["readOnly"] = true - } - - return map[string]interface{}{ - "containerPath": volume.Target, - "volume": volumeConfig, - } -} - -func createServiceVolumeMount(volumeName string, volume composetypes.ServiceVolumeConfig) interface{} { - return map[string]interface{}{ - "containerPath": volume.Target, - "volume": map[string]interface{}{ - "name": volumeName, - "readOnly": volume.ReadOnly, - }, - } -} - -func createInitVolumeMount(volumeName string, volume composetypes.ServiceVolumeConfig) interface{} { - return map[string]interface{}{ - "containerPath": volume.Target, - "volume": map[string]interface{}{ - "name": volumeName, - "readOnly": false, - }, - } -} - -func createVolume(name string, size string) interface{} { - return map[string]interface{}{ - "name": name, - "size": size, } -} - -func formatName(name string) string { - return regexp.MustCompile(`[\._]`).ReplaceAllString(name, "-") -} -func resolveContainerName(service composetypes.ServiceConfig) string { - if service.ContainerName != "" { - return formatName(service.ContainerName) - } - return fmt.Sprintf("%s-container", formatName(service.Name)) -} - -func resolveImage(service composetypes.ServiceConfig) string { - image := service.Name - if service.Image != "" { - image = service.Image - } - return image -} - -func resolveLocalPath(volume composetypes.ServiceVolumeConfig) string { - localSubPath := volume.Source - - if strings.HasPrefix(localSubPath, "~") { - localSubPath = fmt.Sprintf(`${devspace.userHome}/%s`, strings.TrimLeft(localSubPath, "~/")) - } - return localSubPath -} - -func resolveServiceVolumeName(service 
composetypes.ServiceConfig, volume composetypes.ServiceVolumeConfig, idx int) string { - volumeName := volume.Source - if volumeName == "" { - volumeName = fmt.Sprintf("%s-%d", formatName(service.Name), idx) - } - return volumeName -} - -// func calculateDependentsMap(dockerCompose *composetypes.Project) (map[string][]string, error) { -// tree := map[string][]string{} -// err := dockerCompose.WithServices(nil, func(service composetypes.ServiceConfig) error { -// for _, name := range service.GetDependencies() { -// tree[name] = append(tree[name], service.Name) -// } -// return nil -// }) -// return tree, err -// } - -func shellCommandToSlice(command composetypes.ShellCommand) []interface{} { - var slice []interface{} - for _, item := range command { - slice = append(slice, item) - } - return slice -} - -func labelSelector(serviceName string) map[string]string { - return map[string]string{ - "app.kubernetes.io/component": serviceName, - } + return nil } -func hasBuild(service composetypes.ServiceConfig) bool { - return service.Build != nil +func calculateDependentsMap(dockerCompose *composetypes.Project) (map[string][]string, error) { + tree := map[string][]string{} + err := dockerCompose.WithServices(nil, func(service composetypes.ServiceConfig) error { + for _, name := range service.GetDependencies() { + tree[name] = append(tree[name], service.Name) + } + return nil + }) + return tree, err } diff --git a/pkg/devspace/compose/loader_test.go b/pkg/devspace/compose/loader_test.go index b3bffb637f..eebdd45c47 100644 --- a/pkg/devspace/compose/loader_test.go +++ b/pkg/devspace/compose/loader_test.go @@ -28,11 +28,25 @@ func TestLoad(t *testing.T) { t.Error("No test cases found. 
Add some!") } + focused := []string{} for _, dir := range dirs { - if !strings.HasPrefix(dir.Name(), "x_") { - testLoad(dir.Name(), t) + if strings.HasPrefix(dir.Name(), "f_") { + focused = append(focused, dir.Name()) } } + + if len(focused) > 0 { + for _, focus := range focused { + testLoad(focus, t) + } + } else { + for _, dir := range dirs { + if !strings.HasPrefix(dir.Name(), "x_") { + testLoad(dir.Name(), t) + } + } + } + } func testLoad(dir string, t *testing.T) { @@ -53,9 +67,10 @@ func testLoad(dir string, t *testing.T) { }() dockerComposePath := GetDockerComposePath() - loader := NewDockerComposeLoader(dockerComposePath) + loader := NewComposeManager(dockerComposePath) + + actualError := loader.Load(log.Discard) - actualConfig, actualError := loader.Load(log.Discard) if actualError != nil { expectedError, err := ioutil.ReadFile("error.txt") if err != nil { @@ -65,107 +80,110 @@ func testLoad(dir string, t *testing.T) { assert.Equal(t, string(expectedError), actualError.Error(), "Expected error:\n%s\nbut got:\n%s\n in testCase %s", string(expectedError), actualError.Error(), dir) } - data, err := ioutil.ReadFile("expected.yaml") - if err != nil { - t.Errorf("Please create the expected DevSpace configuration by creating a expected.yaml in the testdata/%s folder", dir) - } + for path, actualConfig := range loader.Configs() { + data, err := ioutil.ReadFile(path) + if err != nil { + t.Errorf("Please create the expected DevSpace configuration by creating a %s in the testdata/%s folder", path, dir) + } - expectedConfig := &latest.Config{} - err = yaml.Unmarshal(data, expectedConfig) - if err != nil { - t.Errorf("Error unmarshaling the expected configuration: %s", err.Error()) - } + expectedConfig := &latest.Config{} + err = yaml.Unmarshal(data, expectedConfig) + if err != nil { + t.Errorf("Error unmarshaling the expected configuration: %s", err.Error()) + } - assert.Check( - t, - cmp.DeepEqual(expectedConfig.Deployments, actualConfig.Deployments), - "deployment 
properties did not match in test case %s", - dir, - ) - // actualDeployments := actualConfig.Deployments - actualConfig.Deployments = nil - expectedConfig.Deployments = nil - - assert.Check( - t, - cmp.DeepEqual(toWaitHookMap(expectedConfig.Hooks), toWaitHookMap(actualConfig.Hooks)), - "hook properties did not match in test case %s", - dir, - ) - actualHooks := actualConfig.Hooks - actualConfig.Hooks = nil - expectedConfig.Hooks = nil - - assert.Check( - t, - cmp.DeepEqual(expectedConfig, actualConfig), - "config properties did not match in test case %s", - dir, - ) - - // Load docker compose to determine dependency ordering - content, err := ioutil.ReadFile(dockerComposePath) - if err != nil { - t.Errorf("Unexpected error occurred loading the docker-compose.yaml: %s", err.Error()) - } - dockerCompose, err := composeloader.Load(composetypes.ConfigDetails{ - ConfigFiles: []composetypes.ConfigFile{ - { - Content: content, + assert.Check( + t, + cmp.DeepEqual(expectedConfig.Deployments, actualConfig.Deployments), + "deployment properties did not match in test case %s", + dir, + ) + // actualDeployments := actualConfig.Deployments + actualConfig.Deployments = nil + expectedConfig.Deployments = nil + + assert.Check( + t, + cmp.DeepEqual(toWaitHookMap(expectedConfig.Hooks), toWaitHookMap(actualConfig.Hooks)), + "hook properties did not match in test case %s", + dir, + ) + actualHooks := actualConfig.Hooks + actualConfig.Hooks = nil + expectedConfig.Hooks = nil + + assert.Check( + t, + cmp.DeepEqual(expectedConfig, actualConfig), + "config properties did not match in test case %s", + dir, + ) + + // Load docker compose to determine dependency ordering + content, err := ioutil.ReadFile(dockerComposePath) + if err != nil { + t.Errorf("Unexpected error occurred loading the docker-compose.yaml: %s", err.Error()) + } + dockerCompose, err := composeloader.Load(composetypes.ConfigDetails{ + ConfigFiles: []composetypes.ConfigFile{ + { + Content: content, + }, }, - }, - }) - if 
err != nil { - t.Error(err) - } - - // Determine which deployments should have wait hooks - expectedWaitHooks := map[string]bool{} - err = dockerCompose.WithServices(nil, func(service composetypes.ServiceConfig) error { - for _, dep := range service.GetDependencies() { - expectedWaitHooks[dep] = true + }) + if err != nil { + t.Error(err) } - return nil - }) - if err != nil { - t.Error(err) - } - // Iterate services in dependency order - err = dockerCompose.WithServices(nil, func(service composetypes.ServiceConfig) error { - waitHookIdx := getWaitHookIndex(service.Name, actualHooks) - - // for _, dep := range service.GetDependencies() { - // // Check deployments order - // assert.Check(t, getDeploymentIndex(dep, actualDeployments) < getDeploymentIndex(service.Name, actualDeployments), "%s deployment should come after %s for test case %s", service.Name, dep, dir) - - // // Check for wait hook order - // _, ok := expectedWaitHooks[service.Name] - // if ok { - // assert.Check(t, getWaitHookIndex(dep, actualHooks) < waitHookIdx, "%s wait hook should come after %s", service.Name, dep) - // } - // } - - uploadDoneHookIdx := getUploadDoneHookIndex(service.Name, actualHooks) - if uploadDoneHookIdx != -1 { - // Check that upload done hooks come before wait hooks - if waitHookIdx != -1 { - assert.Check(t, uploadDoneHookIdx < waitHookIdx, "%s wait hook should come after upload done hooks for test case %s", service.Name, dir) + // Determine which deployments should have wait hooks + expectedWaitHooks := map[string]bool{} + err = dockerCompose.WithServices(nil, func(service composetypes.ServiceConfig) error { + for _, dep := range service.GetDependencies() { + expectedWaitHooks[dep] = true } + return nil + }) + if err != nil { + t.Error(err) + } + + // Iterate services in dependency order + err = dockerCompose.WithServices(nil, func(service composetypes.ServiceConfig) error { + waitHookIdx := getWaitHookIndex(service.Name, actualHooks) + + // for _, dep := range 
service.GetDependencies() { + // // Check deployments order + // assert.Check(t, getDeploymentIndex(dep, actualDeployments) < getDeploymentIndex(service.Name, actualDeployments), "%s deployment should come after %s for test case %s", service.Name, dep, dir) + + // // Check for wait hook order + // _, ok := expectedWaitHooks[service.Name] + // if ok { + // assert.Check(t, getWaitHookIndex(dep, actualHooks) < waitHookIdx, "%s wait hook should come after %s", service.Name, dep) + // } + // } + + uploadDoneHookIdx := getUploadDoneHookIndex(service.Name, actualHooks) + if uploadDoneHookIdx != -1 { + // Check that upload done hooks come before wait hooks + if waitHookIdx != -1 { + assert.Check(t, uploadDoneHookIdx < waitHookIdx, "%s wait hook should come after upload done hooks for test case %s", service.Name, dir) + } - // Check that upload hooks come before upload done hooks - for idx, hook := range actualHooks { - if hook.Upload != nil && hook.Container.ContainerName == UploadVolumesContainerName && hook.Container.LabelSelector != nil && hook.Container.LabelSelector["app.kubernetes.io/component"] == service.Name { - assert.Check(t, idx < uploadDoneHookIdx, "%s upload done hook should come after upload hooks for test case %s", service.Name, dir) + // Check that upload hooks come before upload done hooks + for idx, hook := range actualHooks { + if hook.Upload != nil && hook.Container.ContainerName == UploadVolumesContainerName && hook.Container.LabelSelector != nil && hook.Container.LabelSelector["app.kubernetes.io/component"] == service.Name { + assert.Check(t, idx < uploadDoneHookIdx, "%s upload done hook should come after upload hooks for test case %s", service.Name, dir) + } } } - } - return nil - }) - if err != nil { - t.Error(err) + return nil + }) + if err != nil { + t.Error(err) + } } + } func toDeploymentMap(deployments []*latest.DeploymentConfig) map[string]latest.DeploymentConfig { diff --git a/pkg/devspace/compose/secret.go b/pkg/devspace/compose/secret.go 
new file mode 100644 index 0000000000..e9810f1d43 --- /dev/null +++ b/pkg/devspace/compose/secret.go @@ -0,0 +1,48 @@ +package compose + +import ( + "fmt" + "path/filepath" + + composetypes "github.com/compose-spec/compose-go/types" + "github.com/loft-sh/devspace/pkg/devspace/config/versions/latest" +) + +func (cb *configBuilder) AddSecret(dockerCompose composetypes.Project, service composetypes.ServiceConfig) error { + var pipelines map[string]*latest.Pipeline + for secretName, secret := range dockerCompose.Secrets { + if pipelines == nil { + pipelines = map[string]*latest.Pipeline{} + } + + devSecretStep, err := createSecretPipeline(secretName, cb.workingDir, secret) + if err != nil { + return err + } + + pipelines["dev"] = devSecretStep + pipelines["purge"] = deleteSecretPipeline(secretName) + } + + cb.config.Pipelines = pipelines + return nil +} + +func createSecretPipeline(name string, cwd string, secret composetypes.SecretConfig) (*latest.Pipeline, error) { + file, err := filepath.Rel(cwd, filepath.Join(cwd, secret.File)) + if err != nil { + return nil, err + } + + return &latest.Pipeline{ + Run: fmt.Sprintf(`kubectl create secret generic %s --namespace=${devspace.namespace} --dry-run=client --from-file=%s=%s -o yaml | kubectl apply -f - +run_default_pipeline dev`, name, name, filepath.ToSlash(file)), + }, nil +} + +func deleteSecretPipeline(name string) *latest.Pipeline { + return &latest.Pipeline{ + Run: fmt.Sprintf(`run_default_pipeline purge +kubectl delete secret %s --namespace=${devspace.namespace} --ignore-not-found`, name), + } +} diff --git a/pkg/devspace/compose/testdata/basic/expected.yaml b/pkg/devspace/compose/testdata/basic/devspace.yaml similarity index 100% rename from pkg/devspace/compose/testdata/basic/expected.yaml rename to pkg/devspace/compose/testdata/basic/devspace.yaml diff --git a/pkg/devspace/compose/testdata/build/expected.yaml b/pkg/devspace/compose/testdata/build/devspace.yaml similarity index 100% rename from 
pkg/devspace/compose/testdata/build/expected.yaml rename to pkg/devspace/compose/testdata/build/devspace.yaml diff --git a/pkg/devspace/compose/testdata/build_args_list/expected.yaml b/pkg/devspace/compose/testdata/build_args_list/devspace.yaml similarity index 100% rename from pkg/devspace/compose/testdata/build_args_list/expected.yaml rename to pkg/devspace/compose/testdata/build_args_list/devspace.yaml diff --git a/pkg/devspace/compose/testdata/build_args_map/expected.yaml b/pkg/devspace/compose/testdata/build_args_map/devspace.yaml similarity index 100% rename from pkg/devspace/compose/testdata/build_args_map/expected.yaml rename to pkg/devspace/compose/testdata/build_args_map/devspace.yaml diff --git a/pkg/devspace/compose/testdata/build_context/expected.yaml b/pkg/devspace/compose/testdata/build_context/devspace.yaml similarity index 100% rename from pkg/devspace/compose/testdata/build_context/expected.yaml rename to pkg/devspace/compose/testdata/build_context/devspace.yaml diff --git a/pkg/devspace/compose/testdata/build_dockerfile/expected.yaml b/pkg/devspace/compose/testdata/build_dockerfile/devspace.yaml similarity index 100% rename from pkg/devspace/compose/testdata/build_dockerfile/expected.yaml rename to pkg/devspace/compose/testdata/build_dockerfile/devspace.yaml diff --git a/pkg/devspace/compose/testdata/build_entry_point/expected.yaml b/pkg/devspace/compose/testdata/build_entry_point/devspace.yaml similarity index 100% rename from pkg/devspace/compose/testdata/build_entry_point/expected.yaml rename to pkg/devspace/compose/testdata/build_entry_point/devspace.yaml diff --git a/pkg/devspace/compose/testdata/build_image/expected.yaml b/pkg/devspace/compose/testdata/build_image/devspace.yaml similarity index 100% rename from pkg/devspace/compose/testdata/build_image/expected.yaml rename to pkg/devspace/compose/testdata/build_image/devspace.yaml diff --git a/pkg/devspace/compose/testdata/build_image_tag/expected.yaml 
b/pkg/devspace/compose/testdata/build_image_tag/devspace.yaml similarity index 100% rename from pkg/devspace/compose/testdata/build_image_tag/expected.yaml rename to pkg/devspace/compose/testdata/build_image_tag/devspace.yaml diff --git a/pkg/devspace/compose/testdata/build_network/expected.yaml b/pkg/devspace/compose/testdata/build_network/devspace.yaml similarity index 100% rename from pkg/devspace/compose/testdata/build_network/expected.yaml rename to pkg/devspace/compose/testdata/build_network/devspace.yaml diff --git a/pkg/devspace/compose/testdata/build_target/expected.yaml b/pkg/devspace/compose/testdata/build_target/devspace.yaml similarity index 100% rename from pkg/devspace/compose/testdata/build_target/expected.yaml rename to pkg/devspace/compose/testdata/build_target/devspace.yaml diff --git a/pkg/devspace/compose/testdata/command/expected.yaml b/pkg/devspace/compose/testdata/command/devspace.yaml similarity index 100% rename from pkg/devspace/compose/testdata/command/expected.yaml rename to pkg/devspace/compose/testdata/command/devspace.yaml diff --git a/pkg/devspace/compose/testdata/container_name/expected.yaml b/pkg/devspace/compose/testdata/container_name/devspace.yaml similarity index 100% rename from pkg/devspace/compose/testdata/container_name/expected.yaml rename to pkg/devspace/compose/testdata/container_name/devspace.yaml diff --git a/pkg/devspace/compose/testdata/depends_on/devspace-backend.yaml b/pkg/devspace/compose/testdata/depends_on/devspace-backend.yaml new file mode 100644 index 0000000000..2e8a43098d --- /dev/null +++ b/pkg/devspace/compose/testdata/depends_on/devspace-backend.yaml @@ -0,0 +1,104 @@ +version: v2beta1 + +name: backend + +dependencies: + cache: + path: devspace-cache.yaml + db: + path: devspace-db.yaml + messaging: + path: devspace-messaging.yaml + +deployments: + backend: + helm: + values: + containers: + - name: backend-container + image: rails:latest +# deployments: +# - name: db +# helm: +# componentChart: true +# 
values: +# containers: +# - name: db-container +# image: mysql/mysql-server:8.0.19 +# - name: cache +# helm: +# componentChart: true +# values: +# containers: +# - name: cache-container +# image: redis:latest +# - name: messaging +# helm: +# componentChart: true +# values: +# containers: +# - name: messaging-container +# image: rabbitmq:latest +# - name: worker +# helm: +# componentChart: true +# values: +# containers: +# - name: worker-container +# image: rails:latest + +# - name: frontend +# helm: +# componentChart: true +# values: +# containers: +# - name: frontend-container +# image: nginx:latest +# - name: proxy +# helm: +# componentChart: true +# values: +# containers: +# - name: proxy-container +# image: haproxy:latest +# hooks: +# - events: ["after:deploy:db"] +# container: +# labelSelector: +# app.kubernetes.io/component: db +# containerName: db-container +# wait: +# running: true +# terminatedWithCode: 0 +# - events: ["after:deploy:cache"] +# container: +# labelSelector: +# app.kubernetes.io/component: cache +# containerName: cache-container +# wait: +# running: true +# terminatedWithCode: 0 +# - events: ["after:deploy:messaging"] +# container: +# labelSelector: +# app.kubernetes.io/component: messaging +# containerName: messaging-container +# wait: +# running: true +# terminatedWithCode: 0 +# - events: ["after:deploy:backend"] +# container: +# labelSelector: +# app.kubernetes.io/component: backend +# containerName: backend-container +# wait: +# running: true +# terminatedWithCode: 0 +# - events: ["after:deploy:frontend"] +# container: +# labelSelector: +# app.kubernetes.io/component: frontend +# containerName: frontend-container +# wait: +# running: true +# terminatedWithCode: 0 diff --git a/pkg/devspace/compose/testdata/depends_on/devspace-cache.yaml b/pkg/devspace/compose/testdata/depends_on/devspace-cache.yaml new file mode 100644 index 0000000000..eb6a4e425d --- /dev/null +++ b/pkg/devspace/compose/testdata/depends_on/devspace-cache.yaml @@ -0,0 
+1,101 @@ +version: v2beta1 + +name: cache + +deployments: + cache: + helm: + values: + containers: + - name: cache-container + image: redis:latest +# - name: db +# helm: +# componentChart: true +# values: +# containers: +# - name: db-container +# image: mysql/mysql-server:8.0.19 +# - name: cache +# helm: +# componentChart: true +# values: +# containers: +# - name: cache-container +# image: redis:latest +# - name: messaging +# helm: +# componentChart: true +# values: +# containers: +# - name: messaging-container +# image: rabbitmq:latest +# - name: worker +# helm: +# componentChart: true +# values: +# containers: +# - name: worker-container +# image: rails:latest +# - name: backend +# helm: +# componentChart: true +# values: +# containers: +# - name: backend-container +# image: rails:latest +# - name: frontend +# helm: +# componentChart: true +# values: +# containers: +# - name: frontend-container +# image: nginx:latest +# - name: proxy +# helm: +# componentChart: true +# values: +# containers: +# - name: proxy-container +# image: haproxy:latest +# hooks: +# - events: ["after:deploy:db"] +# container: +# labelSelector: +# app.kubernetes.io/component: db +# containerName: db-container +# wait: +# running: true +# terminatedWithCode: 0 +# - events: ["after:deploy:cache"] +# container: +# labelSelector: +# app.kubernetes.io/component: cache +# containerName: cache-container +# wait: +# running: true +# terminatedWithCode: 0 +# - events: ["after:deploy:messaging"] +# container: +# labelSelector: +# app.kubernetes.io/component: messaging +# containerName: messaging-container +# wait: +# running: true +# terminatedWithCode: 0 +# - events: ["after:deploy:backend"] +# container: +# labelSelector: +# app.kubernetes.io/component: backend +# containerName: backend-container +# wait: +# running: true +# terminatedWithCode: 0 +# - events: ["after:deploy:frontend"] +# container: +# labelSelector: +# app.kubernetes.io/component: frontend +# containerName: frontend-container +# 
wait: +# running: true +# terminatedWithCode: 0 diff --git a/pkg/devspace/compose/testdata/depends_on/devspace-db.yaml b/pkg/devspace/compose/testdata/depends_on/devspace-db.yaml new file mode 100644 index 0000000000..16b1f277ab --- /dev/null +++ b/pkg/devspace/compose/testdata/depends_on/devspace-db.yaml @@ -0,0 +1,94 @@ +version: v2beta1 + +name: db + +deployments: + db: + helm: + values: + containers: + - name: db-container + image: mysql/mysql-server:8.0.19 +# - name: cache +# helm: +# componentChart: true +# values: +# containers: +# - name: cache-container +# image: redis:latest +# - name: messaging +# helm: +# componentChart: true +# values: +# containers: +# - name: messaging-container +# image: rabbitmq:latest +# - name: worker +# helm: +# componentChart: true +# values: +# containers: +# - name: worker-container +# image: rails:latest +# - name: backend +# helm: +# componentChart: true +# values: +# containers: +# - name: backend-container +# image: rails:latest +# - name: frontend +# helm: +# componentChart: true +# values: +# containers: +# - name: frontend-container +# image: nginx:latest +# - name: proxy +# helm: +# componentChart: true +# values: +# containers: +# - name: proxy-container +# image: haproxy:latest +# hooks: +# - events: ["after:deploy:db"] +# container: +# labelSelector: +# app.kubernetes.io/component: db +# containerName: db-container +# wait: +# running: true +# terminatedWithCode: 0 +# - events: ["after:deploy:cache"] +# container: +# labelSelector: +# app.kubernetes.io/component: cache +# containerName: cache-container +# wait: +# running: true +# terminatedWithCode: 0 +# - events: ["after:deploy:messaging"] +# container: +# labelSelector: +# app.kubernetes.io/component: messaging +# containerName: messaging-container +# wait: +# running: true +# terminatedWithCode: 0 +# - events: ["after:deploy:backend"] +# container: +# labelSelector: +# app.kubernetes.io/component: backend +# containerName: backend-container +# wait: +# running: 
true +# terminatedWithCode: 0 +# - events: ["after:deploy:frontend"] +# container: +# labelSelector: +# app.kubernetes.io/component: frontend +# containerName: frontend-container +# wait: +# running: true +# terminatedWithCode: 0 diff --git a/pkg/devspace/compose/testdata/depends_on/devspace-frontend.yaml b/pkg/devspace/compose/testdata/depends_on/devspace-frontend.yaml new file mode 100644 index 0000000000..59416bd975 --- /dev/null +++ b/pkg/devspace/compose/testdata/depends_on/devspace-frontend.yaml @@ -0,0 +1,100 @@ +version: v2beta1 + +name: frontend + +dependencies: + backend: + path: devspace-backend.yaml + +deployments: + frontend: + helm: + values: + containers: + - name: frontend-container + image: nginx:latest + +# deployments: +# - name: db +# helm: +# componentChart: true +# values: +# containers: +# - name: db-container +# image: mysql/mysql-server:8.0.19 +# - name: cache +# helm: +# componentChart: true +# values: +# containers: +# - name: cache-container +# image: redis:latest +# - name: messaging +# helm: +# componentChart: true +# values: +# containers: +# - name: messaging-container +# image: rabbitmq:latest +# - name: worker +# helm: +# componentChart: true +# values: +# containers: +# - name: worker-container +# image: rails:latest +# - name: backend +# helm: +# componentChart: true +# values: +# containers: +# - name: backend-container +# image: rails:latest +# - name: proxy +# helm: +# componentChart: true +# values: +# containers: +# - name: proxy-container +# image: haproxy:latest +# hooks: +# - events: ["after:deploy:db"] +# container: +# labelSelector: +# app.kubernetes.io/component: db +# containerName: db-container +# wait: +# running: true +# terminatedWithCode: 0 +# - events: ["after:deploy:cache"] +# container: +# labelSelector: +# app.kubernetes.io/component: cache +# containerName: cache-container +# wait: +# running: true +# terminatedWithCode: 0 +# - events: ["after:deploy:messaging"] +# container: +# labelSelector: +# 
app.kubernetes.io/component: messaging +# containerName: messaging-container +# wait: +# running: true +# terminatedWithCode: 0 +# - events: ["after:deploy:backend"] +# container: +# labelSelector: +# app.kubernetes.io/component: backend +# containerName: backend-container +# wait: +# running: true +# terminatedWithCode: 0 +# - events: ["after:deploy:frontend"] +# container: +# labelSelector: +# app.kubernetes.io/component: frontend +# containerName: frontend-container +# wait: +# running: true +# terminatedWithCode: 0 diff --git a/pkg/devspace/compose/testdata/depends_on/devspace-messaging.yaml b/pkg/devspace/compose/testdata/depends_on/devspace-messaging.yaml new file mode 100644 index 0000000000..222e528a97 --- /dev/null +++ b/pkg/devspace/compose/testdata/depends_on/devspace-messaging.yaml @@ -0,0 +1,102 @@ +version: v2beta1 + +name: messaging + +deployments: + messaging: + helm: + values: + containers: + - name: messaging-container + image: rabbitmq:latest +# deployments: +# - name: db +# helm: +# componentChart: true +# values: +# containers: +# - name: db-container +# image: mysql/mysql-server:8.0.19 +# - name: cache +# helm: +# componentChart: true +# values: +# containers: +# - name: cache-container +# image: redis:latest +# - name: messaging +# helm: +# componentChart: true +# values: +# containers: +# - name: messaging-container +# image: rabbitmq:latest +# - name: worker +# helm: +# componentChart: true +# values: +# containers: +# - name: worker-container +# image: rails:latest +# - name: backend +# helm: +# componentChart: true +# values: +# containers: +# - name: backend-container +# image: rails:latest +# - name: frontend +# helm: +# componentChart: true +# values: +# containers: +# - name: frontend-container +# image: nginx:latest +# - name: proxy +# helm: +# componentChart: true +# values: +# containers: +# - name: proxy-container +# image: haproxy:latest +# hooks: +# - events: ["after:deploy:db"] +# container: +# labelSelector: +# 
app.kubernetes.io/component: db +# containerName: db-container +# wait: +# running: true +# terminatedWithCode: 0 +# - events: ["after:deploy:cache"] +# container: +# labelSelector: +# app.kubernetes.io/component: cache +# containerName: cache-container +# wait: +# running: true +# terminatedWithCode: 0 +# - events: ["after:deploy:messaging"] +# container: +# labelSelector: +# app.kubernetes.io/component: messaging +# containerName: messaging-container +# wait: +# running: true +# terminatedWithCode: 0 +# - events: ["after:deploy:backend"] +# container: +# labelSelector: +# app.kubernetes.io/component: backend +# containerName: backend-container +# wait: +# running: true +# terminatedWithCode: 0 +# - events: ["after:deploy:frontend"] +# container: +# labelSelector: +# app.kubernetes.io/component: frontend +# containerName: frontend-container +# wait: +# running: true +# terminatedWithCode: 0 diff --git a/pkg/devspace/compose/testdata/depends_on/devspace.yaml b/pkg/devspace/compose/testdata/depends_on/devspace.yaml new file mode 100644 index 0000000000..87800967bc --- /dev/null +++ b/pkg/devspace/compose/testdata/depends_on/devspace.yaml @@ -0,0 +1,112 @@ +version: v2beta1 + +name: docker-compose + +dependencies: + backend: + path: devspace-backend.yaml + db: + path: devspace-db.yaml + frontend: + path: devspace-frontend.yaml + messaging: + path: devspace-messaging.yaml + +deployments: + proxy: + helm: + componentChart: true + values: + containers: + - name: proxy-container + image: haproxy:latest + worker: + helm: + values: + containers: + - name: worker-container + image: rails:latest +# deployments: +# - name: db +# helm: +# componentChart: true +# values: +# containers: +# - name: db-container +# image: mysql/mysql-server:8.0.19 +# - name: cache +# helm: +# componentChart: true +# values: +# containers: +# - name: cache-container +# image: redis:latest +# - name: messaging +# helm: +# componentChart: true +# values: +# containers: +# - name: messaging-container 
+# image: rabbitmq:latest +# - name: worker +# helm: +# componentChart: true +# values: +# containers: +# - name: worker-container +# image: rails:latest +# - name: backend +# helm: +# componentChart: true +# values: +# containers: +# - name: backend-container +# image: rails:latest +# - name: frontend +# helm: +# componentChart: true +# values: +# containers: +# - name: frontend-container +# image: nginx:latest +# hooks: +# - events: ["after:deploy:db"] +# container: +# labelSelector: +# app.kubernetes.io/component: db +# containerName: db-container +# wait: +# running: true +# terminatedWithCode: 0 +# - events: ["after:deploy:cache"] +# container: +# labelSelector: +# app.kubernetes.io/component: cache +# containerName: cache-container +# wait: +# running: true +# terminatedWithCode: 0 +# - events: ["after:deploy:messaging"] +# container: +# labelSelector: +# app.kubernetes.io/component: messaging +# containerName: messaging-container +# wait: +# running: true +# terminatedWithCode: 0 +# - events: ["after:deploy:backend"] +# container: +# labelSelector: +# app.kubernetes.io/component: backend +# containerName: backend-container +# wait: +# running: true +# terminatedWithCode: 0 +# - events: ["after:deploy:frontend"] +# container: +# labelSelector: +# app.kubernetes.io/component: frontend +# containerName: frontend-container +# wait: +# running: true +# terminatedWithCode: 0 diff --git a/pkg/devspace/compose/testdata/x_depends_on/docker-compose.yaml b/pkg/devspace/compose/testdata/depends_on/docker-compose.yaml similarity index 100% rename from pkg/devspace/compose/testdata/x_depends_on/docker-compose.yaml rename to pkg/devspace/compose/testdata/depends_on/docker-compose.yaml diff --git a/pkg/devspace/compose/testdata/entry_point/expected.yaml b/pkg/devspace/compose/testdata/entry_point/devspace.yaml similarity index 100% rename from pkg/devspace/compose/testdata/entry_point/expected.yaml rename to pkg/devspace/compose/testdata/entry_point/devspace.yaml diff --git 
a/pkg/devspace/compose/testdata/env_file_multiple/expected.yaml b/pkg/devspace/compose/testdata/env_file_multiple/devspace.yaml similarity index 100% rename from pkg/devspace/compose/testdata/env_file_multiple/expected.yaml rename to pkg/devspace/compose/testdata/env_file_multiple/devspace.yaml diff --git a/pkg/devspace/compose/testdata/env_file_single/expected.yaml b/pkg/devspace/compose/testdata/env_file_single/devspace.yaml similarity index 100% rename from pkg/devspace/compose/testdata/env_file_single/expected.yaml rename to pkg/devspace/compose/testdata/env_file_single/devspace.yaml diff --git a/pkg/devspace/compose/testdata/environment/expected.yaml b/pkg/devspace/compose/testdata/environment/devspace.yaml similarity index 100% rename from pkg/devspace/compose/testdata/environment/expected.yaml rename to pkg/devspace/compose/testdata/environment/devspace.yaml diff --git a/pkg/devspace/compose/testdata/expose/expected.yaml b/pkg/devspace/compose/testdata/expose/devspace.yaml similarity index 100% rename from pkg/devspace/compose/testdata/expose/expected.yaml rename to pkg/devspace/compose/testdata/expose/devspace.yaml diff --git a/pkg/devspace/compose/testdata/extra_hosts_multiple/expected.yaml b/pkg/devspace/compose/testdata/extra_hosts_multiple/devspace.yaml similarity index 100% rename from pkg/devspace/compose/testdata/extra_hosts_multiple/expected.yaml rename to pkg/devspace/compose/testdata/extra_hosts_multiple/devspace.yaml diff --git a/pkg/devspace/compose/testdata/extra_hosts_single/expected.yaml b/pkg/devspace/compose/testdata/extra_hosts_single/devspace.yaml similarity index 100% rename from pkg/devspace/compose/testdata/extra_hosts_single/expected.yaml rename to pkg/devspace/compose/testdata/extra_hosts_single/devspace.yaml diff --git a/pkg/devspace/compose/testdata/healthcheck/expected.yaml b/pkg/devspace/compose/testdata/healthcheck/devspace.yaml similarity index 100% rename from pkg/devspace/compose/testdata/healthcheck/expected.yaml rename to 
pkg/devspace/compose/testdata/healthcheck/devspace.yaml diff --git a/pkg/devspace/compose/testdata/ports-long/expected.yaml b/pkg/devspace/compose/testdata/ports-long/devspace.yaml similarity index 100% rename from pkg/devspace/compose/testdata/ports-long/expected.yaml rename to pkg/devspace/compose/testdata/ports-long/devspace.yaml diff --git a/pkg/devspace/compose/testdata/ports-short/expected.yaml b/pkg/devspace/compose/testdata/ports-short/devspace.yaml similarity index 100% rename from pkg/devspace/compose/testdata/ports-short/expected.yaml rename to pkg/devspace/compose/testdata/ports-short/devspace.yaml diff --git a/pkg/devspace/compose/testdata/restart-always/expected.yaml b/pkg/devspace/compose/testdata/restart-always/devspace.yaml similarity index 100% rename from pkg/devspace/compose/testdata/restart-always/expected.yaml rename to pkg/devspace/compose/testdata/restart-always/devspace.yaml diff --git a/pkg/devspace/compose/testdata/restart-no/expected.yaml b/pkg/devspace/compose/testdata/restart-no/devspace.yaml similarity index 100% rename from pkg/devspace/compose/testdata/restart-no/expected.yaml rename to pkg/devspace/compose/testdata/restart-no/devspace.yaml diff --git a/pkg/devspace/compose/testdata/restart-on-failure/expected.yaml b/pkg/devspace/compose/testdata/restart-on-failure/devspace.yaml similarity index 100% rename from pkg/devspace/compose/testdata/restart-on-failure/expected.yaml rename to pkg/devspace/compose/testdata/restart-on-failure/devspace.yaml diff --git a/pkg/devspace/compose/testdata/restart-unless-stopped/expected.yaml b/pkg/devspace/compose/testdata/restart-unless-stopped/devspace.yaml similarity index 100% rename from pkg/devspace/compose/testdata/restart-unless-stopped/expected.yaml rename to pkg/devspace/compose/testdata/restart-unless-stopped/devspace.yaml diff --git a/pkg/devspace/compose/testdata/secret-long/expected.yaml b/pkg/devspace/compose/testdata/secret-long/devspace.yaml similarity index 100% rename from 
pkg/devspace/compose/testdata/secret-long/expected.yaml rename to pkg/devspace/compose/testdata/secret-long/devspace.yaml diff --git a/pkg/devspace/compose/testdata/secret-short/expected.yaml b/pkg/devspace/compose/testdata/secret-short/devspace.yaml similarity index 100% rename from pkg/devspace/compose/testdata/secret-short/expected.yaml rename to pkg/devspace/compose/testdata/secret-short/devspace.yaml diff --git a/pkg/devspace/compose/testdata/volumes-long/expected.yaml b/pkg/devspace/compose/testdata/volumes-long/devspace.yaml similarity index 100% rename from pkg/devspace/compose/testdata/volumes-long/expected.yaml rename to pkg/devspace/compose/testdata/volumes-long/devspace.yaml diff --git a/pkg/devspace/compose/testdata/volumes-short/expected.yaml b/pkg/devspace/compose/testdata/volumes-short/devspace.yaml similarity index 100% rename from pkg/devspace/compose/testdata/volumes-short/expected.yaml rename to pkg/devspace/compose/testdata/volumes-short/devspace.yaml diff --git a/pkg/devspace/compose/testdata/x_depends_on/expected.yaml b/pkg/devspace/compose/testdata/x_depends_on/expected.yaml deleted file mode 100644 index fb9c8ac8ac..0000000000 --- a/pkg/devspace/compose/testdata/x_depends_on/expected.yaml +++ /dev/null @@ -1,92 +0,0 @@ -version: v1beta11 -deployments: -- name: db - helm: - componentChart: true - values: - containers: - - name: db-container - image: mysql/mysql-server:8.0.19 -- name: cache - helm: - componentChart: true - values: - containers: - - name: cache-container - image: redis:latest -- name: messaging - helm: - componentChart: true - values: - containers: - - name: messaging-container - image: rabbitmq:latest -- name: worker - helm: - componentChart: true - values: - containers: - - name: worker-container - image: rails:latest -- name: backend - helm: - componentChart: true - values: - containers: - - name: backend-container - image: rails:latest -- name: frontend - helm: - componentChart: true - values: - containers: - - name: 
frontend-container - image: nginx:latest -- name: proxy - helm: - componentChart: true - values: - containers: - - name: proxy-container - image: haproxy:latest -hooks: -- events: ["after:deploy:db"] - container: - labelSelector: - app.kubernetes.io/component: db - containerName: db-container - wait: - running: true - terminatedWithCode: 0 -- events: ["after:deploy:cache"] - container: - labelSelector: - app.kubernetes.io/component: cache - containerName: cache-container - wait: - running: true - terminatedWithCode: 0 -- events: ["after:deploy:messaging"] - container: - labelSelector: - app.kubernetes.io/component: messaging - containerName: messaging-container - wait: - running: true - terminatedWithCode: 0 -- events: ["after:deploy:backend"] - container: - labelSelector: - app.kubernetes.io/component: backend - containerName: backend-container - wait: - running: true - terminatedWithCode: 0 -- events: ["after:deploy:frontend"] - container: - labelSelector: - app.kubernetes.io/component: frontend - containerName: frontend-container - wait: - running: true - terminatedWithCode: 0 diff --git a/pkg/devspace/compose/testdata/x_volumes-depends_on/expected.yaml b/pkg/devspace/compose/testdata/x_volumes-depends_on/devspace.yaml similarity index 100% rename from pkg/devspace/compose/testdata/x_volumes-depends_on/expected.yaml rename to pkg/devspace/compose/testdata/x_volumes-depends_on/devspace.yaml diff --git a/pkg/devspace/compose/volume.go b/pkg/devspace/compose/volume.go new file mode 100644 index 0000000000..5499c69ae6 --- /dev/null +++ b/pkg/devspace/compose/volume.go @@ -0,0 +1,149 @@ +package compose + +import ( + "fmt" + + "github.com/loft-sh/devspace/pkg/util/log" + + composetypes "github.com/compose-spec/compose-go/types" +) + +func volumesConfig( + service composetypes.ServiceConfig, + composeVolumes map[string]composetypes.VolumeConfig, + log log.Logger, +) (volumes []interface{}, volumeMounts []interface{}, bindVolumeMounts []interface{}) { + for _, secret 
:= range service.Secrets { + volume := createSecretVolume(secret) + volumes = append(volumes, volume) + + volumeMount := createSecretVolumeMount(secret) + volumeMounts = append(volumeMounts, volumeMount) + } + + var volumeVolumes []composetypes.ServiceVolumeConfig + var bindVolumes []composetypes.ServiceVolumeConfig + var tmpfsVolumes []composetypes.ServiceVolumeConfig + for _, serviceVolume := range service.Volumes { + switch serviceVolume.Type { + case composetypes.VolumeTypeBind: + bindVolumes = append(bindVolumes, serviceVolume) + case composetypes.VolumeTypeTmpfs: + tmpfsVolumes = append(tmpfsVolumes, serviceVolume) + case composetypes.VolumeTypeVolume: + volumeVolumes = append(volumeVolumes, serviceVolume) + default: + log.Warnf("%s volumes are not supported", serviceVolume.Type) + } + } + + volumeMap := map[string]interface{}{} + for idx, volumeVolume := range volumeVolumes { + volumeName := volumeName(service, volumeVolume, idx+1) + _, ok := volumeMap[volumeName] + if !ok { + volume := createVolume(volumeName, DefaultVolumeSize) + volumes = append(volumes, volume) + volumeMap[volumeName] = volume + } + + volumeMount := createSharedVolumeMount(volumeName, volumeVolume) + volumeMounts = append(volumeMounts, volumeMount) + } + + for _, tmpfsVolume := range tmpfsVolumes { + volumeName := volumeName(service, tmpfsVolume, len(volumes)) + volume := createEmptyDirVolume(volumeName, tmpfsVolume) + volumes = append(volumes, volume) + + volumeMount := createServiceVolumeMount(volumeName, tmpfsVolume) + volumeMounts = append(volumeMounts, volumeMount) + } + + for idx, bindVolume := range bindVolumes { + volumeName := fmt.Sprintf("volume-%d", idx+1) + volume := createEmptyDirVolume(volumeName, bindVolume) + volumes = append(volumes, volume) + + volumeMount := createServiceVolumeMount(volumeName, bindVolume) + volumeMounts = append(volumeMounts, volumeMount) + } + + return volumes, volumeMounts, bindVolumeMounts +} + +func createEmptyDirVolume(volumeName string, volume 
composetypes.ServiceVolumeConfig) interface{} { + emptyDir := map[string]interface{}{} + if volume.Tmpfs != nil { + emptyDir["sizeLimit"] = fmt.Sprintf("%d", volume.Tmpfs.Size) + } + return map[string]interface{}{ + "name": volumeName, + "emptyDir": emptyDir, + } +} + +func createSecretVolume(secret composetypes.ServiceSecretConfig) interface{} { + return map[string]interface{}{ + "name": secret.Source, + "secret": map[string]interface{}{ + "secretName": secret.Source, + }, + } +} + +func createSecretVolumeMount(secret composetypes.ServiceSecretConfig) interface{} { + target := secret.Source + if secret.Target != "" { + target = secret.Target + } + return map[string]interface{}{ + "containerPath": fmt.Sprintf("/run/secrets/%s", target), + "volume": map[string]interface{}{ + "name": secret.Source, + "subPath": target, + "readOnly": true, + }, + } +} + +func createSharedVolumeMount(volumeName string, volume composetypes.ServiceVolumeConfig) interface{} { + volumeConfig := map[string]interface{}{ + "name": volumeName, + "shared": true, + } + + if volume.ReadOnly { + volumeConfig["readOnly"] = true + } + + return map[string]interface{}{ + "containerPath": volume.Target, + "volume": volumeConfig, + } +} + +func createServiceVolumeMount(volumeName string, volume composetypes.ServiceVolumeConfig) interface{} { + return map[string]interface{}{ + "containerPath": volume.Target, + "volume": map[string]interface{}{ + "name": volumeName, + "readOnly": volume.ReadOnly, + }, + } +} + +func createVolume(name string, size string) interface{} { + return map[string]interface{}{ + "name": name, + "size": size, + } +} + +func volumeName(service composetypes.ServiceConfig, volume composetypes.ServiceVolumeConfig, idx int) string { + volumeName := volume.Source + if volumeName == "" { + volumeName = fmt.Sprintf("%s-%d", formatName(service.Name), idx) + } + return volumeName +} From deb40ae9e1cbc3710fca3acd56baf311e24fe7a6 Mon Sep 17 00:00:00 2001 From: Russell Centanni Date: Thu, 31 Mar 
2022 13:41:38 -0400 Subject: [PATCH 6/9] refactor: handle dependencies with nested directory --- pkg/devspace/compose/config_builder.go | 4 +- pkg/devspace/compose/dependency.go | 32 ++- pkg/devspace/compose/image.go | 14 +- pkg/devspace/compose/loader_test.go | 224 ------------------ .../compose/{loader.go => manager.go} | 39 ++- pkg/devspace/compose/manager_test.go | 102 ++++++++ .../testdata/build_args_list/devspace.yaml | 1 - .../testdata/build_args_map/devspace.yaml | 1 - .../testdata/build_network/devspace.yaml | 1 - .../testdata/build_target/devspace.yaml | 1 - .../depends_on_with_build/backend/Dockerfile | 1 + .../backend/devspace.yaml | 108 +++++++++ .../depends_on_with_build/devspace-cache.yaml | 101 ++++++++ .../depends_on_with_build/devspace-db.yaml | 94 ++++++++ .../devspace-frontend.yaml | 100 ++++++++ .../devspace-messaging.yaml | 102 ++++++++ .../depends_on_with_build/devspace.yaml | 112 +++++++++ .../depends_on_with_build/docker-compose.yaml | 28 +++ 18 files changed, 822 insertions(+), 243 deletions(-) delete mode 100644 pkg/devspace/compose/loader_test.go rename pkg/devspace/compose/{loader.go => manager.go} (76%) create mode 100644 pkg/devspace/compose/manager_test.go create mode 100644 pkg/devspace/compose/testdata/depends_on_with_build/backend/Dockerfile create mode 100644 pkg/devspace/compose/testdata/depends_on_with_build/backend/devspace.yaml create mode 100644 pkg/devspace/compose/testdata/depends_on_with_build/devspace-cache.yaml create mode 100644 pkg/devspace/compose/testdata/depends_on_with_build/devspace-db.yaml create mode 100644 pkg/devspace/compose/testdata/depends_on_with_build/devspace-frontend.yaml create mode 100644 pkg/devspace/compose/testdata/depends_on_with_build/devspace-messaging.yaml create mode 100644 pkg/devspace/compose/testdata/depends_on_with_build/devspace.yaml create mode 100644 pkg/devspace/compose/testdata/depends_on_with_build/docker-compose.yaml diff --git a/pkg/devspace/compose/config_builder.go 
b/pkg/devspace/compose/config_builder.go index 6cf48d47db..106090b845 100644 --- a/pkg/devspace/compose/config_builder.go +++ b/pkg/devspace/compose/config_builder.go @@ -9,10 +9,10 @@ import ( ) type ConfigBuilder interface { - AddDependencies(dockerCompose composetypes.Project, service composetypes.ServiceConfig) error + AddDependencies(dependency composetypes.Project, service composetypes.ServiceConfig) error AddDeployment(dockerCompose composetypes.Project, service composetypes.ServiceConfig) error AddDev(service composetypes.ServiceConfig) error - AddImage(service composetypes.ServiceConfig) error + AddImage(dockerCompose composetypes.Project, service composetypes.ServiceConfig) error AddSecret(dockerCompose composetypes.Project, service composetypes.ServiceConfig) error Config() *latest.Config SetName(name string) diff --git a/pkg/devspace/compose/dependency.go b/pkg/devspace/compose/dependency.go index e646566584..26c22fc8d3 100644 --- a/pkg/devspace/compose/dependency.go +++ b/pkg/devspace/compose/dependency.go @@ -1,20 +1,48 @@ package compose import ( + "path/filepath" + composetypes "github.com/compose-spec/compose-go/types" "github.com/loft-sh/devspace/pkg/devspace/config/versions/latest" ) func (cb *configBuilder) AddDependencies(dockerCompose composetypes.Project, service composetypes.ServiceConfig) error { for _, dependency := range service.GetDependencies() { + depName := formatName(dependency) + if cb.config.Dependencies == nil { cb.config.Dependencies = map[string]*latest.DependencyConfig{} } - depName := formatName(dependency) + depService, err := dockerCompose.GetService(dependency) + if err != nil { + return err + } + + currentPath := dockerCompose.WorkingDir + if service.Build != nil && service.Build.Context != "" { + currentPath = filepath.Join(dockerCompose.WorkingDir, service.Build.Context) + } + + dependencyPath := dockerCompose.WorkingDir + if depService.Build != nil && depService.Build.Context != "" { + dependencyPath = 
filepath.Join(dockerCompose.WorkingDir, depService.Build.Context) + } + + relativePath, err := filepath.Rel(currentPath, dependencyPath) + if err != nil { + return err + } + + fileName := "" + if dependencyPath == dockerCompose.WorkingDir { + fileName = "devspace-" + depName + ".yaml" + } + cb.config.Dependencies[depName] = &latest.DependencyConfig{ Source: &latest.SourceConfig{ - Path: "devspace-" + depName + ".yaml", + Path: filepath.Join(relativePath, fileName), }, } } diff --git a/pkg/devspace/compose/image.go b/pkg/devspace/compose/image.go index 98f26188b3..d73c7809a5 100644 --- a/pkg/devspace/compose/image.go +++ b/pkg/devspace/compose/image.go @@ -7,27 +7,34 @@ import ( "github.com/loft-sh/devspace/pkg/devspace/config/versions/latest" ) -func (cb *configBuilder) AddImage(service composetypes.ServiceConfig) error { +func (cb *configBuilder) AddImage(dockerCompose composetypes.Project, service composetypes.ServiceConfig) error { build := service.Build if build == nil { cb.config.Images = nil return nil } - context, err := filepath.Rel(cb.workingDir, filepath.Join(cb.workingDir, build.Context)) + currentDir := filepath.Join(dockerCompose.WorkingDir, cb.workingDir) + contextDir := filepath.Join(dockerCompose.WorkingDir, build.Context) + context, err := filepath.Rel(currentDir, contextDir) if err != nil { return err } + context = filepath.ToSlash(context) if context == "." 
{ context = "" } - dockerfile, err := filepath.Rel(cb.workingDir, filepath.Join(cb.workingDir, build.Context, build.Dockerfile)) + dockerfile, err := filepath.Rel(currentDir, filepath.Join(dockerCompose.WorkingDir, build.Context, build.Dockerfile)) if err != nil { return err } + if dockerfile == "Dockerfile" { + dockerfile = "" + } + image := &latest.Image{ Image: resolveImage(service), Context: context, @@ -51,7 +58,6 @@ func (cb *configBuilder) AddImage(service composetypes.ServiceConfig) error { } if cb.config.Images == nil { - cb.config.Images = map[string]*latest.Image{} } diff --git a/pkg/devspace/compose/loader_test.go b/pkg/devspace/compose/loader_test.go deleted file mode 100644 index eebdd45c47..0000000000 --- a/pkg/devspace/compose/loader_test.go +++ /dev/null @@ -1,224 +0,0 @@ -package compose - -/* -import ( - "io/ioutil" - "os" - "path/filepath" - "strings" - "testing" - - composeloader "github.com/compose-spec/compose-go/loader" - composetypes "github.com/compose-spec/compose-go/types" - "github.com/loft-sh/devspace/pkg/devspace/config/versions/latest" - "github.com/loft-sh/devspace/pkg/util/hash" - "github.com/loft-sh/devspace/pkg/util/log" - "gopkg.in/yaml.v3" - "gotest.tools/assert" - "gotest.tools/assert/cmp" -) - -func TestLoad(t *testing.T) { - dirs, err := ioutil.ReadDir("testdata") - if err != nil { - t.Error(err) - } - - if len(dirs) == 0 { - t.Error("No test cases found. 
Add some!") - } - - focused := []string{} - for _, dir := range dirs { - if strings.HasPrefix(dir.Name(), "f_") { - focused = append(focused, dir.Name()) - } - } - - if len(focused) > 0 { - for _, focus := range focused { - testLoad(focus, t) - } - } else { - for _, dir := range dirs { - if !strings.HasPrefix(dir.Name(), "x_") { - testLoad(dir.Name(), t) - } - } - } - -} - -func testLoad(dir string, t *testing.T) { - wd, err := os.Getwd() - if err != nil { - t.Error(err) - } - - err = os.Chdir(filepath.Join(wd, "testdata", dir)) - if err != nil { - t.Error(err) - } - defer func() { - err := os.Chdir(wd) - if err != nil { - t.Error(err) - } - }() - - dockerComposePath := GetDockerComposePath() - loader := NewComposeManager(dockerComposePath) - - actualError := loader.Load(log.Discard) - - if actualError != nil { - expectedError, err := ioutil.ReadFile("error.txt") - if err != nil { - t.Errorf("Unexpected error occurred loading the docker-compose.yaml: %s", err.Error()) - } - - assert.Equal(t, string(expectedError), actualError.Error(), "Expected error:\n%s\nbut got:\n%s\n in testCase %s", string(expectedError), actualError.Error(), dir) - } - - for path, actualConfig := range loader.Configs() { - data, err := ioutil.ReadFile(path) - if err != nil { - t.Errorf("Please create the expected DevSpace configuration by creating a %s in the testdata/%s folder", path, dir) - } - - expectedConfig := &latest.Config{} - err = yaml.Unmarshal(data, expectedConfig) - if err != nil { - t.Errorf("Error unmarshaling the expected configuration: %s", err.Error()) - } - - assert.Check( - t, - cmp.DeepEqual(expectedConfig.Deployments, actualConfig.Deployments), - "deployment properties did not match in test case %s", - dir, - ) - // actualDeployments := actualConfig.Deployments - actualConfig.Deployments = nil - expectedConfig.Deployments = nil - - assert.Check( - t, - cmp.DeepEqual(toWaitHookMap(expectedConfig.Hooks), toWaitHookMap(actualConfig.Hooks)), - "hook properties did not match 
in test case %s", - dir, - ) - actualHooks := actualConfig.Hooks - actualConfig.Hooks = nil - expectedConfig.Hooks = nil - - assert.Check( - t, - cmp.DeepEqual(expectedConfig, actualConfig), - "config properties did not match in test case %s", - dir, - ) - - // Load docker compose to determine dependency ordering - content, err := ioutil.ReadFile(dockerComposePath) - if err != nil { - t.Errorf("Unexpected error occurred loading the docker-compose.yaml: %s", err.Error()) - } - dockerCompose, err := composeloader.Load(composetypes.ConfigDetails{ - ConfigFiles: []composetypes.ConfigFile{ - { - Content: content, - }, - }, - }) - if err != nil { - t.Error(err) - } - - // Determine which deployments should have wait hooks - expectedWaitHooks := map[string]bool{} - err = dockerCompose.WithServices(nil, func(service composetypes.ServiceConfig) error { - for _, dep := range service.GetDependencies() { - expectedWaitHooks[dep] = true - } - return nil - }) - if err != nil { - t.Error(err) - } - - // Iterate services in dependency order - err = dockerCompose.WithServices(nil, func(service composetypes.ServiceConfig) error { - waitHookIdx := getWaitHookIndex(service.Name, actualHooks) - - // for _, dep := range service.GetDependencies() { - // // Check deployments order - // assert.Check(t, getDeploymentIndex(dep, actualDeployments) < getDeploymentIndex(service.Name, actualDeployments), "%s deployment should come after %s for test case %s", service.Name, dep, dir) - - // // Check for wait hook order - // _, ok := expectedWaitHooks[service.Name] - // if ok { - // assert.Check(t, getWaitHookIndex(dep, actualHooks) < waitHookIdx, "%s wait hook should come after %s", service.Name, dep) - // } - // } - - uploadDoneHookIdx := getUploadDoneHookIndex(service.Name, actualHooks) - if uploadDoneHookIdx != -1 { - // Check that upload done hooks come before wait hooks - if waitHookIdx != -1 { - assert.Check(t, uploadDoneHookIdx < waitHookIdx, "%s wait hook should come after upload done 
hooks for test case %s", service.Name, dir) - } - - // Check that upload hooks come before upload done hooks - for idx, hook := range actualHooks { - if hook.Upload != nil && hook.Container.ContainerName == UploadVolumesContainerName && hook.Container.LabelSelector != nil && hook.Container.LabelSelector["app.kubernetes.io/component"] == service.Name { - assert.Check(t, idx < uploadDoneHookIdx, "%s upload done hook should come after upload hooks for test case %s", service.Name, dir) - } - } - } - - return nil - }) - if err != nil { - t.Error(err) - } - } - -} - -func toDeploymentMap(deployments []*latest.DeploymentConfig) map[string]latest.DeploymentConfig { - deploymentMap := map[string]latest.DeploymentConfig{} - for _, deployment := range deployments { - deploymentMap[deployment.Name] = *deployment - } - return deploymentMap -} - -func toWaitHookMap(hooks []*latest.HookConfig) map[string]latest.HookConfig { - hookMap := map[string]latest.HookConfig{} - for _, hook := range hooks { - out, _ := yaml.Marshal(hook) - hookKey := hash.String(string(out)) - hookMap[hookKey] = *hook - } - return hookMap -} - -func getWaitHookIndex(name string, hooks []*latest.HookConfig) int { - for idx, hook := range hooks { - if hook.Wait != nil && hook.Container != nil && hook.Container.LabelSelector != nil && hook.Container.LabelSelector["app.kubernetes.io/component"] == name { - return idx - } - } - return -1 -} - -func getUploadDoneHookIndex(name string, hooks []*latest.HookConfig) int { - for idx, hook := range hooks { - if hook.Command == "touch /tmp/done" && hook.Container != nil && hook.Container.LabelSelector != nil && hook.Container.LabelSelector["app.kubernetes.io/component"] == name { - return idx - } - } - return -1 -} -*/ diff --git a/pkg/devspace/compose/loader.go b/pkg/devspace/compose/manager.go similarity index 76% rename from pkg/devspace/compose/loader.go rename to pkg/devspace/compose/manager.go index 6540e7ae2b..5f8699285f 100644 --- 
a/pkg/devspace/compose/loader.go +++ b/pkg/devspace/compose/manager.go @@ -73,25 +73,31 @@ func (cm *composeManager) Load(log log.Logger) error { builders := map[string]ConfigBuilder{} err = dockerCompose.WithServices(nil, func(service composetypes.ServiceConfig) error { - configKey := constants.DefaultConfigPath configName := "docker-compose" + workingDir := filepath.Dir(cm.composePath) isDependency := dependentsMap[service.Name] != nil if isDependency { - configKey = "devspace-" + service.Name + ".yaml" + // configKey = "devspace-" + service.Name + ".yaml" + // if service.Build != nil && service.Build.Context != "" { + // configKey = filepath.Join(service.Build.Context, "devspace.yaml") + // } + configName = service.Name + if service.Build != nil && service.Build.Context != "" { + workingDir = filepath.Join(workingDir, service.Build.Context) + } } - builder := builders[configKey] + builder := builders[configName] if builder == nil { - workingDir := filepath.Dir(cm.composePath) builder = NewConfigBuilder(workingDir, log) - builders[configKey] = builder + builders[configName] = builder } builder.SetName(configName) - err := builder.AddImage(service) + err := builder.AddImage(*dockerCompose, service) if err != nil { return err } @@ -122,8 +128,27 @@ func (cm *composeManager) Load(log log.Logger) error { return err } - for path, builder := range builders { + err = dockerCompose.WithServices(nil, func(service composetypes.ServiceConfig) error { + configName := "docker-compose" + path := constants.DefaultConfigPath + + isDependency := dependentsMap[service.Name] != nil + if isDependency { + configName = service.Name + + path = "devspace-" + service.Name + ".yaml" + if service.Build != nil && service.Build.Context != "" { + path = filepath.Join(service.Build.Context, "devspace.yaml") + } + } + + builder := builders[configName] cm.configs[path] = builder.Config() + + return nil + }) + if err != nil { + return err } return nil diff --git 
a/pkg/devspace/compose/manager_test.go b/pkg/devspace/compose/manager_test.go new file mode 100644 index 0000000000..657b0e3836 --- /dev/null +++ b/pkg/devspace/compose/manager_test.go @@ -0,0 +1,102 @@ +package compose + +import ( + "fmt" + "io/ioutil" + "os" + "path/filepath" + "strings" + "testing" + + "github.com/loft-sh/devspace/pkg/devspace/config/versions/latest" + "github.com/loft-sh/devspace/pkg/util/log" + "gopkg.in/yaml.v3" + "gotest.tools/assert" + "gotest.tools/assert/cmp" +) + +func TestLoad(t *testing.T) { + dirs, err := ioutil.ReadDir("testdata") + if err != nil { + t.Error(err) + } + + if len(dirs) == 0 { + t.Error("No test cases found. Add some!") + } + + focused := []string{} + for _, dir := range dirs { + if strings.HasPrefix(dir.Name(), "f_") { + focused = append(focused, dir.Name()) + } + } + + if len(focused) > 0 { + for _, focus := range focused { + testLoad(focus, t) + } + } else { + for _, dir := range dirs { + if !strings.HasPrefix(dir.Name(), "x_") { + testLoad(dir.Name(), t) + } + } + } + +} + +func testLoad(dir string, t *testing.T) { + wd, err := os.Getwd() + if err != nil { + t.Error(err) + } + + err = os.Chdir(filepath.Join(wd, "testdata", dir)) + if err != nil { + t.Error(err) + } + defer func() { + err := os.Chdir(wd) + if err != nil { + t.Error(err) + } + }() + + dockerComposePath := GetDockerComposePath() + loader := NewComposeManager(dockerComposePath) + + actualError := loader.Load(log.Discard) + + if actualError != nil { + expectedError, err := ioutil.ReadFile("error.txt") + if err != nil { + t.Errorf("Unexpected error occurred loading the docker-compose.yaml: %s", err.Error()) + } + + assert.Equal(t, string(expectedError), actualError.Error(), "Expected error:\n%s\nbut got:\n%s\n in testCase %s", string(expectedError), actualError.Error(), dir) + } + + for path, actualConfig := range loader.Configs() { + fmt.Println(path) + + data, err := ioutil.ReadFile(path) + if err != nil { + t.Errorf("Please create the expected DevSpace 
configuration by creating a %s in the testdata/%s folder", path, dir) + } + + expectedConfig := &latest.Config{} + err = yaml.Unmarshal(data, expectedConfig) + if err != nil { + t.Errorf("Error unmarshaling the expected configuration: %s", err.Error()) + } + + assert.Check( + t, + cmp.DeepEqual(expectedConfig, actualConfig), + "configs did not match in test case %s", + dir, + ) + } + +} diff --git a/pkg/devspace/compose/testdata/build_args_list/devspace.yaml b/pkg/devspace/compose/testdata/build_args_list/devspace.yaml index 8c2a85369a..7df9df43c2 100644 --- a/pkg/devspace/compose/testdata/build_args_list/devspace.yaml +++ b/pkg/devspace/compose/testdata/build_args_list/devspace.yaml @@ -4,7 +4,6 @@ name: docker-compose images: foo: image: foo - dockerfile: Dockerfile buildArgs: buildno: 1 gitcommithash: cdc3b19 diff --git a/pkg/devspace/compose/testdata/build_args_map/devspace.yaml b/pkg/devspace/compose/testdata/build_args_map/devspace.yaml index 8c2a85369a..7df9df43c2 100644 --- a/pkg/devspace/compose/testdata/build_args_map/devspace.yaml +++ b/pkg/devspace/compose/testdata/build_args_map/devspace.yaml @@ -4,7 +4,6 @@ name: docker-compose images: foo: image: foo - dockerfile: Dockerfile buildArgs: buildno: 1 gitcommithash: cdc3b19 diff --git a/pkg/devspace/compose/testdata/build_network/devspace.yaml b/pkg/devspace/compose/testdata/build_network/devspace.yaml index ed94ab25aa..68a6ec2667 100644 --- a/pkg/devspace/compose/testdata/build_network/devspace.yaml +++ b/pkg/devspace/compose/testdata/build_network/devspace.yaml @@ -4,7 +4,6 @@ name: docker-compose images: foo: image: foo - dockerfile: Dockerfile network: host deployments: diff --git a/pkg/devspace/compose/testdata/build_target/devspace.yaml b/pkg/devspace/compose/testdata/build_target/devspace.yaml index 6f7dd357e2..753366e646 100644 --- a/pkg/devspace/compose/testdata/build_target/devspace.yaml +++ b/pkg/devspace/compose/testdata/build_target/devspace.yaml @@ -4,7 +4,6 @@ name: docker-compose images: 
foo: image: foo - dockerfile: Dockerfile target: dev deployments: diff --git a/pkg/devspace/compose/testdata/depends_on_with_build/backend/Dockerfile b/pkg/devspace/compose/testdata/depends_on_with_build/backend/Dockerfile new file mode 100644 index 0000000000..67fd379018 --- /dev/null +++ b/pkg/devspace/compose/testdata/depends_on_with_build/backend/Dockerfile @@ -0,0 +1 @@ +FROM alpine diff --git a/pkg/devspace/compose/testdata/depends_on_with_build/backend/devspace.yaml b/pkg/devspace/compose/testdata/depends_on_with_build/backend/devspace.yaml new file mode 100644 index 0000000000..d61ca30dd6 --- /dev/null +++ b/pkg/devspace/compose/testdata/depends_on_with_build/backend/devspace.yaml @@ -0,0 +1,108 @@ +version: v2beta1 + +name: backend + +images: + backend: + image: backend + +dependencies: + cache: + path: ../devspace-cache.yaml + db: + path: ../devspace-db.yaml + messaging: + path: ../devspace-messaging.yaml + +deployments: + backend: + helm: + values: + containers: + - name: backend-container + image: backend +# deployments: +# - name: db +# helm: +# componentChart: true +# values: +# containers: +# - name: db-container +# image: mysql/mysql-server:8.0.19 +# - name: cache +# helm: +# componentChart: true +# values: +# containers: +# - name: cache-container +# image: redis:latest +# - name: messaging +# helm: +# componentChart: true +# values: +# containers: +# - name: messaging-container +# image: rabbitmq:latest +# - name: worker +# helm: +# componentChart: true +# values: +# containers: +# - name: worker-container +# image: rails:latest + +# - name: frontend +# helm: +# componentChart: true +# values: +# containers: +# - name: frontend-container +# image: nginx:latest +# - name: proxy +# helm: +# componentChart: true +# values: +# containers: +# - name: proxy-container +# image: haproxy:latest +# hooks: +# - events: ["after:deploy:db"] +# container: +# labelSelector: +# app.kubernetes.io/component: db +# containerName: db-container +# wait: +# running: 
true +# terminatedWithCode: 0 +# - events: ["after:deploy:cache"] +# container: +# labelSelector: +# app.kubernetes.io/component: cache +# containerName: cache-container +# wait: +# running: true +# terminatedWithCode: 0 +# - events: ["after:deploy:messaging"] +# container: +# labelSelector: +# app.kubernetes.io/component: messaging +# containerName: messaging-container +# wait: +# running: true +# terminatedWithCode: 0 +# - events: ["after:deploy:backend"] +# container: +# labelSelector: +# app.kubernetes.io/component: backend +# containerName: backend-container +# wait: +# running: true +# terminatedWithCode: 0 +# - events: ["after:deploy:frontend"] +# container: +# labelSelector: +# app.kubernetes.io/component: frontend +# containerName: frontend-container +# wait: +# running: true +# terminatedWithCode: 0 diff --git a/pkg/devspace/compose/testdata/depends_on_with_build/devspace-cache.yaml b/pkg/devspace/compose/testdata/depends_on_with_build/devspace-cache.yaml new file mode 100644 index 0000000000..eb6a4e425d --- /dev/null +++ b/pkg/devspace/compose/testdata/depends_on_with_build/devspace-cache.yaml @@ -0,0 +1,101 @@ +version: v2beta1 + +name: cache + +deployments: + cache: + helm: + values: + containers: + - name: cache-container + image: redis:latest +# - name: db +# helm: +# componentChart: true +# values: +# containers: +# - name: db-container +# image: mysql/mysql-server:8.0.19 +# - name: cache +# helm: +# componentChart: true +# values: +# containers: +# - name: cache-container +# image: redis:latest +# - name: messaging +# helm: +# componentChart: true +# values: +# containers: +# - name: messaging-container +# image: rabbitmq:latest +# - name: worker +# helm: +# componentChart: true +# values: +# containers: +# - name: worker-container +# image: rails:latest +# - name: backend +# helm: +# componentChart: true +# values: +# containers: +# - name: backend-container +# image: rails:latest +# - name: frontend +# helm: +# componentChart: true +# values: +# 
containers: +# - name: frontend-container +# image: nginx:latest +# - name: proxy +# helm: +# componentChart: true +# values: +# containers: +# - name: proxy-container +# image: haproxy:latest +# hooks: +# - events: ["after:deploy:db"] +# container: +# labelSelector: +# app.kubernetes.io/component: db +# containerName: db-container +# wait: +# running: true +# terminatedWithCode: 0 +# - events: ["after:deploy:cache"] +# container: +# labelSelector: +# app.kubernetes.io/component: cache +# containerName: cache-container +# wait: +# running: true +# terminatedWithCode: 0 +# - events: ["after:deploy:messaging"] +# container: +# labelSelector: +# app.kubernetes.io/component: messaging +# containerName: messaging-container +# wait: +# running: true +# terminatedWithCode: 0 +# - events: ["after:deploy:backend"] +# container: +# labelSelector: +# app.kubernetes.io/component: backend +# containerName: backend-container +# wait: +# running: true +# terminatedWithCode: 0 +# - events: ["after:deploy:frontend"] +# container: +# labelSelector: +# app.kubernetes.io/component: frontend +# containerName: frontend-container +# wait: +# running: true +# terminatedWithCode: 0 diff --git a/pkg/devspace/compose/testdata/depends_on_with_build/devspace-db.yaml b/pkg/devspace/compose/testdata/depends_on_with_build/devspace-db.yaml new file mode 100644 index 0000000000..16b1f277ab --- /dev/null +++ b/pkg/devspace/compose/testdata/depends_on_with_build/devspace-db.yaml @@ -0,0 +1,94 @@ +version: v2beta1 + +name: db + +deployments: + db: + helm: + values: + containers: + - name: db-container + image: mysql/mysql-server:8.0.19 +# - name: cache +# helm: +# componentChart: true +# values: +# containers: +# - name: cache-container +# image: redis:latest +# - name: messaging +# helm: +# componentChart: true +# values: +# containers: +# - name: messaging-container +# image: rabbitmq:latest +# - name: worker +# helm: +# componentChart: true +# values: +# containers: +# - name: worker-container +# 
image: rails:latest +# - name: backend +# helm: +# componentChart: true +# values: +# containers: +# - name: backend-container +# image: rails:latest +# - name: frontend +# helm: +# componentChart: true +# values: +# containers: +# - name: frontend-container +# image: nginx:latest +# - name: proxy +# helm: +# componentChart: true +# values: +# containers: +# - name: proxy-container +# image: haproxy:latest +# hooks: +# - events: ["after:deploy:db"] +# container: +# labelSelector: +# app.kubernetes.io/component: db +# containerName: db-container +# wait: +# running: true +# terminatedWithCode: 0 +# - events: ["after:deploy:cache"] +# container: +# labelSelector: +# app.kubernetes.io/component: cache +# containerName: cache-container +# wait: +# running: true +# terminatedWithCode: 0 +# - events: ["after:deploy:messaging"] +# container: +# labelSelector: +# app.kubernetes.io/component: messaging +# containerName: messaging-container +# wait: +# running: true +# terminatedWithCode: 0 +# - events: ["after:deploy:backend"] +# container: +# labelSelector: +# app.kubernetes.io/component: backend +# containerName: backend-container +# wait: +# running: true +# terminatedWithCode: 0 +# - events: ["after:deploy:frontend"] +# container: +# labelSelector: +# app.kubernetes.io/component: frontend +# containerName: frontend-container +# wait: +# running: true +# terminatedWithCode: 0 diff --git a/pkg/devspace/compose/testdata/depends_on_with_build/devspace-frontend.yaml b/pkg/devspace/compose/testdata/depends_on_with_build/devspace-frontend.yaml new file mode 100644 index 0000000000..f6a4bdc7b5 --- /dev/null +++ b/pkg/devspace/compose/testdata/depends_on_with_build/devspace-frontend.yaml @@ -0,0 +1,100 @@ +version: v2beta1 + +name: frontend + +dependencies: + backend: + path: backend + +deployments: + frontend: + helm: + values: + containers: + - name: frontend-container + image: nginx:latest + +# deployments: +# - name: db +# helm: +# componentChart: true +# values: +# 
containers: +# - name: db-container +# image: mysql/mysql-server:8.0.19 +# - name: cache +# helm: +# componentChart: true +# values: +# containers: +# - name: cache-container +# image: redis:latest +# - name: messaging +# helm: +# componentChart: true +# values: +# containers: +# - name: messaging-container +# image: rabbitmq:latest +# - name: worker +# helm: +# componentChart: true +# values: +# containers: +# - name: worker-container +# image: rails:latest +# - name: backend +# helm: +# componentChart: true +# values: +# containers: +# - name: backend-container +# image: rails:latest +# - name: proxy +# helm: +# componentChart: true +# values: +# containers: +# - name: proxy-container +# image: haproxy:latest +# hooks: +# - events: ["after:deploy:db"] +# container: +# labelSelector: +# app.kubernetes.io/component: db +# containerName: db-container +# wait: +# running: true +# terminatedWithCode: 0 +# - events: ["after:deploy:cache"] +# container: +# labelSelector: +# app.kubernetes.io/component: cache +# containerName: cache-container +# wait: +# running: true +# terminatedWithCode: 0 +# - events: ["after:deploy:messaging"] +# container: +# labelSelector: +# app.kubernetes.io/component: messaging +# containerName: messaging-container +# wait: +# running: true +# terminatedWithCode: 0 +# - events: ["after:deploy:backend"] +# container: +# labelSelector: +# app.kubernetes.io/component: backend +# containerName: backend-container +# wait: +# running: true +# terminatedWithCode: 0 +# - events: ["after:deploy:frontend"] +# container: +# labelSelector: +# app.kubernetes.io/component: frontend +# containerName: frontend-container +# wait: +# running: true +# terminatedWithCode: 0 diff --git a/pkg/devspace/compose/testdata/depends_on_with_build/devspace-messaging.yaml b/pkg/devspace/compose/testdata/depends_on_with_build/devspace-messaging.yaml new file mode 100644 index 0000000000..222e528a97 --- /dev/null +++ 
b/pkg/devspace/compose/testdata/depends_on_with_build/devspace-messaging.yaml @@ -0,0 +1,102 @@ +version: v2beta1 + +name: messaging + +deployments: + messaging: + helm: + values: + containers: + - name: messaging-container + image: rabbitmq:latest +# deployments: +# - name: db +# helm: +# componentChart: true +# values: +# containers: +# - name: db-container +# image: mysql/mysql-server:8.0.19 +# - name: cache +# helm: +# componentChart: true +# values: +# containers: +# - name: cache-container +# image: redis:latest +# - name: messaging +# helm: +# componentChart: true +# values: +# containers: +# - name: messaging-container +# image: rabbitmq:latest +# - name: worker +# helm: +# componentChart: true +# values: +# containers: +# - name: worker-container +# image: rails:latest +# - name: backend +# helm: +# componentChart: true +# values: +# containers: +# - name: backend-container +# image: rails:latest +# - name: frontend +# helm: +# componentChart: true +# values: +# containers: +# - name: frontend-container +# image: nginx:latest +# - name: proxy +# helm: +# componentChart: true +# values: +# containers: +# - name: proxy-container +# image: haproxy:latest +# hooks: +# - events: ["after:deploy:db"] +# container: +# labelSelector: +# app.kubernetes.io/component: db +# containerName: db-container +# wait: +# running: true +# terminatedWithCode: 0 +# - events: ["after:deploy:cache"] +# container: +# labelSelector: +# app.kubernetes.io/component: cache +# containerName: cache-container +# wait: +# running: true +# terminatedWithCode: 0 +# - events: ["after:deploy:messaging"] +# container: +# labelSelector: +# app.kubernetes.io/component: messaging +# containerName: messaging-container +# wait: +# running: true +# terminatedWithCode: 0 +# - events: ["after:deploy:backend"] +# container: +# labelSelector: +# app.kubernetes.io/component: backend +# containerName: backend-container +# wait: +# running: true +# terminatedWithCode: 0 +# - events: 
["after:deploy:frontend"] +# container: +# labelSelector: +# app.kubernetes.io/component: frontend +# containerName: frontend-container +# wait: +# running: true +# terminatedWithCode: 0 diff --git a/pkg/devspace/compose/testdata/depends_on_with_build/devspace.yaml b/pkg/devspace/compose/testdata/depends_on_with_build/devspace.yaml new file mode 100644 index 0000000000..578d755fb3 --- /dev/null +++ b/pkg/devspace/compose/testdata/depends_on_with_build/devspace.yaml @@ -0,0 +1,112 @@ +version: v2beta1 + +name: docker-compose + +dependencies: + backend: + path: backend + db: + path: devspace-db.yaml + frontend: + path: devspace-frontend.yaml + messaging: + path: devspace-messaging.yaml + +deployments: + proxy: + helm: + componentChart: true + values: + containers: + - name: proxy-container + image: haproxy:latest + worker: + helm: + values: + containers: + - name: worker-container + image: rails:latest +# deployments: +# - name: db +# helm: +# componentChart: true +# values: +# containers: +# - name: db-container +# image: mysql/mysql-server:8.0.19 +# - name: cache +# helm: +# componentChart: true +# values: +# containers: +# - name: cache-container +# image: redis:latest +# - name: messaging +# helm: +# componentChart: true +# values: +# containers: +# - name: messaging-container +# image: rabbitmq:latest +# - name: worker +# helm: +# componentChart: true +# values: +# containers: +# - name: worker-container +# image: rails:latest +# - name: backend +# helm: +# componentChart: true +# values: +# containers: +# - name: backend-container +# image: rails:latest +# - name: frontend +# helm: +# componentChart: true +# values: +# containers: +# - name: frontend-container +# image: nginx:latest +# hooks: +# - events: ["after:deploy:db"] +# container: +# labelSelector: +# app.kubernetes.io/component: db +# containerName: db-container +# wait: +# running: true +# terminatedWithCode: 0 +# - events: ["after:deploy:cache"] +# container: +# labelSelector: +# 
app.kubernetes.io/component: cache +# containerName: cache-container +# wait: +# running: true +# terminatedWithCode: 0 +# - events: ["after:deploy:messaging"] +# container: +# labelSelector: +# app.kubernetes.io/component: messaging +# containerName: messaging-container +# wait: +# running: true +# terminatedWithCode: 0 +# - events: ["after:deploy:backend"] +# container: +# labelSelector: +# app.kubernetes.io/component: backend +# containerName: backend-container +# wait: +# running: true +# terminatedWithCode: 0 +# - events: ["after:deploy:frontend"] +# container: +# labelSelector: +# app.kubernetes.io/component: frontend +# containerName: frontend-container +# wait: +# running: true +# terminatedWithCode: 0 diff --git a/pkg/devspace/compose/testdata/depends_on_with_build/docker-compose.yaml b/pkg/devspace/compose/testdata/depends_on_with_build/docker-compose.yaml new file mode 100644 index 0000000000..8c3c20cea6 --- /dev/null +++ b/pkg/devspace/compose/testdata/depends_on_with_build/docker-compose.yaml @@ -0,0 +1,28 @@ +services: + db: + image: mysql/mysql-server:8.0.19 + cache: + image: redis:latest + worker: + image: rails:latest + depends_on: + - db + - messaging + backend: + image: backend + build: backend + depends_on: + - db + - cache + - messaging + frontend: + image: nginx:latest + depends_on: + - backend + messaging: + image: rabbitmq:latest + proxy: + image: haproxy:latest + depends_on: + - frontend + - backend From f607b5e7c23dc7b43bfc1b81dda033786e7188f6 Mon Sep 17 00:00:00 2001 From: Russell Centanni Date: Tue, 5 Apr 2022 15:34:15 -0400 Subject: [PATCH 7/9] refactor: integrate init command with compose config manager --- cmd/init.go | 278 +++++++++++------- pkg/devspace/compose/config_builder.go | 8 +- pkg/devspace/compose/dependency.go | 2 +- pkg/devspace/compose/deployment.go | 2 +- pkg/devspace/compose/image.go | 7 +- pkg/devspace/compose/manager.go | 67 +++-- pkg/devspace/compose/manager_test.go | 9 +- pkg/devspace/compose/secret.go | 2 +- 
.../testdata/volumes-long/devspace.yaml | 15 +- .../testdata/volumes-long/docker-compose.yaml | 5 + .../testdata/volumes-short/devspace.yaml | 2 +- pkg/devspace/compose/volume.go | 26 +- pkg/devspace/kubectl/selector/selector.go | 2 +- .../pipelinehandler/commands/select_pod.go | 3 +- 14 files changed, 269 insertions(+), 159 deletions(-) diff --git a/cmd/init.go b/cmd/init.go index 661d14c63a..d924a5f325 100644 --- a/cmd/init.go +++ b/cmd/init.go @@ -11,6 +11,7 @@ import ( "strconv" "strings" + "github.com/loft-sh/devspace/pkg/devspace/compose" "github.com/loft-sh/devspace/pkg/devspace/config/localcache" "github.com/sirupsen/logrus" @@ -147,45 +148,31 @@ func (cmd *InitCmd) Run(f factory.Factory) error { // Print DevSpace logo log.PrintLogo() - /* - generateFromDockerCompose := false - // TODO: Enable again - dockerComposePath := "" // compose.GetDockerComposePath() - if dockerComposePath != "" { - selectedDockerComposeOption, err := cmd.log.Question(&survey.QuestionOptions{ - Question: "Docker Compose configuration detected. 
Do you want to create a DevSpace configuration based on Docker Compose?", - DefaultValue: DockerComposeDevSpaceConfigOption, - Options: []string{ - DockerComposeDevSpaceConfigOption, - NewDevSpaceConfigOption, - }, - }) - if err != nil { - return err - } + // Determine if we're initializing from scratch, or using docker-compose.yaml + dockerComposePath, generateFromDockerCompose, err := cmd.shouldGenerateFromDockerCompose() + if err != nil { + return err + } - generateFromDockerCompose = selectedDockerComposeOption == DockerComposeDevSpaceConfigOption - } + if generateFromDockerCompose { + err = cmd.initDockerCompose(f, dockerComposePath) + } else { + err = cmd.initDevspace(f, configLoader) + } - if generateFromDockerCompose { - composeLoader := compose.NewDockerComposeLoader(dockerComposePath) - if err != nil { - return err - } + if err != nil { + panic(err) + } - // Load config - config, err := composeLoader.Load(cmd.log) - if err != nil { - return err - } + cmd.log.WriteString(logrus.InfoLevel, "\n") + cmd.log.Done("Project successfully initialized") + cmd.log.Info("Configuration saved in devspace.yaml - you can make adjustments as needed") + cmd.log.Infof("\r \nYou can now run:\n1. %s - to pick which Kubernetes namespace to work in\n2. %s - to start developing your project in Kubernetes\n\nRun `%s` or `%s` to see a list of available commands and flags\n", ansi.Color("devspace use namespace", "blue+b"), ansi.Color("devspace dev", "blue+b"), ansi.Color("devspace -h", "blue+b"), ansi.Color("devspace [command] -h", "blue+b")) - // Save config - err = composeLoader.Save(config) - if err != nil { - return err - } - } else {*/ + return nil +} +func (cmd *InitCmd) initDevspace(f factory.Factory, configLoader loader.ConfigLoader) error { // Create new dockerfile generator languageHandler, err := generator.NewLanguageHandler("", "", cmd.log) if err != nil { @@ -476,8 +463,6 @@ create_deployments --all \ # 3. 
Deploy Helm charts and ma return err } - /*}*/ - // Save generated err = localCache.Save() if err != nil { @@ -491,6 +476,81 @@ create_deployments --all \ # 3. Deploy Helm charts and ma } configPath := loader.ConfigPath("") + err = annotateConfig(configPath) + if err != nil { + return err + } + + return nil +} + +func (cmd *InitCmd) initDockerCompose(f factory.Factory, composePath string) error { + project, err := compose.LoadDockerComposeProject(composePath) + if err != nil { + return err + } + + // Prompt user for entrypoints for each container with sync folders. + for idx, service := range project.Services { + localPaths := compose.GetServiceSyncPaths(project, service) + noEntryPoint := len(service.Entrypoint) == 0 + hasSyncEndpoints := len(localPaths) > 0 + + if noEntryPoint && hasSyncEndpoints { + entrypointStr, err := cmd.log.Question(&survey.QuestionOptions{ + Question: "How is this container started? (e.g. npm start, gradle run, go run main.go)", + }) + if err != nil { + return err + } + + entrypoint := strings.Split(entrypointStr, " ") + project.Services[idx].Entrypoint = entrypoint + } + } + + // Generate DevSpace configuration + composeManager := compose.NewComposeManager(project) + err = composeManager.Load(cmd.log) + if err != nil { + return err + } + + // Save each configuration file + for path, config := range composeManager.Configs() { + localCache, err := localcache.NewCacheLoader().Load(path) + if err != nil { + return err + } + + // Save config + err = loader.Save(path, config) + if err != nil { + return err + } + + // Save generated + err = localCache.Save() + if err != nil { + return errors.Errorf("Error saving generated file: %v", err) + } + + // Add .devspace/ to .gitignore + err = appendToIgnoreFile(gitIgnoreFile, devspaceFolderGitignore) + if err != nil { + cmd.log.Warn(err) + } + + err = annotateConfig(path) + if err != nil { + return err + } + } + + return nil +} + +func annotateConfig(configPath string) error { annotatedConfig, err := 
ioutil.ReadFile(configPath) if err != nil { panic(err) @@ -541,74 +601,9 @@ create_deployments --all \ # 3. Deploy Helm charts and ma return err } - cmd.log.WriteString(logrus.InfoLevel, "\n") - cmd.log.Done("Project successfully initialized") - cmd.log.Info("Configuration saved in devspace.yaml - you can make adjustments as needed") - cmd.log.Infof("\r \nYou can now run:\n1. %s - to pick which Kubernetes namespace to work in\n2. %s - to start developing your project in Kubernetes\n\nRun `%s` or `%s` to see a list of available commands and flags\n", ansi.Color("devspace use namespace", "blue+b"), ansi.Color("devspace dev", "blue+b"), ansi.Color("devspace -h", "blue+b"), ansi.Color("devspace [command] -h", "blue+b")) - return nil -} - -func appendToIgnoreFile(ignoreFile, content string) error { - // Check if ignoreFile exists - _, err := os.Stat(ignoreFile) - if os.IsNotExist(err) { - _ = fsutil.WriteToFile([]byte(content), ignoreFile) - } else { - fileContent, err := ioutil.ReadFile(ignoreFile) - if err != nil { - return errors.Errorf("Error reading file %s: %v", ignoreFile, err) - } - - // append only if not found in file content - if !strings.Contains(string(fileContent), content) { - file, err := os.OpenFile(ignoreFile, os.O_APPEND|os.O_WRONLY, 0600) - if err != nil { - return errors.Errorf("Error writing file %s: %v", ignoreFile, err) - } - - defer file.Close() - if _, err = file.WriteString(content); err != nil { - return errors.Errorf("Error writing file %s: %v", ignoreFile, err) - } - } - } return nil } -func getProjectName() (string, string, error) { - projectName := "" - projectNamespace := "" - gitRemote, err := command.Output(context.TODO(), "", "git", "config", "--get", "remote.origin.url") - if err == nil { - sep := "/" - projectParts := strings.Split(string(regexp.MustCompile(`^.*?://[^/]+?/([^.]+)(\.git)?`).ReplaceAll(gitRemote, []byte("$1"))), sep) - partsLen := len(projectParts) - if partsLen > 1 { - projectNamespace = 
strings.Join(projectParts[0:partsLen-1], sep) - projectName = projectParts[partsLen-1] - } - } - - if projectName == "" { - absPath, err := filepath.Abs(".") - if err != nil { - return "", "", err - } - projectName = filepath.Base(absPath) - } - - projectName = strings.ToLower(projectName) - projectName = regexp.MustCompile("[^a-zA-Z0-9- ]+").ReplaceAllString(projectName, "") - projectName = regexp.MustCompile("[^a-zA-Z0-9-]+").ReplaceAllString(projectName, "-") - projectName = strings.Trim(projectName, "-") - - if !SpaceNameValidationRegEx.MatchString(projectName) || len(projectName) > 42 { - projectName = "devspace" - } - - return projectName, projectNamespace, nil -} - func (cmd *InitCmd) addDevConfig(config *latest.Config, imageName, image string, port int, languageHandler *generator.LanguageHandler) error { if config.Dev == nil { config.Dev = map[string]*latest.DevPod{} @@ -739,6 +734,87 @@ func (cmd *InitCmd) render(f factory.Factory, config *latest.Config) (string, er return writer.String(), nil } +func (cmd *InitCmd) shouldGenerateFromDockerCompose() (string, bool, error) { + dockerComposePath := compose.GetDockerComposePath() + if dockerComposePath != "" { + selectedDockerComposeOption, err := cmd.log.Question(&survey.QuestionOptions{ + Question: "Docker Compose configuration detected. 
Do you want to create a DevSpace configuration based on Docker Compose?", + DefaultValue: DockerComposeDevSpaceConfigOption, + Options: []string{ + DockerComposeDevSpaceConfigOption, + NewDevSpaceConfigOption, + }, + }) + if err != nil { + return "", false, err + } + + return dockerComposePath, selectedDockerComposeOption == DockerComposeDevSpaceConfigOption, nil + } + return "", false, nil +} + +func appendToIgnoreFile(ignoreFile, content string) error { + // Check if ignoreFile exists + _, err := os.Stat(ignoreFile) + if os.IsNotExist(err) { + _ = fsutil.WriteToFile([]byte(content), ignoreFile) + } else { + fileContent, err := ioutil.ReadFile(ignoreFile) + if err != nil { + return errors.Errorf("Error reading file %s: %v", ignoreFile, err) + } + + // append only if not found in file content + if !strings.Contains(string(fileContent), content) { + file, err := os.OpenFile(ignoreFile, os.O_APPEND|os.O_WRONLY, 0600) + if err != nil { + return errors.Errorf("Error writing file %s: %v", ignoreFile, err) + } + + defer file.Close() + if _, err = file.WriteString(content); err != nil { + return errors.Errorf("Error writing file %s: %v", ignoreFile, err) + } + } + } + return nil +} + +func getProjectName() (string, string, error) { + projectName := "" + projectNamespace := "" + gitRemote, err := command.Output(context.TODO(), "", "git", "config", "--get", "remote.origin.url") + if err == nil { + sep := "/" + projectParts := strings.Split(string(regexp.MustCompile(`^.*?://[^/]+?/([^.]+)(\.git)?`).ReplaceAll(gitRemote, []byte("$1"))), sep) + partsLen := len(projectParts) + if partsLen > 1 { + projectNamespace = strings.Join(projectParts[0:partsLen-1], sep) + projectName = projectParts[partsLen-1] + } + } + + if projectName == "" { + absPath, err := filepath.Abs(".") + if err != nil { + return "", "", err + } + projectName = filepath.Base(absPath) + } + + projectName = strings.ToLower(projectName) + projectName = regexp.MustCompile("[^a-zA-Z0-9- 
]+").ReplaceAllString(projectName, "") + projectName = regexp.MustCompile("[^a-zA-Z0-9-]+").ReplaceAllString(projectName, "-") + projectName = strings.Trim(projectName, "-") + + if !SpaceNameValidationRegEx.MatchString(projectName) || len(projectName) > 42 { + projectName = "devspace" + } + + return projectName, projectNamespace, nil +} + func parseImages(manifests string) ([]string, error) { images := []string{} diff --git a/pkg/devspace/compose/config_builder.go b/pkg/devspace/compose/config_builder.go index 106090b845..093bfef259 100644 --- a/pkg/devspace/compose/config_builder.go +++ b/pkg/devspace/compose/config_builder.go @@ -9,11 +9,11 @@ import ( ) type ConfigBuilder interface { - AddDependencies(dependency composetypes.Project, service composetypes.ServiceConfig) error - AddDeployment(dockerCompose composetypes.Project, service composetypes.ServiceConfig) error + AddDependencies(dependency *composetypes.Project, service composetypes.ServiceConfig) error + AddDeployment(dockerCompose *composetypes.Project, service composetypes.ServiceConfig) error AddDev(service composetypes.ServiceConfig) error - AddImage(dockerCompose composetypes.Project, service composetypes.ServiceConfig) error - AddSecret(dockerCompose composetypes.Project, service composetypes.ServiceConfig) error + AddImage(dockerCompose *composetypes.Project, service composetypes.ServiceConfig) error + AddSecret(dockerCompose *composetypes.Project, service composetypes.ServiceConfig) error Config() *latest.Config SetName(name string) } diff --git a/pkg/devspace/compose/dependency.go b/pkg/devspace/compose/dependency.go index 26c22fc8d3..38692cbe92 100644 --- a/pkg/devspace/compose/dependency.go +++ b/pkg/devspace/compose/dependency.go @@ -7,7 +7,7 @@ import ( "github.com/loft-sh/devspace/pkg/devspace/config/versions/latest" ) -func (cb *configBuilder) AddDependencies(dockerCompose composetypes.Project, service composetypes.ServiceConfig) error { +func (cb *configBuilder) 
AddDependencies(dockerCompose *composetypes.Project, service composetypes.ServiceConfig) error { for _, dependency := range service.GetDependencies() { depName := formatName(dependency) diff --git a/pkg/devspace/compose/deployment.go b/pkg/devspace/compose/deployment.go index 21d469e88e..2a3ceaf347 100644 --- a/pkg/devspace/compose/deployment.go +++ b/pkg/devspace/compose/deployment.go @@ -12,7 +12,7 @@ import ( v1 "k8s.io/api/core/v1" ) -func (cb *configBuilder) AddDeployment(dockerCompose composetypes.Project, service composetypes.ServiceConfig) error { +func (cb *configBuilder) AddDeployment(dockerCompose *composetypes.Project, service composetypes.ServiceConfig) error { values := map[string]interface{}{} volumes, volumeMounts, _ := volumesConfig(service, dockerCompose.Volumes, cb.log) diff --git a/pkg/devspace/compose/image.go b/pkg/devspace/compose/image.go index d73c7809a5..c0c07bcf91 100644 --- a/pkg/devspace/compose/image.go +++ b/pkg/devspace/compose/image.go @@ -7,16 +7,15 @@ import ( "github.com/loft-sh/devspace/pkg/devspace/config/versions/latest" ) -func (cb *configBuilder) AddImage(dockerCompose composetypes.Project, service composetypes.ServiceConfig) error { +func (cb *configBuilder) AddImage(dockerCompose *composetypes.Project, service composetypes.ServiceConfig) error { build := service.Build if build == nil { cb.config.Images = nil return nil } - currentDir := filepath.Join(dockerCompose.WorkingDir, cb.workingDir) contextDir := filepath.Join(dockerCompose.WorkingDir, build.Context) - context, err := filepath.Rel(currentDir, contextDir) + context, err := filepath.Rel(cb.workingDir, contextDir) if err != nil { return err } @@ -26,7 +25,7 @@ func (cb *configBuilder) AddImage(dockerCompose composetypes.Project, service co context = "" } - dockerfile, err := filepath.Rel(currentDir, filepath.Join(dockerCompose.WorkingDir, build.Context, build.Dockerfile)) + dockerfile, err := filepath.Rel(cb.workingDir, filepath.Join(dockerCompose.WorkingDir, 
build.Context, build.Dockerfile)) if err != nil { return err } diff --git a/pkg/devspace/compose/manager.go b/pkg/devspace/compose/manager.go index 5f8699285f..cb8ea651a7 100644 --- a/pkg/devspace/compose/manager.go +++ b/pkg/devspace/compose/manager.go @@ -30,59 +30,58 @@ func GetDockerComposePath() string { return "" } +func LoadDockerComposeProject(path string) (*composetypes.Project, error) { + composeFile, err := ioutil.ReadFile(path) + if err != nil { + return nil, err + } + + project, err := composeloader.Load(composetypes.ConfigDetails{ + ConfigFiles: []composetypes.ConfigFile{ + { + Content: composeFile, + }, + }, + }) + if err != nil { + return nil, err + } + + return project, nil +} + type ComposeManager interface { Load(log log.Logger) error - Config(path string) *latest.Config Configs() map[string]*latest.Config Save() error } type composeManager struct { - composePath string - configs map[string]*latest.Config + configs map[string]*latest.Config + project *composetypes.Project } -func NewComposeManager(composePath string) ComposeManager { +func NewComposeManager(project *composetypes.Project) ComposeManager { return &composeManager{ - composePath: composePath, - configs: map[string]*latest.Config{}, + configs: map[string]*latest.Config{}, + project: project, } } func (cm *composeManager) Load(log log.Logger) error { - composeFile, err := ioutil.ReadFile(cm.composePath) - if err != nil { - return err - } - - dockerCompose, err := composeloader.Load(composetypes.ConfigDetails{ - ConfigFiles: []composetypes.ConfigFile{ - { - Content: composeFile, - }, - }, - }) - if err != nil { - return err - } - dependentsMap, err := calculateDependentsMap(dockerCompose) + dependentsMap, err := calculateDependentsMap(cm.project) if err != nil { return err } builders := map[string]ConfigBuilder{} - err = dockerCompose.WithServices(nil, func(service composetypes.ServiceConfig) error { + err = cm.project.WithServices(nil, func(service composetypes.ServiceConfig) error { 
configName := "docker-compose" - workingDir := filepath.Dir(cm.composePath) + workingDir := cm.project.WorkingDir isDependency := dependentsMap[service.Name] != nil if isDependency { - // configKey = "devspace-" + service.Name + ".yaml" - // if service.Build != nil && service.Build.Context != "" { - // configKey = filepath.Join(service.Build.Context, "devspace.yaml") - // } - configName = service.Name if service.Build != nil && service.Build.Context != "" { workingDir = filepath.Join(workingDir, service.Build.Context) @@ -97,12 +96,12 @@ func (cm *composeManager) Load(log log.Logger) error { builder.SetName(configName) - err := builder.AddImage(*dockerCompose, service) + err := builder.AddImage(cm.project, service) if err != nil { return err } - err = builder.AddDeployment(*dockerCompose, service) + err = builder.AddDeployment(cm.project, service) if err != nil { return err } @@ -112,12 +111,12 @@ func (cm *composeManager) Load(log log.Logger) error { return err } - err = builder.AddSecret(*dockerCompose, service) + err = builder.AddSecret(cm.project, service) if err != nil { return err } - err = builder.AddDependencies(*dockerCompose, service) + err = builder.AddDependencies(cm.project, service) if err != nil { return err } @@ -128,7 +127,7 @@ func (cm *composeManager) Load(log log.Logger) error { return err } - err = dockerCompose.WithServices(nil, func(service composetypes.ServiceConfig) error { + err = cm.project.WithServices(nil, func(service composetypes.ServiceConfig) error { configName := "docker-compose" path := constants.DefaultConfigPath diff --git a/pkg/devspace/compose/manager_test.go b/pkg/devspace/compose/manager_test.go index 657b0e3836..0157239caa 100644 --- a/pkg/devspace/compose/manager_test.go +++ b/pkg/devspace/compose/manager_test.go @@ -1,7 +1,6 @@ package compose import ( - "fmt" "io/ioutil" "os" "path/filepath" @@ -64,7 +63,11 @@ func testLoad(dir string, t *testing.T) { }() dockerComposePath := GetDockerComposePath() - loader := 
NewComposeManager(dockerComposePath) + dockerCompose, err := LoadDockerComposeProject(dockerComposePath) + if err != nil { + t.Errorf("Unexpected error occurred loading the docker-compose.yaml: %s", err.Error()) + } + loader := NewComposeManager(dockerCompose) actualError := loader.Load(log.Discard) @@ -78,8 +81,6 @@ func testLoad(dir string, t *testing.T) { } for path, actualConfig := range loader.Configs() { - fmt.Println(path) - data, err := ioutil.ReadFile(path) if err != nil { t.Errorf("Please create the expected DevSpace configuration by creating a %s in the testdata/%s folder", path, dir) diff --git a/pkg/devspace/compose/secret.go b/pkg/devspace/compose/secret.go index e9810f1d43..fb3049e1e9 100644 --- a/pkg/devspace/compose/secret.go +++ b/pkg/devspace/compose/secret.go @@ -8,7 +8,7 @@ import ( "github.com/loft-sh/devspace/pkg/devspace/config/versions/latest" ) -func (cb *configBuilder) AddSecret(dockerCompose composetypes.Project, service composetypes.ServiceConfig) error { +func (cb *configBuilder) AddSecret(dockerCompose *composetypes.Project, service composetypes.ServiceConfig) error { var pipelines map[string]*latest.Pipeline for secretName, secret := range dockerCompose.Secrets { if pipelines == nil { diff --git a/pkg/devspace/compose/testdata/volumes-long/devspace.yaml b/pkg/devspace/compose/testdata/volumes-long/devspace.yaml index cbf7097fa0..9d0a889f3c 100644 --- a/pkg/devspace/compose/testdata/volumes-long/devspace.yaml +++ b/pkg/devspace/compose/testdata/volumes-long/devspace.yaml @@ -18,6 +18,11 @@ deployments: volume: name: db-1 shared: true + - containerPath: /var/lib/ro + volume: + name: db-2 + shared: true + readOnly: true - containerPath: /var/lib/mysql volume: name: datavolume @@ -29,11 +34,11 @@ deployments: readOnly: true - containerPath: /var/lib/tmpfs volume: - name: db-2 + name: db-3 readOnly: false - containerPath: /var/lib/tmpfs-1000 volume: - name: db-3 + name: db-4 readOnly: false - containerPath: /var/lib/data volume: @@ -50,11 
+55,13 @@ deployments: volumes: - name: db-1 size: 5Gi + - name: db-2 + size: 5Gi - name: datavolume size: 5Gi - - name: db-2 - emptyDir: {} - name: db-3 + emptyDir: {} + - name: db-4 emptyDir: sizeLimit: "1000" - name: volume-1 diff --git a/pkg/devspace/compose/testdata/volumes-long/docker-compose.yaml b/pkg/devspace/compose/testdata/volumes-long/docker-compose.yaml index 2451a12dab..048c839a43 100644 --- a/pkg/devspace/compose/testdata/volumes-long/docker-compose.yaml +++ b/pkg/devspace/compose/testdata/volumes-long/docker-compose.yaml @@ -7,6 +7,11 @@ services: - type: volume target: /var/lib/mydata + # Just specify a path and let the Engine create a readonly + - type: volume + target: /var/lib/ro + read_only: true + # Specify an absolute path mapping # - /opt/data:/var/lib/mysql - type: bind diff --git a/pkg/devspace/compose/testdata/volumes-short/devspace.yaml b/pkg/devspace/compose/testdata/volumes-short/devspace.yaml index 4e2bc5cb2e..6d5bab4791 100644 --- a/pkg/devspace/compose/testdata/volumes-short/devspace.yaml +++ b/pkg/devspace/compose/testdata/volumes-short/devspace.yaml @@ -31,7 +31,7 @@ deployments: - containerPath: /etc/configs/ volume: name: volume-3 - readOnly: true + readOnly: false volumes: - name: db-1 size: 5Gi diff --git a/pkg/devspace/compose/volume.go b/pkg/devspace/compose/volume.go index 5499c69ae6..72a770142d 100644 --- a/pkg/devspace/compose/volume.go +++ b/pkg/devspace/compose/volume.go @@ -8,6 +8,26 @@ import ( composetypes "github.com/compose-spec/compose-go/types" ) +func GetServiceSyncPaths( + project *composetypes.Project, + service composetypes.ServiceConfig, +) []string { + syncPaths := []string{} + for _, volumeMount := range service.Volumes { + isProvisionedVolume := false + for _, volume := range project.Volumes { + if volumeMount.Source == volume.Name { + isProvisionedVolume = true + } + } + + if !isProvisionedVolume { + syncPaths = append(syncPaths, volumeMount.Target) + } + } + return syncPaths +} + func volumesConfig( 
service composetypes.ServiceConfig, composeVolumes map[string]composetypes.VolumeConfig, @@ -124,11 +144,15 @@ func createSharedVolumeMount(volumeName string, volume composetypes.ServiceVolum } func createServiceVolumeMount(volumeName string, volume composetypes.ServiceVolumeConfig) interface{} { + readonly := volume.ReadOnly + if volume.Source != "" { + readonly = false + } return map[string]interface{}{ "containerPath": volume.Target, "volume": map[string]interface{}{ "name": volumeName, - "readOnly": volume.ReadOnly, + "readOnly": readonly, }, } } diff --git a/pkg/devspace/kubectl/selector/selector.go b/pkg/devspace/kubectl/selector/selector.go index 427355b50f..39744bbea3 100644 --- a/pkg/devspace/kubectl/selector/selector.go +++ b/pkg/devspace/kubectl/selector/selector.go @@ -52,7 +52,7 @@ var FilterNonRunningContainers = func(p *corev1.Pod, c *corev1.Container) bool { return true } for _, cs := range p.Status.InitContainerStatuses { - if cs.Name == c.Name && cs.State.Running != nil { + if cs.Name == c.Name && cs.Ready && cs.State.Running != nil { return false } } diff --git a/pkg/devspace/pipeline/engine/pipelinehandler/commands/select_pod.go b/pkg/devspace/pipeline/engine/pipelinehandler/commands/select_pod.go index 964ce4c127..bf598b16d6 100644 --- a/pkg/devspace/pipeline/engine/pipelinehandler/commands/select_pod.go +++ b/pkg/devspace/pipeline/engine/pipelinehandler/commands/select_pod.go @@ -2,13 +2,12 @@ package commands import ( "fmt" - "time" - "github.com/jessevdk/go-flags" devspacecontext "github.com/loft-sh/devspace/pkg/devspace/context" "github.com/loft-sh/devspace/pkg/devspace/services/targetselector" "github.com/pkg/errors" "mvdan.cc/sh/v3/interp" + "time" ) type SelectPodOptions struct { From d7eebc0c532d1587ac720e7ab0cf58ca2273db4c Mon Sep 17 00:00:00 2001 From: Russell Centanni Date: Tue, 5 Apr 2022 16:49:33 -0400 Subject: [PATCH 8/9] refactor: upgrade compose-go --- cmd/init.go | 7 + go.mod | 3 +- go.sum | 20 +- 
pkg/devspace/compose/deployment.go | 64 +- pkg/devspace/compose/dev.go | 11 +- pkg/devspace/compose/manager.go | 51 +- .../volumes-depends_on/devspace-db.yaml | 34 + .../testdata/volumes-depends_on/devspace.yaml | 38 + .../docker-compose.yaml | 2 + .../testdata/volumes-long/devspace.yaml | 4 +- .../testdata/volumes-short/devspace.yaml | 4 +- .../x_volumes-depends_on/devspace.yaml | 110 -- .../compose-spec/compose-go/LICENSE | 191 +++ .../github.com/compose-spec/compose-go/NOTICE | 2 + .../compose-spec/compose-go/consts/consts.go | 23 + .../compose-spec/compose-go/dotenv/LICENSE | 22 + .../compose-go/dotenv/godotenv.go | 375 +++++ .../compose-spec/compose-go/dotenv/parser.go | 234 +++ .../compose-spec/compose-go/errdefs/errors.go | 53 + .../compose-go/interpolation/interpolation.go | 177 ++ .../compose-go/loader/example1.env | 8 + .../compose-go/loader/example2.env | 4 + .../compose-go/loader/full-example.yml | 419 +++++ .../compose-go/loader/interpolate.go | 124 ++ .../compose-spec/compose-go/loader/loader.go | 1182 +++++++++++++ .../compose-spec/compose-go/loader/merge.go | 362 ++++ .../compose-go/loader/normalize.go | 264 +++ .../compose-go/loader/validate.go | 70 + .../compose-spec/compose-go/loader/volume.go | 180 ++ .../compose-go/loader/windows_path.go | 82 + .../compose-go/schema/compose-spec.json | 827 ++++++++++ .../compose-spec/compose-go/schema/schema.go | 164 ++ .../compose-go/template/template.go | 334 ++++ .../compose-spec/compose-go/types/config.go | 106 ++ .../compose-spec/compose-go/types/project.go | 342 ++++ .../compose-spec/compose-go/types/types.go | 896 ++++++++++ .../distribution/distribution/v3/LICENSE | 202 +++ .../distribution/v3/digestset/set.go | 247 +++ .../distribution/v3/reference/helpers.go | 42 + .../distribution/v3/reference/normalize.go | 198 +++ .../distribution/v3/reference/reference.go | 433 +++++ .../distribution/v3/reference/regexp.go | 147 ++ .../mattn/go-shellwords/.travis.yml | 16 + 
vendor/github.com/mattn/go-shellwords/LICENSE | 21 + .../github.com/mattn/go-shellwords/README.md | 55 + vendor/github.com/mattn/go-shellwords/go.mod | 3 + .../github.com/mattn/go-shellwords/go.test.sh | 12 + .../mattn/go-shellwords/shellwords.go | 317 ++++ .../mattn/go-shellwords/util_posix.go | 29 + .../mattn/go-shellwords/util_windows.go | 29 + .../mitchellh/mapstructure/CHANGELOG.md | 83 + .../github.com/mitchellh/mapstructure/LICENSE | 21 + .../mitchellh/mapstructure/README.md | 46 + .../mitchellh/mapstructure/decode_hooks.go | 257 +++ .../mitchellh/mapstructure/error.go | 50 + .../github.com/mitchellh/mapstructure/go.mod | 3 + .../mitchellh/mapstructure/mapstructure.go | 1467 +++++++++++++++++ .../gojsonpointer/LICENSE-APACHE-2.0.txt | 202 +++ .../xeipuuv/gojsonpointer/README.md | 41 + .../xeipuuv/gojsonpointer/pointer.go | 211 +++ .../gojsonreference/LICENSE-APACHE-2.0.txt | 202 +++ .../xeipuuv/gojsonreference/README.md | 10 + .../xeipuuv/gojsonreference/reference.go | 147 ++ .../xeipuuv/gojsonschema/.gitignore | 3 + .../xeipuuv/gojsonschema/.travis.yml | 9 + .../gojsonschema/LICENSE-APACHE-2.0.txt | 202 +++ .../github.com/xeipuuv/gojsonschema/README.md | 466 ++++++ .../github.com/xeipuuv/gojsonschema/draft.go | 125 ++ .../github.com/xeipuuv/gojsonschema/errors.go | 364 ++++ .../xeipuuv/gojsonschema/format_checkers.go | 368 +++++ .../xeipuuv/gojsonschema/glide.yaml | 13 + vendor/github.com/xeipuuv/gojsonschema/go.mod | 7 + vendor/github.com/xeipuuv/gojsonschema/go.sum | 11 + .../xeipuuv/gojsonschema/internalLog.go | 37 + .../xeipuuv/gojsonschema/jsonContext.go | 73 + .../xeipuuv/gojsonschema/jsonLoader.go | 386 +++++ .../xeipuuv/gojsonschema/locales.go | 472 ++++++ .../github.com/xeipuuv/gojsonschema/result.go | 220 +++ .../github.com/xeipuuv/gojsonschema/schema.go | 1087 ++++++++++++ .../xeipuuv/gojsonschema/schemaLoader.go | 206 +++ .../xeipuuv/gojsonschema/schemaPool.go | 215 +++ .../gojsonschema/schemaReferencePool.go | 68 + 
.../xeipuuv/gojsonschema/schemaType.go | 83 + .../xeipuuv/gojsonschema/subSchema.go | 149 ++ .../github.com/xeipuuv/gojsonschema/types.go | 62 + .../github.com/xeipuuv/gojsonschema/utils.go | 197 +++ .../xeipuuv/gojsonschema/validation.go | 858 ++++++++++ vendor/modules.txt | 25 +- 88 files changed, 16833 insertions(+), 153 deletions(-) create mode 100755 pkg/devspace/compose/testdata/volumes-depends_on/devspace-db.yaml create mode 100755 pkg/devspace/compose/testdata/volumes-depends_on/devspace.yaml rename pkg/devspace/compose/testdata/{x_volumes-depends_on => volumes-depends_on}/docker-compose.yaml (74%) delete mode 100644 pkg/devspace/compose/testdata/x_volumes-depends_on/devspace.yaml create mode 100644 vendor/github.com/compose-spec/compose-go/LICENSE create mode 100644 vendor/github.com/compose-spec/compose-go/NOTICE create mode 100644 vendor/github.com/compose-spec/compose-go/consts/consts.go create mode 100644 vendor/github.com/compose-spec/compose-go/dotenv/LICENSE create mode 100644 vendor/github.com/compose-spec/compose-go/dotenv/godotenv.go create mode 100644 vendor/github.com/compose-spec/compose-go/dotenv/parser.go create mode 100644 vendor/github.com/compose-spec/compose-go/errdefs/errors.go create mode 100644 vendor/github.com/compose-spec/compose-go/interpolation/interpolation.go create mode 100644 vendor/github.com/compose-spec/compose-go/loader/example1.env create mode 100644 vendor/github.com/compose-spec/compose-go/loader/example2.env create mode 100644 vendor/github.com/compose-spec/compose-go/loader/full-example.yml create mode 100644 vendor/github.com/compose-spec/compose-go/loader/interpolate.go create mode 100644 vendor/github.com/compose-spec/compose-go/loader/loader.go create mode 100644 vendor/github.com/compose-spec/compose-go/loader/merge.go create mode 100644 vendor/github.com/compose-spec/compose-go/loader/normalize.go create mode 100644 vendor/github.com/compose-spec/compose-go/loader/validate.go create mode 100644 
vendor/github.com/compose-spec/compose-go/loader/volume.go create mode 100644 vendor/github.com/compose-spec/compose-go/loader/windows_path.go create mode 100644 vendor/github.com/compose-spec/compose-go/schema/compose-spec.json create mode 100644 vendor/github.com/compose-spec/compose-go/schema/schema.go create mode 100644 vendor/github.com/compose-spec/compose-go/template/template.go create mode 100644 vendor/github.com/compose-spec/compose-go/types/config.go create mode 100644 vendor/github.com/compose-spec/compose-go/types/project.go create mode 100644 vendor/github.com/compose-spec/compose-go/types/types.go create mode 100644 vendor/github.com/distribution/distribution/v3/LICENSE create mode 100644 vendor/github.com/distribution/distribution/v3/digestset/set.go create mode 100644 vendor/github.com/distribution/distribution/v3/reference/helpers.go create mode 100644 vendor/github.com/distribution/distribution/v3/reference/normalize.go create mode 100644 vendor/github.com/distribution/distribution/v3/reference/reference.go create mode 100644 vendor/github.com/distribution/distribution/v3/reference/regexp.go create mode 100644 vendor/github.com/mattn/go-shellwords/.travis.yml create mode 100644 vendor/github.com/mattn/go-shellwords/LICENSE create mode 100644 vendor/github.com/mattn/go-shellwords/README.md create mode 100644 vendor/github.com/mattn/go-shellwords/go.mod create mode 100644 vendor/github.com/mattn/go-shellwords/go.test.sh create mode 100644 vendor/github.com/mattn/go-shellwords/shellwords.go create mode 100644 vendor/github.com/mattn/go-shellwords/util_posix.go create mode 100644 vendor/github.com/mattn/go-shellwords/util_windows.go create mode 100644 vendor/github.com/mitchellh/mapstructure/CHANGELOG.md create mode 100644 vendor/github.com/mitchellh/mapstructure/LICENSE create mode 100644 vendor/github.com/mitchellh/mapstructure/README.md create mode 100644 vendor/github.com/mitchellh/mapstructure/decode_hooks.go create mode 100644 
vendor/github.com/mitchellh/mapstructure/error.go create mode 100644 vendor/github.com/mitchellh/mapstructure/go.mod create mode 100644 vendor/github.com/mitchellh/mapstructure/mapstructure.go create mode 100644 vendor/github.com/xeipuuv/gojsonpointer/LICENSE-APACHE-2.0.txt create mode 100644 vendor/github.com/xeipuuv/gojsonpointer/README.md create mode 100644 vendor/github.com/xeipuuv/gojsonpointer/pointer.go create mode 100644 vendor/github.com/xeipuuv/gojsonreference/LICENSE-APACHE-2.0.txt create mode 100644 vendor/github.com/xeipuuv/gojsonreference/README.md create mode 100644 vendor/github.com/xeipuuv/gojsonreference/reference.go create mode 100644 vendor/github.com/xeipuuv/gojsonschema/.gitignore create mode 100644 vendor/github.com/xeipuuv/gojsonschema/.travis.yml create mode 100644 vendor/github.com/xeipuuv/gojsonschema/LICENSE-APACHE-2.0.txt create mode 100644 vendor/github.com/xeipuuv/gojsonschema/README.md create mode 100644 vendor/github.com/xeipuuv/gojsonschema/draft.go create mode 100644 vendor/github.com/xeipuuv/gojsonschema/errors.go create mode 100644 vendor/github.com/xeipuuv/gojsonschema/format_checkers.go create mode 100644 vendor/github.com/xeipuuv/gojsonschema/glide.yaml create mode 100644 vendor/github.com/xeipuuv/gojsonschema/go.mod create mode 100644 vendor/github.com/xeipuuv/gojsonschema/go.sum create mode 100644 vendor/github.com/xeipuuv/gojsonschema/internalLog.go create mode 100644 vendor/github.com/xeipuuv/gojsonschema/jsonContext.go create mode 100644 vendor/github.com/xeipuuv/gojsonschema/jsonLoader.go create mode 100644 vendor/github.com/xeipuuv/gojsonschema/locales.go create mode 100644 vendor/github.com/xeipuuv/gojsonschema/result.go create mode 100644 vendor/github.com/xeipuuv/gojsonschema/schema.go create mode 100644 vendor/github.com/xeipuuv/gojsonschema/schemaLoader.go create mode 100644 vendor/github.com/xeipuuv/gojsonschema/schemaPool.go create mode 100644 vendor/github.com/xeipuuv/gojsonschema/schemaReferencePool.go create 
mode 100644 vendor/github.com/xeipuuv/gojsonschema/schemaType.go create mode 100644 vendor/github.com/xeipuuv/gojsonschema/subSchema.go create mode 100644 vendor/github.com/xeipuuv/gojsonschema/types.go create mode 100644 vendor/github.com/xeipuuv/gojsonschema/utils.go create mode 100644 vendor/github.com/xeipuuv/gojsonschema/validation.go diff --git a/cmd/init.go b/cmd/init.go index d924a5f325..83569198b4 100644 --- a/cmd/init.go +++ b/cmd/init.go @@ -490,6 +490,13 @@ func (cmd *InitCmd) initDockerCompose(f factory.Factory, composePath string) err return err } + projectName, _, err := getProjectName() + if err != nil { + return err + } + + project.Name = projectName + // Prompt user for entrypoints for each container with sync folders. for idx, service := range project.Services { localPaths := compose.GetServiceSyncPaths(project, service) diff --git a/go.mod b/go.mod index 08cf46bcc2..402d151257 100644 --- a/go.mod +++ b/go.mod @@ -7,19 +7,18 @@ require ( github.com/acarl005/stripansi v0.0.0-20180116102854-5a71ef0e047d github.com/blang/semver v3.5.1+incompatible github.com/bmatcuk/doublestar v1.1.1 + github.com/compose-spec/compose-go v1.2.2 github.com/creack/pty v1.1.15 github.com/docker/cli v20.10.0-beta1.0.20201029214301-1d20b15adc38+incompatible github.com/docker/distribution v2.7.1+incompatible github.com/docker/docker v20.10.5+incompatible github.com/docker/go-connections v0.4.0 - github.com/docker/go-metrics v0.0.1 // indirect github.com/evanphx/json-patch v4.12.0+incompatible github.com/evanphx/json-patch/v5 v5.1.0 github.com/ghodss/yaml v1.0.0 github.com/gliderlabs/ssh v0.3.3 github.com/google/uuid v1.1.2 github.com/gorilla/websocket v1.4.2 - github.com/imdario/mergo v0.3.12 // indirect github.com/inconshreveable/go-update v0.0.0-20160112193335-8152e7eb6ccf // indirect github.com/jessevdk/go-flags v1.4.0 github.com/joho/godotenv v1.3.0 diff --git a/go.sum b/go.sum index dfee818de0..6f034e4601 100644 --- a/go.sum +++ b/go.sum @@ -184,6 +184,7 @@ 
github.com/aws/aws-sdk-go v1.20.6/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN github.com/aws/aws-sdk-go v1.25.11/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= github.com/aws/aws-sdk-go v1.27.1/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= github.com/aws/aws-sdk-go v1.31.6/go.mod h1:5zCpMtNQVjRREroY7sYe8lOMRSxkhG6MZveU8YkpAk0= +github.com/aws/aws-sdk-go v1.34.9/go.mod h1:5zCpMtNQVjRREroY7sYe8lOMRSxkhG6MZveU8YkpAk0= github.com/aybabtme/rgbterm v0.0.0-20170906152045-cc83f3b3ce59/go.mod h1:q/89r3U2H7sSsE2t6Kca0lfwTK8JdoNGS/yzM/4iH5I= github.com/benbjohnson/clock v1.0.3/go.mod h1:bGMdMPoPVvcYyt1gHDf4J2KE153Yf9BuiUKYMaxlTDM= github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= @@ -210,6 +211,7 @@ github.com/bombsimon/wsl/v3 v3.0.0/go.mod h1:st10JtZYLE4D5sC7b8xV4zTKZwAQjCH/Hy2 github.com/bombsimon/wsl/v3 v3.1.0/go.mod h1:st10JtZYLE4D5sC7b8xV4zTKZwAQjCH/Hy2Pm1FNZIc= github.com/bradfitz/go-smtpd v0.0.0-20170404230938-deb6d6237625/go.mod h1:HYsPBTaaSFSlLx/70C2HPIMNZpVV8+vt/A+FMnYP11g= github.com/bshuster-repo/logrus-logstash-hook v0.4.1/go.mod h1:zsTqEiSzDgAa/8GZR7E1qaXrhYNDKBYy5/dWPTIflbk= +github.com/bshuster-repo/logrus-logstash-hook v1.0.0/go.mod h1:zsTqEiSzDgAa/8GZR7E1qaXrhYNDKBYy5/dWPTIflbk= github.com/bugsnag/bugsnag-go v0.0.0-20141110184014-b1d153021fcd/go.mod h1:2oa8nejYd4cQ/b0hMIopN0lCRxU0bueqREvZLWFrtK8= github.com/bugsnag/osext v0.0.0-20130617224835-0dd3f918b21b/go.mod h1:obH5gd0BsqsP2LwDJ9aOkm/6J86V6lyAXCoQWGw3K50= github.com/bugsnag/panicwrap v0.0.0-20151223152923-e2c28503fcd0/go.mod h1:D/8v3kj0zr8ZAKg1AQ6crr+5VwKN5eIywRkfhyM/+dE= @@ -236,6 +238,8 @@ github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnht github.com/cncf/xds/go v0.0.0-20210312221358-fbca930ec8ed/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa/go.mod h1:zn76sxSg3SzpJ0PPJaLDCu+Bu0Lg3sKTORVIj19EIF8= github.com/codahale/hdrhistogram 
v0.0.0-20160425231609-f8ad88b59a58/go.mod h1:sE/e/2PUdi/liOCUjSTXgM1o87ZssimdTWN964YiIeI= +github.com/compose-spec/compose-go v1.2.2 h1:y1dwl3KUTBnWPVur6EZno9zUIum6Q87/F5keljnGQB4= +github.com/compose-spec/compose-go v1.2.2/go.mod h1:pAy7Mikpeft4pxkFU565/DRHEbDfR84G6AQuiL+Hdg8= github.com/containerd/cgroups v0.0.0-20190919134610-bf292b21730f/go.mod h1:OApqhQ4XNSNC13gXIwDjhOQxjWa/NxkwZXJ1EvqT0ko= github.com/containerd/cgroups v0.0.0-20200531161412-0dbf7f05ba59/go.mod h1:pA0z1pT8KYB3TCXK/ocprsh7MAkoW8bZVzPdih9snmM= github.com/containerd/cgroups v0.0.0-20200710171044-318312a37340 h1:9atoWyI9RtXFwf7UDbme/6M8Ud0rFrx+Q3ZWgSnsxtw= @@ -303,6 +307,8 @@ github.com/dgrijalva/jwt-go v0.0.0-20170104182250-a601269ab70c/go.mod h1:E3ru+11 github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no= github.com/dimchansky/utfbom v1.1.0/go.mod h1:rO41eb7gLfo8SF1jd9F8HplJm1Fewwi4mQvIirEdv+8= +github.com/distribution/distribution/v3 v3.0.0-20210316161203-a01c71e2477e h1:n81KvOMrLZa+VWHwST7dun9f0G98X3zREHS1ztYzZKU= +github.com/distribution/distribution/v3 v3.0.0-20210316161203-a01c71e2477e/go.mod h1:xpWTC2KnJMiDLkoawhsPQcXjvwATEBcbq0xevG2YR9M= github.com/dnaeon/go-vcr v1.0.1/go.mod h1:aBB1+wY4s93YsC3HHjMBMrwTj2R9FHDzUr9KyGc8n1E= github.com/docker/cli v0.0.0-20190925022749-754388324470/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8= github.com/docker/cli v0.0.0-20191017083524-a8ff7f821017/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8= @@ -516,6 +522,7 @@ github.com/golangci/revgrep v0.0.0-20180526074752-d9c87f5ffaf0/go.mod h1:qOQCunE github.com/golangci/revgrep v0.0.0-20180812185044-276a5c0a1039/go.mod h1:qOQCunEYvmd/TLamH+7LlVccLvUH5kZNhbCgTHoBbp4= github.com/golangci/unconvert v0.0.0-20180507085042-28b1c447d1f4/go.mod h1:Izgrg8RkN3rCIMLGE9CyYmU9pY2Jer6DgANEnZ/L/cQ= github.com/golangplus/testing 
v0.0.0-20180327235837-af21d9c3145e/go.mod h1:0AA//k/eakGydO4jKRoRL2j92ZKSzTgj9tclaCrvXHk= +github.com/gomodule/redigo v1.8.2/go.mod h1:P9dn9mFrCBvWhGE1wpxx6fgq7BAeLBk+UUUzlpkBYO0= github.com/google/btree v0.0.0-20180124185431-e89373fe6b4a/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= @@ -592,6 +599,7 @@ github.com/goreleaser/nfpm v1.2.1/go.mod h1:TtWrABZozuLOttX2uDlYyECfQX7x5XYkVxhj github.com/goreleaser/nfpm v1.3.0/go.mod h1:w0p7Kc9TAUgWMyrub63ex3M2Mgw88M4GZXoTq5UCb40= github.com/gorilla/context v1.1.1/go.mod h1:kBGZzfjB9CEq2AlWe17Uuf7NDRt0dE0s8S51q0aT7Yg= github.com/gorilla/handlers v0.0.0-20150720190736-60c7bfde3e33/go.mod h1:Qkdc/uu4tH4g6mTK6auzZ766c4CA0Ng8+o/OAirnOIQ= +github.com/gorilla/handlers v1.5.1/go.mod h1:t8XrUpc4KVXb7HGyJ4/cEnwQiaxrX/hz1Zv/4g96P1Q= github.com/gorilla/mux v1.6.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= github.com/gorilla/mux v1.7.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= github.com/gorilla/mux v1.7.3/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= @@ -773,6 +781,8 @@ github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Ky github.com/mattn/go-runewidth v0.0.2/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU= github.com/mattn/go-runewidth v0.0.7/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI= github.com/mattn/go-shellwords v1.0.10/go.mod h1:EZzvwXDESEeg03EKmM+RmDnNOPKG4lLtQsUlTZDWQ8Y= +github.com/mattn/go-shellwords v1.0.12 h1:M2zGm7EW6UQJvDeQxo4T51eKPurbeFbe8WtebGE2xrk= +github.com/mattn/go-shellwords v1.0.12/go.mod h1:EZzvwXDESEeg03EKmM+RmDnNOPKG4lLtQsUlTZDWQ8Y= github.com/mattn/go-sqlite3 v1.9.0/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc= github.com/mattn/go-zglob v0.0.1/go.mod h1:9fxibJccNxU2cnpIKLRRFA7zX7qhkJIQWBb449FYHOo= 
github.com/mattn/goveralls v0.0.2/go.mod h1:8d1ZMHsd7fW6IRPKQh46F2WRpyib5/X4FOpevwGNQEw= @@ -798,6 +808,8 @@ github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:F github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= github.com/mitchellh/mapstructure v1.3.1/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= github.com/mitchellh/mapstructure v1.4.1/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= +github.com/mitchellh/mapstructure v1.4.3 h1:OVowDSCllw/YjdLkam3/sm7wEtOy59d8ndGgCcyj8cs= +github.com/mitchellh/mapstructure v1.4.3/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= github.com/mitchellh/osext v0.0.0-20151018003038-5e2d6d41470f/go.mod h1:OkQIRizQZAeMln+1tSwduZz7+Af5oFlKirV/MSYes2A= github.com/moby/buildkit v0.8.2 h1:kvb0cLWss4mOhCxcXSTENzzA+t1JR1eIyXFhDrI+73g= github.com/moby/buildkit v0.8.2/go.mod h1:5PZi7ALzuxG604ggYSeN+rzC+CyJscuXS7WetulJr1Y= @@ -1125,9 +1137,13 @@ github.com/xanzy/go-gitlab v0.31.0/go.mod h1:sPLojNBn68fMUWSxIJtdVVIP8uSBYqesTfD github.com/xanzy/go-gitlab v0.32.0/go.mod h1:sPLojNBn68fMUWSxIJtdVVIP8uSBYqesTfDUseX11Ug= github.com/xanzy/ssh-agent v0.2.1 h1:TCbipTQL2JiiCprBWx9frJ2eJlCYT00NmctrHxVAr70= github.com/xanzy/ssh-agent v0.2.1/go.mod h1:mLlQY/MoOhWBj+gOGMQkOeiEvkx+8pJSI+0Bx9h2kr4= +github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f h1:J9EGpcZtP0E/raorCMxlFGSTBrsSlaDGf3jU/qvAE2c= github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU= +github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 h1:EzJWgHovont7NscjpAxXsDA8S8BMYve8Y5+7cuRE7R0= github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415/go.mod h1:GwrjFmJcFw6At/Gs6z4yjiIwzuJ1/+UwLxMQDVQXShQ= github.com/xeipuuv/gojsonschema v0.0.0-20180618132009-1d523034197f/go.mod h1:5yf86TLmAcydyeJq5YvxkGPE2fm/u4myDekKRoLuqhs= +github.com/xeipuuv/gojsonschema v1.2.0 
h1:LhYJRs+L4fBtjZUfuSZIKGeVu0QRy8e5Xi7D17UxZ74= +github.com/xeipuuv/gojsonschema v1.2.0/go.mod h1:anYRn/JVcOK2ZgGU+IjEV4nwlhoK5sQluxsYJ78Id3Y= github.com/xi2/xz v0.0.0-20171230120015-48954b6210f8/go.mod h1:HUYIGzjTL3rfEspMxjDjgmT5uz5wzYJKVo23qUhYTos= github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU= github.com/xlab/treeprint v0.0.0-20181112141820-a009c3971eca/go.mod h1:ce1O1j6UtZfjr22oyGxGLbauSBp2YVXpARAosm7dHBg= @@ -1202,6 +1218,7 @@ golang.org/x/crypto v0.0.0-20190820162420-60c769a6c586/go.mod h1:yigFU9vqHzYiE8U golang.org/x/crypto v0.0.0-20191002192127-34f69633bfdc/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20191206172530-e9b2fee46413/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20200128174031-69ecbb4d6d5d/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200220183623-bac4c82f6975/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20201002170205-7f63de1d35b0/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= @@ -1747,8 +1764,9 @@ gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C gotest.tools v2.2.0+incompatible h1:VsBPFP1AI068pPrMxtb/S8Zkgf9xEmTLJjfM+P5UIEo= gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw= gotest.tools/v3 v3.0.2/go.mod h1:3SzNCllyD9/Y+b5r9JIKQ474KzkZyqLqEfYqMsX94Bk= -gotest.tools/v3 v3.0.3 h1:4AuOwCGf4lLR9u3YOe2awrHygurzhO/HeQ6laiA6Sx0= gotest.tools/v3 v3.0.3/go.mod h1:Z7Lb0S5l+klDB31fvDQX8ss/FlKDxtlFlw3Oa8Ymbl8= +gotest.tools/v3 v3.1.0 h1:rVV8Tcg/8jHUkPUorwjaMTtemIMVXfIPKiOqnhEhakk= +gotest.tools/v3 v3.1.0/go.mod 
h1:fHy7eyTmJFO5bQbUsEGQ1v4m2J3Jz9eWL54TP2/ZuYQ= grpc.go4.org v0.0.0-20170609214715-11d0a25b4919/go.mod h1:77eQGdRu53HpSqPFJFmuJdjuHRquDANNeA4x7B8WQ9o= honnef.co/go/tools v0.0.0-20180728063816-88497007e858/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20180920025451-e3ad64cb4ed3/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= diff --git a/pkg/devspace/compose/deployment.go b/pkg/devspace/compose/deployment.go index 2a3ceaf347..483d4ef74a 100644 --- a/pkg/devspace/compose/deployment.go +++ b/pkg/devspace/compose/deployment.go @@ -38,41 +38,43 @@ func (cb *configBuilder) AddDeployment(dockerCompose *composetypes.Project, serv } ports := []interface{}{} - if len(service.Ports) > 0 { - for _, port := range service.Ports { - var protocol string - switch port.Protocol { - case "tcp": - protocol = string(v1.ProtocolTCP) - case "udp": - protocol = string(v1.ProtocolUDP) - default: - return fmt.Errorf("invalid protocol %s", port.Protocol) - } - - if port.Published == 0 { - cb.log.Warnf("Unassigned port ranges are not supported: %s", port.Target) - continue - } - - ports = append(ports, map[string]interface{}{ - "port": int(port.Published), - "containerPort": int(port.Target), - "protocol": protocol, - }) + for _, port := range service.Ports { + var protocol string + switch port.Protocol { + case "tcp": + protocol = string(v1.ProtocolTCP) + case "udp": + protocol = string(v1.ProtocolUDP) + default: + return fmt.Errorf("invalid protocol %s", port.Protocol) + } + + // fmt.Println(port.Published) + if port.Published == "" { + cb.log.Warnf("Unassigned ports are not supported: %s", port.Target) + continue + } + + portNumber, err := strconv.Atoi(port.Published) + if err != nil { + return err } + + ports = append(ports, map[string]interface{}{ + "port": portNumber, + "containerPort": int(port.Target), + "protocol": protocol, + }) } - if len(service.Expose) > 0 { - for _, port := range service.Expose { - intPort, err := strconv.Atoi(port) - 
if err != nil { - return fmt.Errorf("expected integer for port number: %s", err.Error()) - } - ports = append(ports, map[string]interface{}{ - "port": intPort, - }) + for _, port := range service.Expose { + intPort, err := strconv.Atoi(port) + if err != nil { + return fmt.Errorf("expected integer for port number: %s", err.Error()) } + ports = append(ports, map[string]interface{}{ + "port": intPort, + }) } if len(ports) > 0 { diff --git a/pkg/devspace/compose/dev.go b/pkg/devspace/compose/dev.go index 1c9ea1ecbc..d5491e52f2 100644 --- a/pkg/devspace/compose/dev.go +++ b/pkg/devspace/compose/dev.go @@ -4,6 +4,7 @@ import ( "fmt" "os" "path/filepath" + "strconv" "strings" composetypes "github.com/compose-spec/compose-go/types" @@ -15,14 +16,20 @@ func (cb *configBuilder) AddDev(service composetypes.ServiceConfig) error { devPorts := []*latest.PortMapping{} for _, port := range service.Ports { + portMapping := &latest.PortMapping{} - if port.Published == 0 { + if port.Published == "" { cb.log.Warnf("Unassigned port ranges are not supported: %s", port.Target) continue } - if port.Published != port.Target { + portNumber, err := strconv.Atoi(port.Published) + if err != nil { + return err + } + + if portNumber != int(port.Target) { portMapping.Port = fmt.Sprint(port.Published) + ":" + fmt.Sprint(port.Target) } else { portMapping.Port = fmt.Sprint(port.Published) diff --git a/pkg/devspace/compose/manager.go b/pkg/devspace/compose/manager.go index cb8ea651a7..2eaae8e608 100644 --- a/pkg/devspace/compose/manager.go +++ b/pkg/devspace/compose/manager.go @@ -1,9 +1,12 @@ package compose import ( + "fmt" "io/ioutil" "os" "path/filepath" + "strconv" + "strings" composeloader "github.com/compose-spec/compose-go/loader" composetypes "github.com/compose-spec/compose-go/types" @@ -47,6 +50,19 @@ func LoadDockerComposeProject(path string) (*composetypes.Project, error) { return nil, err } + // Expand service ports + for idx, service := range project.Services { + ports := 
[]composetypes.ServicePortConfig{} + for _, port := range service.Ports { + expandedPorts, err := expandPublishedPortRange(port) + if err != nil { + return nil, err + } + ports = append(ports, expandedPorts...) + } + project.Services[idx].Ports = ports + } + return project, nil } @@ -69,7 +85,6 @@ func NewComposeManager(project *composetypes.Project) ComposeManager { } func (cm *composeManager) Load(log log.Logger) error { - dependentsMap, err := calculateDependentsMap(cm.project) if err != nil { return err @@ -187,3 +202,37 @@ func calculateDependentsMap(dockerCompose *composetypes.Project) (map[string][]s }) return tree, err } + +func expandPublishedPortRange(port composetypes.ServicePortConfig) ([]composetypes.ServicePortConfig, error) { + if !strings.Contains(port.Published, "-") { + return []composetypes.ServicePortConfig{port}, nil + } + + publishedRange := strings.Split(port.Published, "-") + if len(publishedRange) > 2 { + return nil, fmt.Errorf("invalid port range") + } + + begin, err := strconv.Atoi(publishedRange[0]) + if err != nil { + return nil, fmt.Errorf("invalid port range %s: beginning value must be numeric", port.Published) + } + + end, err := strconv.Atoi(publishedRange[1]) + if err != nil { + return nil, fmt.Errorf("invalid port range %s: end value must be numeric", port.Published) + } + + var portConfigs []composetypes.ServicePortConfig + for i := begin; i <= end; i++ { + portConfigs = append(portConfigs, composetypes.ServicePortConfig{ + HostIP: port.HostIP, + Protocol: port.Protocol, + Target: port.Target, + Published: strconv.Itoa(i), + Mode: "ingress", + }) + } + + return portConfigs, nil +} diff --git a/pkg/devspace/compose/testdata/volumes-depends_on/devspace-db.yaml b/pkg/devspace/compose/testdata/volumes-depends_on/devspace-db.yaml new file mode 100755 index 0000000000..cac022fc28 --- /dev/null +++ b/pkg/devspace/compose/testdata/volumes-depends_on/devspace-db.yaml @@ -0,0 +1,34 @@ +version: v2beta1 +name: db + +deployments: + db: + 
helm: + values: + containers: + - command: + - tail + - -f + - /dev/null + image: loft.sh/mysql-server:8.0.19 + name: db-container + volumeMounts: + - containerPath: /tmp/cache + volume: + name: volume-1 + readOnly: false + volumes: + - emptyDir: {} + name: volume-1 + +dev: + db: + labelSelector: + app.kubernetes.io/component: db + command: + - tail + - -f + - /dev/null + sync: + - startContainer: true + path: ./cache:/tmp/cache diff --git a/pkg/devspace/compose/testdata/volumes-depends_on/devspace.yaml b/pkg/devspace/compose/testdata/volumes-depends_on/devspace.yaml new file mode 100755 index 0000000000..ebf83bf35a --- /dev/null +++ b/pkg/devspace/compose/testdata/volumes-depends_on/devspace.yaml @@ -0,0 +1,38 @@ +version: v2beta1 +name: docker-compose + +deployments: + backend: + helm: + values: + containers: + - command: + - tail + - -f + - /dev/null + image: rails:latest + name: backend-container + volumeMounts: + - containerPath: /tmp/cache + volume: + name: volume-1 + readOnly: false + volumes: + - emptyDir: {} + name: volume-1 + +dev: + backend: + labelSelector: + app.kubernetes.io/component: backend + command: + - tail + - -f + - /dev/null + sync: + - startContainer: true + path: ./cache:/tmp/cache + +dependencies: + db: + path: devspace-db.yaml diff --git a/pkg/devspace/compose/testdata/x_volumes-depends_on/docker-compose.yaml b/pkg/devspace/compose/testdata/volumes-depends_on/docker-compose.yaml similarity index 74% rename from pkg/devspace/compose/testdata/x_volumes-depends_on/docker-compose.yaml rename to pkg/devspace/compose/testdata/volumes-depends_on/docker-compose.yaml index d683a1dd36..5712e59b48 100644 --- a/pkg/devspace/compose/testdata/x_volumes-depends_on/docker-compose.yaml +++ b/pkg/devspace/compose/testdata/volumes-depends_on/docker-compose.yaml @@ -1,10 +1,12 @@ services: db: image: loft.sh/mysql-server:8.0.19 + entrypoint: tail -f /dev/null volumes: - ./cache:/tmp/cache backend: image: rails:latest + entrypoint: tail -f /dev/null 
depends_on: - db volumes: diff --git a/pkg/devspace/compose/testdata/volumes-long/devspace.yaml b/pkg/devspace/compose/testdata/volumes-long/devspace.yaml index 9d0a889f3c..2207d91106 100644 --- a/pkg/devspace/compose/testdata/volumes-long/devspace.yaml +++ b/pkg/devspace/compose/testdata/volumes-long/devspace.yaml @@ -48,7 +48,7 @@ deployments: volume: name: volume-2 readOnly: false - - containerPath: /etc/configs/ + - containerPath: /etc/configs volume: name: volume-3 readOnly: false @@ -102,7 +102,7 @@ dev: startContainer: true - path: ./cache:/tmp/cache startContainer: true - - path: ${devspace.userHome}/configs:/etc/configs/ + - path: ${devspace.userHome}/configs:/etc/configs startContainer: true diff --git a/pkg/devspace/compose/testdata/volumes-short/devspace.yaml b/pkg/devspace/compose/testdata/volumes-short/devspace.yaml index 6d5bab4791..862fb78e3d 100644 --- a/pkg/devspace/compose/testdata/volumes-short/devspace.yaml +++ b/pkg/devspace/compose/testdata/volumes-short/devspace.yaml @@ -28,7 +28,7 @@ deployments: volume: name: volume-2 readOnly: false - - containerPath: /etc/configs/ + - containerPath: /etc/configs volume: name: volume-3 readOnly: false @@ -57,5 +57,5 @@ dev: startContainer: true - path: ./cache:/tmp/cache startContainer: true - - path: ${devspace.userHome}/configs:/etc/configs/ + - path: ${devspace.userHome}/configs:/etc/configs startContainer: true diff --git a/pkg/devspace/compose/testdata/x_volumes-depends_on/devspace.yaml b/pkg/devspace/compose/testdata/x_volumes-depends_on/devspace.yaml deleted file mode 100644 index 83c1ab0f56..0000000000 --- a/pkg/devspace/compose/testdata/x_volumes-depends_on/devspace.yaml +++ /dev/null @@ -1,110 +0,0 @@ -version: v1beta11 - -deployments: -- name: db - helm: - componentChart: true - values: - initContainers: - - name: upload-volumes - image: alpine - command: - - sh - args: - - -c - - while [ ! 
-f /tmp/done ]; do sleep 2; done - volumeMounts: - - containerPath: /tmp/cache - volume: - name: volume-1 - readOnly: false - containers: - - name: db-container - image: loft.sh/mysql-server:8.0.19 - volumeMounts: - - containerPath: /tmp/cache - volume: - name: volume-1 - readOnly: false - volumes: - - name: volume-1 - emptyDir: {} -- name: backend - helm: - componentChart: true - values: - initContainers: - - name: upload-volumes - image: alpine - command: - - sh - args: - - -c - - while [ ! -f /tmp/done ]; do sleep 2; done - volumeMounts: - - containerPath: /tmp/cache - volume: - name: volume-1 - readOnly: false - containers: - - name: backend-container - image: rails:latest - volumeMounts: - - containerPath: /tmp/cache - volume: - name: volume-1 - readOnly: false - volumes: - - name: volume-1 - emptyDir: {} - -dev: - sync: - - containerName: db-container - labelSelector: - app.kubernetes.io/component: db - localSubPath: ./cache - containerPath: /tmp/cache - - containerName: backend-container - labelSelector: - app.kubernetes.io/component: backend - localSubPath: ./cache - containerPath: /tmp/cache - -hooks: -- events: ["after:deploy:db"] - upload: - localPath: ./cache - containerPath: /tmp/cache - container: - containerName: upload-volumes - labelSelector: - app.kubernetes.io/component: db -- events: ["after:deploy:db"] - command: touch /tmp/done - container: - containerName: upload-volumes - labelSelector: - app.kubernetes.io/component: db -- events: ["after:deploy:db"] - container: - labelSelector: - app.kubernetes.io/component: db - containerName: db-container - wait: - running: true - terminatedWithCode: 0 -- events: ["after:deploy:backend"] - upload: - localPath: ./cache - containerPath: /tmp/cache - container: - containerName: upload-volumes - labelSelector: - app.kubernetes.io/component: backend -- events: ["after:deploy:backend"] - command: touch /tmp/done - container: - containerName: upload-volumes - labelSelector: - app.kubernetes.io/component: 
backend diff --git a/vendor/github.com/compose-spec/compose-go/LICENSE b/vendor/github.com/compose-spec/compose-go/LICENSE new file mode 100644 index 0000000000..9c8e20ab85 --- /dev/null +++ b/vendor/github.com/compose-spec/compose-go/LICENSE @@ -0,0 +1,191 @@ + + Apache License + Version 2.0, January 2004 + https://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). 
+ + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. 
Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative 
Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + Copyright 2013-2017 Docker, Inc. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + https://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/compose-spec/compose-go/NOTICE b/vendor/github.com/compose-spec/compose-go/NOTICE new file mode 100644 index 0000000000..9c2755477e --- /dev/null +++ b/vendor/github.com/compose-spec/compose-go/NOTICE @@ -0,0 +1,2 @@ +The Compose Specification +Copyright 2020 The Compose Specification Authors diff --git a/vendor/github.com/compose-spec/compose-go/consts/consts.go b/vendor/github.com/compose-spec/compose-go/consts/consts.go new file mode 100644 index 0000000000..bf5cc9f1b1 --- /dev/null +++ b/vendor/github.com/compose-spec/compose-go/consts/consts.go @@ -0,0 +1,23 @@ +/* + Copyright 2020 The Compose Specification Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+*/ + +package consts + +const ( + ComposeProjectName = "COMPOSE_PROJECT_NAME" + ComposePathSeparator = "COMPOSE_PATH_SEPARATOR" + ComposeFilePath = "COMPOSE_FILE" +) diff --git a/vendor/github.com/compose-spec/compose-go/dotenv/LICENSE b/vendor/github.com/compose-spec/compose-go/dotenv/LICENSE new file mode 100644 index 0000000000..9390caf660 --- /dev/null +++ b/vendor/github.com/compose-spec/compose-go/dotenv/LICENSE @@ -0,0 +1,22 @@ +Copyright (c) 2013 John Barton + +MIT License + +Permission is hereby granted, free of charge, to any person obtaining +a copy of this software and associated documentation files (the +"Software"), to deal in the Software without restriction, including +without limitation the rights to use, copy, modify, merge, publish, +distribute, sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, subject to +the following conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE +LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
diff --git a/vendor/github.com/compose-spec/compose-go/dotenv/godotenv.go b/vendor/github.com/compose-spec/compose-go/dotenv/godotenv.go new file mode 100644 index 0000000000..479831aac8 --- /dev/null +++ b/vendor/github.com/compose-spec/compose-go/dotenv/godotenv.go @@ -0,0 +1,375 @@ +// Package dotenv is a go port of the ruby dotenv library (https://github.com/bkeepers/dotenv) +// +// Examples/readme can be found on the github page at https://github.com/joho/godotenv +// +// The TL;DR is that you make a .env file that looks something like +// +// SOME_ENV_VAR=somevalue +// +// and then in your go code you can call +// +// godotenv.Load() +// +// and all the env vars declared in .env will be available through os.Getenv("SOME_ENV_VAR") +package dotenv + +import ( + "errors" + "fmt" + "io" + "io/ioutil" + "os" + "os/exec" + "regexp" + "sort" + "strconv" + "strings" +) + +const doubleQuoteSpecialChars = "\\\n\r\"!$`" + +// LookupFn represents a lookup function to resolve variables from +type LookupFn func(string) (string, bool) + +var noLookupFn = func(s string) (string, bool) { + return "", false +} + +// Parse reads an env file from io.Reader, returning a map of keys and values. +func Parse(r io.Reader) (map[string]string, error) { + return ParseWithLookup(r, nil) +} + +// ParseWithLookup reads an env file from io.Reader, returning a map of keys and values. +func ParseWithLookup(r io.Reader, lookupFn LookupFn) (map[string]string, error) { + data, err := ioutil.ReadAll(r) + if err != nil { + return nil, err + } + + return UnmarshalBytesWithLookup(data, lookupFn) +} + +// Load will read your env file(s) and load them into ENV for this process. 
+// +// Call this function as close as possible to the start of your program (ideally in main) +// +// If you call Load without any args it will default to loading .env in the current path +// +// You can otherwise tell it which files to load (there can be more than one) like +// +// godotenv.Load("fileone", "filetwo") +// +// It's important to note that it WILL NOT OVERRIDE an env variable that already exists - consider the .env file to set dev vars or sensible defaults +func Load(filenames ...string) (err error) { + return load(false, filenames...) +} + +// Overload will read your env file(s) and load them into ENV for this process. +// +// Call this function as close as possible to the start of your program (ideally in main) +// +// If you call Overload without any args it will default to loading .env in the current path +// +// You can otherwise tell it which files to load (there can be more than one) like +// +// godotenv.Overload("fileone", "filetwo") +// +// It's important to note this WILL OVERRIDE an env variable that already exists - consider the .env file to forcefilly set all vars. +func Overload(filenames ...string) (err error) { + return load(true, filenames...) 
+} + +func load(overload bool, filenames ...string) (err error) { + filenames = filenamesOrDefault(filenames) + + for _, filename := range filenames { + err = loadFile(filename, overload) + if err != nil { + return // return early on a spazout + } + } + return +} + +// ReadWithLookup gets all env vars from the files and/or lookup function and return values as +// a map rather than automatically writing values into env +func ReadWithLookup(lookupFn LookupFn, filenames ...string) (envMap map[string]string, err error) { + filenames = filenamesOrDefault(filenames) + envMap = make(map[string]string) + + for _, filename := range filenames { + individualEnvMap, individualErr := readFile(filename, lookupFn) + + if individualErr != nil { + err = individualErr + return // return early on a spazout + } + + for key, value := range individualEnvMap { + envMap[key] = value + } + } + + return +} + +// Read all env (with same file loading semantics as Load) but return values as +// a map rather than automatically writing values into env +func Read(filenames ...string) (envMap map[string]string, err error) { + return ReadWithLookup(nil, filenames...) +} + +// Unmarshal reads an env file from a string, returning a map of keys and values. +func Unmarshal(str string) (envMap map[string]string, err error) { + return UnmarshalBytes([]byte(str)) +} + +// UnmarshalBytes parses env file from byte slice of chars, returning a map of keys and values. +func UnmarshalBytes(src []byte) (map[string]string, error) { + return UnmarshalBytesWithLookup(src, nil) +} + +// UnmarshalBytesWithLookup parses env file from byte slice of chars, returning a map of keys and values. +func UnmarshalBytesWithLookup(src []byte, lookupFn LookupFn) (map[string]string, error) { + out := make(map[string]string) + err := parseBytes(src, out, lookupFn) + return out, err +} + +// Exec loads env vars from the specified filenames (empty map falls back to default) +// then executes the cmd specified. 
+// +// Simply hooks up os.Stdin/err/out to the command and calls Run() +// +// If you want more fine grained control over your command it's recommended +// that you use `Load()` or `Read()` and the `os/exec` package yourself. +func Exec(filenames []string, cmd string, cmdArgs []string) error { + if err := Load(filenames...); err != nil { + return err + } + + command := exec.Command(cmd, cmdArgs...) + command.Stdin = os.Stdin + command.Stdout = os.Stdout + command.Stderr = os.Stderr + return command.Run() +} + +// Write serializes the given environment and writes it to a file +func Write(envMap map[string]string, filename string) error { + content, err := Marshal(envMap) + if err != nil { + return err + } + file, err := os.Create(filename) + if err != nil { + return err + } + defer file.Close() + _, err = file.WriteString(content + "\n") + if err != nil { + return err + } + return file.Sync() +} + +// Marshal outputs the given environment as a dotenv-formatted environment file. +// Each line is in the format: KEY="VALUE" where VALUE is backslash-escaped. 
+func Marshal(envMap map[string]string) (string, error) { + lines := make([]string, 0, len(envMap)) + for k, v := range envMap { + if d, err := strconv.Atoi(v); err == nil { + lines = append(lines, fmt.Sprintf(`%s=%d`, k, d)) + } else { + lines = append(lines, fmt.Sprintf(`%s="%s"`, k, doubleQuoteEscape(v))) + } + } + sort.Strings(lines) + return strings.Join(lines, "\n"), nil +} + +func filenamesOrDefault(filenames []string) []string { + if len(filenames) == 0 { + return []string{".env"} + } + return filenames +} + +func loadFile(filename string, overload bool) error { + envMap, err := readFile(filename, nil) + if err != nil { + return err + } + + currentEnv := map[string]bool{} + rawEnv := os.Environ() + for _, rawEnvLine := range rawEnv { + key := strings.Split(rawEnvLine, "=")[0] + currentEnv[key] = true + } + + for key, value := range envMap { + if !currentEnv[key] || overload { + _ = os.Setenv(key, value) + } + } + + return nil +} + +func readFile(filename string, lookupFn LookupFn) (envMap map[string]string, err error) { + file, err := os.Open(filename) + if err != nil { + return + } + defer file.Close() + + return ParseWithLookup(file, lookupFn) +} + +var exportRegex = regexp.MustCompile(`^\s*(?:export\s+)?(.*?)\s*$`) + +func parseLine(line string, envMap map[string]string) (key string, value string, err error) { + return parseLineWithLookup(line, envMap, nil) +} +func parseLineWithLookup(line string, envMap map[string]string, lookupFn LookupFn) (key string, value string, err error) { + if len(line) == 0 { + err = errors.New("zero length string") + return + } + + // ditch the comments (but keep quoted hashes) + if strings.Contains(line, "#") { + segmentsBetweenHashes := strings.Split(line, "#") + quotesAreOpen := false + var segmentsToKeep []string + for _, segment := range segmentsBetweenHashes { + if strings.Count(segment, "\"") == 1 || strings.Count(segment, "'") == 1 { + if quotesAreOpen { + quotesAreOpen = false + segmentsToKeep = 
append(segmentsToKeep, segment) + } else { + quotesAreOpen = true + } + } + + if len(segmentsToKeep) == 0 || quotesAreOpen { + segmentsToKeep = append(segmentsToKeep, segment) + } + } + + line = strings.Join(segmentsToKeep, "#") + } + + firstEquals := strings.Index(line, "=") + firstColon := strings.Index(line, ":") + splitString := strings.SplitN(line, "=", 2) + if firstColon != -1 && (firstColon < firstEquals || firstEquals == -1) { + // This is a yaml-style line + splitString = strings.SplitN(line, ":", 2) + } + + if len(splitString) != 2 { + err = errors.New("can't separate key from value") + return + } + key = exportRegex.ReplaceAllString(splitString[0], "$1") + + // Parse the value + value = parseValue(splitString[1], envMap, lookupFn) + return +} + +var ( + singleQuotesRegex = regexp.MustCompile(`\A'(.*)'\z`) + doubleQuotesRegex = regexp.MustCompile(`\A"(.*)"\z`) + escapeRegex = regexp.MustCompile(`\\.`) + unescapeCharsRegex = regexp.MustCompile(`\\([^$])`) +) + +func parseValue(value string, envMap map[string]string, lookupFn LookupFn) string { + + // trim + value = strings.Trim(value, " ") + + // check if we've got quoted values or possible escapes + if len(value) > 1 { + singleQuotes := singleQuotesRegex.FindStringSubmatch(value) + + doubleQuotes := doubleQuotesRegex.FindStringSubmatch(value) + + if singleQuotes != nil || doubleQuotes != nil { + // pull the quotes off the edges + value = value[1 : len(value)-1] + } + + if doubleQuotes != nil { + // expand newlines + value = escapeRegex.ReplaceAllStringFunc(value, func(match string) string { + c := strings.TrimPrefix(match, `\`) + switch c { + case "n": + return "\n" + case "r": + return "\r" + default: + return match + } + }) + // unescape characters + value = unescapeCharsRegex.ReplaceAllString(value, "$1") + } + + if singleQuotes == nil { + value = expandVariables(value, envMap, lookupFn) + } + } + + return value +} + +var expandVarRegex = regexp.MustCompile(`(\\)?(\$)(\()?\{?([A-Z0-9_]+)?\}?`) + +func 
expandVariables(v string, envMap map[string]string, lookupFn LookupFn) string { + return expandVarRegex.ReplaceAllStringFunc(v, func(s string) string { + submatch := expandVarRegex.FindStringSubmatch(s) + + if submatch == nil { + return s + } + if submatch[1] == "\\" || submatch[2] == "(" { + return submatch[0][1:] + } else if submatch[4] != "" { + // first check if we have defined this already earlier + if envMap[submatch[4]] != "" { + return envMap[submatch[4]] + } + if lookupFn == nil { + return "" + } + // if we have not defined it, check the lookup function provided + // by the user + s2, ok := lookupFn(submatch[4]) + if ok { + return s2 + } + return "" + } + return s + }) +} + +func doubleQuoteEscape(line string) string { + for _, c := range doubleQuoteSpecialChars { + toReplace := "\\" + string(c) + if c == '\n' { + toReplace = `\n` + } + if c == '\r' { + toReplace = `\r` + } + line = strings.Replace(line, string(c), toReplace, -1) + } + return line +} diff --git a/vendor/github.com/compose-spec/compose-go/dotenv/parser.go b/vendor/github.com/compose-spec/compose-go/dotenv/parser.go new file mode 100644 index 0000000000..85ed2c0088 --- /dev/null +++ b/vendor/github.com/compose-spec/compose-go/dotenv/parser.go @@ -0,0 +1,234 @@ +package dotenv + +import ( + "bytes" + "errors" + "fmt" + "strings" + "unicode" +) + +const ( + charComment = '#' + prefixSingleQuote = '\'' + prefixDoubleQuote = '"' + + exportPrefix = "export" +) + +func parseBytes(src []byte, out map[string]string, lookupFn LookupFn) error { + cutset := src + for { + cutset = getStatementStart(cutset) + if cutset == nil { + // reached end of file + break + } + + key, left, inherited, err := locateKeyName(cutset) + if err != nil { + return err + } + if strings.Contains(key, " ") { + return errors.New("key cannot contain a space") + } + + if inherited { + if lookupFn == nil { + lookupFn = noLookupFn + } + + value, ok := lookupFn(key) + if ok { + out[key] = value + } + cutset = left + continue + } + + 
value, left, err := extractVarValue(left, out, lookupFn) + if err != nil { + return err + } + + out[key] = value + cutset = left + } + + return nil +} + +// getStatementPosition returns position of statement begin. +// +// It skips any comment line or non-whitespace character. +func getStatementStart(src []byte) []byte { + pos := indexOfNonSpaceChar(src) + if pos == -1 { + return nil + } + + src = src[pos:] + if src[0] != charComment { + return src + } + + // skip comment section + pos = bytes.IndexFunc(src, isCharFunc('\n')) + if pos == -1 { + return nil + } + + return getStatementStart(src[pos:]) +} + +// locateKeyName locates and parses key name and returns rest of slice +func locateKeyName(src []byte) (key string, cutset []byte, inherited bool, err error) { + // trim "export" and space at beginning + src = bytes.TrimLeftFunc(bytes.TrimPrefix(src, []byte(exportPrefix)), isSpace) + + // locate key name end and validate it in single loop + offset := 0 +loop: + for i, char := range src { + rchar := rune(char) + if isSpace(rchar) { + continue + } + + switch char { + case '=', ':', '\n': + // library also supports yaml-style value declaration + key = string(src[0:i]) + offset = i + 1 + inherited = char == '\n' + break loop + case '_': + default: + // variable name should match [A-Za-z0-9_] + if unicode.IsLetter(rchar) || unicode.IsNumber(rchar) { + continue + } + + return "", nil, inherited, fmt.Errorf( + `unexpected character %q in variable name near %q`, + string(char), string(src)) + } + } + + if len(src) == 0 { + return "", nil, inherited, errors.New("zero length string") + } + + // trim whitespace + key = strings.TrimRightFunc(key, unicode.IsSpace) + cutset = bytes.TrimLeftFunc(src[offset:], isSpace) + return key, cutset, inherited, nil +} + +// extractVarValue extracts variable value and returns rest of slice +func extractVarValue(src []byte, envMap map[string]string, lookupFn LookupFn) (value string, rest []byte, err error) { + quote, isQuoted := 
hasQuotePrefix(src) + if !isQuoted { + // unquoted value - read until new line + end := bytes.IndexFunc(src, isNewLine) + var rest []byte + if end < 0 { + value := strings.Split(string(src), "#")[0] // Remove inline comments on unquoted lines + value = strings.TrimRightFunc(value, unicode.IsSpace) + return expandVariables(value, envMap, lookupFn), nil, nil + } + + value := strings.Split(string(src[0:end]), "#")[0] + value = strings.TrimRightFunc(value, unicode.IsSpace) + rest = src[end:] + return expandVariables(value, envMap, lookupFn), rest, nil + } + + // lookup quoted string terminator + for i := 1; i < len(src); i++ { + if char := src[i]; char != quote { + continue + } + + // skip escaped quote symbol (\" or \', depends on quote) + if prevChar := src[i-1]; prevChar == '\\' { + continue + } + + // trim quotes + trimFunc := isCharFunc(rune(quote)) + value = string(bytes.TrimLeftFunc(bytes.TrimRightFunc(src[0:i], trimFunc), trimFunc)) + if quote == prefixDoubleQuote { + // unescape newlines for double quote (this is compat feature) + // and expand environment variables + value = expandVariables(expandEscapes(value), envMap, lookupFn) + } + + return value, src[i+1:], nil + } + + // return formatted error if quoted string is not terminated + valEndIndex := bytes.IndexFunc(src, isCharFunc('\n')) + if valEndIndex == -1 { + valEndIndex = len(src) + } + + return "", nil, fmt.Errorf("unterminated quoted value %s", src[:valEndIndex]) +} + +func expandEscapes(str string) string { + out := escapeRegex.ReplaceAllStringFunc(str, func(match string) string { + c := strings.TrimPrefix(match, `\`) + switch c { + case "n": + return "\n" + case "r": + return "\r" + default: + return match + } + }) + return unescapeCharsRegex.ReplaceAllString(out, "$1") +} + +func indexOfNonSpaceChar(src []byte) int { + return bytes.IndexFunc(src, func(r rune) bool { + return !unicode.IsSpace(r) + }) +} + +// hasQuotePrefix reports whether charset starts with single or double quote and returns 
quote character +func hasQuotePrefix(src []byte) (quote byte, isQuoted bool) { + if len(src) == 0 { + return 0, false + } + + switch prefix := src[0]; prefix { + case prefixDoubleQuote, prefixSingleQuote: + return prefix, true + default: + return 0, false + } +} + +func isCharFunc(char rune) func(rune) bool { + return func(v rune) bool { + return v == char + } +} + +// isSpace reports whether the rune is a space character but not line break character +// +// this differs from unicode.IsSpace, which also applies line break as space +func isSpace(r rune) bool { + switch r { + case '\t', '\v', '\f', '\r', ' ', 0x85, 0xA0: + return true + } + return false +} + +// isNewLine reports whether the rune is a new line character +func isNewLine(r rune) bool { + return r == '\n' +} diff --git a/vendor/github.com/compose-spec/compose-go/errdefs/errors.go b/vendor/github.com/compose-spec/compose-go/errdefs/errors.go new file mode 100644 index 0000000000..a54407007e --- /dev/null +++ b/vendor/github.com/compose-spec/compose-go/errdefs/errors.go @@ -0,0 +1,53 @@ +/* + Copyright 2020 The Compose Specification Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+*/ + +package errdefs + +import "errors" + +var ( + // ErrNotFound is returned when an object is not found + ErrNotFound = errors.New("not found") + + // ErrInvalid is returned when a compose project is invalid + ErrInvalid = errors.New("invalid compose project") + + // ErrUnsupported is returned when a compose project uses an unsupported attribute + ErrUnsupported = errors.New("unsupported attribute") + + // ErrIncompatible is returned when a compose project uses an incompatible attribute + ErrIncompatible = errors.New("incompatible attribute") +) + +// IsNotFoundError returns true if the unwrapped error is ErrNotFound +func IsNotFoundError(err error) bool { + return errors.Is(err, ErrNotFound) +} + +// IsInvalidError returns true if the unwrapped error is ErrInvalid +func IsInvalidError(err error) bool { + return errors.Is(err, ErrInvalid) +} + +// IsUnsupportedError returns true if the unwrapped error is ErrUnsupported +func IsUnsupportedError(err error) bool { + return errors.Is(err, ErrUnsupported) +} + +// IsUnsupportedError returns true if the unwrapped error is ErrIncompatible +func IsIncompatibleError(err error) bool { + return errors.Is(err, ErrIncompatible) +} diff --git a/vendor/github.com/compose-spec/compose-go/interpolation/interpolation.go b/vendor/github.com/compose-spec/compose-go/interpolation/interpolation.go new file mode 100644 index 0000000000..9c36e6d8b1 --- /dev/null +++ b/vendor/github.com/compose-spec/compose-go/interpolation/interpolation.go @@ -0,0 +1,177 @@ +/* + Copyright 2020 The Compose Specification Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. 
+ You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package interpolation + +import ( + "os" + "strings" + + "github.com/compose-spec/compose-go/template" + "github.com/pkg/errors" +) + +// Options supported by Interpolate +type Options struct { + // LookupValue from a key + LookupValue LookupValue + // TypeCastMapping maps key paths to functions to cast to a type + TypeCastMapping map[Path]Cast + // Substitution function to use + Substitute func(string, template.Mapping) (string, error) +} + +// LookupValue is a function which maps from variable names to values. +// Returns the value as a string and a bool indicating whether +// the value is present, to distinguish between an empty string +// and the absence of a value. 
+type LookupValue func(key string) (string, bool) + +// Cast a value to a new type, or return an error if the value can't be cast +type Cast func(value string) (interface{}, error) + +// Interpolate replaces variables in a string with the values from a mapping +func Interpolate(config map[string]interface{}, opts Options) (map[string]interface{}, error) { + if opts.LookupValue == nil { + opts.LookupValue = os.LookupEnv + } + if opts.TypeCastMapping == nil { + opts.TypeCastMapping = make(map[Path]Cast) + } + if opts.Substitute == nil { + opts.Substitute = template.Substitute + } + + out := map[string]interface{}{} + + for key, value := range config { + interpolatedValue, err := recursiveInterpolate(value, NewPath(key), opts) + if err != nil { + return out, err + } + out[key] = interpolatedValue + } + + return out, nil +} + +func recursiveInterpolate(value interface{}, path Path, opts Options) (interface{}, error) { + switch value := value.(type) { + case string: + newValue, err := opts.Substitute(value, template.Mapping(opts.LookupValue)) + if err != nil || newValue == value { + return value, newPathError(path, err) + } + caster, ok := opts.getCasterForPath(path) + if !ok { + return newValue, nil + } + casted, err := caster(newValue) + return casted, newPathError(path, errors.Wrap(err, "failed to cast to expected type")) + + case map[string]interface{}: + out := map[string]interface{}{} + for key, elem := range value { + interpolatedElem, err := recursiveInterpolate(elem, path.Next(key), opts) + if err != nil { + return nil, err + } + out[key] = interpolatedElem + } + return out, nil + + case []interface{}: + out := make([]interface{}, len(value)) + for i, elem := range value { + interpolatedElem, err := recursiveInterpolate(elem, path.Next(PathMatchList), opts) + if err != nil { + return nil, err + } + out[i] = interpolatedElem + } + return out, nil + + default: + return value, nil + } +} + +func newPathError(path Path, err error) error { + switch err := err.(type) 
{ + case nil: + return nil + case *template.InvalidTemplateError: + return errors.Errorf( + "invalid interpolation format for %s: %#v. You may need to escape any $ with another $", + path, err.Template) + default: + return errors.Wrapf(err, "error while interpolating %s", path) + } +} + +const pathSeparator = "." + +// PathMatchAll is a token used as part of a Path to match any key at that level +// in the nested structure +const PathMatchAll = "*" + +// PathMatchList is a token used as part of a Path to match items in a list +const PathMatchList = "[]" + +// Path is a dotted path of keys to a value in a nested mapping structure. A * +// section in a path will match any key in the mapping structure. +type Path string + +// NewPath returns a new Path +func NewPath(items ...string) Path { + return Path(strings.Join(items, pathSeparator)) +} + +// Next returns a new path by append part to the current path +func (p Path) Next(part string) Path { + return Path(string(p) + pathSeparator + part) +} + +func (p Path) parts() []string { + return strings.Split(string(p), pathSeparator) +} + +func (p Path) matches(pattern Path) bool { + patternParts := pattern.parts() + parts := p.parts() + + if len(patternParts) != len(parts) { + return false + } + for index, part := range parts { + switch patternParts[index] { + case PathMatchAll, part: + continue + default: + return false + } + } + return true +} + +func (o Options) getCasterForPath(path Path) (Cast, bool) { + for pattern, caster := range o.TypeCastMapping { + if path.matches(pattern) { + return caster, true + } + } + return nil, false +} diff --git a/vendor/github.com/compose-spec/compose-go/loader/example1.env b/vendor/github.com/compose-spec/compose-go/loader/example1.env new file mode 100644 index 0000000000..f19ec0df4e --- /dev/null +++ b/vendor/github.com/compose-spec/compose-go/loader/example1.env @@ -0,0 +1,8 @@ +# passed through +FOO=foo_from_env_file + +# overridden in example2.env +BAR=bar_from_env_file + +# 
overridden in full-example.yml +BAZ=baz_from_env_file diff --git a/vendor/github.com/compose-spec/compose-go/loader/example2.env b/vendor/github.com/compose-spec/compose-go/loader/example2.env new file mode 100644 index 0000000000..f47d1e6145 --- /dev/null +++ b/vendor/github.com/compose-spec/compose-go/loader/example2.env @@ -0,0 +1,4 @@ +BAR=bar_from_env_file_2 + +# overridden in configDetails.Environment +QUX=quz_from_env_file_2 diff --git a/vendor/github.com/compose-spec/compose-go/loader/full-example.yml b/vendor/github.com/compose-spec/compose-go/loader/full-example.yml new file mode 100644 index 0000000000..4f17450ef7 --- /dev/null +++ b/vendor/github.com/compose-spec/compose-go/loader/full-example.yml @@ -0,0 +1,419 @@ +name: Full_Example_project_name +services: + foo: + + build: + context: ./dir + dockerfile: Dockerfile + args: + foo: bar + ssh: + - default + target: foo + network: foo + cache_from: + - foo + - bar + labels: [FOO=BAR] + + + cap_add: + - ALL + + cap_drop: + - NET_ADMIN + - SYS_ADMIN + + cgroup_parent: m-executor-abcd + + # String or list + command: bundle exec thin -p 3000 + # command: ["bundle", "exec", "thin", "-p", "3000"] + + configs: + - config1 + - source: config2 + target: /my_config + uid: '103' + gid: '103' + mode: 0440 + + container_name: my-web-container + + depends_on: + - db + - redis + + deploy: + mode: replicated + replicas: 6 + labels: [FOO=BAR] + rollback_config: + parallelism: 3 + delay: 10s + failure_action: continue + monitor: 60s + max_failure_ratio: 0.3 + order: start-first + update_config: + parallelism: 3 + delay: 10s + failure_action: continue + monitor: 60s + max_failure_ratio: 0.3 + order: start-first + resources: + limits: + cpus: '0.001' + memory: 50M + reservations: + cpus: '0.0001' + memory: 20M + generic_resources: + - discrete_resource_spec: + kind: 'gpu' + value: 2 + - discrete_resource_spec: + kind: 'ssd' + value: 1 + restart_policy: + condition: on-failure + delay: 5s + max_attempts: 3 + window: 120s + 
placement: + constraints: [node=foo] + max_replicas_per_node: 5 + preferences: + - spread: node.labels.az + endpoint_mode: dnsrr + + device_cgroup_rules: + - "c 1:3 mr" + - "a 7:* rmw" + + devices: + - "/dev/ttyUSB0:/dev/ttyUSB0" + + # String or list + # dns: 8.8.8.8 + dns: + - 8.8.8.8 + - 9.9.9.9 + + # String or list + # dns_search: example.com + dns_search: + - dc1.example.com + - dc2.example.com + + domainname: foo.com + + # String or list + # entrypoint: /code/entrypoint.sh -p 3000 + entrypoint: ["/code/entrypoint.sh", "-p", "3000"] + + # String or list + # env_file: .env + env_file: + - ./example1.env + - ./example2.env + + # Mapping or list + # Mapping values can be strings, numbers or null + # Booleans are not allowed - must be quoted + environment: + BAZ: baz_from_service_def + QUX: + # environment: + # - RACK_ENV=development + # - SHOW=true + # - SESSION_SECRET + + # Items can be strings or numbers + expose: + - "3000" + - 8000 + + external_links: + - redis_1 + - project_db_1:mysql + - project_db_1:postgresql + + # Mapping or list + # Mapping values must be strings + # extra_hosts: + # somehost: "162.242.195.82" + # otherhost: "50.31.209.229" + extra_hosts: + - "somehost:162.242.195.82" + - "otherhost:50.31.209.229" + + hostname: foo + + healthcheck: + test: echo "hello world" + interval: 10s + timeout: 1s + retries: 5 + start_period: 15s + + # Any valid image reference - repo, tag, id, sha + image: redis + # image: ubuntu:14.04 + # image: tutum/influxdb + # image: example-registry.com:4000/postgresql + # image: a4bc65fd + # image: busybox@sha256:38a203e1986cf79639cfb9b2e1d6e773de84002feea2d4eb006b52004ee8502d + + ipc: host + + # Mapping or list + # Mapping values can be strings, numbers or null + labels: + com.example.description: "Accounting webapp" + com.example.number: 42 + com.example.empty-label: + # labels: + # - "com.example.description=Accounting webapp" + # - "com.example.number=42" + # - "com.example.empty-label" + + links: + - db + - 
db:database + - redis + + logging: + driver: syslog + options: + syslog-address: "tcp://192.168.0.42:123" + + mac_address: 02:42:ac:11:65:43 + + # network_mode: "bridge" + # network_mode: "host" + # network_mode: "none" + # Use the network mode of an arbitrary container from another service + # network_mode: "service:db" + # Use the network mode of another container, specified by name or id + # network_mode: "container:some-container" + network_mode: "container:0cfeab0f748b9a743dc3da582046357c6ef497631c1a016d28d2bf9b4f899f7b" + + networks: + some-network: + aliases: + - alias1 + - alias3 + other-network: + ipv4_address: 172.16.238.10 + ipv6_address: 2001:3984:3989::10 + other-other-network: + + pid: "host" + + ports: + - 3000 + - "3001-3005" + - "8000:8000" + - "9090-9091:8080-8081" + - "49100:22" + - "127.0.0.1:8001:8001" + - "127.0.0.1:5000-5010:5000-5010" + + privileged: true + + read_only: true + + restart: always + + secrets: + - secret1 + - source: secret2 + target: my_secret + uid: '103' + gid: '103' + mode: 0440 + + security_opt: + - label=level:s0:c100,c200 + - label=type:svirt_apache_t + + stdin_open: true + + stop_grace_period: 20s + + stop_signal: SIGUSR1 + + sysctls: + net.core.somaxconn: 1024 + net.ipv4.tcp_syncookies: 0 + + # String or list + # tmpfs: /run + tmpfs: + - /run + - /tmp + + tty: true + + ulimits: + # Single number or mapping with soft + hard limits + nproc: 65535 + nofile: + soft: 20000 + hard: 40000 + + user: someone + + volumes: + # Just specify a path and let the Engine create a volume + - /var/lib/mysql + # Specify an absolute path mapping + - /opt/data:/var/lib/mysql + # Path on the host, relative to the Compose file + - .:/code + - ./static:/var/www/html + # User-relative path + - ~/configs:/etc/configs:ro + # Named volume + - datavolume:/var/lib/mysql + - type: bind + source: ./opt + target: /opt + consistency: cached + - type: tmpfs + target: /opt + tmpfs: + size: 10000 + + working_dir: /code + x-bar: baz + x-foo: bar + 
+networks: + # Entries can be null, which specifies simply that a network + # called "{project name}_some-network" should be created and + # use the default driver + some-network: + + other-network: + driver: overlay + + driver_opts: + # Values can be strings or numbers + foo: "bar" + baz: 1 + + ipam: + driver: overlay + # driver_opts: + # # Values can be strings or numbers + # com.docker.network.enable_ipv6: "true" + # com.docker.network.numeric_value: 1 + config: + - subnet: 172.28.0.0/16 + ip_range: 172.28.5.0/24 + gateway: 172.28.5.254 + aux_addresses: + host1: 172.28.1.5 + host2: 172.28.1.6 + host3: 172.28.1.7 + - subnet: 2001:3984:3989::/64 + gateway: 2001:3984:3989::1 + + labels: + foo: bar + + external-network: + # Specifies that a pre-existing network called "external-network" + # can be referred to within this file as "external-network" + external: true + + other-external-network: + # Specifies that a pre-existing network called "my-cool-network" + # can be referred to within this file as "other-external-network" + external: + name: my-cool-network + x-bar: baz + x-foo: bar + +volumes: + # Entries can be null, which specifies simply that a volume + # called "{project name}_some-volume" should be created and + # use the default driver + some-volume: + + other-volume: + driver: flocker + + driver_opts: + # Values can be strings or numbers + foo: "bar" + baz: 1 + labels: + foo: bar + + another-volume: + name: "user_specified_name" + driver: vsphere + + driver_opts: + # Values can be strings or numbers + foo: "bar" + baz: 1 + + external-volume: + # Specifies that a pre-existing volume called "external-volume" + # can be referred to within this file as "external-volume" + external: true + + other-external-volume: + # Specifies that a pre-existing volume called "my-cool-volume" + # can be referred to within this file as "other-external-volume" + # This example uses the deprecated "volume.external.name" (replaced by "volume.name") + external: + name: 
my-cool-volume + + external-volume3: + # Specifies that a pre-existing volume called "this-is-volume3" + # can be referred to within this file as "external-volume3" + name: this-is-volume3 + external: true + x-bar: baz + x-foo: bar + +configs: + config1: + file: ./config_data + labels: + foo: bar + config2: + external: + name: my_config + config3: + external: true + config4: + name: foo + x-bar: baz + x-foo: bar + +secrets: + secret1: + file: ./secret_data + labels: + foo: bar + secret2: + external: + name: my_secret + secret3: + external: true + secret4: + name: bar + x-bar: baz + x-foo: bar +x-bar: baz +x-foo: bar +x-nested: + bar: baz + foo: bar diff --git a/vendor/github.com/compose-spec/compose-go/loader/interpolate.go b/vendor/github.com/compose-spec/compose-go/loader/interpolate.go new file mode 100644 index 0000000000..97a19f5dd8 --- /dev/null +++ b/vendor/github.com/compose-spec/compose-go/loader/interpolate.go @@ -0,0 +1,124 @@ +/* + Copyright 2020 The Compose Specification Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+*/ + +package loader + +import ( + "strconv" + "strings" + + interp "github.com/compose-spec/compose-go/interpolation" + "github.com/pkg/errors" +) + +var interpolateTypeCastMapping = map[interp.Path]interp.Cast{ + servicePath("configs", interp.PathMatchList, "mode"): toInt, + servicePath("cpu_count"): toInt64, + servicePath("cpu_percent"): toFloat, + servicePath("cpu_period"): toInt64, + servicePath("cpu_quota"): toInt64, + servicePath("cpu_rt_period"): toInt64, + servicePath("cpu_rt_runtime"): toInt64, + servicePath("cpus"): toFloat32, + servicePath("cpu_shares"): toInt64, + servicePath("init"): toBoolean, + servicePath("deploy", "replicas"): toInt, + servicePath("deploy", "update_config", "parallelism"): toInt, + servicePath("deploy", "update_config", "max_failure_ratio"): toFloat, + servicePath("deploy", "rollback_config", "parallelism"): toInt, + servicePath("deploy", "rollback_config", "max_failure_ratio"): toFloat, + servicePath("deploy", "restart_policy", "max_attempts"): toInt, + servicePath("deploy", "placement", "max_replicas_per_node"): toInt, + servicePath("healthcheck", "retries"): toInt, + servicePath("healthcheck", "disable"): toBoolean, + servicePath("mem_limit"): toUnitBytes, + servicePath("mem_reservation"): toUnitBytes, + servicePath("memswap_limit"): toUnitBytes, + servicePath("mem_swappiness"): toUnitBytes, + servicePath("oom_kill_disable"): toBoolean, + servicePath("oom_score_adj"): toInt64, + servicePath("pids_limit"): toInt64, + servicePath("ports", interp.PathMatchList, "target"): toInt, + servicePath("privileged"): toBoolean, + servicePath("read_only"): toBoolean, + servicePath("scale"): toInt, + servicePath("secrets", interp.PathMatchList, "mode"): toInt, + servicePath("shm_size"): toUnitBytes, + servicePath("stdin_open"): toBoolean, + servicePath("stop_grace_period"): toDuration, + servicePath("tty"): toBoolean, + servicePath("ulimits", interp.PathMatchAll): toInt, + servicePath("ulimits", interp.PathMatchAll, "hard"): toInt, + 
servicePath("ulimits", interp.PathMatchAll, "soft"): toInt, + servicePath("volumes", interp.PathMatchList, "read_only"): toBoolean, + servicePath("volumes", interp.PathMatchList, "volume", "nocopy"): toBoolean, + servicePath("volumes", interp.PathMatchList, "tmpfs", "size"): toUnitBytes, + iPath("networks", interp.PathMatchAll, "external"): toBoolean, + iPath("networks", interp.PathMatchAll, "internal"): toBoolean, + iPath("networks", interp.PathMatchAll, "attachable"): toBoolean, + iPath("networks", interp.PathMatchAll, "enable_ipv6"): toBoolean, + iPath("volumes", interp.PathMatchAll, "external"): toBoolean, + iPath("secrets", interp.PathMatchAll, "external"): toBoolean, + iPath("configs", interp.PathMatchAll, "external"): toBoolean, +} + +func iPath(parts ...string) interp.Path { + return interp.NewPath(parts...) +} + +func servicePath(parts ...string) interp.Path { + return iPath(append([]string{"services", interp.PathMatchAll}, parts...)...) +} + +func toInt(value string) (interface{}, error) { + return strconv.Atoi(value) +} + +func toInt64(value string) (interface{}, error) { + return strconv.ParseInt(value, 10, 64) +} + +func toUnitBytes(value string) (interface{}, error) { + return transformSize(value) +} + +func toDuration(value string) (interface{}, error) { + return transformStringToDuration(value) +} + +func toFloat(value string) (interface{}, error) { + return strconv.ParseFloat(value, 64) +} + +func toFloat32(value string) (interface{}, error) { + f, err := strconv.ParseFloat(value, 32) + if err != nil { + return nil, err + } + return float32(f), nil +} + +// should match http://yaml.org/type/bool.html +func toBoolean(value string) (interface{}, error) { + switch strings.ToLower(value) { + case "y", "yes", "true", "on": + return true, nil + case "n", "no", "false", "off": + return false, nil + default: + return nil, errors.Errorf("invalid boolean: %s", value) + } +} diff --git a/vendor/github.com/compose-spec/compose-go/loader/loader.go 
b/vendor/github.com/compose-spec/compose-go/loader/loader.go new file mode 100644 index 0000000000..895bdb2609 --- /dev/null +++ b/vendor/github.com/compose-spec/compose-go/loader/loader.go @@ -0,0 +1,1182 @@ +/* + Copyright 2020 The Compose Specification Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package loader + +import ( + "fmt" + "io/ioutil" + "os" + "path" + "path/filepath" + "reflect" + "regexp" + "sort" + "strconv" + "strings" + "time" + + "github.com/compose-spec/compose-go/consts" + "github.com/compose-spec/compose-go/dotenv" + interp "github.com/compose-spec/compose-go/interpolation" + "github.com/compose-spec/compose-go/schema" + "github.com/compose-spec/compose-go/template" + "github.com/compose-spec/compose-go/types" + "github.com/docker/go-units" + "github.com/mattn/go-shellwords" + "github.com/mitchellh/mapstructure" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" + "gopkg.in/yaml.v2" +) + +// Options supported by Load +type Options struct { + // Skip schema validation + SkipValidation bool + // Skip interpolation + SkipInterpolation bool + // Skip normalization + SkipNormalization bool + // Resolve paths + ResolvePaths bool + // Convert Windows paths + ConvertWindowsPaths bool + // Skip consistency check + SkipConsistencyCheck bool + // Skip extends + SkipExtends bool + // Interpolation options + Interpolate *interp.Options + // Discard 'env_file' entries after resolving to 'environment' section + discardEnvFiles bool + // Set project 
projectName + projectName string + // Indicates when the projectName was imperatively set or guessed from path + projectNameImperativelySet bool +} + +func (o *Options) SetProjectName(name string, imperativelySet bool) { + o.projectName = normalizeProjectName(name) + o.projectNameImperativelySet = imperativelySet +} + +func (o Options) GetProjectName() (string, bool) { + return o.projectName, o.projectNameImperativelySet +} + +// serviceRef identifies a reference to a service. It's used to detect cyclic +// references in "extends". +type serviceRef struct { + filename string + service string +} + +type cycleTracker struct { + loaded []serviceRef +} + +func (ct *cycleTracker) Add(filename, service string) error { + toAdd := serviceRef{filename: filename, service: service} + for _, loaded := range ct.loaded { + if toAdd == loaded { + // Create an error message of the form: + // Circular reference: + // service-a in docker-compose.yml + // extends service-b in docker-compose.yml + // extends service-a in docker-compose.yml + errLines := []string{ + "Circular reference:", + fmt.Sprintf(" %s in %s", ct.loaded[0].service, ct.loaded[0].filename), + } + for _, service := range append(ct.loaded[1:], toAdd) { + errLines = append(errLines, fmt.Sprintf(" extends %s in %s", service.service, service.filename)) + } + + return errors.New(strings.Join(errLines, "\n")) + } + } + + ct.loaded = append(ct.loaded, toAdd) + return nil +} + +// WithDiscardEnvFiles sets the Options to discard the `env_file` section after resolving to +// the `environment` section +func WithDiscardEnvFiles(opts *Options) { + opts.discardEnvFiles = true +} + +// WithSkipValidation sets the Options to skip validation when loading sections +func WithSkipValidation(opts *Options) { + opts.SkipValidation = true +} + +// ParseYAML reads the bytes from a file, parses the bytes into a mapping +// structure, and returns it. 
+func ParseYAML(source []byte) (map[string]interface{}, error) { + var cfg interface{} + if err := yaml.Unmarshal(source, &cfg); err != nil { + return nil, err + } + cfgMap, ok := cfg.(map[interface{}]interface{}) + if !ok { + return nil, errors.Errorf("Top-level object must be a mapping") + } + converted, err := convertToStringKeysRecursive(cfgMap, "") + if err != nil { + return nil, err + } + return converted.(map[string]interface{}), nil +} + +// Load reads a ConfigDetails and returns a fully loaded configuration +func Load(configDetails types.ConfigDetails, options ...func(*Options)) (*types.Project, error) { + if len(configDetails.ConfigFiles) < 1 { + return nil, errors.Errorf("No files specified") + } + + opts := &Options{ + Interpolate: &interp.Options{ + Substitute: template.Substitute, + LookupValue: configDetails.LookupEnv, + TypeCastMapping: interpolateTypeCastMapping, + }, + } + + for _, op := range options { + op(opts) + } + + var configs []*types.Config + for i, file := range configDetails.ConfigFiles { + configDict := file.Config + if configDict == nil { + dict, err := parseConfig(file.Content, opts) + if err != nil { + return nil, err + } + configDict = dict + file.Config = dict + configDetails.ConfigFiles[i] = file + } + + if !opts.SkipValidation { + if err := schema.Validate(configDict); err != nil { + return nil, err + } + } + + configDict = groupXFieldsIntoExtensions(configDict) + + cfg, err := loadSections(file.Filename, configDict, configDetails, opts) + if err != nil { + return nil, err + } + if opts.discardEnvFiles { + for i := range cfg.Services { + cfg.Services[i].EnvFile = nil + } + } + + configs = append(configs, cfg) + } + + model, err := merge(configs) + if err != nil { + return nil, err + } + + for _, s := range model.Services { + var newEnvFiles types.StringList + for _, ef := range s.EnvFile { + newEnvFiles = append(newEnvFiles, absPath(configDetails.WorkingDir, ef)) + } + s.EnvFile = newEnvFiles + } + + projectName, 
projectNameImperativelySet := opts.GetProjectName() + model.Name = normalizeProjectName(model.Name) + if !projectNameImperativelySet && model.Name != "" { + projectName = model.Name + } + + if projectName != "" { + configDetails.Environment[consts.ComposeProjectName] = projectName + } + project := &types.Project{ + Name: projectName, + WorkingDir: configDetails.WorkingDir, + Services: model.Services, + Networks: model.Networks, + Volumes: model.Volumes, + Secrets: model.Secrets, + Configs: model.Configs, + Environment: configDetails.Environment, + Extensions: model.Extensions, + } + + if !opts.SkipNormalization { + err = normalize(project, opts.ResolvePaths) + if err != nil { + return nil, err + } + } + + if !opts.SkipConsistencyCheck { + err = checkConsistency(project) + if err != nil { + return nil, err + } + } + + return project, nil +} + +func normalizeProjectName(s string) string { + r := regexp.MustCompile("[a-z0-9_-]") + s = strings.ToLower(s) + s = strings.Join(r.FindAllString(s, -1), "") + return strings.TrimLeft(s, "_-") +} + +func parseConfig(b []byte, opts *Options) (map[string]interface{}, error) { + yml, err := ParseYAML(b) + if err != nil { + return nil, err + } + if !opts.SkipInterpolation { + return interp.Interpolate(yml, *opts.Interpolate) + } + return yml, err +} + +func groupXFieldsIntoExtensions(dict map[string]interface{}) map[string]interface{} { + extras := map[string]interface{}{} + for key, value := range dict { + if strings.HasPrefix(key, "x-") { + extras[key] = value + delete(dict, key) + } + if d, ok := value.(map[string]interface{}); ok { + dict[key] = groupXFieldsIntoExtensions(d) + } + } + if len(extras) > 0 { + dict["extensions"] = extras + } + return dict +} + +func loadSections(filename string, config map[string]interface{}, configDetails types.ConfigDetails, opts *Options) (*types.Config, error) { + var err error + cfg := types.Config{ + Filename: filename, + } + name := "" + if n, ok := config["name"]; ok { + name, ok = 
n.(string) + if !ok { + return nil, errors.New("project name must be a string") + } + } + cfg.Name = name + cfg.Services, err = LoadServices(filename, getSection(config, "services"), configDetails.WorkingDir, configDetails.LookupEnv, opts) + if err != nil { + return nil, err + } + + cfg.Networks, err = LoadNetworks(getSection(config, "networks")) + if err != nil { + return nil, err + } + cfg.Volumes, err = LoadVolumes(getSection(config, "volumes")) + if err != nil { + return nil, err + } + cfg.Secrets, err = LoadSecrets(getSection(config, "secrets"), configDetails, opts.ResolvePaths) + if err != nil { + return nil, err + } + cfg.Configs, err = LoadConfigObjs(getSection(config, "configs"), configDetails, opts.ResolvePaths) + if err != nil { + return nil, err + } + extensions := getSection(config, "extensions") + if len(extensions) > 0 { + cfg.Extensions = extensions + } + return &cfg, nil +} + +func getSection(config map[string]interface{}, key string) map[string]interface{} { + section, ok := config[key] + if !ok { + return make(map[string]interface{}) + } + return section.(map[string]interface{}) +} + +// ForbiddenPropertiesError is returned when there are properties in the Compose +// file that are forbidden. +type ForbiddenPropertiesError struct { + Properties map[string]string +} + +func (e *ForbiddenPropertiesError) Error() string { + return "Configuration contains forbidden properties" +} + +// Transform converts the source into the target struct with compose types transformer +// and the specified transformers if any. 
+func Transform(source interface{}, target interface{}, additionalTransformers ...Transformer) error { + data := mapstructure.Metadata{} + config := &mapstructure.DecoderConfig{ + DecodeHook: mapstructure.ComposeDecodeHookFunc( + createTransformHook(additionalTransformers...), + mapstructure.StringToTimeDurationHookFunc()), + Result: target, + Metadata: &data, + } + decoder, err := mapstructure.NewDecoder(config) + if err != nil { + return err + } + return decoder.Decode(source) +} + +// TransformerFunc defines a function to perform the actual transformation +type TransformerFunc func(interface{}) (interface{}, error) + +// Transformer defines a map to type transformer +type Transformer struct { + TypeOf reflect.Type + Func TransformerFunc +} + +func createTransformHook(additionalTransformers ...Transformer) mapstructure.DecodeHookFuncType { + transforms := map[reflect.Type]func(interface{}) (interface{}, error){ + reflect.TypeOf(types.External{}): transformExternal, + reflect.TypeOf(types.HealthCheckTest{}): transformHealthCheckTest, + reflect.TypeOf(types.ShellCommand{}): transformShellCommand, + reflect.TypeOf(types.StringList{}): transformStringList, + reflect.TypeOf(map[string]string{}): transformMapStringString, + reflect.TypeOf(types.UlimitsConfig{}): transformUlimits, + reflect.TypeOf(types.UnitBytes(0)): transformSize, + reflect.TypeOf([]types.ServicePortConfig{}): transformServicePort, + reflect.TypeOf(types.ServiceSecretConfig{}): transformFileReferenceConfig, + reflect.TypeOf(types.ServiceConfigObjConfig{}): transformFileReferenceConfig, + reflect.TypeOf(types.StringOrNumberList{}): transformStringOrNumberList, + reflect.TypeOf(map[string]*types.ServiceNetworkConfig{}): transformServiceNetworkMap, + reflect.TypeOf(types.Mapping{}): transformMappingOrListFunc("=", false), + reflect.TypeOf(types.MappingWithEquals{}): transformMappingOrListFunc("=", true), + reflect.TypeOf(types.Labels{}): transformMappingOrListFunc("=", false), + 
reflect.TypeOf(types.MappingWithColon{}): transformMappingOrListFunc(":", false), + reflect.TypeOf(types.HostsList{}): transformListOrMappingFunc(":", false), + reflect.TypeOf(types.ServiceVolumeConfig{}): transformServiceVolumeConfig, + reflect.TypeOf(types.BuildConfig{}): transformBuildConfig, + reflect.TypeOf(types.Duration(0)): transformStringToDuration, + reflect.TypeOf(types.DependsOnConfig{}): transformDependsOnConfig, + reflect.TypeOf(types.ExtendsConfig{}): transformExtendsConfig, + reflect.TypeOf(types.DeviceRequest{}): transformServiceDeviceRequest, + reflect.TypeOf(types.SSHConfig{}): transformSSHConfig, + } + + for _, transformer := range additionalTransformers { + transforms[transformer.TypeOf] = transformer.Func + } + + return func(_ reflect.Type, target reflect.Type, data interface{}) (interface{}, error) { + transform, ok := transforms[target] + if !ok { + return data, nil + } + return transform(data) + } +} + +// keys need to be converted to strings for jsonschema +func convertToStringKeysRecursive(value interface{}, keyPrefix string) (interface{}, error) { + if mapping, ok := value.(map[interface{}]interface{}); ok { + dict := make(map[string]interface{}) + for key, entry := range mapping { + str, ok := key.(string) + if !ok { + return nil, formatInvalidKeyError(keyPrefix, key) + } + var newKeyPrefix string + if keyPrefix == "" { + newKeyPrefix = str + } else { + newKeyPrefix = fmt.Sprintf("%s.%s", keyPrefix, str) + } + convertedEntry, err := convertToStringKeysRecursive(entry, newKeyPrefix) + if err != nil { + return nil, err + } + dict[str] = convertedEntry + } + return dict, nil + } + if list, ok := value.([]interface{}); ok { + var convertedList []interface{} + for index, entry := range list { + newKeyPrefix := fmt.Sprintf("%s[%d]", keyPrefix, index) + convertedEntry, err := convertToStringKeysRecursive(entry, newKeyPrefix) + if err != nil { + return nil, err + } + convertedList = append(convertedList, convertedEntry) + } + return 
convertedList, nil + } + return value, nil +} + +func formatInvalidKeyError(keyPrefix string, key interface{}) error { + var location string + if keyPrefix == "" { + location = "at top level" + } else { + location = fmt.Sprintf("in %s", keyPrefix) + } + return errors.Errorf("Non-string key %s: %#v", location, key) +} + +// LoadServices produces a ServiceConfig map from a compose file Dict +// the servicesDict is not validated if directly used. Use Load() to enable validation +func LoadServices(filename string, servicesDict map[string]interface{}, workingDir string, lookupEnv template.Mapping, opts *Options) ([]types.ServiceConfig, error) { + var services []types.ServiceConfig + + x, ok := servicesDict["extensions"] + if ok { + // as a top-level attribute, "services" doesn't support extensions, and a service can be named `x-foo` + for k, v := range x.(map[string]interface{}) { + servicesDict[k] = v + } + } + + for name := range servicesDict { + serviceConfig, err := loadServiceWithExtends(filename, name, servicesDict, workingDir, lookupEnv, opts, &cycleTracker{}) + if err != nil { + return nil, err + } + + services = append(services, *serviceConfig) + } + + return services, nil +} + +func loadServiceWithExtends(filename, name string, servicesDict map[string]interface{}, workingDir string, lookupEnv template.Mapping, opts *Options, ct *cycleTracker) (*types.ServiceConfig, error) { + if err := ct.Add(filename, name); err != nil { + return nil, err + } + + target, ok := servicesDict[name] + if !ok { + return nil, fmt.Errorf("cannot extend service %q in %s: service not found", name, filename) + } + + serviceConfig, err := LoadService(name, target.(map[string]interface{}), workingDir, lookupEnv, opts.ResolvePaths, opts.ConvertWindowsPaths) + if err != nil { + return nil, err + } + + if serviceConfig.Extends != nil && !opts.SkipExtends { + baseServiceName := *serviceConfig.Extends["service"] + var baseService *types.ServiceConfig + if file := 
serviceConfig.Extends["file"]; file == nil { + baseService, err = loadServiceWithExtends(filename, baseServiceName, servicesDict, workingDir, lookupEnv, opts, ct) + if err != nil { + return nil, err + } + } else { + // Resolve the path to the imported file, and load it. + baseFilePath := absPath(workingDir, *file) + + bytes, err := ioutil.ReadFile(baseFilePath) + if err != nil { + return nil, err + } + + baseFile, err := parseConfig(bytes, opts) + if err != nil { + return nil, err + } + + baseFileServices := getSection(baseFile, "services") + baseService, err = loadServiceWithExtends(baseFilePath, baseServiceName, baseFileServices, filepath.Dir(baseFilePath), lookupEnv, opts, ct) + if err != nil { + return nil, err + } + + // Make paths relative to the importing Compose file. Note that we + // make the paths relative to `*file` rather than `baseFilePath` so + // that the resulting paths won't be absolute if `*file` isn't an + // absolute path. + baseFileParent := filepath.Dir(*file) + if baseService.Build != nil { + // Note that the Dockerfile is always defined relative to the + // build context, so there's no need to update the Dockerfile field. + baseService.Build.Context = absPath(baseFileParent, baseService.Build.Context) + } + + for i, vol := range baseService.Volumes { + if vol.Type != types.VolumeTypeBind { + continue + } + baseService.Volumes[i].Source = absPath(baseFileParent, vol.Source) + } + } + + serviceConfig, err = _merge(baseService, serviceConfig) + if err != nil { + return nil, err + } + } + + return serviceConfig, nil +} + +// LoadService produces a single ServiceConfig from a compose file Dict +// the serviceDict is not validated if directly used. 
Use Load() to enable validation +func LoadService(name string, serviceDict map[string]interface{}, workingDir string, lookupEnv template.Mapping, resolvePaths bool, convertPaths bool) (*types.ServiceConfig, error) { + serviceConfig := &types.ServiceConfig{ + Scale: 1, + } + if err := Transform(serviceDict, serviceConfig); err != nil { + return nil, err + } + serviceConfig.Name = name + + if err := resolveEnvironment(serviceConfig, workingDir, lookupEnv); err != nil { + return nil, err + } + + for i, volume := range serviceConfig.Volumes { + if volume.Type != types.VolumeTypeBind { + continue + } + + if volume.Source == "" { + return nil, errors.New(`invalid mount config for type "bind": field Source must not be empty`) + } + + if resolvePaths { + serviceConfig.Volumes[i] = resolveVolumePath(volume, workingDir, lookupEnv) + } + + if convertPaths { + serviceConfig.Volumes[i] = convertVolumePath(volume) + } + } + + return serviceConfig, nil +} + +// Windows paths, c:\\my\\path\\shiny, need to be changed to be compatible with +// the Engine. 
Volume paths are expected to be linux style /c/my/path/shiny/ +func convertVolumePath(volume types.ServiceVolumeConfig) types.ServiceVolumeConfig { + volumeName := strings.ToLower(filepath.VolumeName(volume.Source)) + if len(volumeName) != 2 { + return volume + } + + convertedSource := fmt.Sprintf("/%c%s", volumeName[0], volume.Source[len(volumeName):]) + convertedSource = strings.ReplaceAll(convertedSource, "\\", "/") + + volume.Source = convertedSource + return volume +} + +func resolveEnvironment(serviceConfig *types.ServiceConfig, workingDir string, lookupEnv template.Mapping) error { + environment := types.MappingWithEquals{} + + if len(serviceConfig.EnvFile) > 0 { + for _, envFile := range serviceConfig.EnvFile { + filePath := absPath(workingDir, envFile) + file, err := os.Open(filePath) + if err != nil { + return err + } + defer file.Close() + fileVars, err := dotenv.ParseWithLookup(file, dotenv.LookupFn(lookupEnv)) + if err != nil { + return err + } + env := types.MappingWithEquals{} + for k, v := range fileVars { + v := v + env[k] = &v + } + environment.OverrideBy(env.Resolve(lookupEnv).RemoveEmpty()) + } + } + + environment.OverrideBy(serviceConfig.Environment.Resolve(lookupEnv)) + serviceConfig.Environment = environment + return nil +} + +func resolveVolumePath(volume types.ServiceVolumeConfig, workingDir string, lookupEnv template.Mapping) types.ServiceVolumeConfig { + filePath := expandUser(volume.Source, lookupEnv) + // Check if source is an absolute path (either Unix or Windows), to + // handle a Windows client with a Unix daemon or vice-versa. + // + // Note that this is not required for Docker for Windows when specifying + // a local Windows path, because Docker for Windows translates the Windows + // path into a valid path within the VM. 
+ if !path.IsAbs(filePath) && !isAbs(filePath) { + filePath = absPath(workingDir, filePath) + } + volume.Source = filePath + return volume +} + +// TODO: make this more robust +func expandUser(path string, lookupEnv template.Mapping) string { + if strings.HasPrefix(path, "~") { + home, err := os.UserHomeDir() + if err != nil { + logrus.Warn("cannot expand '~', because the environment lacks HOME") + return path + } + return filepath.Join(home, path[1:]) + } + return path +} + +func transformUlimits(data interface{}) (interface{}, error) { + switch value := data.(type) { + case int: + return types.UlimitsConfig{Single: value}, nil + case map[string]interface{}: + ulimit := types.UlimitsConfig{} + if v, ok := value["soft"]; ok { + ulimit.Soft = v.(int) + } + if v, ok := value["hard"]; ok { + ulimit.Hard = v.(int) + } + return ulimit, nil + default: + return data, errors.Errorf("invalid type %T for ulimits", value) + } +} + +// LoadNetworks produces a NetworkConfig map from a compose file Dict +// the source Dict is not validated if directly used. 
Use Load() to enable validation +func LoadNetworks(source map[string]interface{}) (map[string]types.NetworkConfig, error) { + networks := make(map[string]types.NetworkConfig) + err := Transform(source, &networks) + if err != nil { + return networks, err + } + for name, network := range networks { + if !network.External.External { + continue + } + switch { + case network.External.Name != "": + if network.Name != "" { + return nil, errors.Errorf("network %s: network.external.name and network.name conflict; only use network.name", name) + } + logrus.Warnf("network %s: network.external.name is deprecated in favor of network.name", name) + network.Name = network.External.Name + network.External.Name = "" + case network.Name == "": + network.Name = name + } + networks[name] = network + } + return networks, nil +} + +func externalVolumeError(volume, key string) error { + return errors.Errorf( + "conflicting parameters \"external\" and %q specified for volume %q", + key, volume) +} + +// LoadVolumes produces a VolumeConfig map from a compose file Dict +// the source Dict is not validated if directly used. 
Use Load() to enable validation +func LoadVolumes(source map[string]interface{}) (map[string]types.VolumeConfig, error) { + volumes := make(map[string]types.VolumeConfig) + if err := Transform(source, &volumes); err != nil { + return volumes, err + } + + for name, volume := range volumes { + if !volume.External.External { + continue + } + switch { + case volume.Driver != "": + return nil, externalVolumeError(name, "driver") + case len(volume.DriverOpts) > 0: + return nil, externalVolumeError(name, "driver_opts") + case len(volume.Labels) > 0: + return nil, externalVolumeError(name, "labels") + case volume.External.Name != "": + if volume.Name != "" { + return nil, errors.Errorf("volume %s: volume.external.name and volume.name conflict; only use volume.name", name) + } + logrus.Warnf("volume %s: volume.external.name is deprecated in favor of volume.name", name) + volume.Name = volume.External.Name + volume.External.Name = "" + case volume.Name == "": + volume.Name = name + } + volumes[name] = volume + } + return volumes, nil +} + +// LoadSecrets produces a SecretConfig map from a compose file Dict +// the source Dict is not validated if directly used. Use Load() to enable validation +func LoadSecrets(source map[string]interface{}, details types.ConfigDetails, resolvePaths bool) (map[string]types.SecretConfig, error) { + secrets := make(map[string]types.SecretConfig) + if err := Transform(source, &secrets); err != nil { + return secrets, err + } + for name, secret := range secrets { + obj, err := loadFileObjectConfig(name, "secret", types.FileObjectConfig(secret), details, resolvePaths) + if err != nil { + return nil, err + } + secretConfig := types.SecretConfig(obj) + secrets[name] = secretConfig + } + return secrets, nil +} + +// LoadConfigObjs produces a ConfigObjConfig map from a compose file Dict +// the source Dict is not validated if directly used. 
Use Load() to enable validation +func LoadConfigObjs(source map[string]interface{}, details types.ConfigDetails, resolvePaths bool) (map[string]types.ConfigObjConfig, error) { + configs := make(map[string]types.ConfigObjConfig) + if err := Transform(source, &configs); err != nil { + return configs, err + } + for name, config := range configs { + obj, err := loadFileObjectConfig(name, "config", types.FileObjectConfig(config), details, resolvePaths) + if err != nil { + return nil, err + } + configConfig := types.ConfigObjConfig(obj) + configs[name] = configConfig + } + return configs, nil +} + +func loadFileObjectConfig(name string, objType string, obj types.FileObjectConfig, details types.ConfigDetails, resolvePaths bool) (types.FileObjectConfig, error) { + // if "external: true" + switch { + case obj.External.External: + // handle deprecated external.name + if obj.External.Name != "" { + if obj.Name != "" { + return obj, errors.Errorf("%[1]s %[2]s: %[1]s.external.name and %[1]s.name conflict; only use %[1]s.name", objType, name) + } + logrus.Warnf("%[1]s %[2]s: %[1]s.external.name is deprecated in favor of %[1]s.name", objType, name) + obj.Name = obj.External.Name + obj.External.Name = "" + } else { + if obj.Name == "" { + obj.Name = name + } + } + // if not "external: true" + case obj.Driver != "": + if obj.File != "" { + return obj, errors.Errorf("%[1]s %[2]s: %[1]s.driver and %[1]s.file conflict; only use %[1]s.driver", objType, name) + } + default: + if resolvePaths { + obj.File = absPath(details.WorkingDir, obj.File) + } + } + + return obj, nil +} + +func absPath(workingDir string, filePath string) string { + if strings.HasPrefix(filePath, "~") { + home, _ := os.UserHomeDir() + return filepath.Join(home, filePath[1:]) + } + if filepath.IsAbs(filePath) { + return filePath + } + return filepath.Join(workingDir, filePath) +} + +var transformMapStringString TransformerFunc = func(data interface{}) (interface{}, error) { + switch value := data.(type) { + case 
map[string]interface{}: + return toMapStringString(value, false), nil + case map[string]string: + return value, nil + default: + return data, errors.Errorf("invalid type %T for map[string]string", value) + } +} + +var transformExternal TransformerFunc = func(data interface{}) (interface{}, error) { + switch value := data.(type) { + case bool: + return map[string]interface{}{"external": value}, nil + case map[string]interface{}: + return map[string]interface{}{"external": true, "name": value["name"]}, nil + default: + return data, errors.Errorf("invalid type %T for external", value) + } +} + +var transformServicePort TransformerFunc = func(data interface{}) (interface{}, error) { + switch entries := data.(type) { + case []interface{}: + // We process the list instead of individual items here. + // The reason is that one entry might be mapped to multiple ServicePortConfig. + // Therefore we take an input of a list and return an output of a list. + var ports []interface{} + for _, entry := range entries { + switch value := entry.(type) { + case int: + parsed, err := types.ParsePortConfig(fmt.Sprint(value)) + if err != nil { + return data, err + } + for _, v := range parsed { + ports = append(ports, v) + } + case string: + parsed, err := types.ParsePortConfig(value) + if err != nil { + return data, err + } + for _, v := range parsed { + ports = append(ports, v) + } + case map[string]interface{}: + published := value["published"] + if v, ok := published.(int); ok { + value["published"] = strconv.Itoa(v) + } + ports = append(ports, groupXFieldsIntoExtensions(value)) + default: + return data, errors.Errorf("invalid type %T for port", value) + } + } + return ports, nil + default: + return data, errors.Errorf("invalid type %T for port", entries) + } +} + +var transformServiceDeviceRequest TransformerFunc = func(data interface{}) (interface{}, error) { + switch value := data.(type) { + case map[string]interface{}: + count, ok := value["count"] + if ok { + switch val := 
count.(type) { + case int: + return value, nil + case string: + if strings.ToLower(val) == "all" { + value["count"] = -1 + return value, nil + } + return data, errors.Errorf("invalid string value for 'count' (the only value allowed is 'all')") + default: + return data, errors.Errorf("invalid type %T for device count", val) + } + } + return data, nil + default: + return data, errors.Errorf("invalid type %T for resource reservation", value) + } +} + +var transformFileReferenceConfig TransformerFunc = func(data interface{}) (interface{}, error) { + switch value := data.(type) { + case string: + return map[string]interface{}{"source": value}, nil + case map[string]interface{}: + if target, ok := value["target"]; ok { + value["target"] = cleanTarget(target.(string)) + } + return groupXFieldsIntoExtensions(value), nil + default: + return data, errors.Errorf("invalid type %T for secret", value) + } +} + +func cleanTarget(target string) string { + if target == "" { + return "" + } + return path.Clean(target) +} + +var transformBuildConfig TransformerFunc = func(data interface{}) (interface{}, error) { + switch value := data.(type) { + case string: + return map[string]interface{}{"context": value}, nil + case map[string]interface{}: + return groupXFieldsIntoExtensions(data.(map[string]interface{})), nil + default: + return data, errors.Errorf("invalid type %T for service build", value) + } +} + +var transformDependsOnConfig TransformerFunc = func(data interface{}) (interface{}, error) { + switch value := data.(type) { + case []interface{}: + transformed := map[string]interface{}{} + for _, serviceIntf := range value { + service, ok := serviceIntf.(string) + if !ok { + return data, errors.Errorf("invalid type %T for service depends_on elementn, expected string", value) + } + transformed[service] = map[string]interface{}{"condition": types.ServiceConditionStarted} + } + return transformed, nil + case map[string]interface{}: + return 
groupXFieldsIntoExtensions(data.(map[string]interface{})), nil + default: + return data, errors.Errorf("invalid type %T for service depends_on", value) + } +} + +var transformExtendsConfig TransformerFunc = func(data interface{}) (interface{}, error) { + switch data.(type) { + case string: + data = map[string]interface{}{ + "service": data, + } + } + return transformMappingOrListFunc("=", true)(data) +} + +var transformServiceVolumeConfig TransformerFunc = func(data interface{}) (interface{}, error) { + switch value := data.(type) { + case string: + volume, err := ParseVolume(value) + volume.Target = cleanTarget(volume.Target) + return volume, err + case map[string]interface{}: + data := groupXFieldsIntoExtensions(data.(map[string]interface{})) + if target, ok := data["target"]; ok { + data["target"] = cleanTarget(target.(string)) + } + return data, nil + default: + return data, errors.Errorf("invalid type %T for service volume", value) + } +} + +var transformServiceNetworkMap TransformerFunc = func(value interface{}) (interface{}, error) { + if list, ok := value.([]interface{}); ok { + mapValue := map[interface{}]interface{}{} + for _, name := range list { + mapValue[name] = nil + } + return mapValue, nil + } + return value, nil +} + +var transformSSHConfig TransformerFunc = func(data interface{}) (interface{}, error) { + switch value := data.(type) { + case map[string]interface{}: + var result []types.SSHKey + for key, val := range value { + if val == nil { + val = "" + } + result = append(result, types.SSHKey{ID: key, Path: val.(string)}) + } + return result, nil + case []interface{}: + var result []types.SSHKey + for _, v := range value { + key, val := transformValueToMapEntry(v.(string), "=", false) + result = append(result, types.SSHKey{ID: key, Path: val.(string)}) + } + return result, nil + case string: + return ParseShortSSHSyntax(value) + } + return nil, errors.Errorf("expected a sting, map or a list, got %T: %#v", data, data) +} + +// ParseShortSSHSyntax 
parse short syntax for SSH authentications +func ParseShortSSHSyntax(value string) ([]types.SSHKey, error) { + if value == "" { + value = "default" + } + key, val := transformValueToMapEntry(value, "=", false) + result := []types.SSHKey{{ID: key, Path: val.(string)}} + return result, nil +} + +var transformStringOrNumberList TransformerFunc = func(value interface{}) (interface{}, error) { + list := value.([]interface{}) + result := make([]string, len(list)) + for i, item := range list { + result[i] = fmt.Sprint(item) + } + return result, nil +} + +var transformStringList TransformerFunc = func(data interface{}) (interface{}, error) { + switch value := data.(type) { + case string: + return []string{value}, nil + case []interface{}: + return value, nil + default: + return data, errors.Errorf("invalid type %T for string list", value) + } +} + +func transformMappingOrListFunc(sep string, allowNil bool) TransformerFunc { + return func(data interface{}) (interface{}, error) { + return transformMappingOrList(data, sep, allowNil) + } +} + +func transformListOrMappingFunc(sep string, allowNil bool) TransformerFunc { + return func(data interface{}) (interface{}, error) { + return transformListOrMapping(data, sep, allowNil) + } +} + +func transformListOrMapping(listOrMapping interface{}, sep string, allowNil bool) (interface{}, error) { + switch value := listOrMapping.(type) { + case map[string]interface{}: + return toStringList(value, sep, allowNil), nil + case []interface{}: + return listOrMapping, nil + } + return nil, errors.Errorf("expected a map or a list, got %T: %#v", listOrMapping, listOrMapping) +} + +func transformMappingOrList(mappingOrList interface{}, sep string, allowNil bool) (interface{}, error) { + switch value := mappingOrList.(type) { + case map[string]interface{}: + return toMapStringString(value, allowNil), nil + case []interface{}: + result := make(map[string]interface{}) + for _, value := range value { + key, val := 
transformValueToMapEntry(value.(string), sep, allowNil) + result[key] = val + } + return result, nil + } + return nil, errors.Errorf("expected a map or a list, got %T: %#v", mappingOrList, mappingOrList) +} + +func transformValueToMapEntry(value string, separator string, allowNil bool) (string, interface{}) { + parts := strings.SplitN(value, separator, 2) + key := parts[0] + switch { + case len(parts) == 1 && allowNil: + return key, nil + case len(parts) == 1 && !allowNil: + return key, "" + default: + return key, parts[1] + } +} + +var transformShellCommand TransformerFunc = func(value interface{}) (interface{}, error) { + if str, ok := value.(string); ok { + return shellwords.Parse(str) + } + return value, nil +} + +var transformHealthCheckTest TransformerFunc = func(data interface{}) (interface{}, error) { + switch value := data.(type) { + case string: + return append([]string{"CMD-SHELL"}, value), nil + case []interface{}: + return value, nil + default: + return value, errors.Errorf("invalid type %T for healthcheck.test", value) + } +} + +var transformSize TransformerFunc = func(value interface{}) (interface{}, error) { + switch value := value.(type) { + case int: + return int64(value), nil + case int64, types.UnitBytes: + return value, nil + case string: + return units.RAMInBytes(value) + default: + return value, errors.Errorf("invalid type for size %T", value) + } +} + +var transformStringToDuration TransformerFunc = func(value interface{}) (interface{}, error) { + switch value := value.(type) { + case string: + d, err := time.ParseDuration(value) + if err != nil { + return value, err + } + return types.Duration(d), nil + case types.Duration: + return value, nil + default: + return value, errors.Errorf("invalid type %T for duration", value) + } +} + +func toMapStringString(value map[string]interface{}, allowNil bool) map[string]interface{} { + output := make(map[string]interface{}) + for key, value := range value { + output[key] = toString(value, allowNil) + 
} + return output +} + +func toString(value interface{}, allowNil bool) interface{} { + switch { + case value != nil: + return fmt.Sprint(value) + case allowNil: + return nil + default: + return "" + } +} + +func toStringList(value map[string]interface{}, separator string, allowNil bool) []string { + var output []string + for key, value := range value { + if value == nil && !allowNil { + continue + } + output = append(output, fmt.Sprintf("%s%s%s", key, separator, value)) + } + sort.Strings(output) + return output +} diff --git a/vendor/github.com/compose-spec/compose-go/loader/merge.go b/vendor/github.com/compose-spec/compose-go/loader/merge.go new file mode 100644 index 0000000000..f6138ca292 --- /dev/null +++ b/vendor/github.com/compose-spec/compose-go/loader/merge.go @@ -0,0 +1,362 @@ +/* + Copyright 2020 The Compose Specification Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+*/ + +package loader + +import ( + "reflect" + "sort" + + "github.com/compose-spec/compose-go/types" + "github.com/imdario/mergo" + "github.com/pkg/errors" +) + +type specials struct { + m map[reflect.Type]func(dst, src reflect.Value) error +} + +var serviceSpecials = &specials{ + m: map[reflect.Type]func(dst, src reflect.Value) error{ + reflect.TypeOf(&types.LoggingConfig{}): safelyMerge(mergeLoggingConfig), + reflect.TypeOf(&types.UlimitsConfig{}): safelyMerge(mergeUlimitsConfig), + reflect.TypeOf([]types.ServiceVolumeConfig{}): mergeSlice(toServiceVolumeConfigsMap, toServiceVolumeConfigsSlice), + reflect.TypeOf([]types.ServicePortConfig{}): mergeSlice(toServicePortConfigsMap, toServicePortConfigsSlice), + reflect.TypeOf([]types.ServiceSecretConfig{}): mergeSlice(toServiceSecretConfigsMap, toServiceSecretConfigsSlice), + reflect.TypeOf([]types.ServiceConfigObjConfig{}): mergeSlice(toServiceConfigObjConfigsMap, toSServiceConfigObjConfigsSlice), + reflect.TypeOf(&types.UlimitsConfig{}): mergeUlimitsConfig, + reflect.TypeOf(&types.ServiceNetworkConfig{}): mergeServiceNetworkConfig, + }, +} + +func (s *specials) Transformer(t reflect.Type) func(dst, src reflect.Value) error { + if fn, ok := s.m[t]; ok { + return fn + } + return nil +} + +func merge(configs []*types.Config) (*types.Config, error) { + base := configs[0] + for _, override := range configs[1:] { + var err error + base.Name = mergeNames(base.Name, override.Name) + base.Services, err = mergeServices(base.Services, override.Services) + if err != nil { + return base, errors.Wrapf(err, "cannot merge services from %s", override.Filename) + } + base.Volumes, err = mergeVolumes(base.Volumes, override.Volumes) + if err != nil { + return base, errors.Wrapf(err, "cannot merge volumes from %s", override.Filename) + } + base.Networks, err = mergeNetworks(base.Networks, override.Networks) + if err != nil { + return base, errors.Wrapf(err, "cannot merge networks from %s", override.Filename) + } + base.Secrets, err = 
mergeSecrets(base.Secrets, override.Secrets) + if err != nil { + return base, errors.Wrapf(err, "cannot merge secrets from %s", override.Filename) + } + base.Configs, err = mergeConfigs(base.Configs, override.Configs) + if err != nil { + return base, errors.Wrapf(err, "cannot merge configs from %s", override.Filename) + } + base.Extensions, err = mergeExtensions(base.Extensions, override.Extensions) + if err != nil { + return base, errors.Wrapf(err, "cannot merge extensions from %s", override.Filename) + } + } + return base, nil +} + +func mergeNames(base, override string) string { + if override != "" { + return override + } + return base +} + +func mergeServices(base, override []types.ServiceConfig) ([]types.ServiceConfig, error) { + baseServices := mapByName(base) + overrideServices := mapByName(override) + for name, overrideService := range overrideServices { + overrideService := overrideService + if baseService, ok := baseServices[name]; ok { + merged, err := _merge(&baseService, &overrideService) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge service %s", name) + } + baseServices[name] = *merged + continue + } + baseServices[name] = overrideService + } + services := []types.ServiceConfig{} + for _, baseService := range baseServices { + services = append(services, baseService) + } + sort.Slice(services, func(i, j int) bool { return services[i].Name < services[j].Name }) + return services, nil +} + +func _merge(baseService *types.ServiceConfig, overrideService *types.ServiceConfig) (*types.ServiceConfig, error) { + if err := mergo.Merge(baseService, overrideService, mergo.WithAppendSlice, mergo.WithOverride, mergo.WithTransformers(serviceSpecials)); err != nil { + return nil, err + } + if overrideService.Command != nil { + baseService.Command = overrideService.Command + } + if overrideService.Entrypoint != nil { + baseService.Entrypoint = overrideService.Entrypoint + } + if baseService.Environment != nil { + 
baseService.Environment.OverrideBy(overrideService.Environment) + } else { + baseService.Environment = overrideService.Environment + } + return baseService, nil +} + +func toServiceSecretConfigsMap(s interface{}) (map[interface{}]interface{}, error) { + secrets, ok := s.([]types.ServiceSecretConfig) + if !ok { + return nil, errors.Errorf("not a serviceSecretConfig: %v", s) + } + m := map[interface{}]interface{}{} + for _, secret := range secrets { + m[secret.Source] = secret + } + return m, nil +} + +func toServiceConfigObjConfigsMap(s interface{}) (map[interface{}]interface{}, error) { + secrets, ok := s.([]types.ServiceConfigObjConfig) + if !ok { + return nil, errors.Errorf("not a serviceSecretConfig: %v", s) + } + m := map[interface{}]interface{}{} + for _, secret := range secrets { + m[secret.Source] = secret + } + return m, nil +} + +func toServicePortConfigsMap(s interface{}) (map[interface{}]interface{}, error) { + ports, ok := s.([]types.ServicePortConfig) + if !ok { + return nil, errors.Errorf("not a servicePortConfig slice: %v", s) + } + m := map[interface{}]interface{}{} + type port struct { + target uint32 + published string + ip string + protocol string + } + + for _, p := range ports { + mergeKey := port{ + target: p.Target, + published: p.Published, + ip: p.HostIP, + protocol: p.Protocol, + } + m[mergeKey] = p + } + return m, nil +} + +func toServiceVolumeConfigsMap(s interface{}) (map[interface{}]interface{}, error) { + volumes, ok := s.([]types.ServiceVolumeConfig) + if !ok { + return nil, errors.Errorf("not a ServiceVolumeConfig slice: %v", s) + } + m := map[interface{}]interface{}{} + for _, v := range volumes { + m[v.Target] = v + } + return m, nil +} + +func toServiceSecretConfigsSlice(dst reflect.Value, m map[interface{}]interface{}) error { + var s []types.ServiceSecretConfig + for _, v := range m { + s = append(s, v.(types.ServiceSecretConfig)) + } + sort.Slice(s, func(i, j int) bool { return s[i].Source < s[j].Source }) + 
dst.Set(reflect.ValueOf(s)) + return nil +} + +func toSServiceConfigObjConfigsSlice(dst reflect.Value, m map[interface{}]interface{}) error { + var s []types.ServiceConfigObjConfig + for _, v := range m { + s = append(s, v.(types.ServiceConfigObjConfig)) + } + sort.Slice(s, func(i, j int) bool { return s[i].Source < s[j].Source }) + dst.Set(reflect.ValueOf(s)) + return nil +} + +func toServicePortConfigsSlice(dst reflect.Value, m map[interface{}]interface{}) error { + var s []types.ServicePortConfig + for _, v := range m { + s = append(s, v.(types.ServicePortConfig)) + } + sort.Slice(s, func(i, j int) bool { + if s[i].Target != s[j].Target { + return s[i].Target < s[j].Target + } + if s[i].Published != s[j].Published { + return s[i].Published < s[j].Published + } + if s[i].HostIP != s[j].HostIP { + return s[i].HostIP < s[j].HostIP + } + return s[i].Protocol < s[j].Protocol + }) + dst.Set(reflect.ValueOf(s)) + return nil +} + +func toServiceVolumeConfigsSlice(dst reflect.Value, m map[interface{}]interface{}) error { + var s []types.ServiceVolumeConfig + for _, v := range m { + s = append(s, v.(types.ServiceVolumeConfig)) + } + sort.Slice(s, func(i, j int) bool { return s[i].Target < s[j].Target }) + dst.Set(reflect.ValueOf(s)) + return nil +} + +type toMapFn func(s interface{}) (map[interface{}]interface{}, error) +type writeValueFromMapFn func(reflect.Value, map[interface{}]interface{}) error + +func safelyMerge(mergeFn func(dst, src reflect.Value) error) func(dst, src reflect.Value) error { + return func(dst, src reflect.Value) error { + if src.IsNil() { + return nil + } + if dst.IsNil() { + dst.Set(src) + return nil + } + return mergeFn(dst, src) + } +} + +func mergeSlice(toMap toMapFn, writeValue writeValueFromMapFn) func(dst, src reflect.Value) error { + return func(dst, src reflect.Value) error { + dstMap, err := sliceToMap(toMap, dst) + if err != nil { + return err + } + srcMap, err := sliceToMap(toMap, src) + if err != nil { + return err + } + if err := 
mergo.Map(&dstMap, srcMap, mergo.WithOverride); err != nil { + return err + } + return writeValue(dst, dstMap) + } +} + +func sliceToMap(toMap toMapFn, v reflect.Value) (map[interface{}]interface{}, error) { + // check if valid + if !v.IsValid() { + return nil, errors.Errorf("invalid value : %+v", v) + } + return toMap(v.Interface()) +} + +func mergeLoggingConfig(dst, src reflect.Value) error { + // Same driver, merging options + if getLoggingDriver(dst.Elem()) == getLoggingDriver(src.Elem()) || + getLoggingDriver(dst.Elem()) == "" || getLoggingDriver(src.Elem()) == "" { + if getLoggingDriver(dst.Elem()) == "" { + dst.Elem().FieldByName("Driver").SetString(getLoggingDriver(src.Elem())) + } + dstOptions := dst.Elem().FieldByName("Options").Interface().(map[string]string) + srcOptions := src.Elem().FieldByName("Options").Interface().(map[string]string) + return mergo.Merge(&dstOptions, srcOptions, mergo.WithOverride) + } + // Different driver, override with src + dst.Set(src) + return nil +} + +// nolint: unparam +func mergeUlimitsConfig(dst, src reflect.Value) error { + if src.Interface() != reflect.Zero(reflect.TypeOf(src.Interface())).Interface() { + dst.Elem().Set(src.Elem()) + } + return nil +} + +// nolint: unparam +func mergeServiceNetworkConfig(dst, src reflect.Value) error { + if src.Interface() != reflect.Zero(reflect.TypeOf(src.Interface())).Interface() { + dst.Elem().FieldByName("Aliases").Set(src.Elem().FieldByName("Aliases")) + if ipv4 := src.Elem().FieldByName("Ipv4Address").Interface().(string); ipv4 != "" { + dst.Elem().FieldByName("Ipv4Address").SetString(ipv4) + } + if ipv6 := src.Elem().FieldByName("Ipv6Address").Interface().(string); ipv6 != "" { + dst.Elem().FieldByName("Ipv6Address").SetString(ipv6) + } + } + return nil +} + +func getLoggingDriver(v reflect.Value) string { + return v.FieldByName("Driver").String() +} + +func mapByName(services []types.ServiceConfig) map[string]types.ServiceConfig { + m := map[string]types.ServiceConfig{} + for 
_, service := range services { + m[service.Name] = service + } + return m +} + +func mergeVolumes(base, override map[string]types.VolumeConfig) (map[string]types.VolumeConfig, error) { + err := mergo.Map(&base, &override, mergo.WithOverride) + return base, err +} + +func mergeNetworks(base, override map[string]types.NetworkConfig) (map[string]types.NetworkConfig, error) { + err := mergo.Map(&base, &override, mergo.WithOverride) + return base, err +} + +func mergeSecrets(base, override map[string]types.SecretConfig) (map[string]types.SecretConfig, error) { + err := mergo.Map(&base, &override, mergo.WithOverride) + return base, err +} + +func mergeConfigs(base, override map[string]types.ConfigObjConfig) (map[string]types.ConfigObjConfig, error) { + err := mergo.Map(&base, &override, mergo.WithOverride) + return base, err +} + +func mergeExtensions(base, override map[string]interface{}) (map[string]interface{}, error) { + if base == nil { + base = map[string]interface{}{} + } + err := mergo.Map(&base, &override, mergo.WithOverride) + return base, err +} diff --git a/vendor/github.com/compose-spec/compose-go/loader/normalize.go b/vendor/github.com/compose-spec/compose-go/loader/normalize.go new file mode 100644 index 0000000000..4b98d624a7 --- /dev/null +++ b/vendor/github.com/compose-spec/compose-go/loader/normalize.go @@ -0,0 +1,264 @@ +/* + Copyright 2020 The Compose Specification Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+*/ + +package loader + +import ( + "fmt" + "os" + "path/filepath" + + "github.com/compose-spec/compose-go/errdefs" + "github.com/compose-spec/compose-go/types" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" +) + +// normalize compose project by moving deprecated attributes to their canonical position and injecting implicit defaults +func normalize(project *types.Project, resolvePaths bool) error { + absWorkingDir, err := filepath.Abs(project.WorkingDir) + if err != nil { + return err + } + project.WorkingDir = absWorkingDir + + absComposeFiles, err := absComposeFiles(project.ComposeFiles) + if err != nil { + return err + } + project.ComposeFiles = absComposeFiles + + if project.Networks == nil { + project.Networks = make(map[string]types.NetworkConfig) + } + + // If not declared explicitly, Compose model involves an implicit "default" network + if _, ok := project.Networks["default"]; !ok { + project.Networks["default"] = types.NetworkConfig{} + } + + err = relocateExternalName(project) + if err != nil { + return err + } + + for i, s := range project.Services { + if len(s.Networks) == 0 && s.NetworkMode == "" { + // Service without explicit network attachment are implicitly exposed on default network + s.Networks = map[string]*types.ServiceNetworkConfig{"default": nil} + } + + if s.PullPolicy == types.PullPolicyIfNotPresent { + s.PullPolicy = types.PullPolicyMissing + } + + fn := func(s string) (string, bool) { + v, ok := project.Environment[s] + return v, ok + } + + if s.Build != nil { + if s.Build.Dockerfile == "" { + s.Build.Dockerfile = "Dockerfile" + } + localContext := absPath(project.WorkingDir, s.Build.Context) + if _, err := os.Stat(localContext); err == nil { + if resolvePaths { + s.Build.Context = localContext + } + // } else { + // might be a remote http/git context. 
Unfortunately supported "remote" syntax is highly ambiguous + // in moby/moby and not defined by compose-spec, so let's assume runtime will check + } + s.Build.Args = s.Build.Args.Resolve(fn) + } + s.Environment = s.Environment.Resolve(fn) + + err := relocateLogDriver(&s) + if err != nil { + return err + } + + err = relocateLogOpt(&s) + if err != nil { + return err + } + + err = relocateDockerfile(&s) + if err != nil { + return err + } + + err = relocateScale(&s) + if err != nil { + return err + } + + project.Services[i] = s + } + + setNameFromKey(project) + + return nil +} + +func relocateScale(s *types.ServiceConfig) error { + scale := uint64(s.Scale) + if scale != 1 { + logrus.Warn("`scale` is deprecated. Use the `deploy.replicas` element") + if s.Deploy == nil { + s.Deploy = &types.DeployConfig{} + } + if s.Deploy.Replicas != nil && *s.Deploy.Replicas != scale { + return errors.Wrap(errdefs.ErrInvalid, "can't use both 'scale' (deprecated) and 'deploy.replicas'") + } + s.Deploy.Replicas = &scale + } + return nil +} + +func absComposeFiles(composeFiles []string) ([]string, error) { + absComposeFiles := make([]string, len(composeFiles)) + for i, composeFile := range composeFiles { + absComposefile, err := filepath.Abs(composeFile) + if err != nil { + return nil, err + } + absComposeFiles[i] = absComposefile + } + return absComposeFiles, nil +} + +// Resources with no explicit name are actually named by their key in map +func setNameFromKey(project *types.Project) { + for i, n := range project.Networks { + if n.Name == "" { + n.Name = fmt.Sprintf("%s_%s", project.Name, i) + project.Networks[i] = n + } + } + + for i, v := range project.Volumes { + if v.Name == "" { + v.Name = fmt.Sprintf("%s_%s", project.Name, i) + project.Volumes[i] = v + } + } + + for i, c := range project.Configs { + if c.Name == "" { + c.Name = fmt.Sprintf("%s_%s", project.Name, i) + project.Configs[i] = c + } + } + + for i, s := range project.Secrets { + if s.Name == "" { + s.Name = 
fmt.Sprintf("%s_%s", project.Name, i) + project.Secrets[i] = s + } + } +} + +func relocateExternalName(project *types.Project) error { + for i, n := range project.Networks { + if n.External.Name != "" { + if n.Name != "" { + return errors.Wrap(errdefs.ErrInvalid, "can't use both 'networks.external.name' (deprecated) and 'networks.name'") + } + n.Name = n.External.Name + } + project.Networks[i] = n + } + + for i, v := range project.Volumes { + if v.External.Name != "" { + if v.Name != "" { + return errors.Wrap(errdefs.ErrInvalid, "can't use both 'volumes.external.name' (deprecated) and 'volumes.name'") + } + v.Name = v.External.Name + } + project.Volumes[i] = v + } + + for i, s := range project.Secrets { + if s.External.Name != "" { + if s.Name != "" { + return errors.Wrap(errdefs.ErrInvalid, "can't use both 'secrets.external.name' (deprecated) and 'secrets.name'") + } + s.Name = s.External.Name + } + project.Secrets[i] = s + } + + for i, c := range project.Configs { + if c.External.Name != "" { + if c.Name != "" { + return errors.Wrap(errdefs.ErrInvalid, "can't use both 'configs.external.name' (deprecated) and 'configs.name'") + } + c.Name = c.External.Name + } + project.Configs[i] = c + } + return nil +} + +func relocateLogOpt(s *types.ServiceConfig) error { + if len(s.LogOpt) != 0 { + logrus.Warn("`log_opts` is deprecated. Use the `logging` element") + if s.Logging == nil { s.Logging = &types.LoggingConfig{} } + // Options may be nil (fresh LoggingConfig, or `logging:` without `options:`); writing to a nil map panics. + if s.Logging.Options == nil { s.Logging.Options = map[string]string{} } + for k, v := range s.LogOpt { + if _, ok := s.Logging.Options[k]; !ok { + s.Logging.Options[k] = v + } else { + return errors.Wrap(errdefs.ErrInvalid, "can't use both 'log_opt' (deprecated) and 'logging.options'") + } + } + } + return nil +} + +func relocateLogDriver(s *types.ServiceConfig) error { + if s.LogDriver != "" { + logrus.Warn("`log_driver` is deprecated. 
Use the `logging` element") + if s.Logging == nil { + s.Logging = &types.LoggingConfig{} + } + if s.Logging.Driver == "" { + s.Logging.Driver = s.LogDriver + } else { + return errors.Wrap(errdefs.ErrInvalid, "can't use both 'log_driver' (deprecated) and 'logging.driver'") + } + } + return nil +} + +func relocateDockerfile(s *types.ServiceConfig) error { + if s.Dockerfile != "" { + logrus.Warn("`dockerfile` is deprecated. Use the `build` element") + if s.Build == nil { + s.Build = &types.BuildConfig{} + } + if s.Build.Dockerfile == "" { + s.Build.Dockerfile = s.Dockerfile + } else { + return errors.Wrap(errdefs.ErrInvalid, "can't use both 'dockerfile' (deprecated) and 'build.dockerfile'") + } + } + return nil +} diff --git a/vendor/github.com/compose-spec/compose-go/loader/validate.go b/vendor/github.com/compose-spec/compose-go/loader/validate.go new file mode 100644 index 0000000000..4493c051da --- /dev/null +++ b/vendor/github.com/compose-spec/compose-go/loader/validate.go @@ -0,0 +1,70 @@ +/* + Copyright 2020 The Compose Specification Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+*/ + +package loader + +import ( + "fmt" + "strings" + + "github.com/compose-spec/compose-go/errdefs" + "github.com/compose-spec/compose-go/types" + "github.com/pkg/errors" +) + +// checkConsistency validate a compose model is consistent +func checkConsistency(project *types.Project) error { + for _, s := range project.Services { + if s.Build == nil && s.Image == "" { + return errors.Wrapf(errdefs.ErrInvalid, "service %q has neither an image nor a build context specified", s.Name) + } + + for network := range s.Networks { + if _, ok := project.Networks[network]; !ok { + return errors.Wrap(errdefs.ErrInvalid, fmt.Sprintf("service %q refers to undefined network %s", s.Name, network)) + } + } + + if strings.HasPrefix(s.NetworkMode, types.ServicePrefix) { + serviceName := s.NetworkMode[len(types.ServicePrefix):] + if _, err := project.GetServices(serviceName); err != nil { + return fmt.Errorf("service %q not found for network_mode 'service:%s'", serviceName, serviceName) + } + } + + for _, volume := range s.Volumes { + switch volume.Type { + case types.VolumeTypeVolume: + if volume.Source != "" { // non anonymous volumes + if _, ok := project.Volumes[volume.Source]; !ok { + return errors.Wrap(errdefs.ErrInvalid, fmt.Sprintf("service %q refers to undefined volume %s", s.Name, volume.Source)) + } + } + } + } + for _, secret := range s.Secrets { + if _, ok := project.Secrets[secret.Source]; !ok { + return errors.Wrap(errdefs.ErrInvalid, fmt.Sprintf("service %q refers to undefined secret %s", s.Name, secret.Source)) + } + } + for _, config := range s.Configs { + if _, ok := project.Configs[config.Source]; !ok { + return errors.Wrap(errdefs.ErrInvalid, fmt.Sprintf("service %q refers to undefined config %s", s.Name, config.Source)) + } + } + } + return nil +} diff --git a/vendor/github.com/compose-spec/compose-go/loader/volume.go b/vendor/github.com/compose-spec/compose-go/loader/volume.go new file mode 100644 index 0000000000..f1e66cde83 --- /dev/null +++ 
b/vendor/github.com/compose-spec/compose-go/loader/volume.go @@ -0,0 +1,180 @@ +/* + Copyright 2020 The Compose Specification Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package loader + +import ( + "strings" + "unicode" + "unicode/utf8" + + "github.com/compose-spec/compose-go/types" + "github.com/pkg/errors" +) + +const endOfSpec = rune(0) + +// ParseVolume parses a volume spec without any knowledge of the target platform +func ParseVolume(spec string) (types.ServiceVolumeConfig, error) { + volume := types.ServiceVolumeConfig{} + + switch len(spec) { + case 0: + return volume, errors.New("invalid empty volume spec") + case 1, 2: + volume.Target = spec + volume.Type = types.VolumeTypeVolume + return volume, nil + } + + var buffer []rune + for _, char := range spec + string(endOfSpec) { + switch { + case isWindowsDrive(buffer, char): + buffer = append(buffer, char) + case char == ':' || char == endOfSpec: + if err := populateFieldFromBuffer(char, buffer, &volume); err != nil { + populateType(&volume) + return volume, errors.Wrapf(err, "invalid spec: %s", spec) + } + buffer = nil + default: + buffer = append(buffer, char) + } + } + + populateType(&volume) + return volume, nil +} + +func isWindowsDrive(buffer []rune, char rune) bool { + return char == ':' && len(buffer) == 1 && unicode.IsLetter(buffer[0]) +} + +func populateFieldFromBuffer(char rune, buffer []rune, volume *types.ServiceVolumeConfig) error { + strBuffer := string(buffer) + switch { + case 
len(buffer) == 0: + return errors.New("empty section between colons") + // Anonymous volume + case volume.Source == "" && char == endOfSpec: + volume.Target = strBuffer + return nil + case volume.Source == "": + volume.Source = strBuffer + return nil + case volume.Target == "": + volume.Target = strBuffer + return nil + case char == ':': + return errors.New("too many colons") + } + for _, option := range strings.Split(strBuffer, ",") { + switch option { + case "ro": + volume.ReadOnly = true + case "rw": + volume.ReadOnly = false + case "nocopy": + volume.Volume = &types.ServiceVolumeVolume{NoCopy: true} + default: + if isBindOption(option) { + setBindOption(volume, option) + } + // ignore unknown options + } + } + return nil +} + +var Propagations = []string{ + types.PropagationRPrivate, + types.PropagationPrivate, + types.PropagationRShared, + types.PropagationShared, + types.PropagationRSlave, + types.PropagationSlave, +} + +type setBindOptionFunc func(bind *types.ServiceVolumeBind, option string) + +var bindOptions = map[string]setBindOptionFunc{ + types.PropagationRPrivate: setBindPropagation, + types.PropagationPrivate: setBindPropagation, + types.PropagationRShared: setBindPropagation, + types.PropagationShared: setBindPropagation, + types.PropagationRSlave: setBindPropagation, + types.PropagationSlave: setBindPropagation, + types.SELinuxShared: setBindSELinux, + types.SELinuxPrivate: setBindSELinux, +} + +func setBindPropagation(bind *types.ServiceVolumeBind, option string) { + bind.Propagation = option +} + +func setBindSELinux(bind *types.ServiceVolumeBind, option string) { + bind.SELinux = option +} + +func isBindOption(option string) bool { + _, ok := bindOptions[option] + + return ok +} + +func setBindOption(volume *types.ServiceVolumeConfig, option string) { + if volume.Bind == nil { + volume.Bind = &types.ServiceVolumeBind{} + } + + bindOptions[option](volume.Bind, option) +} + +func populateType(volume *types.ServiceVolumeConfig) { + if 
isFilePath(volume.Source) { + volume.Type = types.VolumeTypeBind + if volume.Bind == nil { + volume.Bind = &types.ServiceVolumeBind{} + } + // For backward compatibility with docker-compose legacy, using short notation involves + // bind will create missing host path + volume.Bind.CreateHostPath = true + } else { + volume.Type = types.VolumeTypeVolume + if volume.Volume == nil { + volume.Volume = &types.ServiceVolumeVolume{} + } + } +} + +func isFilePath(source string) bool { + if source == "" { + return false + } + switch source[0] { + case '.', '/', '~': + return true + } + + // windows named pipes + if strings.HasPrefix(source, `\\`) { + return true + } + + // guard: a one-rune source (e.g. volume "1" from spec "1:/x") would index past the end + first, nextIndex := utf8.DecodeRuneInString(source) + return nextIndex < len(source) && isWindowsDrive([]rune{first}, rune(source[nextIndex])) +} diff --git a/vendor/github.com/compose-spec/compose-go/loader/windows_path.go b/vendor/github.com/compose-spec/compose-go/loader/windows_path.go new file mode 100644 index 0000000000..5094f5b576 --- /dev/null +++ b/vendor/github.com/compose-spec/compose-go/loader/windows_path.go @@ -0,0 +1,82 @@ +/* + Copyright 2020 The Compose Specification Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package loader + +// Copyright 2010 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+// https://github.com/golang/go/blob/master/LICENSE + +// This file contains utilities to check for Windows absolute paths on Linux. +// The code in this file was largely copied from the Golang filepath package +// https://github.com/golang/go/blob/1d0e94b1e13d5e8a323a63cd1cc1ef95290c9c36/src/path/filepath/path_windows.go#L12-L65 + +func isSlash(c uint8) bool { + return c == '\\' || c == '/' +} + +// isAbs reports whether the path is a Windows absolute path. +func isAbs(path string) (b bool) { + l := volumeNameLen(path) + if l == 0 { + return false + } + path = path[l:] + if path == "" { + return false + } + return isSlash(path[0]) +} + +// volumeNameLen returns length of the leading volume name on Windows. +// It returns 0 elsewhere. +// nolint: gocyclo +func volumeNameLen(path string) int { + if len(path) < 2 { + return 0 + } + // with drive letter + c := path[0] + if path[1] == ':' && ('a' <= c && c <= 'z' || 'A' <= c && c <= 'Z') { + return 2 + } + // is it UNC? https://msdn.microsoft.com/en-us/library/windows/desktop/aa365247(v=vs.85).aspx + if l := len(path); l >= 5 && isSlash(path[0]) && isSlash(path[1]) && + !isSlash(path[2]) && path[2] != '.' { + // first, leading `\\` and next shouldn't be `\`. its server name. + for n := 3; n < l-1; n++ { + // second, next '\' shouldn't be repeated. + if isSlash(path[n]) { + n++ + // third, following something characters. its share name. + if !isSlash(path[n]) { + if path[n] == '.' 
{ + break + } + for ; n < l; n++ { + if isSlash(path[n]) { + break + } + } + return n + } + break + } + } + } + return 0 +} diff --git a/vendor/github.com/compose-spec/compose-go/schema/compose-spec.json b/vendor/github.com/compose-spec/compose-go/schema/compose-spec.json new file mode 100644 index 0000000000..b2088998b9 --- /dev/null +++ b/vendor/github.com/compose-spec/compose-go/schema/compose-spec.json @@ -0,0 +1,827 @@ +{ + "$schema": "http://json-schema.org/draft/2019-09/schema#", + "id": "compose_spec.json", + "type": "object", + "title": "Compose Specification", + "description": "The Compose file is a YAML file defining a multi-containers based application.", + + "properties": { + "version": { + "type": "string", + "description": "declared for backward compatibility, ignored." + }, + + "name": { + "type": "string", + "description": "define the Compose project name, until user defines one explicitly." + }, + + "services": { + "id": "#/properties/services", + "type": "object", + "patternProperties": { + "^[a-zA-Z0-9._-]+$": { + "$ref": "#/definitions/service" + } + }, + "additionalProperties": false + }, + + "networks": { + "id": "#/properties/networks", + "type": "object", + "patternProperties": { + "^[a-zA-Z0-9._-]+$": { + "$ref": "#/definitions/network" + } + } + }, + + "volumes": { + "id": "#/properties/volumes", + "type": "object", + "patternProperties": { + "^[a-zA-Z0-9._-]+$": { + "$ref": "#/definitions/volume" + } + }, + "additionalProperties": false + }, + + "secrets": { + "id": "#/properties/secrets", + "type": "object", + "patternProperties": { + "^[a-zA-Z0-9._-]+$": { + "$ref": "#/definitions/secret" + } + }, + "additionalProperties": false + }, + + "configs": { + "id": "#/properties/configs", + "type": "object", + "patternProperties": { + "^[a-zA-Z0-9._-]+$": { + "$ref": "#/definitions/config" + } + }, + "additionalProperties": false + } + }, + + "patternProperties": {"^x-": {}}, + "additionalProperties": false, + + "definitions": { + + 
"service": { + "id": "#/definitions/service", + "type": "object", + + "properties": { + "deploy": {"$ref": "#/definitions/deployment"}, + "build": { + "oneOf": [ + {"type": "string"}, + { + "type": "object", + "properties": { + "context": {"type": "string"}, + "dockerfile": {"type": "string"}, + "args": {"$ref": "#/definitions/list_or_dict"}, + "ssh": {"$ref": "#/definitions/list_or_dict"}, + "labels": {"$ref": "#/definitions/list_or_dict"}, + "cache_from": {"type": "array", "items": {"type": "string"}}, + "cache_to": {"type": "array", "items": {"type": "string"}}, + "no_cache": {"type": "boolean"}, + "network": {"type": "string"}, + "pull": {"type": "boolean"}, + "target": {"type": "string"}, + "shm_size": {"type": ["integer", "string"]}, + "extra_hosts": {"$ref": "#/definitions/list_or_dict"}, + "isolation": {"type": "string"} + }, + "additionalProperties": false, + "patternProperties": {"^x-": {}} + } + ] + }, + "blkio_config": { + "type": "object", + "properties": { + "device_read_bps": { + "type": "array", + "items": {"$ref": "#/definitions/blkio_limit"} + }, + "device_read_iops": { + "type": "array", + "items": {"$ref": "#/definitions/blkio_limit"} + }, + "device_write_bps": { + "type": "array", + "items": {"$ref": "#/definitions/blkio_limit"} + }, + "device_write_iops": { + "type": "array", + "items": {"$ref": "#/definitions/blkio_limit"} + }, + "weight": {"type": "integer"}, + "weight_device": { + "type": "array", + "items": {"$ref": "#/definitions/blkio_weight"} + } + }, + "additionalProperties": false + }, + "cap_add": {"type": "array", "items": {"type": "string"}, "uniqueItems": true}, + "cap_drop": {"type": "array", "items": {"type": "string"}, "uniqueItems": true}, + "cgroup_parent": {"type": "string"}, + "command": { + "oneOf": [ + {"type": "string"}, + {"type": "array", "items": {"type": "string"}} + ] + }, + "configs": { + "type": "array", + "items": { + "oneOf": [ + {"type": "string"}, + { + "type": "object", + "properties": { + "source": {"type": 
"string"}, + "target": {"type": "string"}, + "uid": {"type": "string"}, + "gid": {"type": "string"}, + "mode": {"type": "number"} + }, + "additionalProperties": false, + "patternProperties": {"^x-": {}} + } + ] + } + }, + "container_name": {"type": "string"}, + "cpu_count": {"type": "integer", "minimum": 0}, + "cpu_percent": {"type": "integer", "minimum": 0, "maximum": 100}, + "cpu_shares": {"type": ["number", "string"]}, + "cpu_quota": {"type": ["number", "string"]}, + "cpu_period": {"type": ["number", "string"]}, + "cpu_rt_period": {"type": ["number", "string"]}, + "cpu_rt_runtime": {"type": ["number", "string"]}, + "cpus": {"type": ["number", "string"]}, + "cpuset": {"type": "string"}, + "credential_spec": { + "type": "object", + "properties": { + "config": {"type": "string"}, + "file": {"type": "string"}, + "registry": {"type": "string"} + }, + "additionalProperties": false, + "patternProperties": {"^x-": {}} + }, + "depends_on": { + "oneOf": [ + {"$ref": "#/definitions/list_of_strings"}, + { + "type": "object", + "additionalProperties": false, + "patternProperties": { + "^[a-zA-Z0-9._-]+$": { + "type": "object", + "additionalProperties": false, + "properties": { + "condition": { + "type": "string", + "enum": ["service_started", "service_healthy", "service_completed_successfully"] + } + }, + "required": ["condition"] + } + } + } + ] + }, + "device_cgroup_rules": {"$ref": "#/definitions/list_of_strings"}, + "devices": {"type": "array", "items": {"type": "string"}, "uniqueItems": true}, + "dns": {"$ref": "#/definitions/string_or_list"}, + "dns_opt": {"type": "array","items": {"type": "string"}, "uniqueItems": true}, + "dns_search": {"$ref": "#/definitions/string_or_list"}, + "domainname": {"type": "string"}, + "entrypoint": { + "oneOf": [ + {"type": "string"}, + {"type": "array", "items": {"type": "string"}} + ] + }, + "env_file": {"$ref": "#/definitions/string_or_list"}, + "environment": {"$ref": "#/definitions/list_or_dict"}, + + "expose": { + "type": "array", 
+ "items": { + "type": ["string", "number"], + "format": "expose" + }, + "uniqueItems": true + }, + "extends": { + "oneOf": [ + {"type": "string"}, + { + "type": "object", + + "properties": { + "service": {"type": "string"}, + "file": {"type": "string"} + }, + "required": ["service"], + "additionalProperties": false + } + ] + }, + "external_links": {"type": "array", "items": {"type": "string"}, "uniqueItems": true}, + "extra_hosts": {"$ref": "#/definitions/list_or_dict"}, + "group_add": { + "type": "array", + "items": { + "type": ["string", "number"] + }, + "uniqueItems": true + }, + "healthcheck": {"$ref": "#/definitions/healthcheck"}, + "hostname": {"type": "string"}, + "image": {"type": "string"}, + "init": {"type": "boolean"}, + "ipc": {"type": "string"}, + "isolation": {"type": "string"}, + "labels": {"$ref": "#/definitions/list_or_dict"}, + "links": {"type": "array", "items": {"type": "string"}, "uniqueItems": true}, + "logging": { + "type": "object", + + "properties": { + "driver": {"type": "string"}, + "options": { + "type": "object", + "patternProperties": { + "^.+$": {"type": ["string", "number", "null"]} + } + } + }, + "additionalProperties": false, + "patternProperties": {"^x-": {}} + }, + "mac_address": {"type": "string"}, + "mem_limit": {"type": ["number", "string"]}, + "mem_reservation": {"type": ["string", "integer"]}, + "mem_swappiness": {"type": "integer"}, + "memswap_limit": {"type": ["number", "string"]}, + "network_mode": {"type": "string"}, + "networks": { + "oneOf": [ + {"$ref": "#/definitions/list_of_strings"}, + { + "type": "object", + "patternProperties": { + "^[a-zA-Z0-9._-]+$": { + "oneOf": [ + { + "type": "object", + "properties": { + "aliases": {"$ref": "#/definitions/list_of_strings"}, + "ipv4_address": {"type": "string"}, + "ipv6_address": {"type": "string"}, + "link_local_ips": {"$ref": "#/definitions/list_of_strings"}, + "priority": {"type": "number"} + }, + "additionalProperties": false, + "patternProperties": {"^x-": {}} + }, + 
{"type": "null"} + ] + } + }, + "additionalProperties": false + } + ] + }, + "oom_kill_disable": {"type": "boolean"}, + "oom_score_adj": {"type": "integer", "minimum": -1000, "maximum": 1000}, + "pid": {"type": ["string", "null"]}, + "pids_limit": {"type": ["number", "string"]}, + "platform": {"type": "string"}, + "ports": { + "type": "array", + "items": { + "oneOf": [ + {"type": "number", "format": "ports"}, + {"type": "string", "format": "ports"}, + { + "type": "object", + "properties": { + "mode": {"type": "string"}, + "host_ip": {"type": "string"}, + "target": {"type": "integer"}, + "published": {"type": ["string", "integer"]}, + "protocol": {"type": "string"} + }, + "additionalProperties": false, + "patternProperties": {"^x-": {}} + } + ] + }, + "uniqueItems": true + }, + "privileged": {"type": "boolean"}, + "profiles": {"$ref": "#/definitions/list_of_strings"}, + "pull_policy": {"type": "string", "enum": [ + "always", "never", "if_not_present", "build", "missing" + ]}, + "read_only": {"type": "boolean"}, + "restart": {"type": "string"}, + "runtime": { + "type": "string" + }, + "scale": { + "type": "integer" + }, + "security_opt": {"type": "array", "items": {"type": "string"}, "uniqueItems": true}, + "shm_size": {"type": ["number", "string"]}, + "secrets": { + "type": "array", + "items": { + "oneOf": [ + {"type": "string"}, + { + "type": "object", + "properties": { + "source": {"type": "string"}, + "target": {"type": "string"}, + "uid": {"type": "string"}, + "gid": {"type": "string"}, + "mode": {"type": "number"} + }, + "additionalProperties": false, + "patternProperties": {"^x-": {}} + } + ] + } + }, + "sysctls": {"$ref": "#/definitions/list_or_dict"}, + "stdin_open": {"type": "boolean"}, + "stop_grace_period": {"type": "string", "format": "duration"}, + "stop_signal": {"type": "string"}, + "storage_opt": {"type": "object"}, + "tmpfs": {"$ref": "#/definitions/string_or_list"}, + "tty": {"type": "boolean"}, + "ulimits": { + "type": "object", + 
"patternProperties": { + "^[a-z]+$": { + "oneOf": [ + {"type": "integer"}, + { + "type": "object", + "properties": { + "hard": {"type": "integer"}, + "soft": {"type": "integer"} + }, + "required": ["soft", "hard"], + "additionalProperties": false, + "patternProperties": {"^x-": {}} + } + ] + } + } + }, + "user": {"type": "string"}, + "userns_mode": {"type": "string"}, + "volumes": { + "type": "array", + "items": { + "oneOf": [ + {"type": "string"}, + { + "type": "object", + "required": ["type"], + "properties": { + "type": {"type": "string"}, + "source": {"type": "string"}, + "target": {"type": "string"}, + "read_only": {"type": "boolean"}, + "consistency": {"type": "string"}, + "bind": { + "type": "object", + "properties": { + "propagation": {"type": "string"}, + "create_host_path": {"type": "boolean"}, + "selinux": {"type": "string", "enum": ["z", "Z"]} + }, + "additionalProperties": false, + "patternProperties": {"^x-": {}} + }, + "volume": { + "type": "object", + "properties": { + "nocopy": {"type": "boolean"} + }, + "additionalProperties": false, + "patternProperties": {"^x-": {}} + }, + "tmpfs": { + "type": "object", + "properties": { + "size": { + "oneOf": [ + {"type": "integer", "minimum": 0}, + {"type": "string"} + ] + } + }, + "additionalProperties": false, + "patternProperties": {"^x-": {}} + } + }, + "additionalProperties": false, + "patternProperties": {"^x-": {}} + } + ] + }, + "uniqueItems": true + }, + "volumes_from": { + "type": "array", + "items": {"type": "string"}, + "uniqueItems": true + }, + "working_dir": {"type": "string"} + }, + "patternProperties": {"^x-": {}}, + "additionalProperties": false + }, + + "healthcheck": { + "id": "#/definitions/healthcheck", + "type": "object", + "properties": { + "disable": {"type": "boolean"}, + "interval": {"type": "string", "format": "duration"}, + "retries": {"type": "number"}, + "test": { + "oneOf": [ + {"type": "string"}, + {"type": "array", "items": {"type": "string"}} + ] + }, + "timeout": {"type": 
"string", "format": "duration"}, + "start_period": {"type": "string", "format": "duration"} + }, + "additionalProperties": false, + "patternProperties": {"^x-": {}} + }, + "deployment": { + "id": "#/definitions/deployment", + "type": ["object", "null"], + "properties": { + "mode": {"type": "string"}, + "endpoint_mode": {"type": "string"}, + "replicas": {"type": "integer"}, + "labels": {"$ref": "#/definitions/list_or_dict"}, + "rollback_config": { + "type": "object", + "properties": { + "parallelism": {"type": "integer"}, + "delay": {"type": "string", "format": "duration"}, + "failure_action": {"type": "string"}, + "monitor": {"type": "string", "format": "duration"}, + "max_failure_ratio": {"type": "number"}, + "order": {"type": "string", "enum": [ + "start-first", "stop-first" + ]} + }, + "additionalProperties": false, + "patternProperties": {"^x-": {}} + }, + "update_config": { + "type": "object", + "properties": { + "parallelism": {"type": "integer"}, + "delay": {"type": "string", "format": "duration"}, + "failure_action": {"type": "string"}, + "monitor": {"type": "string", "format": "duration"}, + "max_failure_ratio": {"type": "number"}, + "order": {"type": "string", "enum": [ + "start-first", "stop-first" + ]} + }, + "additionalProperties": false, + "patternProperties": {"^x-": {}} + }, + "resources": { + "type": "object", + "properties": { + "limits": { + "type": "object", + "properties": { + "cpus": {"type": ["number", "string"]}, + "memory": {"type": "string"}, + "pids": {"type": "integer"} + }, + "additionalProperties": false, + "patternProperties": {"^x-": {}} + }, + "reservations": { + "type": "object", + "properties": { + "cpus": {"type": ["number", "string"]}, + "memory": {"type": "string"}, + "generic_resources": {"$ref": "#/definitions/generic_resources"}, + "devices": {"$ref": "#/definitions/devices"} + }, + "additionalProperties": false, + "patternProperties": {"^x-": {}} + } + }, + "additionalProperties": false, + "patternProperties": {"^x-": {}} + 
}, + "restart_policy": { + "type": "object", + "properties": { + "condition": {"type": "string"}, + "delay": {"type": "string", "format": "duration"}, + "max_attempts": {"type": "integer"}, + "window": {"type": "string", "format": "duration"} + }, + "additionalProperties": false, + "patternProperties": {"^x-": {}} + }, + "placement": { + "type": "object", + "properties": { + "constraints": {"type": "array", "items": {"type": "string"}}, + "preferences": { + "type": "array", + "items": { + "type": "object", + "properties": { + "spread": {"type": "string"} + }, + "additionalProperties": false, + "patternProperties": {"^x-": {}} + } + }, + "max_replicas_per_node": {"type": "integer"} + }, + "additionalProperties": false, + "patternProperties": {"^x-": {}} + } + }, + "additionalProperties": false, + "patternProperties": {"^x-": {}} + }, + + "generic_resources": { + "id": "#/definitions/generic_resources", + "type": "array", + "items": { + "type": "object", + "properties": { + "discrete_resource_spec": { + "type": "object", + "properties": { + "kind": {"type": "string"}, + "value": {"type": "number"} + }, + "additionalProperties": false, + "patternProperties": {"^x-": {}} + } + }, + "additionalProperties": false, + "patternProperties": {"^x-": {}} + } + }, + + "devices": { + "id": "#/definitions/devices", + "type": "array", + "items": { + "type": "object", + "properties": { + "capabilities": {"$ref": "#/definitions/list_of_strings"}, + "count": {"type": ["string", "integer"]}, + "device_ids": {"$ref": "#/definitions/list_of_strings"}, + "driver":{"type": "string"}, + "options":{"$ref": "#/definitions/list_or_dict"} + }, + "additionalProperties": false, + "patternProperties": {"^x-": {}} + } + }, + + "network": { + "id": "#/definitions/network", + "type": ["object", "null"], + "properties": { + "name": {"type": "string"}, + "driver": {"type": "string"}, + "driver_opts": { + "type": "object", + "patternProperties": { + "^.+$": {"type": ["string", "number"]} + } + }, + 
"ipam": { + "type": "object", + "properties": { + "driver": {"type": "string"}, + "config": { + "type": "array", + "items": { + "type": "object", + "properties": { + "subnet": {"type": "string", "format": "subnet_ip_address"}, + "ip_range": {"type": "string"}, + "gateway": {"type": "string"}, + "aux_addresses": { + "type": "object", + "additionalProperties": false, + "patternProperties": {"^.+$": {"type": "string"}} + } + }, + "additionalProperties": false, + "patternProperties": {"^x-": {}} + } + }, + "options": { + "type": "object", + "additionalProperties": false, + "patternProperties": {"^.+$": {"type": "string"}} + } + }, + "additionalProperties": false, + "patternProperties": {"^x-": {}} + }, + "external": { + "type": ["boolean", "object"], + "properties": { + "name": { + "deprecated": true, + "type": "string" + } + }, + "additionalProperties": false, + "patternProperties": {"^x-": {}} + }, + "internal": {"type": "boolean"}, + "enable_ipv6": {"type": "boolean"}, + "attachable": {"type": "boolean"}, + "labels": {"$ref": "#/definitions/list_or_dict"} + }, + "additionalProperties": false, + "patternProperties": {"^x-": {}} + }, + + "volume": { + "id": "#/definitions/volume", + "type": ["object", "null"], + "properties": { + "name": {"type": "string"}, + "driver": {"type": "string"}, + "driver_opts": { + "type": "object", + "patternProperties": { + "^.+$": {"type": ["string", "number"]} + } + }, + "external": { + "type": ["boolean", "object"], + "properties": { + "name": { + "deprecated": true, + "type": "string" + } + }, + "additionalProperties": false, + "patternProperties": {"^x-": {}} + }, + "labels": {"$ref": "#/definitions/list_or_dict"} + }, + "additionalProperties": false, + "patternProperties": {"^x-": {}} + }, + + "secret": { + "id": "#/definitions/secret", + "type": "object", + "properties": { + "name": {"type": "string"}, + "file": {"type": "string"}, + "external": { + "type": ["boolean", "object"], + "properties": { + "name": {"type": "string"} + } + 
}, + "labels": {"$ref": "#/definitions/list_or_dict"}, + "driver": {"type": "string"}, + "driver_opts": { + "type": "object", + "patternProperties": { + "^.+$": {"type": ["string", "number"]} + } + }, + "template_driver": {"type": "string"} + }, + "additionalProperties": false, + "patternProperties": {"^x-": {}} + }, + + "config": { + "id": "#/definitions/config", + "type": "object", + "properties": { + "name": {"type": "string"}, + "file": {"type": "string"}, + "external": { + "type": ["boolean", "object"], + "properties": { + "name": { + "deprecated": true, + "type": "string" + } + } + }, + "labels": {"$ref": "#/definitions/list_or_dict"}, + "template_driver": {"type": "string"} + }, + "additionalProperties": false, + "patternProperties": {"^x-": {}} + }, + + "string_or_list": { + "oneOf": [ + {"type": "string"}, + {"$ref": "#/definitions/list_of_strings"} + ] + }, + + "list_of_strings": { + "type": "array", + "items": {"type": "string"}, + "uniqueItems": true + }, + + "list_or_dict": { + "oneOf": [ + { + "type": "object", + "patternProperties": { + ".+": { + "type": ["string", "number", "boolean", "null"] + } + }, + "additionalProperties": false + }, + {"type": "array", "items": {"type": "string"}, "uniqueItems": true} + ] + }, + + "blkio_limit": { + "type": "object", + "properties": { + "path": {"type": "string"}, + "rate": {"type": ["integer", "string"]} + }, + "additionalProperties": false + }, + "blkio_weight": { + "type": "object", + "properties": { + "path": {"type": "string"}, + "weight": {"type": "integer"} + }, + "additionalProperties": false + }, + + "constraints": { + "service": { + "id": "#/definitions/constraints/service", + "anyOf": [ + {"required": ["build"]}, + {"required": ["image"]} + ], + "properties": { + "build": { + "required": ["context"] + } + } + } + } + } +} diff --git a/vendor/github.com/compose-spec/compose-go/schema/schema.go b/vendor/github.com/compose-spec/compose-go/schema/schema.go new file mode 100644 index 
0000000000..af3cb0a3be --- /dev/null +++ b/vendor/github.com/compose-spec/compose-go/schema/schema.go @@ -0,0 +1,164 @@ +/* + Copyright 2020 The Compose Specification Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package schema + +import ( + "fmt" + "strings" + "time" + + "github.com/xeipuuv/gojsonschema" + + // Enable support for embedded static resources + _ "embed" +) + +type portsFormatChecker struct{} + +func (checker portsFormatChecker) IsFormat(input interface{}) bool { + // TODO: implement this + return true +} + +type durationFormatChecker struct{} + +func (checker durationFormatChecker) IsFormat(input interface{}) bool { + value, ok := input.(string) + if !ok { + return false + } + _, err := time.ParseDuration(value) + return err == nil +} + +func init() { + gojsonschema.FormatCheckers.Add("expose", portsFormatChecker{}) + gojsonschema.FormatCheckers.Add("ports", portsFormatChecker{}) + gojsonschema.FormatCheckers.Add("duration", durationFormatChecker{}) +} + +// Schema is the compose-spec JSON schema +//go:embed compose-spec.json +var Schema string + +// Validate uses the jsonschema to validate the configuration +func Validate(config map[string]interface{}) error { + schemaLoader := gojsonschema.NewStringLoader(Schema) + dataLoader := gojsonschema.NewGoLoader(config) + + result, err := gojsonschema.Validate(schemaLoader, dataLoader) + if err != nil { + return err + } + + if !result.Valid() { + return toError(result) + } + + return nil +} + +func 
toError(result *gojsonschema.Result) error { + err := getMostSpecificError(result.Errors()) + return err +} + +const ( + jsonschemaOneOf = "number_one_of" + jsonschemaAnyOf = "number_any_of" +) + +func getDescription(err validationError) string { + switch err.parent.Type() { + case "invalid_type": + if expectedType, ok := err.parent.Details()["expected"].(string); ok { + return fmt.Sprintf("must be a %s", humanReadableType(expectedType)) + } + case jsonschemaOneOf, jsonschemaAnyOf: + if err.child == nil { + return err.parent.Description() + } + return err.child.Description() + } + return err.parent.Description() +} + +func humanReadableType(definition string) string { + if definition[0:1] == "[" { + allTypes := strings.Split(definition[1:len(definition)-1], ",") + for i, t := range allTypes { + allTypes[i] = humanReadableType(t) + } + return fmt.Sprintf( + "%s or %s", + strings.Join(allTypes[0:len(allTypes)-1], ", "), + allTypes[len(allTypes)-1], + ) + } + if definition == "object" { + return "mapping" + } + if definition == "array" { + return "list" + } + return definition +} + +type validationError struct { + parent gojsonschema.ResultError + child gojsonschema.ResultError +} + +func (err validationError) Error() string { + description := getDescription(err) + return fmt.Sprintf("%s %s", err.parent.Field(), description) +} + +func getMostSpecificError(errors []gojsonschema.ResultError) validationError { + mostSpecificError := 0 + for i, err := range errors { + if specificity(err) > specificity(errors[mostSpecificError]) { + mostSpecificError = i + continue + } + + if specificity(err) == specificity(errors[mostSpecificError]) { + // Invalid type errors win in a tie-breaker for most specific field name + if err.Type() == "invalid_type" && errors[mostSpecificError].Type() != "invalid_type" { + mostSpecificError = i + } + } + } + + if mostSpecificError+1 == len(errors) { + return validationError{parent: errors[mostSpecificError]} + } + + switch 
errors[mostSpecificError].Type() { + case "number_one_of", "number_any_of": + return validationError{ + parent: errors[mostSpecificError], + child: errors[mostSpecificError+1], + } + default: + return validationError{parent: errors[mostSpecificError]} + } +} + +func specificity(err gojsonschema.ResultError) int { + return len(strings.Split(err.Field(), ".")) +} diff --git a/vendor/github.com/compose-spec/compose-go/template/template.go b/vendor/github.com/compose-spec/compose-go/template/template.go new file mode 100644 index 0000000000..22e4e95ada --- /dev/null +++ b/vendor/github.com/compose-spec/compose-go/template/template.go @@ -0,0 +1,334 @@ +/* + Copyright 2020 The Compose Specification Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package template + +import ( + "fmt" + "regexp" + "strings" + + "github.com/sirupsen/logrus" +) + +var delimiter = "\\$" +var substitutionNamed = "[_a-z][_a-z0-9]*" + +var substitutionBraced = "[_a-z][_a-z0-9]*(?::?[-?](.*}|[^}]*))?" 
+ +var patternString = fmt.Sprintf( + "%s(?i:(?P%s)|(?P%s)|{(?P%s)}|(?P))", + delimiter, delimiter, substitutionNamed, substitutionBraced, +) + +var defaultPattern = regexp.MustCompile(patternString) + +// InvalidTemplateError is returned when a variable template is not in a valid +// format +type InvalidTemplateError struct { + Template string +} + +func (e InvalidTemplateError) Error() string { + return fmt.Sprintf("Invalid template: %#v", e.Template) +} + +// Mapping is a user-supplied function which maps from variable names to values. +// Returns the value as a string and a bool indicating whether +// the value is present, to distinguish between an empty string +// and the absence of a value. +type Mapping func(string) (string, bool) + +// SubstituteFunc is a user-supplied function that apply substitution. +// Returns the value as a string, a bool indicating if the function could apply +// the substitution and an error. +type SubstituteFunc func(string, Mapping) (string, bool, error) + +// SubstituteWith substitute variables in the string with their values. +// It accepts additional substitute function. 
+func SubstituteWith(template string, mapping Mapping, pattern *regexp.Regexp, subsFuncs ...SubstituteFunc) (string, error) { + if len(subsFuncs) == 0 { + subsFuncs = getDefaultSortedSubstitutionFunctions(template) + } + var err error + result := pattern.ReplaceAllStringFunc(template, func(substring string) string { + closingBraceIndex := getFirstBraceClosingIndex(substring) + rest := "" + if closingBraceIndex > -1 { + rest = substring[closingBraceIndex+1:] + substring = substring[0 : closingBraceIndex+1] + } + + matches := pattern.FindStringSubmatch(substring) + groups := matchGroups(matches, pattern) + if escaped := groups["escaped"]; escaped != "" { + return escaped + } + + braced := false + substitution := groups["named"] + if substitution == "" { + substitution = groups["braced"] + braced = true + } + + if substitution == "" { + err = &InvalidTemplateError{Template: template} + return "" + } + + if braced { + for _, f := range subsFuncs { + var ( + value string + applied bool + ) + value, applied, err = f(substitution, mapping) + if err != nil { + return "" + } + if !applied { + continue + } + interpolatedNested, err := SubstituteWith(rest, mapping, pattern, subsFuncs...) + if err != nil { + return "" + } + return value + interpolatedNested + } + } + + value, ok := mapping(substitution) + if !ok { + logrus.Warnf("The %q variable is not set. 
Defaulting to a blank string.", substitution) + } + return value + }) + + return result, err +} + +func getDefaultSortedSubstitutionFunctions(template string, fns ...SubstituteFunc) []SubstituteFunc { + hyphenIndex := strings.IndexByte(template, '-') + questionIndex := strings.IndexByte(template, '?') + if hyphenIndex < 0 || hyphenIndex > questionIndex { + return []SubstituteFunc{ + requiredNonEmpty, + required, + softDefault, + hardDefault, + } + } + return []SubstituteFunc{ + softDefault, + hardDefault, + requiredNonEmpty, + required, + } +} + +func getFirstBraceClosingIndex(s string) int { + openVariableBraces := 0 + for i := 0; i < len(s); i++ { + if s[i] == '}' { + openVariableBraces-- + if openVariableBraces == 0 { + return i + } + } + if strings.HasPrefix(s[i:], "${") { + openVariableBraces++ + i++ + } + } + return -1 +} + +// Substitute variables in the string with their values +func Substitute(template string, mapping Mapping) (string, error) { + return SubstituteWith(template, mapping, defaultPattern) +} + +// ExtractVariables returns a map of all the variables defined in the specified +// composefile (dict representation) and their default value if any. 
+func ExtractVariables(configDict map[string]interface{}, pattern *regexp.Regexp) map[string]Variable { + if pattern == nil { + pattern = defaultPattern + } + return recurseExtract(configDict, pattern) +} + +func recurseExtract(value interface{}, pattern *regexp.Regexp) map[string]Variable { + m := map[string]Variable{} + + switch value := value.(type) { + case string: + if values, is := extractVariable(value, pattern); is { + for _, v := range values { + m[v.Name] = v + } + } + case map[string]interface{}: + for _, elem := range value { + submap := recurseExtract(elem, pattern) + for key, value := range submap { + m[key] = value + } + } + + case []interface{}: + for _, elem := range value { + if values, is := extractVariable(elem, pattern); is { + for _, v := range values { + m[v.Name] = v + } + } + } + } + + return m +} + +type Variable struct { + Name string + DefaultValue string + Required bool +} + +func extractVariable(value interface{}, pattern *regexp.Regexp) ([]Variable, bool) { + sValue, ok := value.(string) + if !ok { + return []Variable{}, false + } + matches := pattern.FindAllStringSubmatch(sValue, -1) + if len(matches) == 0 { + return []Variable{}, false + } + values := []Variable{} + for _, match := range matches { + groups := matchGroups(match, pattern) + if escaped := groups["escaped"]; escaped != "" { + continue + } + val := groups["named"] + if val == "" { + val = groups["braced"] + } + name := val + var defaultValue string + var required bool + switch { + case strings.Contains(val, ":?"): + name, _ = partition(val, ":?") + required = true + case strings.Contains(val, "?"): + name, _ = partition(val, "?") + required = true + case strings.Contains(val, ":-"): + name, defaultValue = partition(val, ":-") + case strings.Contains(val, "-"): + name, defaultValue = partition(val, "-") + } + values = append(values, Variable{ + Name: name, + DefaultValue: defaultValue, + Required: required, + }) + } + return values, len(values) > 0 +} + +// Soft default 
(fall back if unset or empty) +func softDefault(substitution string, mapping Mapping) (string, bool, error) { + sep := ":-" + if !strings.Contains(substitution, sep) { + return "", false, nil + } + name, defaultValue := partition(substitution, sep) + defaultValue, err := Substitute(defaultValue, mapping) + if err != nil { + return "", false, err + } + value, ok := mapping(name) + if !ok || value == "" { + return defaultValue, true, nil + } + return value, true, nil +} + +// Hard default (fall back if-and-only-if empty) +func hardDefault(substitution string, mapping Mapping) (string, bool, error) { + sep := "-" + if !strings.Contains(substitution, sep) { + return "", false, nil + } + name, defaultValue := partition(substitution, sep) + defaultValue, err := Substitute(defaultValue, mapping) + if err != nil { + return "", false, err + } + value, ok := mapping(name) + if !ok { + return defaultValue, true, nil + } + return value, true, nil +} + +func requiredNonEmpty(substitution string, mapping Mapping) (string, bool, error) { + return withRequired(substitution, mapping, ":?", func(v string) bool { return v != "" }) +} + +func required(substitution string, mapping Mapping) (string, bool, error) { + return withRequired(substitution, mapping, "?", func(_ string) bool { return true }) +} + +func withRequired(substitution string, mapping Mapping, sep string, valid func(string) bool) (string, bool, error) { + if !strings.Contains(substitution, sep) { + return "", false, nil + } + name, errorMessage := partition(substitution, sep) + errorMessage, err := Substitute(errorMessage, mapping) + if err != nil { + return "", false, err + } + value, ok := mapping(name) + if !ok || !valid(value) { + return "", true, &InvalidTemplateError{ + Template: fmt.Sprintf("required variable %s is missing a value: %s", name, errorMessage), + } + } + return value, true, nil +} + +func matchGroups(matches []string, pattern *regexp.Regexp) map[string]string { + groups := make(map[string]string) + 
for i, name := range pattern.SubexpNames()[1:] { + groups[name] = matches[i+1] + } + return groups +} + +// Split the string at the first occurrence of sep, and return the part before the separator, +// and the part after the separator. +// +// If the separator is not found, return the string itself, followed by an empty string. +func partition(s, sep string) (string, string) { + if strings.Contains(s, sep) { + parts := strings.SplitN(s, sep, 2) + return parts[0], parts[1] + } + return s, "" +} diff --git a/vendor/github.com/compose-spec/compose-go/types/config.go b/vendor/github.com/compose-spec/compose-go/types/config.go new file mode 100644 index 0000000000..b395363bd3 --- /dev/null +++ b/vendor/github.com/compose-spec/compose-go/types/config.go @@ -0,0 +1,106 @@ +/* + Copyright 2020 The Compose Specification Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+*/ + +package types + +import ( + "encoding/json" + + "github.com/mitchellh/mapstructure" +) + +// ConfigDetails are the details about a group of ConfigFiles +type ConfigDetails struct { + Version string + WorkingDir string + ConfigFiles []ConfigFile + Environment map[string]string +} + +// LookupEnv provides a lookup function for environment variables +func (cd ConfigDetails) LookupEnv(key string) (string, bool) { + v, ok := cd.Environment[key] + return v, ok +} + +// ConfigFile is a filename and the contents of the file as a Dict +type ConfigFile struct { + // Filename is the name of the yaml configuration file + Filename string + // Content is the raw yaml content. Will be loaded from Filename if not set + Content []byte + // Config if the yaml tree for this config file. Will be parsed from Content if not set + Config map[string]interface{} +} + +// Config is a full compose file configuration and model +type Config struct { + Filename string `yaml:"-" json:"-"` + Name string `yaml:",omitempty" json:"name,omitempty"` + Services Services `json:"services"` + Networks Networks `yaml:",omitempty" json:"networks,omitempty"` + Volumes Volumes `yaml:",omitempty" json:"volumes,omitempty"` + Secrets Secrets `yaml:",omitempty" json:"secrets,omitempty"` + Configs Configs `yaml:",omitempty" json:"configs,omitempty"` + Extensions Extensions `yaml:",inline" json:"-"` +} + +// Volumes is a map of VolumeConfig +type Volumes map[string]VolumeConfig + +// Networks is a map of NetworkConfig +type Networks map[string]NetworkConfig + +// Secrets is a map of SecretConfig +type Secrets map[string]SecretConfig + +// Configs is a map of ConfigObjConfig +type Configs map[string]ConfigObjConfig + +// Extensions is a map of custom extension +type Extensions map[string]interface{} + +// MarshalJSON makes Config implement json.Marshaler +func (c Config) MarshalJSON() ([]byte, error) { + m := map[string]interface{}{ + "services": c.Services, + } + + if len(c.Networks) > 0 { + m["networks"] = 
c.Networks + } + if len(c.Volumes) > 0 { + m["volumes"] = c.Volumes + } + if len(c.Secrets) > 0 { + m["secrets"] = c.Secrets + } + if len(c.Configs) > 0 { + m["configs"] = c.Configs + } + for k, v := range c.Extensions { + m[k] = v + } + return json.Marshal(m) +} + +func (e Extensions) Get(name string, target interface{}) (bool, error) { + if v, ok := e[name]; ok { + err := mapstructure.Decode(v, target) + return true, err + } + return false, nil +} diff --git a/vendor/github.com/compose-spec/compose-go/types/project.go b/vendor/github.com/compose-spec/compose-go/types/project.go new file mode 100644 index 0000000000..dc208ed587 --- /dev/null +++ b/vendor/github.com/compose-spec/compose-go/types/project.go @@ -0,0 +1,342 @@ +/* + Copyright 2020 The Compose Specification Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+*/ + +package types + +import ( + "fmt" + "os" + "path/filepath" + "sort" + + "github.com/distribution/distribution/v3/reference" + "github.com/opencontainers/go-digest" + "golang.org/x/sync/errgroup" +) + +// Project is the result of loading a set of compose files +type Project struct { + Name string `yaml:"name,omitempty" json:"name,omitempty"` + WorkingDir string `yaml:"-" json:"-"` + Services Services `json:"services"` + Networks Networks `yaml:",omitempty" json:"networks,omitempty"` + Volumes Volumes `yaml:",omitempty" json:"volumes,omitempty"` + Secrets Secrets `yaml:",omitempty" json:"secrets,omitempty"` + Configs Configs `yaml:",omitempty" json:"configs,omitempty"` + Extensions Extensions `yaml:",inline" json:"-"` // https://github.com/golang/go/issues/6213 + ComposeFiles []string `yaml:"-" json:"-"` + Environment map[string]string `yaml:"-" json:"-"` + + // DisabledServices track services which have been disable as profile is not active + DisabledServices Services `yaml:"-" json:"-"` +} + +// ServiceNames return names for all services in this Compose config +func (p Project) ServiceNames() []string { + var names []string + for _, s := range p.Services { + names = append(names, s.Name) + } + sort.Strings(names) + return names +} + +// VolumeNames return names for all volumes in this Compose config +func (p Project) VolumeNames() []string { + var names []string + for k := range p.Volumes { + names = append(names, k) + } + sort.Strings(names) + return names +} + +// NetworkNames return names for all volumes in this Compose config +func (p Project) NetworkNames() []string { + var names []string + for k := range p.Networks { + names = append(names, k) + } + sort.Strings(names) + return names +} + +// SecretNames return names for all secrets in this Compose config +func (p Project) SecretNames() []string { + var names []string + for k := range p.Secrets { + names = append(names, k) + } + sort.Strings(names) + return names +} + +// ConfigNames return names for 
all configs in this Compose config +func (p Project) ConfigNames() []string { + var names []string + for k := range p.Configs { + names = append(names, k) + } + sort.Strings(names) + return names +} + +// GetServices retrieve services by names, or return all services if no name specified +func (p Project) GetServices(names ...string) (Services, error) { + if len(names) == 0 { + return p.Services, nil + } + services := Services{} + for _, name := range names { + var serviceConfig *ServiceConfig + for _, s := range p.Services { + if s.Name == name { + serviceConfig = &s + break + } + } + if serviceConfig == nil { + return services, fmt.Errorf("no such service: %s", name) + } + services = append(services, *serviceConfig) + } + return services, nil +} + +// GetService retrieve a specific service by name +func (p Project) GetService(name string) (ServiceConfig, error) { + services, err := p.GetServices(name) + if err != nil { + return ServiceConfig{}, err + } + if len(services) == 0 { + return ServiceConfig{}, fmt.Errorf("no such service: %s", name) + } + return services[0], nil +} + +func (p Project) AllServices() Services { + var all Services + all = append(all, p.Services...) + all = append(all, p.DisabledServices...) + return all +} + +type ServiceFunc func(service ServiceConfig) error + +// WithServices run ServiceFunc on each service and dependencies in dependency order +func (p Project) WithServices(names []string, fn ServiceFunc) error { + return p.withServices(names, fn, map[string]bool{}) +} + +func (p Project) withServices(names []string, fn ServiceFunc, done map[string]bool) error { + services, err := p.GetServices(names...) 
+ if err != nil { + return err + } + for _, service := range services { + if done[service.Name] { + continue + } + dependencies := service.GetDependencies() + if len(dependencies) > 0 { + err := p.withServices(dependencies, fn, done) + if err != nil { + return err + } + } + if err := fn(service); err != nil { + return err + } + done[service.Name] = true + } + return nil +} + +// RelativePath resolve a relative path based project's working directory +func (p *Project) RelativePath(path string) string { + if path[0] == '~' { + home, _ := os.UserHomeDir() + path = filepath.Join(home, path[1:]) + } + if filepath.IsAbs(path) { + return path + } + return filepath.Join(p.WorkingDir, path) +} + +// HasProfile return true if service has no profile declared or has at least one profile matching +func (s ServiceConfig) HasProfile(profiles []string) bool { + if len(s.Profiles) == 0 { + return true + } + for _, p := range profiles { + for _, sp := range s.Profiles { + if sp == p { + return true + } + } + } + return false +} + +// GetProfiles retrieve the profiles implicitly enabled by explicitly targeting selected services +func (s Services) GetProfiles() []string { + set := map[string]struct{}{} + for _, service := range s { + for _, p := range service.Profiles { + set[p] = struct{}{} + } + } + var profiles []string + for k := range set { + profiles = append(profiles, k) + } + return profiles +} + +// ApplyProfiles disables service which don't match selected profiles +func (p *Project) ApplyProfiles(profiles []string) { + for _, p := range profiles { + if p == "*" { + return + } + } + var enabled, disabled Services + for _, service := range p.Services { + if service.HasProfile(profiles) { + enabled = append(enabled, service) + } else { + disabled = append(disabled, service) + } + } + p.Services = enabled + p.DisabledServices = disabled +} + +// WithoutUnnecessaryResources drops networks/volumes/secrets/configs that are not referenced by active services +func (p *Project) 
WithoutUnnecessaryResources() { + requiredNetworks := map[string]struct{}{} + requiredVolumes := map[string]struct{}{} + requiredSecrets := map[string]struct{}{} + requiredConfigs := map[string]struct{}{} + for _, s := range p.Services { + for k := range s.Networks { + requiredNetworks[k] = struct{}{} + } + for _, v := range s.Volumes { + if v.Type != VolumeTypeVolume || v.Source == "" { + continue + } + requiredVolumes[v.Source] = struct{}{} + } + for _, v := range s.Secrets { + requiredSecrets[v.Source] = struct{}{} + } + for _, v := range s.Configs { + requiredConfigs[v.Source] = struct{}{} + } + } + + networks := Networks{} + for k := range requiredNetworks { + networks[k] = p.Networks[k] + } + p.Networks = networks + + volumes := Volumes{} + for k := range requiredVolumes { + volumes[k] = p.Volumes[k] + } + p.Volumes = volumes + + secrets := Secrets{} + for k := range requiredSecrets { + secrets[k] = p.Secrets[k] + } + p.Secrets = secrets + + configs := Configs{} + for k := range requiredConfigs { + configs[k] = p.Configs[k] + } + p.Configs = configs +} + +// ForServices restrict the project model to a subset of services +func (p *Project) ForServices(names []string) error { + if len(names) == 0 { + // All services + return nil + } + + set := map[string]struct{}{} + err := p.WithServices(names, func(service ServiceConfig) error { + set[service.Name] = struct{}{} + return nil + }) + if err != nil { + return err + } + + // Disable all services which are not explicit target or dependencies + var enabled Services + for _, s := range p.Services { + if _, ok := set[s.Name]; ok { + enabled = append(enabled, s) + } else { + p.DisabledServices = append(p.DisabledServices, s) + } + } + p.Services = enabled + return nil +} + +// ResolveImages updates services images to include digest computed by a resolver function +func (p *Project) ResolveImages(resolver func(named reference.Named) (digest.Digest, error)) error { + eg := errgroup.Group{} + for i, s := range p.Services 
{ + idx := i + service := s + + if service.Image == "" { + continue + } + eg.Go(func() error { + named, err := reference.ParseDockerRef(service.Image) + if err != nil { + return err + } + + if _, ok := named.(reference.Canonical); !ok { + // image is named but not digested reference + digest, err := resolver(named) + if err != nil { + return err + } + named, err = reference.WithDigest(named, digest) + if err != nil { + return err + } + } + + service.Image = named.String() + p.Services[idx] = service + return nil + }) + } + return eg.Wait() +} diff --git a/vendor/github.com/compose-spec/compose-go/types/types.go b/vendor/github.com/compose-spec/compose-go/types/types.go new file mode 100644 index 0000000000..ec4b0bc73f --- /dev/null +++ b/vendor/github.com/compose-spec/compose-go/types/types.go @@ -0,0 +1,896 @@ +/* + Copyright 2020 The Compose Specification Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package types + +import ( + "encoding/json" + "fmt" + "sort" + "strings" + "time" + + "github.com/docker/go-connections/nat" +) + +// Duration is a thin wrapper around time.Duration with improved JSON marshalling +type Duration time.Duration + +func (d Duration) String() string { + return time.Duration(d).String() +} + +// ConvertDurationPtr converts a type defined Duration pointer to a time.Duration pointer with the same value. 
+func ConvertDurationPtr(d *Duration) *time.Duration { + if d == nil { + return nil + } + res := time.Duration(*d) + return &res +} + +// MarshalJSON makes Duration implement json.Marshaler +func (d Duration) MarshalJSON() ([]byte, error) { + return json.Marshal(d.String()) +} + +// MarshalYAML makes Duration implement yaml.Marshaler +func (d Duration) MarshalYAML() (interface{}, error) { + return d.String(), nil +} + +func (d *Duration) UnmarshalJSON(b []byte) error { + s := strings.Trim(string(b), "\"") + timeDuration, err := time.ParseDuration(s) + if err != nil { + return err + } + *d = Duration(timeDuration) + return nil +} + +// Services is a list of ServiceConfig +type Services []ServiceConfig + +// MarshalYAML makes Services implement yaml.Marshaller +func (s Services) MarshalYAML() (interface{}, error) { + services := map[string]ServiceConfig{} + for _, service := range s { + services[service.Name] = service + } + return services, nil +} + +// MarshalJSON makes Services implement json.Marshaler +func (s Services) MarshalJSON() ([]byte, error) { + data, err := s.MarshalYAML() + if err != nil { + return nil, err + } + return json.MarshalIndent(data, "", " ") +} + +// ServiceConfig is the configuration of one service +type ServiceConfig struct { + Name string `yaml:"-" json:"-"` + Profiles []string `mapstructure:"profiles" yaml:"profiles,omitempty" json:"profiles,omitempty"` + + Build *BuildConfig `yaml:",omitempty" json:"build,omitempty"` + BlkioConfig *BlkioConfig `mapstructure:"blkio_config" yaml:",omitempty" json:"blkio_config,omitempty"` + CapAdd []string `mapstructure:"cap_add" yaml:"cap_add,omitempty" json:"cap_add,omitempty"` + CapDrop []string `mapstructure:"cap_drop" yaml:"cap_drop,omitempty" json:"cap_drop,omitempty"` + CgroupParent string `mapstructure:"cgroup_parent" yaml:"cgroup_parent,omitempty" json:"cgroup_parent,omitempty"` + CPUCount int64 `mapstructure:"cpu_count" yaml:"cpu_count,omitempty" json:"cpu_count,omitempty"` + CPUPercent float32 
`mapstructure:"cpu_percent" yaml:"cpu_percent,omitempty" json:"cpu_percent,omitempty"` + CPUPeriod int64 `mapstructure:"cpu_period" yaml:"cpu_period,omitempty" json:"cpu_period,omitempty"` + CPUQuota int64 `mapstructure:"cpu_quota" yaml:"cpu_quota,omitempty" json:"cpu_quota,omitempty"` + CPURTPeriod int64 `mapstructure:"cpu_rt_period" yaml:"cpu_rt_period,omitempty" json:"cpu_rt_period,omitempty"` + CPURTRuntime int64 `mapstructure:"cpu_rt_runtime" yaml:"cpu_rt_runtime,omitempty" json:"cpu_rt_runtime,omitempty"` + CPUS float32 `mapstructure:"cpus" yaml:"cpus,omitempty" json:"cpus,omitempty"` + CPUSet string `mapstructure:"cpuset" yaml:"cpuset,omitempty" json:"cpuset,omitempty"` + CPUShares int64 `mapstructure:"cpu_shares" yaml:"cpu_shares,omitempty" json:"cpu_shares,omitempty"` + Command ShellCommand `yaml:",omitempty" json:"command,omitempty"` + Configs []ServiceConfigObjConfig `yaml:",omitempty" json:"configs,omitempty"` + ContainerName string `mapstructure:"container_name" yaml:"container_name,omitempty" json:"container_name,omitempty"` + CredentialSpec *CredentialSpecConfig `mapstructure:"credential_spec" yaml:"credential_spec,omitempty" json:"credential_spec,omitempty"` + DependsOn DependsOnConfig `mapstructure:"depends_on" yaml:"depends_on,omitempty" json:"depends_on,omitempty"` + Deploy *DeployConfig `yaml:",omitempty" json:"deploy,omitempty"` + DeviceCgroupRules []string `mapstructure:"device_cgroup_rules" yaml:"device_cgroup_rules,omitempty" json:"device_cgroup_rules,omitempty"` + Devices []string `yaml:",omitempty" json:"devices,omitempty"` + DNS StringList `yaml:",omitempty" json:"dns,omitempty"` + DNSOpts []string `mapstructure:"dns_opt" yaml:"dns_opt,omitempty" json:"dns_opt,omitempty"` + DNSSearch StringList `mapstructure:"dns_search" yaml:"dns_search,omitempty" json:"dns_search,omitempty"` + Dockerfile string `yaml:"dockerfile,omitempty" json:"dockerfile,omitempty"` + DomainName string `mapstructure:"domainname" yaml:"domainname,omitempty" 
json:"domainname,omitempty"` + Entrypoint ShellCommand `yaml:",omitempty" json:"entrypoint,omitempty"` + Environment MappingWithEquals `yaml:",omitempty" json:"environment,omitempty"` + EnvFile StringList `mapstructure:"env_file" yaml:"env_file,omitempty" json:"env_file,omitempty"` + Expose StringOrNumberList `yaml:",omitempty" json:"expose,omitempty"` + Extends ExtendsConfig `yaml:"extends,omitempty" json:"extends,omitempty"` + ExternalLinks []string `mapstructure:"external_links" yaml:"external_links,omitempty" json:"external_links,omitempty"` + ExtraHosts HostsList `mapstructure:"extra_hosts" yaml:"extra_hosts,omitempty" json:"extra_hosts,omitempty"` + GroupAdd []string `mapstructure:"group_add" yaml:"group_add,omitempty" json:"group_add,omitempty"` + Hostname string `yaml:",omitempty" json:"hostname,omitempty"` + HealthCheck *HealthCheckConfig `yaml:",omitempty" json:"healthcheck,omitempty"` + Image string `yaml:",omitempty" json:"image,omitempty"` + Init *bool `yaml:",omitempty" json:"init,omitempty"` + Ipc string `yaml:",omitempty" json:"ipc,omitempty"` + Isolation string `mapstructure:"isolation" yaml:"isolation,omitempty" json:"isolation,omitempty"` + Labels Labels `yaml:",omitempty" json:"labels,omitempty"` + CustomLabels Labels `yaml:"-" json:"-"` + Links []string `yaml:",omitempty" json:"links,omitempty"` + Logging *LoggingConfig `yaml:",omitempty" json:"logging,omitempty"` + LogDriver string `mapstructure:"log_driver" yaml:"log_driver,omitempty" json:"log_driver,omitempty"` + LogOpt map[string]string `mapstructure:"log_opt" yaml:"log_opt,omitempty" json:"log_opt,omitempty"` + MemLimit UnitBytes `mapstructure:"mem_limit" yaml:"mem_limit,omitempty" json:"mem_limit,omitempty"` + MemReservation UnitBytes `mapstructure:"mem_reservation" yaml:"mem_reservation,omitempty" json:"mem_reservation,omitempty"` + MemSwapLimit UnitBytes `mapstructure:"memswap_limit" yaml:"memswap_limit,omitempty" json:"memswap_limit,omitempty"` + MemSwappiness UnitBytes 
`mapstructure:"mem_swappiness" yaml:"mem_swappiness,omitempty" json:"mem_swappiness,omitempty"` + MacAddress string `mapstructure:"mac_address" yaml:"mac_address,omitempty" json:"mac_address,omitempty"` + Net string `yaml:"net,omitempty" json:"net,omitempty"` + NetworkMode string `mapstructure:"network_mode" yaml:"network_mode,omitempty" json:"network_mode,omitempty"` + Networks map[string]*ServiceNetworkConfig `yaml:",omitempty" json:"networks,omitempty"` + OomKillDisable bool `mapstructure:"oom_kill_disable" yaml:"oom_kill_disable,omitempty" json:"oom_kill_disable,omitempty"` + OomScoreAdj int64 `mapstructure:"oom_score_adj" yaml:"oom_score_adj,omitempty" json:"oom_score_adj,omitempty"` + Pid string `yaml:",omitempty" json:"pid,omitempty"` + PidsLimit int64 `mapstructure:"pids_limit" yaml:"pids_limit,omitempty" json:"pids_limit,omitempty"` + Platform string `yaml:",omitempty" json:"platform,omitempty"` + Ports []ServicePortConfig `yaml:",omitempty" json:"ports,omitempty"` + Privileged bool `yaml:",omitempty" json:"privileged,omitempty"` + PullPolicy string `mapstructure:"pull_policy" yaml:"pull_policy,omitempty" json:"pull_policy,omitempty"` + ReadOnly bool `mapstructure:"read_only" yaml:"read_only,omitempty" json:"read_only,omitempty"` + Restart string `yaml:",omitempty" json:"restart,omitempty"` + Runtime string `yaml:",omitempty" json:"runtime,omitempty"` + Scale int `yaml:"-" json:"-"` + Secrets []ServiceSecretConfig `yaml:",omitempty" json:"secrets,omitempty"` + SecurityOpt []string `mapstructure:"security_opt" yaml:"security_opt,omitempty" json:"security_opt,omitempty"` + ShmSize UnitBytes `mapstructure:"shm_size" yaml:"shm_size,omitempty" json:"shm_size,omitempty"` + StdinOpen bool `mapstructure:"stdin_open" yaml:"stdin_open,omitempty" json:"stdin_open,omitempty"` + StopGracePeriod *Duration `mapstructure:"stop_grace_period" yaml:"stop_grace_period,omitempty" json:"stop_grace_period,omitempty"` + StopSignal string `mapstructure:"stop_signal" 
yaml:"stop_signal,omitempty" json:"stop_signal,omitempty"` + Sysctls Mapping `yaml:",omitempty" json:"sysctls,omitempty"` + Tmpfs StringList `yaml:",omitempty" json:"tmpfs,omitempty"` + Tty bool `mapstructure:"tty" yaml:"tty,omitempty" json:"tty,omitempty"` + Ulimits map[string]*UlimitsConfig `yaml:",omitempty" json:"ulimits,omitempty"` + User string `yaml:",omitempty" json:"user,omitempty"` + UserNSMode string `mapstructure:"userns_mode" yaml:"userns_mode,omitempty" json:"userns_mode,omitempty"` + Uts string `yaml:"uts,omitempty" json:"uts,omitempty"` + VolumeDriver string `mapstructure:"volume_driver" yaml:"volume_driver,omitempty" json:"volume_driver,omitempty"` + Volumes []ServiceVolumeConfig `yaml:",omitempty" json:"volumes,omitempty"` + VolumesFrom []string `mapstructure:"volumes_from" yaml:"volumes_from,omitempty" json:"volumes_from,omitempty"` + WorkingDir string `mapstructure:"working_dir" yaml:"working_dir,omitempty" json:"working_dir,omitempty"` + + Extensions map[string]interface{} `yaml:",inline" json:"-"` +} + +// NetworksByPriority return the service networks IDs sorted according to Priority +func (s *ServiceConfig) NetworksByPriority() []string { + type key struct { + name string + priority int + } + var keys []key + for k, v := range s.Networks { + priority := 0 + if v != nil { + priority = v.Priority + } + keys = append(keys, key{ + name: k, + priority: priority, + }) + } + sort.Slice(keys, func(i, j int) bool { + return keys[i].priority > keys[j].priority + }) + var sorted []string + for _, k := range keys { + sorted = append(sorted, k.name) + } + return sorted +} + +const ( + //PullPolicyAlways always pull images + PullPolicyAlways = "always" + //PullPolicyNever never pull images + PullPolicyNever = "never" + //PullPolicyIfNotPresent pull missing images + PullPolicyIfNotPresent = "if_not_present" + //PullPolicyMissing pull missing images + PullPolicyMissing = "missing" + //PullPolicyBuild force building images + PullPolicyBuild = "build" +) + 
+const ( + //RestartPolicyAlways always restart the container if it stops + RestartPolicyAlways = "always" + //RestartPolicyOnFailure restart the container if it exits due to an error + RestartPolicyOnFailure = "on-failure" + //RestartPolicyNo do not automatically restart the container + RestartPolicyNo = "no" + //RestartPolicyUnlessStopped always restart the container unless the container is stopped (manually or otherwise) + RestartPolicyUnlessStopped = "unless-stopped" +) + +const ( + // ServicePrefix is the prefix for references pointing to a service + ServicePrefix = "service:" + // ContainerPrefix is the prefix for references pointing to a container + ContainerPrefix = "container:" + + // NetworkModeServicePrefix is the prefix for network_mode pointing to a service + // Deprecated prefer ServicePrefix + NetworkModeServicePrefix = ServicePrefix + // NetworkModeContainerPrefix is the prefix for network_mode pointing to a container + // Deprecated prefer ContainerPrefix + NetworkModeContainerPrefix = ContainerPrefix +) + +// GetDependencies retrieve all services this service depends on +func (s ServiceConfig) GetDependencies() []string { + dependencies := make(set) + for dependency := range s.DependsOn { + dependencies.append(dependency) + } + for _, link := range s.Links { + parts := strings.Split(link, ":") + if len(parts) == 2 { + dependencies.append(parts[0]) + } else { + dependencies.append(link) + } + } + if strings.HasPrefix(s.NetworkMode, ServicePrefix) { + dependencies.append(s.NetworkMode[len(ServicePrefix):]) + } + if strings.HasPrefix(s.Ipc, ServicePrefix) { + dependencies.append(s.Ipc[len(ServicePrefix):]) + } + if strings.HasPrefix(s.Pid, ServicePrefix) { + dependencies.append(s.Pid[len(ServicePrefix):]) + } + for _, vol := range s.VolumesFrom { + if !strings.HasPrefix(s.Pid, ContainerPrefix) { + dependencies.append(vol) + } + } + + return dependencies.toSlice() +} + +type set map[string]struct{} + +func (s set) append(strings ...string) { + for _, 
str := range strings { + s[str] = struct{}{} + } +} + +func (s set) toSlice() []string { + slice := make([]string, 0, len(s)) + for v := range s { + slice = append(slice, v) + } + return slice +} + +// BuildConfig is a type for build +type BuildConfig struct { + Context string `yaml:",omitempty" json:"context,omitempty"` + Dockerfile string `yaml:",omitempty" json:"dockerfile,omitempty"` + Args MappingWithEquals `yaml:",omitempty" json:"args,omitempty"` + SSH SSHConfig `yaml:"ssh,omitempty" json:"ssh,omitempty"` + Labels Labels `yaml:",omitempty" json:"labels,omitempty"` + CacheFrom StringList `mapstructure:"cache_from" yaml:"cache_from,omitempty" json:"cache_from,omitempty"` + CacheTo StringList `mapstructure:"cache_to" yaml:"cache_to,omitempty" json:"cache_to,omitempty"` + NoCache bool `mapstructure:"no_cache" yaml:"no_cache,omitempty" json:"no_cache,omitempty"` + Pull bool `mapstructure:"pull" yaml:"pull,omitempty" json:"pull,omitempty"` + ExtraHosts HostsList `mapstructure:"extra_hosts" yaml:"extra_hosts,omitempty" json:"extra_hosts,omitempty"` + Isolation string `yaml:",omitempty" json:"isolation,omitempty"` + Network string `yaml:",omitempty" json:"network,omitempty"` + Target string `yaml:",omitempty" json:"target,omitempty"` + + Extensions map[string]interface{} `yaml:",inline" json:"-"` +} + +// BlkioConfig define blkio config +type BlkioConfig struct { + Weight uint16 `yaml:",omitempty" json:"weight,omitempty"` + WeightDevice []WeightDevice `mapstructure:"weight_device" yaml:",omitempty" json:"weight_device,omitempty"` + DeviceReadBps []ThrottleDevice `mapstructure:"device_read_bps" yaml:",omitempty" json:"device_read_bps,omitempty"` + DeviceReadIOps []ThrottleDevice `mapstructure:"device_read_iops" yaml:",omitempty" json:"device_read_iops,omitempty"` + DeviceWriteBps []ThrottleDevice `mapstructure:"device_write_bps" yaml:",omitempty" json:"device_write_bps,omitempty"` + DeviceWriteIOps []ThrottleDevice `mapstructure:"device_write_iops" yaml:",omitempty" 
json:"device_write_iops,omitempty"` + + Extensions map[string]interface{} `yaml:",inline" json:"-"` +} + +// WeightDevice is a structure that holds device:weight pair +type WeightDevice struct { + Path string + Weight uint16 + + Extensions map[string]interface{} `yaml:",inline" json:"-"` +} + +// ThrottleDevice is a structure that holds device:rate_per_second pair +type ThrottleDevice struct { + Path string + Rate uint64 + + Extensions map[string]interface{} `yaml:",inline" json:"-"` +} + +// ShellCommand is a string or list of string args +type ShellCommand []string + +// StringList is a type for fields that can be a string or list of strings +type StringList []string + +// StringOrNumberList is a type for fields that can be a list of strings or +// numbers +type StringOrNumberList []string + +// MappingWithEquals is a mapping type that can be converted from a list of +// key[=value] strings. +// For the key with an empty value (`key=`), the mapped value is set to a pointer to `""`. +// For the key without value (`key`), the mapped value is set to nil. 
+type MappingWithEquals map[string]*string + +// NewMappingWithEquals build a new Mapping from a set of KEY=VALUE strings +func NewMappingWithEquals(values []string) MappingWithEquals { + mapping := MappingWithEquals{} + for _, env := range values { + tokens := strings.SplitN(env, "=", 2) + if len(tokens) > 1 { + mapping[tokens[0]] = &tokens[1] + } else { + mapping[env] = nil + } + } + return mapping +} + +// OverrideBy update MappingWithEquals with values from another MappingWithEquals +func (e MappingWithEquals) OverrideBy(other MappingWithEquals) MappingWithEquals { + for k, v := range other { + e[k] = v + } + return e +} + +// Resolve update a MappingWithEquals for keys without value (`key`, but not `key=`) +func (e MappingWithEquals) Resolve(lookupFn func(string) (string, bool)) MappingWithEquals { + for k, v := range e { + if v == nil { + if value, ok := lookupFn(k); ok { + e[k] = &value + } + } + } + return e +} + +// RemoveEmpty excludes keys that are not associated with a value +func (e MappingWithEquals) RemoveEmpty() MappingWithEquals { + for k, v := range e { + if v == nil { + delete(e, k) + } + } + return e +} + +// Mapping is a mapping type that can be converted from a list of +// key[=value] strings. +// For the key with an empty value (`key=`), or key without value (`key`), the +// mapped value is set to an empty string `""`. 
+type Mapping map[string]string + +// NewMapping build a new Mapping from a set of KEY=VALUE strings +func NewMapping(values []string) Mapping { + mapping := Mapping{} + for _, value := range values { + parts := strings.SplitN(value, "=", 2) + key := parts[0] + switch { + case len(parts) == 1: + mapping[key] = "" + default: + mapping[key] = parts[1] + } + } + return mapping +} + +// Labels is a mapping type for labels +type Labels map[string]string + +func (l Labels) Add(key, value string) Labels { + if l == nil { + l = Labels{} + } + l[key] = value + return l +} + +type SSHKey struct { + ID string + Path string +} + +// SSHConfig is a mapping type for SSH build config +type SSHConfig []SSHKey + +func (s SSHConfig) Get(id string) (string, error) { + for _, sshKey := range s { + if sshKey.ID == id { + return sshKey.Path, nil + } + } + return "", fmt.Errorf("ID %s not found in SSH keys", id) +} + +// MarshalYAML makes SSHKey implement yaml.Marshaller +func (s SSHKey) MarshalYAML() (interface{}, error) { + if s.Path == "" { + return s.ID, nil + } + return fmt.Sprintf("%s: %s", s.ID, s.Path), nil +} + +// MarshalJSON makes SSHKey implement json.Marshaller +func (s SSHKey) MarshalJSON() ([]byte, error) { + if s.Path == "" { + return []byte(fmt.Sprintf(`"%s"`, s.ID)), nil + } + return []byte(fmt.Sprintf(`"%s": %s`, s.ID, s.Path)), nil +} + +// MappingWithColon is a mapping type that can be converted from a list of +// 'key: value' strings +type MappingWithColon map[string]string + +// HostsList is a list of colon-separated host-ip mappings +type HostsList []string + +// LoggingConfig the logging configuration for a service +type LoggingConfig struct { + Driver string `yaml:",omitempty" json:"driver,omitempty"` + Options map[string]string `yaml:",omitempty" json:"options,omitempty"` + + Extensions map[string]interface{} `yaml:",inline" json:"-"` +} + +// DeployConfig the deployment configuration for a service +type DeployConfig struct { + Mode string `yaml:",omitempty" 
json:"mode,omitempty"` + Replicas *uint64 `yaml:",omitempty" json:"replicas,omitempty"` + Labels Labels `yaml:",omitempty" json:"labels,omitempty"` + UpdateConfig *UpdateConfig `mapstructure:"update_config" yaml:"update_config,omitempty" json:"update_config,omitempty"` + RollbackConfig *UpdateConfig `mapstructure:"rollback_config" yaml:"rollback_config,omitempty" json:"rollback_config,omitempty"` + Resources Resources `yaml:",omitempty" json:"resources,omitempty"` + RestartPolicy *RestartPolicy `mapstructure:"restart_policy" yaml:"restart_policy,omitempty" json:"restart_policy,omitempty"` + Placement Placement `yaml:",omitempty" json:"placement,omitempty"` + EndpointMode string `mapstructure:"endpoint_mode" yaml:"endpoint_mode,omitempty" json:"endpoint_mode,omitempty"` + + Extensions map[string]interface{} `yaml:",inline" json:"-"` +} + +// HealthCheckConfig the healthcheck configuration for a service +type HealthCheckConfig struct { + Test HealthCheckTest `yaml:",omitempty" json:"test,omitempty"` + Timeout *Duration `yaml:",omitempty" json:"timeout,omitempty"` + Interval *Duration `yaml:",omitempty" json:"interval,omitempty"` + Retries *uint64 `yaml:",omitempty" json:"retries,omitempty"` + StartPeriod *Duration `mapstructure:"start_period" yaml:"start_period,omitempty" json:"start_period,omitempty"` + Disable bool `yaml:",omitempty" json:"disable,omitempty"` + + Extensions map[string]interface{} `yaml:",inline" json:"-"` +} + +// HealthCheckTest is the command run to test the health of a service +type HealthCheckTest []string + +// UpdateConfig the service update configuration +type UpdateConfig struct { + Parallelism *uint64 `yaml:",omitempty" json:"parallelism,omitempty"` + Delay Duration `yaml:",omitempty" json:"delay,omitempty"` + FailureAction string `mapstructure:"failure_action" yaml:"failure_action,omitempty" json:"failure_action,omitempty"` + Monitor Duration `yaml:",omitempty" json:"monitor,omitempty"` + MaxFailureRatio float32 
`mapstructure:"max_failure_ratio" yaml:"max_failure_ratio,omitempty" json:"max_failure_ratio,omitempty"` + Order string `yaml:",omitempty" json:"order,omitempty"` + + Extensions map[string]interface{} `yaml:",inline" json:"-"` +} + +// Resources the resource limits and reservations +type Resources struct { + Limits *Resource `yaml:",omitempty" json:"limits,omitempty"` + Reservations *Resource `yaml:",omitempty" json:"reservations,omitempty"` + + Extensions map[string]interface{} `yaml:",inline" json:"-"` +} + +// Resource is a resource to be limited or reserved +type Resource struct { + // TODO: types to convert from units and ratios + NanoCPUs string `mapstructure:"cpus" yaml:"cpus,omitempty" json:"cpus,omitempty"` + MemoryBytes UnitBytes `mapstructure:"memory" yaml:"memory,omitempty" json:"memory,omitempty"` + PIds int64 `mapstructure:"pids" yaml:"pids,omitempty" json:"pids,omitempty"` + Devices []DeviceRequest `mapstructure:"devices" yaml:"devices,omitempty" json:"devices,omitempty"` + GenericResources []GenericResource `mapstructure:"generic_resources" yaml:"generic_resources,omitempty" json:"generic_resources,omitempty"` + + Extensions map[string]interface{} `yaml:",inline" json:"-"` +} + +type DeviceRequest struct { + Capabilities []string `mapstructure:"capabilities" yaml:"capabilities,omitempty" json:"capabilities,omitempty"` + Driver string `mapstructure:"driver" yaml:"driver,omitempty" json:"driver,omitempty"` + Count int64 `mapstructure:"count" yaml:"count,omitempty" json:"count,omitempty"` + IDs []string `mapstructure:"device_ids" yaml:"device_ids,omitempty" json:"device_ids,omitempty"` +} + +// GenericResource represents a "user defined" resource which can +// only be an integer (e.g: SSD=3) for a service +type GenericResource struct { + DiscreteResourceSpec *DiscreteGenericResource `mapstructure:"discrete_resource_spec" yaml:"discrete_resource_spec,omitempty" json:"discrete_resource_spec,omitempty"` + + Extensions map[string]interface{} 
`yaml:",inline" json:"-"` +} + +// DiscreteGenericResource represents a "user defined" resource which is defined +// as an integer +// "Kind" is used to describe the Kind of a resource (e.g: "GPU", "FPGA", "SSD", ...) +// Value is used to count the resource (SSD=5, HDD=3, ...) +type DiscreteGenericResource struct { + Kind string `json:"kind"` + Value int64 `json:"value"` + + Extensions map[string]interface{} `yaml:",inline" json:"-"` +} + +// UnitBytes is the bytes type +type UnitBytes int64 + +// MarshalYAML makes UnitBytes implement yaml.Marshaller +func (u UnitBytes) MarshalYAML() (interface{}, error) { + return fmt.Sprintf("%d", u), nil +} + +// MarshalJSON makes UnitBytes implement json.Marshaler +func (u UnitBytes) MarshalJSON() ([]byte, error) { + return []byte(fmt.Sprintf(`"%d"`, u)), nil +} + +// RestartPolicy the service restart policy +type RestartPolicy struct { + Condition string `yaml:",omitempty" json:"condition,omitempty"` + Delay *Duration `yaml:",omitempty" json:"delay,omitempty"` + MaxAttempts *uint64 `mapstructure:"max_attempts" yaml:"max_attempts,omitempty" json:"max_attempts,omitempty"` + Window *Duration `yaml:",omitempty" json:"window,omitempty"` + + Extensions map[string]interface{} `yaml:",inline" json:"-"` +} + +// Placement constraints for the service +type Placement struct { + Constraints []string `yaml:",omitempty" json:"constraints,omitempty"` + Preferences []PlacementPreferences `yaml:",omitempty" json:"preferences,omitempty"` + MaxReplicas uint64 `mapstructure:"max_replicas_per_node" yaml:"max_replicas_per_node,omitempty" json:"max_replicas_per_node,omitempty"` + + Extensions map[string]interface{} `yaml:",inline" json:"-"` +} + +// PlacementPreferences is the preferences for a service placement +type PlacementPreferences struct { + Spread string `yaml:",omitempty" json:"spread,omitempty"` + + Extensions map[string]interface{} `yaml:",inline" json:"-"` +} + +// ServiceNetworkConfig is the network configuration for a service +type 
ServiceNetworkConfig struct { + Priority int `yaml:",omitempty" json:"priotirt,omitempty"` + Aliases []string `yaml:",omitempty" json:"aliases,omitempty"` + Ipv4Address string `mapstructure:"ipv4_address" yaml:"ipv4_address,omitempty" json:"ipv4_address,omitempty"` + Ipv6Address string `mapstructure:"ipv6_address" yaml:"ipv6_address,omitempty" json:"ipv6_address,omitempty"` + + Extensions map[string]interface{} `yaml:",inline" json:"-"` +} + +// ServicePortConfig is the port configuration for a service +type ServicePortConfig struct { + Mode string `yaml:",omitempty" json:"mode,omitempty"` + HostIP string `mapstructure:"host_ip" yaml:"host_ip,omitempty" json:"host_ip,omitempty"` + Target uint32 `yaml:",omitempty" json:"target,omitempty"` + Published string `yaml:",omitempty" json:"published,omitempty"` + Protocol string `yaml:",omitempty" json:"protocol,omitempty"` + + Extensions map[string]interface{} `yaml:",inline" json:"-"` +} + +// ParsePortConfig parse short syntax for service port configuration +func ParsePortConfig(value string) ([]ServicePortConfig, error) { + var portConfigs []ServicePortConfig + ports, portBindings, err := nat.ParsePortSpecs([]string{value}) + if err != nil { + return nil, err + } + // We need to sort the key of the ports to make sure it is consistent + keys := []string{} + for port := range ports { + keys = append(keys, string(port)) + } + sort.Strings(keys) + + for _, key := range keys { + port := nat.Port(key) + converted, err := convertPortToPortConfig(port, portBindings) + if err != nil { + return nil, err + } + portConfigs = append(portConfigs, converted...) 
+ } + return portConfigs, nil +} + +func convertPortToPortConfig(port nat.Port, portBindings map[nat.Port][]nat.PortBinding) ([]ServicePortConfig, error) { + var portConfigs []ServicePortConfig + for _, binding := range portBindings[port] { + portConfigs = append(portConfigs, ServicePortConfig{ + HostIP: binding.HostIP, + Protocol: strings.ToLower(port.Proto()), + Target: uint32(port.Int()), + Published: binding.HostPort, + Mode: "ingress", + }) + } + return portConfigs, nil +} + +// ServiceVolumeConfig are references to a volume used by a service +type ServiceVolumeConfig struct { + Type string `yaml:",omitempty" json:"type,omitempty"` + Source string `yaml:",omitempty" json:"source,omitempty"` + Target string `yaml:",omitempty" json:"target,omitempty"` + ReadOnly bool `mapstructure:"read_only" yaml:"read_only,omitempty" json:"read_only,omitempty"` + Consistency string `yaml:",omitempty" json:"consistency,omitempty"` + Bind *ServiceVolumeBind `yaml:",omitempty" json:"bind,omitempty"` + Volume *ServiceVolumeVolume `yaml:",omitempty" json:"volume,omitempty"` + Tmpfs *ServiceVolumeTmpfs `yaml:",omitempty" json:"tmpfs,omitempty"` + + Extensions map[string]interface{} `yaml:",inline" json:"-"` +} + +const ( + // VolumeTypeBind is the type for mounting host dir + VolumeTypeBind = "bind" + // VolumeTypeVolume is the type for remote storage volumes + VolumeTypeVolume = "volume" + // VolumeTypeTmpfs is the type for mounting tmpfs + VolumeTypeTmpfs = "tmpfs" + // VolumeTypeNamedPipe is the type for mounting Windows named pipes + VolumeTypeNamedPipe = "npipe" + + // SElinuxShared share the volume content + SElinuxShared = "z" + // SElinuxUnshared label content as private unshared + SElinuxUnshared = "Z" +) + +// ServiceVolumeBind are options for a service volume of type bind +type ServiceVolumeBind struct { + SELinux string `mapstructure:"selinux" yaml:",omitempty" json:"selinux,omitempty"` + Propagation string `yaml:",omitempty" json:"propagation,omitempty"` + 
CreateHostPath bool `mapstructure:"create_host_path" yaml:"create_host_path,omitempty" json:"create_host_path,omitempty"` + + Extensions map[string]interface{} `yaml:",inline" json:"-"` +} + +// SELinux represents the SELinux re-labeling options. +const ( + // SELinuxShared option indicates that the bind mount content is shared among multiple containers + SELinuxShared string = "z" + // SELinuxPrivate option indicates that the bind mount content is private and unshared + SELinuxPrivate string = "Z" +) + +// Propagation represents the propagation of a mount. +const ( + // PropagationRPrivate RPRIVATE + PropagationRPrivate string = "rprivate" + // PropagationPrivate PRIVATE + PropagationPrivate string = "private" + // PropagationRShared RSHARED + PropagationRShared string = "rshared" + // PropagationShared SHARED + PropagationShared string = "shared" + // PropagationRSlave RSLAVE + PropagationRSlave string = "rslave" + // PropagationSlave SLAVE + PropagationSlave string = "slave" +) + +// ServiceVolumeVolume are options for a service volume of type volume +type ServiceVolumeVolume struct { + NoCopy bool `mapstructure:"nocopy" yaml:"nocopy,omitempty" json:"nocopy,omitempty"` + + Extensions map[string]interface{} `yaml:",inline" json:"-"` +} + +// ServiceVolumeTmpfs are options for a service volume of type tmpfs +type ServiceVolumeTmpfs struct { + Size UnitBytes `yaml:",omitempty" json:"size,omitempty"` + + Extensions map[string]interface{} `yaml:",inline" json:"-"` +} + +// FileReferenceConfig for a reference to a swarm file object +type FileReferenceConfig struct { + Source string `yaml:",omitempty" json:"source,omitempty"` + Target string `yaml:",omitempty" json:"target,omitempty"` + UID string `yaml:",omitempty" json:"uid,omitempty"` + GID string `yaml:",omitempty" json:"gid,omitempty"` + Mode *uint32 `yaml:",omitempty" json:"mode,omitempty"` + + Extensions map[string]interface{} `yaml:",inline" json:"-"` +} + +// ServiceConfigObjConfig is the config obj 
configuration for a service +type ServiceConfigObjConfig FileReferenceConfig + +// ServiceSecretConfig is the secret configuration for a service +type ServiceSecretConfig FileReferenceConfig + +// UlimitsConfig the ulimit configuration +type UlimitsConfig struct { + Single int `yaml:",omitempty" json:"single,omitempty"` + Soft int `yaml:",omitempty" json:"soft,omitempty"` + Hard int `yaml:",omitempty" json:"hard,omitempty"` + + Extensions map[string]interface{} `yaml:",inline" json:"-"` +} + +// MarshalYAML makes UlimitsConfig implement yaml.Marshaller +func (u *UlimitsConfig) MarshalYAML() (interface{}, error) { + if u.Single != 0 { + return u.Single, nil + } + return u, nil +} + +// MarshalJSON makes UlimitsConfig implement json.Marshaller +func (u *UlimitsConfig) MarshalJSON() ([]byte, error) { + if u.Single != 0 { + return json.Marshal(u.Single) + } + // Pass as a value to avoid re-entering this method and use the default implementation + return json.Marshal(*u) +} + +// NetworkConfig for a network +type NetworkConfig struct { + Name string `yaml:",omitempty" json:"name,omitempty"` + Driver string `yaml:",omitempty" json:"driver,omitempty"` + DriverOpts map[string]string `mapstructure:"driver_opts" yaml:"driver_opts,omitempty" json:"driver_opts,omitempty"` + Ipam IPAMConfig `yaml:",omitempty" json:"ipam,omitempty"` + External External `yaml:",omitempty" json:"external,omitempty"` + Internal bool `yaml:",omitempty" json:"internal,omitempty"` + Attachable bool `yaml:",omitempty" json:"attachable,omitempty"` + Labels Labels `yaml:",omitempty" json:"labels,omitempty"` + EnableIPv6 bool `mapstructure:"enable_ipv6" yaml:"enable_ipv6,omitempty" json:"enable_ipv6,omitempty"` + Extensions map[string]interface{} `yaml:",inline" json:"-"` +} + +// IPAMConfig for a network +type IPAMConfig struct { + Driver string `yaml:",omitempty" json:"driver,omitempty"` + Config []*IPAMPool `yaml:",omitempty" json:"config,omitempty"` + Extensions map[string]interface{} `yaml:",inline" 
json:"-"` +} + +// IPAMPool for a network +type IPAMPool struct { + Subnet string `yaml:",omitempty" json:"subnet,omitempty"` + Gateway string `yaml:",omitempty" json:"gateway,omitempty"` + IPRange string `mapstructure:"ip_range" yaml:"ip_range,omitempty" json:"ip_range,omitempty"` + AuxiliaryAddresses map[string]string `mapstructure:"aux_addresses" yaml:"aux_addresses,omitempty" json:"aux_addresses,omitempty"` + Extensions map[string]interface{} `yaml:",inline" json:"-"` +} + +// VolumeConfig for a volume +type VolumeConfig struct { + Name string `yaml:",omitempty" json:"name,omitempty"` + Driver string `yaml:",omitempty" json:"driver,omitempty"` + DriverOpts map[string]string `mapstructure:"driver_opts" yaml:"driver_opts,omitempty" json:"driver_opts,omitempty"` + External External `yaml:",omitempty" json:"external,omitempty"` + Labels Labels `yaml:",omitempty" json:"labels,omitempty"` + Extensions map[string]interface{} `yaml:",inline" json:"-"` +} + +// External identifies a Volume or Network as a reference to a resource that is +// not managed, and should already exist. 
+// External.name is deprecated and replaced by Volume.name +type External struct { + Name string `yaml:",omitempty" json:"name,omitempty"` + External bool `yaml:",omitempty" json:"external,omitempty"` + Extensions map[string]interface{} `yaml:",inline" json:"-"` +} + +// MarshalYAML makes External implement yaml.Marshaller +func (e External) MarshalYAML() (interface{}, error) { + if e.Name == "" { + return e.External, nil + } + return External{Name: e.Name}, nil +} + +// MarshalJSON makes External implement json.Marshaller +func (e External) MarshalJSON() ([]byte, error) { + if e.Name == "" { + return []byte(fmt.Sprintf("%v", e.External)), nil + } + return []byte(fmt.Sprintf(`{"name": %q}`, e.Name)), nil +} + +// CredentialSpecConfig for credential spec on Windows +type CredentialSpecConfig struct { + Config string `yaml:",omitempty" json:"config,omitempty"` // Config was added in API v1.40 + File string `yaml:",omitempty" json:"file,omitempty"` + Registry string `yaml:",omitempty" json:"registry,omitempty"` + Extensions map[string]interface{} `yaml:",inline" json:"-"` +} + +// FileObjectConfig is a config type for a file used by a service +type FileObjectConfig struct { + Name string `yaml:",omitempty" json:"name,omitempty"` + File string `yaml:",omitempty" json:"file,omitempty"` + External External `yaml:",omitempty" json:"external,omitempty"` + Labels Labels `yaml:",omitempty" json:"labels,omitempty"` + Driver string `yaml:",omitempty" json:"driver,omitempty"` + DriverOpts map[string]string `mapstructure:"driver_opts" yaml:"driver_opts,omitempty" json:"driver_opts,omitempty"` + TemplateDriver string `mapstructure:"template_driver" yaml:"template_driver,omitempty" json:"template_driver,omitempty"` + Extensions map[string]interface{} `yaml:",inline" json:"-"` +} + +const ( + // ServiceConditionCompletedSuccessfully is the type for waiting until a service has completed successfully (exit code 0). 
+ ServiceConditionCompletedSuccessfully = "service_completed_successfully" + + // ServiceConditionHealthy is the type for waiting until a service is healthy. + ServiceConditionHealthy = "service_healthy" + + // ServiceConditionStarted is the type for waiting until a service has started (default). + ServiceConditionStarted = "service_started" +) + +type DependsOnConfig map[string]ServiceDependency + +type ServiceDependency struct { + Condition string `yaml:",omitempty" json:"condition,omitempty"` + Extensions map[string]interface{} `yaml:",inline" json:"-"` +} + +type ExtendsConfig MappingWithEquals + +// SecretConfig for a secret +type SecretConfig FileObjectConfig + +// ConfigObjConfig is the config for the swarm "Config" object +type ConfigObjConfig FileObjectConfig diff --git a/vendor/github.com/distribution/distribution/v3/LICENSE b/vendor/github.com/distribution/distribution/v3/LICENSE new file mode 100644 index 0000000000..e06d208186 --- /dev/null +++ b/vendor/github.com/distribution/distribution/v3/LICENSE @@ -0,0 +1,202 @@ +Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. 
+ + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. 
You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. 
Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. 
+ + Copyright {yyyy} {name of copyright owner} + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + diff --git a/vendor/github.com/distribution/distribution/v3/digestset/set.go b/vendor/github.com/distribution/distribution/v3/digestset/set.go new file mode 100644 index 0000000000..71327dca72 --- /dev/null +++ b/vendor/github.com/distribution/distribution/v3/digestset/set.go @@ -0,0 +1,247 @@ +package digestset + +import ( + "errors" + "sort" + "strings" + "sync" + + digest "github.com/opencontainers/go-digest" +) + +var ( + // ErrDigestNotFound is used when a matching digest + // could not be found in a set. + ErrDigestNotFound = errors.New("digest not found") + + // ErrDigestAmbiguous is used when multiple digests + // are found in a set. None of the matching digests + // should be considered valid matches. + ErrDigestAmbiguous = errors.New("ambiguous digest string") +) + +// Set is used to hold a unique set of digests which +// may be easily referenced by easily referenced by a string +// representation of the digest as well as short representation. +// The uniqueness of the short representation is based on other +// digests in the set. If digests are omitted from this set, +// collisions in a larger set may not be detected, therefore it +// is important to always do short representation lookups on +// the complete set of digests. To mitigate collisions, an +// appropriately long short code should be used. 
+type Set struct { + mutex sync.RWMutex + entries digestEntries +} + +// NewSet creates an empty set of digests +// which may have digests added. +func NewSet() *Set { + return &Set{ + entries: digestEntries{}, + } +} + +// checkShortMatch checks whether two digests match as either whole +// values or short values. This function does not test equality, +// rather whether the second value could match against the first +// value. +func checkShortMatch(alg digest.Algorithm, hex, shortAlg, shortHex string) bool { + if len(hex) == len(shortHex) { + if hex != shortHex { + return false + } + if len(shortAlg) > 0 && string(alg) != shortAlg { + return false + } + } else if !strings.HasPrefix(hex, shortHex) { + return false + } else if len(shortAlg) > 0 && string(alg) != shortAlg { + return false + } + return true +} + +// Lookup looks for a digest matching the given string representation. +// If no digests could be found ErrDigestNotFound will be returned +// with an empty digest value. If multiple matches are found +// ErrDigestAmbiguous will be returned with an empty digest value. 
+func (dst *Set) Lookup(d string) (digest.Digest, error) { + dst.mutex.RLock() + defer dst.mutex.RUnlock() + if len(dst.entries) == 0 { + return "", ErrDigestNotFound + } + var ( + searchFunc func(int) bool + alg digest.Algorithm + hex string + ) + dgst, err := digest.Parse(d) + if err == digest.ErrDigestInvalidFormat { + hex = d + searchFunc = func(i int) bool { + return dst.entries[i].val >= d + } + } else { + hex = dgst.Hex() + alg = dgst.Algorithm() + searchFunc = func(i int) bool { + if dst.entries[i].val == hex { + return dst.entries[i].alg >= alg + } + return dst.entries[i].val >= hex + } + } + idx := sort.Search(len(dst.entries), searchFunc) + if idx == len(dst.entries) || !checkShortMatch(dst.entries[idx].alg, dst.entries[idx].val, string(alg), hex) { + return "", ErrDigestNotFound + } + if dst.entries[idx].alg == alg && dst.entries[idx].val == hex { + return dst.entries[idx].digest, nil + } + if idx+1 < len(dst.entries) && checkShortMatch(dst.entries[idx+1].alg, dst.entries[idx+1].val, string(alg), hex) { + return "", ErrDigestAmbiguous + } + + return dst.entries[idx].digest, nil +} + +// Add adds the given digest to the set. An error will be returned +// if the given digest is invalid. If the digest already exists in the +// set, this operation will be a no-op. 
+func (dst *Set) Add(d digest.Digest) error { + if err := d.Validate(); err != nil { + return err + } + dst.mutex.Lock() + defer dst.mutex.Unlock() + entry := &digestEntry{alg: d.Algorithm(), val: d.Hex(), digest: d} + searchFunc := func(i int) bool { + if dst.entries[i].val == entry.val { + return dst.entries[i].alg >= entry.alg + } + return dst.entries[i].val >= entry.val + } + idx := sort.Search(len(dst.entries), searchFunc) + if idx == len(dst.entries) { + dst.entries = append(dst.entries, entry) + return nil + } else if dst.entries[idx].digest == d { + return nil + } + + entries := append(dst.entries, nil) + copy(entries[idx+1:], entries[idx:len(entries)-1]) + entries[idx] = entry + dst.entries = entries + return nil +} + +// Remove removes the given digest from the set. An err will be +// returned if the given digest is invalid. If the digest does +// not exist in the set, this operation will be a no-op. +func (dst *Set) Remove(d digest.Digest) error { + if err := d.Validate(); err != nil { + return err + } + dst.mutex.Lock() + defer dst.mutex.Unlock() + entry := &digestEntry{alg: d.Algorithm(), val: d.Hex(), digest: d} + searchFunc := func(i int) bool { + if dst.entries[i].val == entry.val { + return dst.entries[i].alg >= entry.alg + } + return dst.entries[i].val >= entry.val + } + idx := sort.Search(len(dst.entries), searchFunc) + // Not found if idx is after or value at idx is not digest + if idx == len(dst.entries) || dst.entries[idx].digest != d { + return nil + } + + entries := dst.entries + copy(entries[idx:], entries[idx+1:]) + entries = entries[:len(entries)-1] + dst.entries = entries + + return nil +} + +// All returns all the digests in the set +func (dst *Set) All() []digest.Digest { + dst.mutex.RLock() + defer dst.mutex.RUnlock() + retValues := make([]digest.Digest, len(dst.entries)) + for i := range dst.entries { + retValues[i] = dst.entries[i].digest + } + + return retValues +} + +// ShortCodeTable returns a map of Digest to unique short codes. 
The +// length represents the minimum value, the maximum length may be the +// entire value of digest if uniqueness cannot be achieved without the +// full value. This function will attempt to make short codes as short +// as possible to be unique. +func ShortCodeTable(dst *Set, length int) map[digest.Digest]string { + dst.mutex.RLock() + defer dst.mutex.RUnlock() + m := make(map[digest.Digest]string, len(dst.entries)) + l := length + resetIdx := 0 + for i := 0; i < len(dst.entries); i++ { + var short string + extended := true + for extended { + extended = false + if len(dst.entries[i].val) <= l { + short = dst.entries[i].digest.String() + } else { + short = dst.entries[i].val[:l] + for j := i + 1; j < len(dst.entries); j++ { + if checkShortMatch(dst.entries[j].alg, dst.entries[j].val, "", short) { + if j > resetIdx { + resetIdx = j + } + extended = true + } else { + break + } + } + if extended { + l++ + } + } + } + m[dst.entries[i].digest] = short + if i >= resetIdx { + l = length + } + } + return m +} + +type digestEntry struct { + alg digest.Algorithm + val string + digest digest.Digest +} + +type digestEntries []*digestEntry + +func (d digestEntries) Len() int { + return len(d) +} + +func (d digestEntries) Less(i, j int) bool { + if d[i].val != d[j].val { + return d[i].val < d[j].val + } + return d[i].alg < d[j].alg +} + +func (d digestEntries) Swap(i, j int) { + d[i], d[j] = d[j], d[i] +} diff --git a/vendor/github.com/distribution/distribution/v3/reference/helpers.go b/vendor/github.com/distribution/distribution/v3/reference/helpers.go new file mode 100644 index 0000000000..978df7eabb --- /dev/null +++ b/vendor/github.com/distribution/distribution/v3/reference/helpers.go @@ -0,0 +1,42 @@ +package reference + +import "path" + +// IsNameOnly returns true if reference only contains a repo name. 
+func IsNameOnly(ref Named) bool { + if _, ok := ref.(NamedTagged); ok { + return false + } + if _, ok := ref.(Canonical); ok { + return false + } + return true +} + +// FamiliarName returns the familiar name string +// for the given named, familiarizing if needed. +func FamiliarName(ref Named) string { + if nn, ok := ref.(normalizedNamed); ok { + return nn.Familiar().Name() + } + return ref.Name() +} + +// FamiliarString returns the familiar string representation +// for the given reference, familiarizing if needed. +func FamiliarString(ref Reference) string { + if nn, ok := ref.(normalizedNamed); ok { + return nn.Familiar().String() + } + return ref.String() +} + +// FamiliarMatch reports whether ref matches the specified pattern. +// See https://godoc.org/path#Match for supported patterns. +func FamiliarMatch(pattern string, ref Reference) (bool, error) { + matched, err := path.Match(pattern, FamiliarString(ref)) + if namedRef, isNamed := ref.(Named); isNamed && !matched { + matched, _ = path.Match(pattern, FamiliarName(namedRef)) + } + return matched, err +} diff --git a/vendor/github.com/distribution/distribution/v3/reference/normalize.go b/vendor/github.com/distribution/distribution/v3/reference/normalize.go new file mode 100644 index 0000000000..e7a1f9b528 --- /dev/null +++ b/vendor/github.com/distribution/distribution/v3/reference/normalize.go @@ -0,0 +1,198 @@ +package reference + +import ( + "fmt" + "strings" + + "github.com/distribution/distribution/v3/digestset" + "github.com/opencontainers/go-digest" +) + +var ( + legacyDefaultDomain = "index.docker.io" + defaultDomain = "docker.io" + officialRepoName = "library" + defaultTag = "latest" +) + +// normalizedNamed represents a name which has been +// normalized and has a familiar form. A familiar name +// is what is used in Docker UI. An example normalized +// name is "docker.io/library/ubuntu" and corresponding +// familiar name of "ubuntu". 
+type normalizedNamed interface { + Named + Familiar() Named +} + +// ParseNormalizedNamed parses a string into a named reference +// transforming a familiar name from Docker UI to a fully +// qualified reference. If the value may be an identifier +// use ParseAnyReference. +func ParseNormalizedNamed(s string) (Named, error) { + if ok := anchoredIdentifierRegexp.MatchString(s); ok { + return nil, fmt.Errorf("invalid repository name (%s), cannot specify 64-byte hexadecimal strings", s) + } + domain, remainder := splitDockerDomain(s) + var remoteName string + if tagSep := strings.IndexRune(remainder, ':'); tagSep > -1 { + remoteName = remainder[:tagSep] + } else { + remoteName = remainder + } + if strings.ToLower(remoteName) != remoteName { + return nil, fmt.Errorf("invalid reference format: repository name (%s) must be lowercase", remoteName) + } + + ref, err := Parse(domain + "/" + remainder) + if err != nil { + return nil, err + } + named, isNamed := ref.(Named) + if !isNamed { + return nil, fmt.Errorf("reference %s has no name", ref.String()) + } + return named, nil +} + +// ParseDockerRef normalizes the image reference following the docker convention. This is added +// mainly for backward compatibility. +// The reference returned can only be either tagged or digested. For reference contains both tag +// and digest, the function returns digested reference, e.g. docker.io/library/busybox:latest@ +// sha256:7cc4b5aefd1d0cadf8d97d4350462ba51c694ebca145b08d7d41b41acc8db5aa will be returned as +// docker.io/library/busybox@sha256:7cc4b5aefd1d0cadf8d97d4350462ba51c694ebca145b08d7d41b41acc8db5aa. +func ParseDockerRef(ref string) (Named, error) { + named, err := ParseNormalizedNamed(ref) + if err != nil { + return nil, err + } + if _, ok := named.(NamedTagged); ok { + if canonical, ok := named.(Canonical); ok { + // The reference is both tagged and digested, only + // return digested. 
+ newNamed, err := WithName(canonical.Name()) + if err != nil { + return nil, err + } + newCanonical, err := WithDigest(newNamed, canonical.Digest()) + if err != nil { + return nil, err + } + return newCanonical, nil + } + } + return TagNameOnly(named), nil +} + +// splitDockerDomain splits a repository name to domain and remotename string. +// If no valid domain is found, the default domain is used. Repository name +// needs to be already validated before. +func splitDockerDomain(name string) (domain, remainder string) { + i := strings.IndexRune(name, '/') + if i == -1 || (!strings.ContainsAny(name[:i], ".:") && name[:i] != "localhost" && strings.ToLower(name[:i]) == name[:i]) { + domain, remainder = defaultDomain, name + } else { + domain, remainder = name[:i], name[i+1:] + } + if domain == legacyDefaultDomain { + domain = defaultDomain + } + if domain == defaultDomain && !strings.ContainsRune(remainder, '/') { + remainder = officialRepoName + "/" + remainder + } + return +} + +// familiarizeName returns a shortened version of the name familiar +// to to the Docker UI. Familiar names have the default domain +// "docker.io" and "library/" repository prefix removed. +// For example, "docker.io/library/redis" will have the familiar +// name "redis" and "docker.io/dmcgowan/myapp" will be "dmcgowan/myapp". +// Returns a familiarized named only reference. 
+func familiarizeName(named namedRepository) repository { + repo := repository{ + domain: named.Domain(), + path: named.Path(), + } + + if repo.domain == defaultDomain { + repo.domain = "" + // Handle official repositories which have the pattern "library/" + if split := strings.Split(repo.path, "/"); len(split) == 2 && split[0] == officialRepoName { + repo.path = split[1] + } + } + return repo +} + +func (r reference) Familiar() Named { + return reference{ + namedRepository: familiarizeName(r.namedRepository), + tag: r.tag, + digest: r.digest, + } +} + +func (r repository) Familiar() Named { + return familiarizeName(r) +} + +func (t taggedReference) Familiar() Named { + return taggedReference{ + namedRepository: familiarizeName(t.namedRepository), + tag: t.tag, + } +} + +func (c canonicalReference) Familiar() Named { + return canonicalReference{ + namedRepository: familiarizeName(c.namedRepository), + digest: c.digest, + } +} + +// TagNameOnly adds the default tag "latest" to a reference if it only has +// a repo name. +func TagNameOnly(ref Named) Named { + if IsNameOnly(ref) { + namedTagged, err := WithTag(ref, defaultTag) + if err != nil { + // Default tag must be valid, to create a NamedTagged + // type with non-validated input the WithTag function + // should be used instead + panic(err) + } + return namedTagged + } + return ref +} + +// ParseAnyReference parses a reference string as a possible identifier, +// full digest, or familiar name. +func ParseAnyReference(ref string) (Reference, error) { + if ok := anchoredIdentifierRegexp.MatchString(ref); ok { + return digestReference("sha256:" + ref), nil + } + if dgst, err := digest.Parse(ref); err == nil { + return digestReference(dgst), nil + } + + return ParseNormalizedNamed(ref) +} + +// ParseAnyReferenceWithSet parses a reference string as a possible short +// identifier to be matched in a digest set, a full digest, or familiar name. 
+func ParseAnyReferenceWithSet(ref string, ds *digestset.Set) (Reference, error) { + if ok := anchoredShortIdentifierRegexp.MatchString(ref); ok { + dgst, err := ds.Lookup(ref) + if err == nil { + return digestReference(dgst), nil + } + } else { + if dgst, err := digest.Parse(ref); err == nil { + return digestReference(dgst), nil + } + } + + return ParseNormalizedNamed(ref) +} diff --git a/vendor/github.com/distribution/distribution/v3/reference/reference.go b/vendor/github.com/distribution/distribution/v3/reference/reference.go new file mode 100644 index 0000000000..8c0c23b2fe --- /dev/null +++ b/vendor/github.com/distribution/distribution/v3/reference/reference.go @@ -0,0 +1,433 @@ +// Package reference provides a general type to represent any way of referencing images within the registry. +// Its main purpose is to abstract tags and digests (content-addressable hash). +// +// Grammar +// +// reference := name [ ":" tag ] [ "@" digest ] +// name := [domain '/'] path-component ['/' path-component]* +// domain := domain-component ['.' domain-component]* [':' port-number] +// domain-component := /([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9])/ +// port-number := /[0-9]+/ +// path-component := alpha-numeric [separator alpha-numeric]* +// alpha-numeric := /[a-z0-9]+/ +// separator := /[_.]|__|[-]*/ +// +// tag := /[\w][\w.-]{0,127}/ +// +// digest := digest-algorithm ":" digest-hex +// digest-algorithm := digest-algorithm-component [ digest-algorithm-separator digest-algorithm-component ]* +// digest-algorithm-separator := /[+.-_]/ +// digest-algorithm-component := /[A-Za-z][A-Za-z0-9]*/ +// digest-hex := /[0-9a-fA-F]{32,}/ ; At least 128 bit digest value +// +// identifier := /[a-f0-9]{64}/ +// short-identifier := /[a-f0-9]{6,64}/ +package reference + +import ( + "errors" + "fmt" + "strings" + + "github.com/opencontainers/go-digest" +) + +const ( + // NameTotalLengthMax is the maximum total number of characters in a repository name. 
+ NameTotalLengthMax = 255 +) + +var ( + // ErrReferenceInvalidFormat represents an error while trying to parse a string as a reference. + ErrReferenceInvalidFormat = errors.New("invalid reference format") + + // ErrTagInvalidFormat represents an error while trying to parse a string as a tag. + ErrTagInvalidFormat = errors.New("invalid tag format") + + // ErrDigestInvalidFormat represents an error while trying to parse a string as a tag. + ErrDigestInvalidFormat = errors.New("invalid digest format") + + // ErrNameContainsUppercase is returned for invalid repository names that contain uppercase characters. + ErrNameContainsUppercase = errors.New("repository name must be lowercase") + + // ErrNameEmpty is returned for empty, invalid repository names. + ErrNameEmpty = errors.New("repository name must have at least one component") + + // ErrNameTooLong is returned when a repository name is longer than NameTotalLengthMax. + ErrNameTooLong = fmt.Errorf("repository name must not be more than %v characters", NameTotalLengthMax) + + // ErrNameNotCanonical is returned when a name is not canonical. + ErrNameNotCanonical = errors.New("repository name must be canonical") +) + +// Reference is an opaque object reference identifier that may include +// modifiers such as a hostname, name, tag, and digest. +type Reference interface { + // String returns the full reference + String() string +} + +// Field provides a wrapper type for resolving correct reference types when +// working with encoding. +type Field struct { + reference Reference +} + +// AsField wraps a reference in a Field for encoding. +func AsField(reference Reference) Field { + return Field{reference} +} + +// Reference unwraps the reference type from the field to +// return the Reference object. This object should be +// of the appropriate type to further check for different +// reference types. 
+func (f Field) Reference() Reference { + return f.reference +} + +// MarshalText serializes the field to byte text which +// is the string of the reference. +func (f Field) MarshalText() (p []byte, err error) { + return []byte(f.reference.String()), nil +} + +// UnmarshalText parses text bytes by invoking the +// reference parser to ensure the appropriately +// typed reference object is wrapped by field. +func (f *Field) UnmarshalText(p []byte) error { + r, err := Parse(string(p)) + if err != nil { + return err + } + + f.reference = r + return nil +} + +// Named is an object with a full name +type Named interface { + Reference + Name() string +} + +// Tagged is an object which has a tag +type Tagged interface { + Reference + Tag() string +} + +// NamedTagged is an object including a name and tag. +type NamedTagged interface { + Named + Tag() string +} + +// Digested is an object which has a digest +// in which it can be referenced by +type Digested interface { + Reference + Digest() digest.Digest +} + +// Canonical reference is an object with a fully unique +// name including a name with domain and digest +type Canonical interface { + Named + Digest() digest.Digest +} + +// namedRepository is a reference to a repository with a name. +// A namedRepository has both domain and path components. 
+type namedRepository interface { + Named + Domain() string + Path() string +} + +// Domain returns the domain part of the Named reference +func Domain(named Named) string { + if r, ok := named.(namedRepository); ok { + return r.Domain() + } + domain, _ := splitDomain(named.Name()) + return domain +} + +// Path returns the name without the domain part of the Named reference +func Path(named Named) (name string) { + if r, ok := named.(namedRepository); ok { + return r.Path() + } + _, path := splitDomain(named.Name()) + return path +} + +func splitDomain(name string) (string, string) { + match := anchoredNameRegexp.FindStringSubmatch(name) + if len(match) != 3 { + return "", name + } + return match[1], match[2] +} + +// SplitHostname splits a named reference into a +// hostname and name string. If no valid hostname is +// found, the hostname is empty and the full value +// is returned as name +// DEPRECATED: Use Domain or Path +func SplitHostname(named Named) (string, string) { + if r, ok := named.(namedRepository); ok { + return r.Domain(), r.Path() + } + return splitDomain(named.Name()) +} + +// Parse parses s and returns a syntactically valid Reference. +// If an error was encountered it is returned, along with a nil Reference. +// NOTE: Parse will not handle short digests. 
+func Parse(s string) (Reference, error) { + matches := ReferenceRegexp.FindStringSubmatch(s) + if matches == nil { + if s == "" { + return nil, ErrNameEmpty + } + if ReferenceRegexp.FindStringSubmatch(strings.ToLower(s)) != nil { + return nil, ErrNameContainsUppercase + } + return nil, ErrReferenceInvalidFormat + } + + if len(matches[1]) > NameTotalLengthMax { + return nil, ErrNameTooLong + } + + var repo repository + + nameMatch := anchoredNameRegexp.FindStringSubmatch(matches[1]) + if len(nameMatch) == 3 { + repo.domain = nameMatch[1] + repo.path = nameMatch[2] + } else { + repo.domain = "" + repo.path = matches[1] + } + + ref := reference{ + namedRepository: repo, + tag: matches[2], + } + if matches[3] != "" { + var err error + ref.digest, err = digest.Parse(matches[3]) + if err != nil { + return nil, err + } + } + + r := getBestReferenceType(ref) + if r == nil { + return nil, ErrNameEmpty + } + + return r, nil +} + +// ParseNamed parses s and returns a syntactically valid reference implementing +// the Named interface. The reference must have a name and be in the canonical +// form, otherwise an error is returned. +// If an error was encountered it is returned, along with a nil Reference. +// NOTE: ParseNamed will not handle short digests. +func ParseNamed(s string) (Named, error) { + named, err := ParseNormalizedNamed(s) + if err != nil { + return nil, err + } + if named.String() != s { + return nil, ErrNameNotCanonical + } + return named, nil +} + +// WithName returns a named object representing the given string. If the input +// is invalid ErrReferenceInvalidFormat will be returned. 
+func WithName(name string) (Named, error) { + if len(name) > NameTotalLengthMax { + return nil, ErrNameTooLong + } + + match := anchoredNameRegexp.FindStringSubmatch(name) + if match == nil || len(match) != 3 { + return nil, ErrReferenceInvalidFormat + } + return repository{ + domain: match[1], + path: match[2], + }, nil +} + +// WithTag combines the name from "name" and the tag from "tag" to form a +// reference incorporating both the name and the tag. +func WithTag(name Named, tag string) (NamedTagged, error) { + if !anchoredTagRegexp.MatchString(tag) { + return nil, ErrTagInvalidFormat + } + var repo repository + if r, ok := name.(namedRepository); ok { + repo.domain = r.Domain() + repo.path = r.Path() + } else { + repo.path = name.Name() + } + if canonical, ok := name.(Canonical); ok { + return reference{ + namedRepository: repo, + tag: tag, + digest: canonical.Digest(), + }, nil + } + return taggedReference{ + namedRepository: repo, + tag: tag, + }, nil +} + +// WithDigest combines the name from "name" and the digest from "digest" to form +// a reference incorporating both the name and the digest. +func WithDigest(name Named, digest digest.Digest) (Canonical, error) { + if !anchoredDigestRegexp.MatchString(digest.String()) { + return nil, ErrDigestInvalidFormat + } + var repo repository + if r, ok := name.(namedRepository); ok { + repo.domain = r.Domain() + repo.path = r.Path() + } else { + repo.path = name.Name() + } + if tagged, ok := name.(Tagged); ok { + return reference{ + namedRepository: repo, + tag: tagged.Tag(), + digest: digest, + }, nil + } + return canonicalReference{ + namedRepository: repo, + digest: digest, + }, nil +} + +// TrimNamed removes any tag or digest from the named reference. 
+func TrimNamed(ref Named) Named { + domain, path := SplitHostname(ref) + return repository{ + domain: domain, + path: path, + } +} + +func getBestReferenceType(ref reference) Reference { + if ref.Name() == "" { + // Allow digest only references + if ref.digest != "" { + return digestReference(ref.digest) + } + return nil + } + if ref.tag == "" { + if ref.digest != "" { + return canonicalReference{ + namedRepository: ref.namedRepository, + digest: ref.digest, + } + } + return ref.namedRepository + } + if ref.digest == "" { + return taggedReference{ + namedRepository: ref.namedRepository, + tag: ref.tag, + } + } + + return ref +} + +type reference struct { + namedRepository + tag string + digest digest.Digest +} + +func (r reference) String() string { + return r.Name() + ":" + r.tag + "@" + r.digest.String() +} + +func (r reference) Tag() string { + return r.tag +} + +func (r reference) Digest() digest.Digest { + return r.digest +} + +type repository struct { + domain string + path string +} + +func (r repository) String() string { + return r.Name() +} + +func (r repository) Name() string { + if r.domain == "" { + return r.path + } + return r.domain + "/" + r.path +} + +func (r repository) Domain() string { + return r.domain +} + +func (r repository) Path() string { + return r.path +} + +type digestReference digest.Digest + +func (d digestReference) String() string { + return digest.Digest(d).String() +} + +func (d digestReference) Digest() digest.Digest { + return digest.Digest(d) +} + +type taggedReference struct { + namedRepository + tag string +} + +func (t taggedReference) String() string { + return t.Name() + ":" + t.tag +} + +func (t taggedReference) Tag() string { + return t.tag +} + +type canonicalReference struct { + namedRepository + digest digest.Digest +} + +func (c canonicalReference) String() string { + return c.Name() + "@" + c.digest.String() +} + +func (c canonicalReference) Digest() digest.Digest { + return c.digest +} diff --git 
a/vendor/github.com/distribution/distribution/v3/reference/regexp.go b/vendor/github.com/distribution/distribution/v3/reference/regexp.go new file mode 100644 index 0000000000..78e2f9170e --- /dev/null +++ b/vendor/github.com/distribution/distribution/v3/reference/regexp.go @@ -0,0 +1,147 @@ +package reference + +import "regexp" + +var ( + // alphaNumericRegexp defines the alpha numeric atom, typically a + // component of names. This only allows lower case characters and digits. + alphaNumericRegexp = match(`[a-z0-9]+`) + + // separatorRegexp defines the separators allowed to be embedded in name + // components. This allow one period, one or two underscore and multiple + // dashes. Repeated dashes and underscores are intentionally treated + // differently. In order to support valid hostnames as name components, + // supporting repeated dash was added. Additionally double underscore is + // now allowed as a separator to loosen the restriction for previously + // supported names. + separatorRegexp = match(`(?:[._]|__|[-]*)`) + + // nameComponentRegexp restricts registry path component names to start + // with at least one letter or number, with following parts able to be + // separated by one period, one or two underscore and multiple dashes. + nameComponentRegexp = expression( + alphaNumericRegexp, + optional(repeated(separatorRegexp, alphaNumericRegexp))) + + // domainComponentRegexp restricts the registry domain component of a + // repository name to start with a component as defined by DomainRegexp + // and followed by an optional port. + domainComponentRegexp = match(`(?:[a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9])`) + + // DomainRegexp defines the structure of potential domain components + // that may be part of image names. This is purposely a subset of what is + // allowed by DNS to ensure backwards compatibility with Docker image + // names. 
+ DomainRegexp = expression( + domainComponentRegexp, + optional(repeated(literal(`.`), domainComponentRegexp)), + optional(literal(`:`), match(`[0-9]+`))) + + // TagRegexp matches valid tag names. From docker/docker:graph/tags.go. + TagRegexp = match(`[\w][\w.-]{0,127}`) + + // anchoredTagRegexp matches valid tag names, anchored at the start and + // end of the matched string. + anchoredTagRegexp = anchored(TagRegexp) + + // DigestRegexp matches valid digests. + DigestRegexp = match(`[A-Za-z][A-Za-z0-9]*(?:[-_+.][A-Za-z][A-Za-z0-9]*)*[:][[:xdigit:]]{32,}`) + + // anchoredDigestRegexp matches valid digests, anchored at the start and + // end of the matched string. + anchoredDigestRegexp = anchored(DigestRegexp) + + // NameRegexp is the format for the name component of references. The + // regexp has capturing groups for the domain and name part omitting + // the separating forward slash from either. + NameRegexp = expression( + optional(DomainRegexp, literal(`/`)), + nameComponentRegexp, + optional(repeated(literal(`/`), nameComponentRegexp))) + + // anchoredNameRegexp is used to parse a name value, capturing the + // domain and trailing components. + anchoredNameRegexp = anchored( + optional(capture(DomainRegexp), literal(`/`)), + capture(nameComponentRegexp, + optional(repeated(literal(`/`), nameComponentRegexp)))) + + // ReferenceRegexp is the full supported format of a reference. The regexp + // is anchored and has capturing groups for name, tag, and digest + // components. + ReferenceRegexp = anchored(capture(NameRegexp), + optional(literal(":"), capture(TagRegexp)), + optional(literal("@"), capture(DigestRegexp))) + + // IdentifierRegexp is the format for string identifier used as a + // content addressable identifier using sha256. These identifiers + // are like digests without the algorithm, since sha256 is used. + IdentifierRegexp = match(`([a-f0-9]{64})`) + + // ShortIdentifierRegexp is the format used to represent a prefix + // of an identifier. 
A prefix may be used to match a sha256 identifier + // within a list of trusted identifiers. + ShortIdentifierRegexp = match(`([a-f0-9]{6,64})`) + + // anchoredIdentifierRegexp is used to check or match an + // identifier value, anchored at start and end of string. + anchoredIdentifierRegexp = anchored(IdentifierRegexp) + + // anchoredShortIdentifierRegexp is used to check if a value + // is a possible identifier prefix, anchored at start and end + // of string. + anchoredShortIdentifierRegexp = anchored(ShortIdentifierRegexp) +) + +// match compiles the string to a regular expression. +var match = regexp.MustCompile + +// literal compiles s into a literal regular expression, escaping any regexp +// reserved characters. +func literal(s string) *regexp.Regexp { + re := match(regexp.QuoteMeta(s)) + + if _, complete := re.LiteralPrefix(); !complete { + panic("must be a literal") + } + + return re +} + +// expression defines a full expression, where each regular expression must +// follow the previous. +func expression(res ...*regexp.Regexp) *regexp.Regexp { + var s string + for _, re := range res { + s += re.String() + } + + return match(s) +} + +// optional wraps the expression in a non-capturing group and makes the +// production optional. +func optional(res ...*regexp.Regexp) *regexp.Regexp { + return match(group(expression(res...)).String() + `?`) +} + +// repeated wraps the regexp in a non-capturing group to get one or more +// matches. +func repeated(res ...*regexp.Regexp) *regexp.Regexp { + return match(group(expression(res...)).String() + `+`) +} + +// group wraps the regexp in a non-capturing group. +func group(res ...*regexp.Regexp) *regexp.Regexp { + return match(`(?:` + expression(res...).String() + `)`) +} + +// capture wraps the expression in a capturing group. +func capture(res ...*regexp.Regexp) *regexp.Regexp { + return match(`(` + expression(res...).String() + `)`) +} + +// anchored anchors the regular expression by adding start and end delimiters. 
+func anchored(res ...*regexp.Regexp) *regexp.Regexp { + return match(`^` + expression(res...).String() + `$`) +} diff --git a/vendor/github.com/mattn/go-shellwords/.travis.yml b/vendor/github.com/mattn/go-shellwords/.travis.yml new file mode 100644 index 0000000000..ebd5edd898 --- /dev/null +++ b/vendor/github.com/mattn/go-shellwords/.travis.yml @@ -0,0 +1,16 @@ +arch: + - amd64 + - ppc64le +language: go +sudo: false +go: + - tip + +before_install: + - go get -t -v ./... + +script: + - ./go.test.sh + +after_success: + - bash <(curl -s https://codecov.io/bash) diff --git a/vendor/github.com/mattn/go-shellwords/LICENSE b/vendor/github.com/mattn/go-shellwords/LICENSE new file mode 100644 index 0000000000..740fa93132 --- /dev/null +++ b/vendor/github.com/mattn/go-shellwords/LICENSE @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2017 Yasuhiro Matsumoto + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. 
diff --git a/vendor/github.com/mattn/go-shellwords/README.md b/vendor/github.com/mattn/go-shellwords/README.md new file mode 100644 index 0000000000..bdd531918c --- /dev/null +++ b/vendor/github.com/mattn/go-shellwords/README.md @@ -0,0 +1,55 @@ +# go-shellwords + +[![codecov](https://codecov.io/gh/mattn/go-shellwords/branch/master/graph/badge.svg)](https://codecov.io/gh/mattn/go-shellwords) +[![Build Status](https://travis-ci.org/mattn/go-shellwords.svg?branch=master)](https://travis-ci.org/mattn/go-shellwords) +[![PkgGoDev](https://pkg.go.dev/badge/github.com/mattn/go-shellwords)](https://pkg.go.dev/github.com/mattn/go-shellwords) +[![ci](https://github.com/mattn/go-shellwords/ci/badge.svg)](https://github.com/mattn/go-shellwords/actions) + +Parse line as shell words. + +## Usage + +```go +args, err := shellwords.Parse("./foo --bar=baz") +// args should be ["./foo", "--bar=baz"] +``` + +```go +envs, args, err := shellwords.ParseWithEnvs("FOO=foo BAR=baz ./foo --bar=baz") +// envs should be ["FOO=foo", "BAR=baz"] +// args should be ["./foo", "--bar=baz"] +``` + +```go +os.Setenv("FOO", "bar") +p := shellwords.NewParser() +p.ParseEnv = true +args, err := p.Parse("./foo $FOO") +// args should be ["./foo", "bar"] +``` + +```go +p := shellwords.NewParser() +p.ParseBacktick = true +args, err := p.Parse("./foo `echo $SHELL`") +// args should be ["./foo", "/bin/bash"] +``` + +```go +shellwords.ParseBacktick = true +p := shellwords.NewParser() +args, err := p.Parse("./foo `echo $SHELL`") +// args should be ["./foo", "/bin/bash"] +``` + +# Thanks + +This is based on cpan module [Parse::CommandLine](https://metacpan.org/pod/Parse::CommandLine). 
+ +# License + +under the MIT License: http://mattn.mit-license.org/2017 + +# Author + +Yasuhiro Matsumoto (a.k.a mattn) diff --git a/vendor/github.com/mattn/go-shellwords/go.mod b/vendor/github.com/mattn/go-shellwords/go.mod new file mode 100644 index 0000000000..927c8c7d6a --- /dev/null +++ b/vendor/github.com/mattn/go-shellwords/go.mod @@ -0,0 +1,3 @@ +module github.com/mattn/go-shellwords + +go 1.13 diff --git a/vendor/github.com/mattn/go-shellwords/go.test.sh b/vendor/github.com/mattn/go-shellwords/go.test.sh new file mode 100644 index 0000000000..a7deaca96a --- /dev/null +++ b/vendor/github.com/mattn/go-shellwords/go.test.sh @@ -0,0 +1,12 @@ +#!/usr/bin/env bash + +set -e +echo "" > coverage.txt + +for d in $(go list ./... | grep -v vendor); do + go test -coverprofile=profile.out -covermode=atomic "$d" + if [ -f profile.out ]; then + cat profile.out >> coverage.txt + rm profile.out + fi +done diff --git a/vendor/github.com/mattn/go-shellwords/shellwords.go b/vendor/github.com/mattn/go-shellwords/shellwords.go new file mode 100644 index 0000000000..1b42a00170 --- /dev/null +++ b/vendor/github.com/mattn/go-shellwords/shellwords.go @@ -0,0 +1,317 @@ +package shellwords + +import ( + "bytes" + "errors" + "os" + "strings" + "unicode" +) + +var ( + ParseEnv bool = false + ParseBacktick bool = false +) + +func isSpace(r rune) bool { + switch r { + case ' ', '\t', '\r', '\n': + return true + } + return false +} + +func replaceEnv(getenv func(string) string, s string) string { + if getenv == nil { + getenv = os.Getenv + } + + var buf bytes.Buffer + rs := []rune(s) + for i := 0; i < len(rs); i++ { + r := rs[i] + if r == '\\' { + i++ + if i == len(rs) { + break + } + buf.WriteRune(rs[i]) + continue + } else if r == '$' { + i++ + if i == len(rs) { + buf.WriteRune(r) + break + } + if rs[i] == 0x7b { + i++ + p := i + for ; i < len(rs); i++ { + r = rs[i] + if r == '\\' { + i++ + if i == len(rs) { + return s + } + continue + } + if r == 0x7d || (!unicode.IsLetter(r) && r != 
'_' && !unicode.IsDigit(r)) { + break + } + } + if r != 0x7d { + return s + } + if i > p { + buf.WriteString(getenv(s[p:i])) + } + } else { + p := i + for ; i < len(rs); i++ { + r := rs[i] + if r == '\\' { + i++ + if i == len(rs) { + return s + } + continue + } + if !unicode.IsLetter(r) && r != '_' && !unicode.IsDigit(r) { + break + } + } + if i > p { + buf.WriteString(getenv(s[p:i])) + i-- + } else { + buf.WriteString(s[p:]) + } + } + } else { + buf.WriteRune(r) + } + } + return buf.String() +} + +type Parser struct { + ParseEnv bool + ParseBacktick bool + Position int + Dir string + + // If ParseEnv is true, use this for getenv. + // If nil, use os.Getenv. + Getenv func(string) string +} + +func NewParser() *Parser { + return &Parser{ + ParseEnv: ParseEnv, + ParseBacktick: ParseBacktick, + Position: 0, + Dir: "", + } +} + +type argType int + +const ( + argNo argType = iota + argSingle + argQuoted +) + +func (p *Parser) Parse(line string) ([]string, error) { + args := []string{} + buf := "" + var escaped, doubleQuoted, singleQuoted, backQuote, dollarQuote bool + backtick := "" + + pos := -1 + got := argNo + + i := -1 +loop: + for _, r := range line { + i++ + if escaped { + buf += string(r) + escaped = false + got = argSingle + continue + } + + if r == '\\' { + if singleQuoted { + buf += string(r) + } else { + escaped = true + } + continue + } + + if isSpace(r) { + if singleQuoted || doubleQuoted || backQuote || dollarQuote { + buf += string(r) + backtick += string(r) + } else if got != argNo { + if p.ParseEnv { + if got == argSingle { + parser := &Parser{ParseEnv: false, ParseBacktick: false, Position: 0, Dir: p.Dir} + strs, err := parser.Parse(replaceEnv(p.Getenv, buf)) + if err != nil { + return nil, err + } + args = append(args, strs...) 
+ } else { + args = append(args, replaceEnv(p.Getenv, buf)) + } + } else { + args = append(args, buf) + } + buf = "" + got = argNo + } + continue + } + + switch r { + case '`': + if !singleQuoted && !doubleQuoted && !dollarQuote { + if p.ParseBacktick { + if backQuote { + out, err := shellRun(backtick, p.Dir) + if err != nil { + return nil, err + } + buf = buf[:len(buf)-len(backtick)] + out + } + backtick = "" + backQuote = !backQuote + continue + } + backtick = "" + backQuote = !backQuote + } + case ')': + if !singleQuoted && !doubleQuoted && !backQuote { + if p.ParseBacktick { + if dollarQuote { + out, err := shellRun(backtick, p.Dir) + if err != nil { + return nil, err + } + buf = buf[:len(buf)-len(backtick)-2] + out + } + backtick = "" + dollarQuote = !dollarQuote + continue + } + backtick = "" + dollarQuote = !dollarQuote + } + case '(': + if !singleQuoted && !doubleQuoted && !backQuote { + if !dollarQuote && strings.HasSuffix(buf, "$") { + dollarQuote = true + buf += "(" + continue + } else { + return nil, errors.New("invalid command line string") + } + } + case '"': + if !singleQuoted && !dollarQuote { + if doubleQuoted { + got = argQuoted + } + doubleQuoted = !doubleQuoted + continue + } + case '\'': + if !doubleQuoted && !dollarQuote { + if singleQuoted { + got = argQuoted + } + singleQuoted = !singleQuoted + continue + } + case ';', '&', '|', '<', '>': + if !(escaped || singleQuoted || doubleQuoted || backQuote || dollarQuote) { + if r == '>' && len(buf) > 0 { + if c := buf[0]; '0' <= c && c <= '9' { + i -= 1 + got = argNo + } + } + pos = i + break loop + } + } + + got = argSingle + buf += string(r) + if backQuote || dollarQuote { + backtick += string(r) + } + } + + if got != argNo { + if p.ParseEnv { + if got == argSingle { + parser := &Parser{ParseEnv: false, ParseBacktick: false, Position: 0, Dir: p.Dir} + strs, err := parser.Parse(replaceEnv(p.Getenv, buf)) + if err != nil { + return nil, err + } + args = append(args, strs...) 
+ } else { + args = append(args, replaceEnv(p.Getenv, buf)) + } + } else { + args = append(args, buf) + } + } + + if escaped || singleQuoted || doubleQuoted || backQuote || dollarQuote { + return nil, errors.New("invalid command line string") + } + + p.Position = pos + + return args, nil +} + +func (p *Parser) ParseWithEnvs(line string) (envs []string, args []string, err error) { + _args, err := p.Parse(line) + if err != nil { + return nil, nil, err + } + envs = []string{} + args = []string{} + parsingEnv := true + for _, arg := range _args { + if parsingEnv && isEnv(arg) { + envs = append(envs, arg) + } else { + if parsingEnv { + parsingEnv = false + } + args = append(args, arg) + } + } + return envs, args, nil +} + +func isEnv(arg string) bool { + return len(strings.Split(arg, "=")) == 2 +} + +func Parse(line string) ([]string, error) { + return NewParser().Parse(line) +} + +func ParseWithEnvs(line string) (envs []string, args []string, err error) { + return NewParser().ParseWithEnvs(line) +} diff --git a/vendor/github.com/mattn/go-shellwords/util_posix.go b/vendor/github.com/mattn/go-shellwords/util_posix.go new file mode 100644 index 0000000000..b56a90120a --- /dev/null +++ b/vendor/github.com/mattn/go-shellwords/util_posix.go @@ -0,0 +1,29 @@ +// +build !windows + +package shellwords + +import ( + "fmt" + "os" + "os/exec" + "strings" +) + +func shellRun(line, dir string) (string, error) { + var shell string + if shell = os.Getenv("SHELL"); shell == "" { + shell = "/bin/sh" + } + cmd := exec.Command(shell, "-c", line) + if dir != "" { + cmd.Dir = dir + } + b, err := cmd.Output() + if err != nil { + if eerr, ok := err.(*exec.ExitError); ok { + b = eerr.Stderr + } + return "", fmt.Errorf("%s: %w", string(b), err) + } + return strings.TrimSpace(string(b)), nil +} diff --git a/vendor/github.com/mattn/go-shellwords/util_windows.go b/vendor/github.com/mattn/go-shellwords/util_windows.go new file mode 100644 index 0000000000..fd738a7211 --- /dev/null +++ 
b/vendor/github.com/mattn/go-shellwords/util_windows.go @@ -0,0 +1,29 @@ +// +build windows + +package shellwords + +import ( + "fmt" + "os" + "os/exec" + "strings" +) + +func shellRun(line, dir string) (string, error) { + var shell string + if shell = os.Getenv("COMSPEC"); shell == "" { + shell = "cmd" + } + cmd := exec.Command(shell, "/c", line) + if dir != "" { + cmd.Dir = dir + } + b, err := cmd.Output() + if err != nil { + if eerr, ok := err.(*exec.ExitError); ok { + b = eerr.Stderr + } + return "", fmt.Errorf("%s: %w", string(b), err) + } + return strings.TrimSpace(string(b)), nil +} diff --git a/vendor/github.com/mitchellh/mapstructure/CHANGELOG.md b/vendor/github.com/mitchellh/mapstructure/CHANGELOG.md new file mode 100644 index 0000000000..38a099162c --- /dev/null +++ b/vendor/github.com/mitchellh/mapstructure/CHANGELOG.md @@ -0,0 +1,83 @@ +## 1.4.3 + +* Fix cases where `json.Number` didn't decode properly [GH-261] + +## 1.4.2 + +* Custom name matchers to support any sort of casing, formatting, etc. for + field names. [GH-250] +* Fix possible panic in ComposeDecodeHookFunc [GH-251] + +## 1.4.1 + +* Fix regression where `*time.Time` value would be set to empty and not be sent + to decode hooks properly [GH-232] + +## 1.4.0 + +* A new decode hook type `DecodeHookFuncValue` has been added that has + access to the full values. [GH-183] +* Squash is now supported with embedded fields that are struct pointers [GH-205] +* Empty strings will convert to 0 for all numeric types when weakly decoding [GH-206] + +## 1.3.3 + +* Decoding maps from maps creates a settable value for decode hooks [GH-203] + +## 1.3.2 + +* Decode into interface type with a struct value is supported [GH-187] + +## 1.3.1 + +* Squash should only squash embedded structs. [GH-194] + +## 1.3.0 + +* Added `",omitempty"` support. This will ignore zero values in the source + structure when encoding. [GH-145] + +## 1.2.3 + +* Fix duplicate entries in Keys list with pointer values. 
[GH-185] + +## 1.2.2 + +* Do not add unsettable (unexported) values to the unused metadata key + or "remain" value. [GH-150] + +## 1.2.1 + +* Go modules checksum mismatch fix + +## 1.2.0 + +* Added support to capture unused values in a field using the `",remain"` value + in the mapstructure tag. There is an example to showcase usage. +* Added `DecoderConfig` option to always squash embedded structs +* `json.Number` can decode into `uint` types +* Empty slices are preserved and not replaced with nil slices +* Fix panic that can occur in when decoding a map into a nil slice of structs +* Improved package documentation for godoc + +## 1.1.2 + +* Fix error when decode hook decodes interface implementation into interface + type. [GH-140] + +## 1.1.1 + +* Fix panic that can happen in `decodePtr` + +## 1.1.0 + +* Added `StringToIPHookFunc` to convert `string` to `net.IP` and `net.IPNet` [GH-133] +* Support struct to struct decoding [GH-137] +* If source map value is nil, then destination map value is nil (instead of empty) +* If source slice value is nil, then destination slice value is nil (instead of empty) +* If source pointer is nil, then destination pointer is set to nil (instead of + allocated zero value of type) + +## 1.0.0 + +* Initial tagged stable release. 
diff --git a/vendor/github.com/mitchellh/mapstructure/LICENSE b/vendor/github.com/mitchellh/mapstructure/LICENSE new file mode 100644 index 0000000000..f9c841a51e --- /dev/null +++ b/vendor/github.com/mitchellh/mapstructure/LICENSE @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2013 Mitchell Hashimoto + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. diff --git a/vendor/github.com/mitchellh/mapstructure/README.md b/vendor/github.com/mitchellh/mapstructure/README.md new file mode 100644 index 0000000000..0018dc7d9f --- /dev/null +++ b/vendor/github.com/mitchellh/mapstructure/README.md @@ -0,0 +1,46 @@ +# mapstructure [![Godoc](https://godoc.org/github.com/mitchellh/mapstructure?status.svg)](https://godoc.org/github.com/mitchellh/mapstructure) + +mapstructure is a Go library for decoding generic map values to structures +and vice versa, while providing helpful error handling. + +This library is most useful when decoding values from some data stream (JSON, +Gob, etc.) 
where you don't _quite_ know the structure of the underlying data +until you read a part of it. You can therefore read a `map[string]interface{}` +and use this library to decode it into the proper underlying native Go +structure. + +## Installation + +Standard `go get`: + +``` +$ go get github.com/mitchellh/mapstructure +``` + +## Usage & Example + +For usage and examples see the [Godoc](http://godoc.org/github.com/mitchellh/mapstructure). + +The `Decode` function has examples associated with it there. + +## But Why?! + +Go offers fantastic standard libraries for decoding formats such as JSON. +The standard method is to have a struct pre-created, and populate that struct +from the bytes of the encoded format. This is great, but the problem is if +you have configuration or an encoding that changes slightly depending on +specific fields. For example, consider this JSON: + +```json +{ + "type": "person", + "name": "Mitchell" +} +``` + +Perhaps we can't populate a specific structure without first reading +the "type" field from the JSON. We could always do two passes over the +decoding of the JSON (reading the "type" first, and the rest later). +However, it is much simpler to just decode this into a `map[string]interface{}` +structure, read the "type" key, then use something like this library +to decode it into the proper structure. diff --git a/vendor/github.com/mitchellh/mapstructure/decode_hooks.go b/vendor/github.com/mitchellh/mapstructure/decode_hooks.go new file mode 100644 index 0000000000..4d4bbc733b --- /dev/null +++ b/vendor/github.com/mitchellh/mapstructure/decode_hooks.go @@ -0,0 +1,257 @@ +package mapstructure + +import ( + "encoding" + "errors" + "fmt" + "net" + "reflect" + "strconv" + "strings" + "time" +) + +// typedDecodeHook takes a raw DecodeHookFunc (an interface{}) and turns +// it into the proper DecodeHookFunc type, such as DecodeHookFuncType. 
+func typedDecodeHook(h DecodeHookFunc) DecodeHookFunc { + // Create variables here so we can reference them with the reflect pkg + var f1 DecodeHookFuncType + var f2 DecodeHookFuncKind + var f3 DecodeHookFuncValue + + // Fill in the variables into this interface and the rest is done + // automatically using the reflect package. + potential := []interface{}{f1, f2, f3} + + v := reflect.ValueOf(h) + vt := v.Type() + for _, raw := range potential { + pt := reflect.ValueOf(raw).Type() + if vt.ConvertibleTo(pt) { + return v.Convert(pt).Interface() + } + } + + return nil +} + +// DecodeHookExec executes the given decode hook. This should be used +// since it'll naturally degrade to the older backwards compatible DecodeHookFunc +// that took reflect.Kind instead of reflect.Type. +func DecodeHookExec( + raw DecodeHookFunc, + from reflect.Value, to reflect.Value) (interface{}, error) { + + switch f := typedDecodeHook(raw).(type) { + case DecodeHookFuncType: + return f(from.Type(), to.Type(), from.Interface()) + case DecodeHookFuncKind: + return f(from.Kind(), to.Kind(), from.Interface()) + case DecodeHookFuncValue: + return f(from, to) + default: + return nil, errors.New("invalid decode hook signature") + } +} + +// ComposeDecodeHookFunc creates a single DecodeHookFunc that +// automatically composes multiple DecodeHookFuncs. +// +// The composed funcs are called in order, with the result of the +// previous transformation. +func ComposeDecodeHookFunc(fs ...DecodeHookFunc) DecodeHookFunc { + return func(f reflect.Value, t reflect.Value) (interface{}, error) { + var err error + data := f.Interface() + + newFrom := f + for _, f1 := range fs { + data, err = DecodeHookExec(f1, newFrom, t) + if err != nil { + return nil, err + } + newFrom = reflect.ValueOf(data) + } + + return data, nil + } +} + +// StringToSliceHookFunc returns a DecodeHookFunc that converts +// string to []string by splitting on the given sep. 
+func StringToSliceHookFunc(sep string) DecodeHookFunc { + return func( + f reflect.Kind, + t reflect.Kind, + data interface{}) (interface{}, error) { + if f != reflect.String || t != reflect.Slice { + return data, nil + } + + raw := data.(string) + if raw == "" { + return []string{}, nil + } + + return strings.Split(raw, sep), nil + } +} + +// StringToTimeDurationHookFunc returns a DecodeHookFunc that converts +// strings to time.Duration. +func StringToTimeDurationHookFunc() DecodeHookFunc { + return func( + f reflect.Type, + t reflect.Type, + data interface{}) (interface{}, error) { + if f.Kind() != reflect.String { + return data, nil + } + if t != reflect.TypeOf(time.Duration(5)) { + return data, nil + } + + // Convert it by parsing + return time.ParseDuration(data.(string)) + } +} + +// StringToIPHookFunc returns a DecodeHookFunc that converts +// strings to net.IP +func StringToIPHookFunc() DecodeHookFunc { + return func( + f reflect.Type, + t reflect.Type, + data interface{}) (interface{}, error) { + if f.Kind() != reflect.String { + return data, nil + } + if t != reflect.TypeOf(net.IP{}) { + return data, nil + } + + // Convert it by parsing + ip := net.ParseIP(data.(string)) + if ip == nil { + return net.IP{}, fmt.Errorf("failed parsing ip %v", data) + } + + return ip, nil + } +} + +// StringToIPNetHookFunc returns a DecodeHookFunc that converts +// strings to net.IPNet +func StringToIPNetHookFunc() DecodeHookFunc { + return func( + f reflect.Type, + t reflect.Type, + data interface{}) (interface{}, error) { + if f.Kind() != reflect.String { + return data, nil + } + if t != reflect.TypeOf(net.IPNet{}) { + return data, nil + } + + // Convert it by parsing + _, net, err := net.ParseCIDR(data.(string)) + return net, err + } +} + +// StringToTimeHookFunc returns a DecodeHookFunc that converts +// strings to time.Time. 
+func StringToTimeHookFunc(layout string) DecodeHookFunc { + return func( + f reflect.Type, + t reflect.Type, + data interface{}) (interface{}, error) { + if f.Kind() != reflect.String { + return data, nil + } + if t != reflect.TypeOf(time.Time{}) { + return data, nil + } + + // Convert it by parsing + return time.Parse(layout, data.(string)) + } +} + +// WeaklyTypedHook is a DecodeHookFunc which adds support for weak typing to +// the decoder. +// +// Note that this is significantly different from the WeaklyTypedInput option +// of the DecoderConfig. +func WeaklyTypedHook( + f reflect.Kind, + t reflect.Kind, + data interface{}) (interface{}, error) { + dataVal := reflect.ValueOf(data) + switch t { + case reflect.String: + switch f { + case reflect.Bool: + if dataVal.Bool() { + return "1", nil + } + return "0", nil + case reflect.Float32: + return strconv.FormatFloat(dataVal.Float(), 'f', -1, 64), nil + case reflect.Int: + return strconv.FormatInt(dataVal.Int(), 10), nil + case reflect.Slice: + dataType := dataVal.Type() + elemKind := dataType.Elem().Kind() + if elemKind == reflect.Uint8 { + return string(dataVal.Interface().([]uint8)), nil + } + case reflect.Uint: + return strconv.FormatUint(dataVal.Uint(), 10), nil + } + } + + return data, nil +} + +func RecursiveStructToMapHookFunc() DecodeHookFunc { + return func(f reflect.Value, t reflect.Value) (interface{}, error) { + if f.Kind() != reflect.Struct { + return f.Interface(), nil + } + + var i interface{} = struct{}{} + if t.Type() != reflect.TypeOf(&i).Elem() { + return f.Interface(), nil + } + + m := make(map[string]interface{}) + t.Set(reflect.ValueOf(m)) + + return f.Interface(), nil + } +} + +// TextUnmarshallerHookFunc returns a DecodeHookFunc that applies +// strings to the UnmarshalText function, when the target type +// implements the encoding.TextUnmarshaler interface +func TextUnmarshallerHookFunc() DecodeHookFuncType { + return func( + f reflect.Type, + t reflect.Type, + data interface{}) 
(interface{}, error) { + if f.Kind() != reflect.String { + return data, nil + } + result := reflect.New(t).Interface() + unmarshaller, ok := result.(encoding.TextUnmarshaler) + if !ok { + return data, nil + } + if err := unmarshaller.UnmarshalText([]byte(data.(string))); err != nil { + return nil, err + } + return result, nil + } +} diff --git a/vendor/github.com/mitchellh/mapstructure/error.go b/vendor/github.com/mitchellh/mapstructure/error.go new file mode 100644 index 0000000000..47a99e5af3 --- /dev/null +++ b/vendor/github.com/mitchellh/mapstructure/error.go @@ -0,0 +1,50 @@ +package mapstructure + +import ( + "errors" + "fmt" + "sort" + "strings" +) + +// Error implements the error interface and can represents multiple +// errors that occur in the course of a single decode. +type Error struct { + Errors []string +} + +func (e *Error) Error() string { + points := make([]string, len(e.Errors)) + for i, err := range e.Errors { + points[i] = fmt.Sprintf("* %s", err) + } + + sort.Strings(points) + return fmt.Sprintf( + "%d error(s) decoding:\n\n%s", + len(e.Errors), strings.Join(points, "\n")) +} + +// WrappedErrors implements the errwrap.Wrapper interface to make this +// return value more useful with the errwrap and go-multierror libraries. +func (e *Error) WrappedErrors() []error { + if e == nil { + return nil + } + + result := make([]error, len(e.Errors)) + for i, e := range e.Errors { + result[i] = errors.New(e) + } + + return result +} + +func appendErrors(errors []string, err error) []string { + switch e := err.(type) { + case *Error: + return append(errors, e.Errors...) 
+ default: + return append(errors, e.Error()) + } +} diff --git a/vendor/github.com/mitchellh/mapstructure/go.mod b/vendor/github.com/mitchellh/mapstructure/go.mod new file mode 100644 index 0000000000..a03ae97308 --- /dev/null +++ b/vendor/github.com/mitchellh/mapstructure/go.mod @@ -0,0 +1,3 @@ +module github.com/mitchellh/mapstructure + +go 1.14 diff --git a/vendor/github.com/mitchellh/mapstructure/mapstructure.go b/vendor/github.com/mitchellh/mapstructure/mapstructure.go new file mode 100644 index 0000000000..6b81b00679 --- /dev/null +++ b/vendor/github.com/mitchellh/mapstructure/mapstructure.go @@ -0,0 +1,1467 @@ +// Package mapstructure exposes functionality to convert one arbitrary +// Go type into another, typically to convert a map[string]interface{} +// into a native Go structure. +// +// The Go structure can be arbitrarily complex, containing slices, +// other structs, etc. and the decoder will properly decode nested +// maps and so on into the proper structures in the native Go struct. +// See the examples to see what the decoder is capable of. +// +// The simplest function to start with is Decode. +// +// Field Tags +// +// When decoding to a struct, mapstructure will use the field name by +// default to perform the mapping. For example, if a struct has a field +// "Username" then mapstructure will look for a key in the source value +// of "username" (case insensitive). +// +// type User struct { +// Username string +// } +// +// You can change the behavior of mapstructure by using struct tags. +// The default struct tag that mapstructure looks for is "mapstructure" +// but you can customize it using DecoderConfig. +// +// Renaming Fields +// +// To rename the key that mapstructure looks for, use the "mapstructure" +// tag and set a value directly. 
For example, to change the "username" example +// above to "user": +// +// type User struct { +// Username string `mapstructure:"user"` +// } +// +// Embedded Structs and Squashing +// +// Embedded structs are treated as if they're another field with that name. +// By default, the two structs below are equivalent when decoding with +// mapstructure: +// +// type Person struct { +// Name string +// } +// +// type Friend struct { +// Person +// } +// +// type Friend struct { +// Person Person +// } +// +// This would require an input that looks like below: +// +// map[string]interface{}{ +// "person": map[string]interface{}{"name": "alice"}, +// } +// +// If your "person" value is NOT nested, then you can append ",squash" to +// your tag value and mapstructure will treat it as if the embedded struct +// were part of the struct directly. Example: +// +// type Friend struct { +// Person `mapstructure:",squash"` +// } +// +// Now the following input would be accepted: +// +// map[string]interface{}{ +// "name": "alice", +// } +// +// When decoding from a struct to a map, the squash tag squashes the struct +// fields into a single map. Using the example structs from above: +// +// Friend{Person: Person{Name: "alice"}} +// +// Will be decoded into a map: +// +// map[string]interface{}{ +// "name": "alice", +// } +// +// DecoderConfig has a field that changes the behavior of mapstructure +// to always squash embedded structs. +// +// Remainder Values +// +// If there are any unmapped keys in the source value, mapstructure by +// default will silently ignore them. You can error by setting ErrorUnused +// in DecoderConfig. If you're using Metadata you can also maintain a slice +// of the unused keys. +// +// You can also use the ",remain" suffix on your tag to collect all unused +// values in a map. The field with this tag MUST be a map type and should +// probably be a "map[string]interface{}" or "map[interface{}]interface{}". 
+// See example below: +// +// type Friend struct { +// Name string +// Other map[string]interface{} `mapstructure:",remain"` +// } +// +// Given the input below, Other would be populated with the other +// values that weren't used (everything but "name"): +// +// map[string]interface{}{ +// "name": "bob", +// "address": "123 Maple St.", +// } +// +// Omit Empty Values +// +// When decoding from a struct to any other value, you may use the +// ",omitempty" suffix on your tag to omit that value if it equates to +// the zero value. The zero value of all types is specified in the Go +// specification. +// +// For example, the zero type of a numeric type is zero ("0"). If the struct +// field value is zero and a numeric type, the field is empty, and it won't +// be encoded into the destination type. +// +// type Source { +// Age int `mapstructure:",omitempty"` +// } +// +// Unexported fields +// +// Since unexported (private) struct fields cannot be set outside the package +// where they are defined, the decoder will simply skip them. +// +// For this output type definition: +// +// type Exported struct { +// private string // this unexported field will be skipped +// Public string +// } +// +// Using this map as input: +// +// map[string]interface{}{ +// "private": "I will be ignored", +// "Public": "I made it through!", +// } +// +// The following struct will be decoded: +// +// type Exported struct { +// private: "" // field is left with an empty string (zero value) +// Public: "I made it through!" +// } +// +// Other Configuration +// +// mapstructure is highly configurable. See the DecoderConfig struct +// for other features and options that are supported. +package mapstructure + +import ( + "encoding/json" + "errors" + "fmt" + "reflect" + "sort" + "strconv" + "strings" +) + +// DecodeHookFunc is the callback function that can be used for +// data transformations. See "DecodeHook" in the DecoderConfig +// struct. 
+// +// The type must be one of DecodeHookFuncType, DecodeHookFuncKind, or +// DecodeHookFuncValue. +// Values are a superset of Types (Values can return types), and Types are a +// superset of Kinds (Types can return Kinds) and are generally a richer thing +// to use, but Kinds are simpler if you only need those. +// +// The reason DecodeHookFunc is multi-typed is for backwards compatibility: +// we started with Kinds and then realized Types were the better solution, +// but have a promise to not break backwards compat so we now support +// both. +type DecodeHookFunc interface{} + +// DecodeHookFuncType is a DecodeHookFunc which has complete information about +// the source and target types. +type DecodeHookFuncType func(reflect.Type, reflect.Type, interface{}) (interface{}, error) + +// DecodeHookFuncKind is a DecodeHookFunc which knows only the Kinds of the +// source and target types. +type DecodeHookFuncKind func(reflect.Kind, reflect.Kind, interface{}) (interface{}, error) + +// DecodeHookFuncValue is a DecodeHookFunc which has complete access to both the source and target +// values. +type DecodeHookFuncValue func(from reflect.Value, to reflect.Value) (interface{}, error) + +// DecoderConfig is the configuration that is used to create a new decoder +// and allows customization of various aspects of decoding. +type DecoderConfig struct { + // DecodeHook, if set, will be called before any decoding and any + // type conversion (if WeaklyTypedInput is on). This lets you modify + // the values before they're set down onto the resulting struct. The + // DecodeHook is called for every map and value in the input. This means + // that if a struct has embedded fields with squash tags the decode hook + // is called only once with all of the input data, not once for each + // embedded struct. + // + // If an error is returned, the entire decode will fail with that error. 
+ DecodeHook DecodeHookFunc + + // If ErrorUnused is true, then it is an error for there to exist + // keys in the original map that were unused in the decoding process + // (extra keys). + ErrorUnused bool + + // ZeroFields, if set to true, will zero fields before writing them. + // For example, a map will be emptied before decoded values are put in + // it. If this is false, a map will be merged. + ZeroFields bool + + // If WeaklyTypedInput is true, the decoder will make the following + // "weak" conversions: + // + // - bools to string (true = "1", false = "0") + // - numbers to string (base 10) + // - bools to int/uint (true = 1, false = 0) + // - strings to int/uint (base implied by prefix) + // - int to bool (true if value != 0) + // - string to bool (accepts: 1, t, T, TRUE, true, True, 0, f, F, + // FALSE, false, False. Anything else is an error) + // - empty array = empty map and vice versa + // - negative numbers to overflowed uint values (base 10) + // - slice of maps to a merged map + // - single values are converted to slices if required. Each + // element is weakly decoded. For example: "4" can become []int{4} + // if the target type is an int slice. + // + WeaklyTypedInput bool + + // Squash will squash embedded structs. A squash tag may also be + // added to an individual struct field using a tag. For example: + // + // type Parent struct { + // Child `mapstructure:",squash"` + // } + Squash bool + + // Metadata is the struct that will contain extra metadata about + // the decoding. If this is nil, then no metadata will be tracked. + Metadata *Metadata + + // Result is a pointer to the struct that will contain the decoded + // value. + Result interface{} + + // The tag name that mapstructure reads for field names. This + // defaults to "mapstructure" + TagName string + + // MatchName is the function used to match the map key to the struct + // field name or tag. Defaults to `strings.EqualFold`. 
This can be used + // to implement case-sensitive tag values, support snake casing, etc. + MatchName func(mapKey, fieldName string) bool +} + +// A Decoder takes a raw interface value and turns it into structured +// data, keeping track of rich error information along the way in case +// anything goes wrong. Unlike the basic top-level Decode method, you can +// more finely control how the Decoder behaves using the DecoderConfig +// structure. The top-level Decode method is just a convenience that sets +// up the most basic Decoder. +type Decoder struct { + config *DecoderConfig +} + +// Metadata contains information about decoding a structure that +// is tedious or difficult to get otherwise. +type Metadata struct { + // Keys are the keys of the structure which were successfully decoded + Keys []string + + // Unused is a slice of keys that were found in the raw value but + // weren't decoded since there was no matching field in the result interface + Unused []string +} + +// Decode takes an input structure and uses reflection to translate it to +// the output structure. output must be a pointer to a map or struct. +func Decode(input interface{}, output interface{}) error { + config := &DecoderConfig{ + Metadata: nil, + Result: output, + } + + decoder, err := NewDecoder(config) + if err != nil { + return err + } + + return decoder.Decode(input) +} + +// WeakDecode is the same as Decode but is shorthand to enable +// WeaklyTypedInput. See DecoderConfig for more info. +func WeakDecode(input, output interface{}) error { + config := &DecoderConfig{ + Metadata: nil, + Result: output, + WeaklyTypedInput: true, + } + + decoder, err := NewDecoder(config) + if err != nil { + return err + } + + return decoder.Decode(input) +} + +// DecodeMetadata is the same as Decode, but is shorthand to +// enable metadata collection. See DecoderConfig for more info. 
+func DecodeMetadata(input interface{}, output interface{}, metadata *Metadata) error { + config := &DecoderConfig{ + Metadata: metadata, + Result: output, + } + + decoder, err := NewDecoder(config) + if err != nil { + return err + } + + return decoder.Decode(input) +} + +// WeakDecodeMetadata is the same as Decode, but is shorthand to +// enable both WeaklyTypedInput and metadata collection. See +// DecoderConfig for more info. +func WeakDecodeMetadata(input interface{}, output interface{}, metadata *Metadata) error { + config := &DecoderConfig{ + Metadata: metadata, + Result: output, + WeaklyTypedInput: true, + } + + decoder, err := NewDecoder(config) + if err != nil { + return err + } + + return decoder.Decode(input) +} + +// NewDecoder returns a new decoder for the given configuration. Once +// a decoder has been returned, the same configuration must not be used +// again. +func NewDecoder(config *DecoderConfig) (*Decoder, error) { + val := reflect.ValueOf(config.Result) + if val.Kind() != reflect.Ptr { + return nil, errors.New("result must be a pointer") + } + + val = val.Elem() + if !val.CanAddr() { + return nil, errors.New("result must be addressable (a pointer)") + } + + if config.Metadata != nil { + if config.Metadata.Keys == nil { + config.Metadata.Keys = make([]string, 0) + } + + if config.Metadata.Unused == nil { + config.Metadata.Unused = make([]string, 0) + } + } + + if config.TagName == "" { + config.TagName = "mapstructure" + } + + if config.MatchName == nil { + config.MatchName = strings.EqualFold + } + + result := &Decoder{ + config: config, + } + + return result, nil +} + +// Decode decodes the given raw interface to the target pointer specified +// by the configuration. +func (d *Decoder) Decode(input interface{}) error { + return d.decode("", input, reflect.ValueOf(d.config.Result).Elem()) +} + +// Decodes an unknown data type into a specific reflection value. 
+func (d *Decoder) decode(name string, input interface{}, outVal reflect.Value) error { + var inputVal reflect.Value + if input != nil { + inputVal = reflect.ValueOf(input) + + // We need to check here if input is a typed nil. Typed nils won't + // match the "input == nil" below so we check that here. + if inputVal.Kind() == reflect.Ptr && inputVal.IsNil() { + input = nil + } + } + + if input == nil { + // If the data is nil, then we don't set anything, unless ZeroFields is set + // to true. + if d.config.ZeroFields { + outVal.Set(reflect.Zero(outVal.Type())) + + if d.config.Metadata != nil && name != "" { + d.config.Metadata.Keys = append(d.config.Metadata.Keys, name) + } + } + return nil + } + + if !inputVal.IsValid() { + // If the input value is invalid, then we just set the value + // to be the zero value. + outVal.Set(reflect.Zero(outVal.Type())) + if d.config.Metadata != nil && name != "" { + d.config.Metadata.Keys = append(d.config.Metadata.Keys, name) + } + return nil + } + + if d.config.DecodeHook != nil { + // We have a DecodeHook, so let's pre-process the input. 
+ var err error + input, err = DecodeHookExec(d.config.DecodeHook, inputVal, outVal) + if err != nil { + return fmt.Errorf("error decoding '%s': %s", name, err) + } + } + + var err error + outputKind := getKind(outVal) + addMetaKey := true + switch outputKind { + case reflect.Bool: + err = d.decodeBool(name, input, outVal) + case reflect.Interface: + err = d.decodeBasic(name, input, outVal) + case reflect.String: + err = d.decodeString(name, input, outVal) + case reflect.Int: + err = d.decodeInt(name, input, outVal) + case reflect.Uint: + err = d.decodeUint(name, input, outVal) + case reflect.Float32: + err = d.decodeFloat(name, input, outVal) + case reflect.Struct: + err = d.decodeStruct(name, input, outVal) + case reflect.Map: + err = d.decodeMap(name, input, outVal) + case reflect.Ptr: + addMetaKey, err = d.decodePtr(name, input, outVal) + case reflect.Slice: + err = d.decodeSlice(name, input, outVal) + case reflect.Array: + err = d.decodeArray(name, input, outVal) + case reflect.Func: + err = d.decodeFunc(name, input, outVal) + default: + // If we reached this point then we weren't able to decode it + return fmt.Errorf("%s: unsupported type: %s", name, outputKind) + } + + // If we reached here, then we successfully decoded SOMETHING, so + // mark the key as used if we're tracking metainput. + if addMetaKey && d.config.Metadata != nil && name != "" { + d.config.Metadata.Keys = append(d.config.Metadata.Keys, name) + } + + return err +} + +// This decodes a basic type (bool, int, string, etc.) and sets the +// value to "data" of that type. +func (d *Decoder) decodeBasic(name string, data interface{}, val reflect.Value) error { + if val.IsValid() && val.Elem().IsValid() { + elem := val.Elem() + + // If we can't address this element, then its not writable. Instead, + // we make a copy of the value (which is a pointer and therefore + // writable), decode into that, and replace the whole value. 
+ copied := false + if !elem.CanAddr() { + copied = true + + // Make *T + copy := reflect.New(elem.Type()) + + // *T = elem + copy.Elem().Set(elem) + + // Set elem so we decode into it + elem = copy + } + + // Decode. If we have an error then return. We also return right + // away if we're not a copy because that means we decoded directly. + if err := d.decode(name, data, elem); err != nil || !copied { + return err + } + + // If we're a copy, we need to set te final result + val.Set(elem.Elem()) + return nil + } + + dataVal := reflect.ValueOf(data) + + // If the input data is a pointer, and the assigned type is the dereference + // of that exact pointer, then indirect it so that we can assign it. + // Example: *string to string + if dataVal.Kind() == reflect.Ptr && dataVal.Type().Elem() == val.Type() { + dataVal = reflect.Indirect(dataVal) + } + + if !dataVal.IsValid() { + dataVal = reflect.Zero(val.Type()) + } + + dataValType := dataVal.Type() + if !dataValType.AssignableTo(val.Type()) { + return fmt.Errorf( + "'%s' expected type '%s', got '%s'", + name, val.Type(), dataValType) + } + + val.Set(dataVal) + return nil +} + +func (d *Decoder) decodeString(name string, data interface{}, val reflect.Value) error { + dataVal := reflect.Indirect(reflect.ValueOf(data)) + dataKind := getKind(dataVal) + + converted := true + switch { + case dataKind == reflect.String: + val.SetString(dataVal.String()) + case dataKind == reflect.Bool && d.config.WeaklyTypedInput: + if dataVal.Bool() { + val.SetString("1") + } else { + val.SetString("0") + } + case dataKind == reflect.Int && d.config.WeaklyTypedInput: + val.SetString(strconv.FormatInt(dataVal.Int(), 10)) + case dataKind == reflect.Uint && d.config.WeaklyTypedInput: + val.SetString(strconv.FormatUint(dataVal.Uint(), 10)) + case dataKind == reflect.Float32 && d.config.WeaklyTypedInput: + val.SetString(strconv.FormatFloat(dataVal.Float(), 'f', -1, 64)) + case dataKind == reflect.Slice && d.config.WeaklyTypedInput, + dataKind == 
reflect.Array && d.config.WeaklyTypedInput: + dataType := dataVal.Type() + elemKind := dataType.Elem().Kind() + switch elemKind { + case reflect.Uint8: + var uints []uint8 + if dataKind == reflect.Array { + uints = make([]uint8, dataVal.Len(), dataVal.Len()) + for i := range uints { + uints[i] = dataVal.Index(i).Interface().(uint8) + } + } else { + uints = dataVal.Interface().([]uint8) + } + val.SetString(string(uints)) + default: + converted = false + } + default: + converted = false + } + + if !converted { + return fmt.Errorf( + "'%s' expected type '%s', got unconvertible type '%s', value: '%v'", + name, val.Type(), dataVal.Type(), data) + } + + return nil +} + +func (d *Decoder) decodeInt(name string, data interface{}, val reflect.Value) error { + dataVal := reflect.Indirect(reflect.ValueOf(data)) + dataKind := getKind(dataVal) + dataType := dataVal.Type() + + switch { + case dataKind == reflect.Int: + val.SetInt(dataVal.Int()) + case dataKind == reflect.Uint: + val.SetInt(int64(dataVal.Uint())) + case dataKind == reflect.Float32: + val.SetInt(int64(dataVal.Float())) + case dataKind == reflect.Bool && d.config.WeaklyTypedInput: + if dataVal.Bool() { + val.SetInt(1) + } else { + val.SetInt(0) + } + case dataKind == reflect.String && d.config.WeaklyTypedInput: + str := dataVal.String() + if str == "" { + str = "0" + } + + i, err := strconv.ParseInt(str, 0, val.Type().Bits()) + if err == nil { + val.SetInt(i) + } else { + return fmt.Errorf("cannot parse '%s' as int: %s", name, err) + } + case dataType.PkgPath() == "encoding/json" && dataType.Name() == "Number": + jn := data.(json.Number) + i, err := jn.Int64() + if err != nil { + return fmt.Errorf( + "error decoding json.Number into %s: %s", name, err) + } + val.SetInt(i) + default: + return fmt.Errorf( + "'%s' expected type '%s', got unconvertible type '%s', value: '%v'", + name, val.Type(), dataVal.Type(), data) + } + + return nil +} + +func (d *Decoder) decodeUint(name string, data interface{}, val 
reflect.Value) error { + dataVal := reflect.Indirect(reflect.ValueOf(data)) + dataKind := getKind(dataVal) + dataType := dataVal.Type() + + switch { + case dataKind == reflect.Int: + i := dataVal.Int() + if i < 0 && !d.config.WeaklyTypedInput { + return fmt.Errorf("cannot parse '%s', %d overflows uint", + name, i) + } + val.SetUint(uint64(i)) + case dataKind == reflect.Uint: + val.SetUint(dataVal.Uint()) + case dataKind == reflect.Float32: + f := dataVal.Float() + if f < 0 && !d.config.WeaklyTypedInput { + return fmt.Errorf("cannot parse '%s', %f overflows uint", + name, f) + } + val.SetUint(uint64(f)) + case dataKind == reflect.Bool && d.config.WeaklyTypedInput: + if dataVal.Bool() { + val.SetUint(1) + } else { + val.SetUint(0) + } + case dataKind == reflect.String && d.config.WeaklyTypedInput: + str := dataVal.String() + if str == "" { + str = "0" + } + + i, err := strconv.ParseUint(str, 0, val.Type().Bits()) + if err == nil { + val.SetUint(i) + } else { + return fmt.Errorf("cannot parse '%s' as uint: %s", name, err) + } + case dataType.PkgPath() == "encoding/json" && dataType.Name() == "Number": + jn := data.(json.Number) + i, err := strconv.ParseUint(string(jn), 0, 64) + if err != nil { + return fmt.Errorf( + "error decoding json.Number into %s: %s", name, err) + } + val.SetUint(i) + default: + return fmt.Errorf( + "'%s' expected type '%s', got unconvertible type '%s', value: '%v'", + name, val.Type(), dataVal.Type(), data) + } + + return nil +} + +func (d *Decoder) decodeBool(name string, data interface{}, val reflect.Value) error { + dataVal := reflect.Indirect(reflect.ValueOf(data)) + dataKind := getKind(dataVal) + + switch { + case dataKind == reflect.Bool: + val.SetBool(dataVal.Bool()) + case dataKind == reflect.Int && d.config.WeaklyTypedInput: + val.SetBool(dataVal.Int() != 0) + case dataKind == reflect.Uint && d.config.WeaklyTypedInput: + val.SetBool(dataVal.Uint() != 0) + case dataKind == reflect.Float32 && d.config.WeaklyTypedInput: + 
val.SetBool(dataVal.Float() != 0) + case dataKind == reflect.String && d.config.WeaklyTypedInput: + b, err := strconv.ParseBool(dataVal.String()) + if err == nil { + val.SetBool(b) + } else if dataVal.String() == "" { + val.SetBool(false) + } else { + return fmt.Errorf("cannot parse '%s' as bool: %s", name, err) + } + default: + return fmt.Errorf( + "'%s' expected type '%s', got unconvertible type '%s', value: '%v'", + name, val.Type(), dataVal.Type(), data) + } + + return nil +} + +func (d *Decoder) decodeFloat(name string, data interface{}, val reflect.Value) error { + dataVal := reflect.Indirect(reflect.ValueOf(data)) + dataKind := getKind(dataVal) + dataType := dataVal.Type() + + switch { + case dataKind == reflect.Int: + val.SetFloat(float64(dataVal.Int())) + case dataKind == reflect.Uint: + val.SetFloat(float64(dataVal.Uint())) + case dataKind == reflect.Float32: + val.SetFloat(dataVal.Float()) + case dataKind == reflect.Bool && d.config.WeaklyTypedInput: + if dataVal.Bool() { + val.SetFloat(1) + } else { + val.SetFloat(0) + } + case dataKind == reflect.String && d.config.WeaklyTypedInput: + str := dataVal.String() + if str == "" { + str = "0" + } + + f, err := strconv.ParseFloat(str, val.Type().Bits()) + if err == nil { + val.SetFloat(f) + } else { + return fmt.Errorf("cannot parse '%s' as float: %s", name, err) + } + case dataType.PkgPath() == "encoding/json" && dataType.Name() == "Number": + jn := data.(json.Number) + i, err := jn.Float64() + if err != nil { + return fmt.Errorf( + "error decoding json.Number into %s: %s", name, err) + } + val.SetFloat(i) + default: + return fmt.Errorf( + "'%s' expected type '%s', got unconvertible type '%s', value: '%v'", + name, val.Type(), dataVal.Type(), data) + } + + return nil +} + +func (d *Decoder) decodeMap(name string, data interface{}, val reflect.Value) error { + valType := val.Type() + valKeyType := valType.Key() + valElemType := valType.Elem() + + // By default we overwrite keys in the current map + valMap := 
val + + // If the map is nil or we're purposely zeroing fields, make a new map + if valMap.IsNil() || d.config.ZeroFields { + // Make a new map to hold our result + mapType := reflect.MapOf(valKeyType, valElemType) + valMap = reflect.MakeMap(mapType) + } + + // Check input type and based on the input type jump to the proper func + dataVal := reflect.Indirect(reflect.ValueOf(data)) + switch dataVal.Kind() { + case reflect.Map: + return d.decodeMapFromMap(name, dataVal, val, valMap) + + case reflect.Struct: + return d.decodeMapFromStruct(name, dataVal, val, valMap) + + case reflect.Array, reflect.Slice: + if d.config.WeaklyTypedInput { + return d.decodeMapFromSlice(name, dataVal, val, valMap) + } + + fallthrough + + default: + return fmt.Errorf("'%s' expected a map, got '%s'", name, dataVal.Kind()) + } +} + +func (d *Decoder) decodeMapFromSlice(name string, dataVal reflect.Value, val reflect.Value, valMap reflect.Value) error { + // Special case for BC reasons (covered by tests) + if dataVal.Len() == 0 { + val.Set(valMap) + return nil + } + + for i := 0; i < dataVal.Len(); i++ { + err := d.decode( + name+"["+strconv.Itoa(i)+"]", + dataVal.Index(i).Interface(), val) + if err != nil { + return err + } + } + + return nil +} + +func (d *Decoder) decodeMapFromMap(name string, dataVal reflect.Value, val reflect.Value, valMap reflect.Value) error { + valType := val.Type() + valKeyType := valType.Key() + valElemType := valType.Elem() + + // Accumulate errors + errors := make([]string, 0) + + // If the input data is empty, then we just match what the input data is. 
+ if dataVal.Len() == 0 { + if dataVal.IsNil() { + if !val.IsNil() { + val.Set(dataVal) + } + } else { + // Set to empty allocated value + val.Set(valMap) + } + + return nil + } + + for _, k := range dataVal.MapKeys() { + fieldName := name + "[" + k.String() + "]" + + // First decode the key into the proper type + currentKey := reflect.Indirect(reflect.New(valKeyType)) + if err := d.decode(fieldName, k.Interface(), currentKey); err != nil { + errors = appendErrors(errors, err) + continue + } + + // Next decode the data into the proper type + v := dataVal.MapIndex(k).Interface() + currentVal := reflect.Indirect(reflect.New(valElemType)) + if err := d.decode(fieldName, v, currentVal); err != nil { + errors = appendErrors(errors, err) + continue + } + + valMap.SetMapIndex(currentKey, currentVal) + } + + // Set the built up map to the value + val.Set(valMap) + + // If we had errors, return those + if len(errors) > 0 { + return &Error{errors} + } + + return nil +} + +func (d *Decoder) decodeMapFromStruct(name string, dataVal reflect.Value, val reflect.Value, valMap reflect.Value) error { + typ := dataVal.Type() + for i := 0; i < typ.NumField(); i++ { + // Get the StructField first since this is a cheap operation. If the + // field is unexported, then ignore it. + f := typ.Field(i) + if f.PkgPath != "" { + continue + } + + // Next get the actual value of this field and verify it is assignable + // to the map value. + v := dataVal.Field(i) + if !v.Type().AssignableTo(valMap.Type().Elem()) { + return fmt.Errorf("cannot assign type '%s' to map value field of type '%s'", v.Type(), valMap.Type().Elem()) + } + + tagValue := f.Tag.Get(d.config.TagName) + keyName := f.Name + + // If Squash is set in the config, we squash the field down. 
+ squash := d.config.Squash && v.Kind() == reflect.Struct && f.Anonymous + + // Determine the name of the key in the map + if index := strings.Index(tagValue, ","); index != -1 { + if tagValue[:index] == "-" { + continue + } + // If "omitempty" is specified in the tag, it ignores empty values. + if strings.Index(tagValue[index+1:], "omitempty") != -1 && isEmptyValue(v) { + continue + } + + // If "squash" is specified in the tag, we squash the field down. + squash = !squash && strings.Index(tagValue[index+1:], "squash") != -1 + if squash { + // When squashing, the embedded type can be a pointer to a struct. + if v.Kind() == reflect.Ptr && v.Elem().Kind() == reflect.Struct { + v = v.Elem() + } + + // The final type must be a struct + if v.Kind() != reflect.Struct { + return fmt.Errorf("cannot squash non-struct type '%s'", v.Type()) + } + } + keyName = tagValue[:index] + } else if len(tagValue) > 0 { + if tagValue == "-" { + continue + } + keyName = tagValue + } + + switch v.Kind() { + // this is an embedded struct, so handle it differently + case reflect.Struct: + x := reflect.New(v.Type()) + x.Elem().Set(v) + + vType := valMap.Type() + vKeyType := vType.Key() + vElemType := vType.Elem() + mType := reflect.MapOf(vKeyType, vElemType) + vMap := reflect.MakeMap(mType) + + // Creating a pointer to a map so that other methods can completely + // overwrite the map if need be (looking at you decodeMapFromMap). The + // indirection allows the underlying map to be settable (CanSet() == true) + // where as reflect.MakeMap returns an unsettable map. + addrVal := reflect.New(vMap.Type()) + reflect.Indirect(addrVal).Set(vMap) + + err := d.decode(keyName, x.Interface(), reflect.Indirect(addrVal)) + if err != nil { + return err + } + + // the underlying map may have been completely overwritten so pull + // it indirectly out of the enclosing value. 
+ vMap = reflect.Indirect(addrVal) + + if squash { + for _, k := range vMap.MapKeys() { + valMap.SetMapIndex(k, vMap.MapIndex(k)) + } + } else { + valMap.SetMapIndex(reflect.ValueOf(keyName), vMap) + } + + default: + valMap.SetMapIndex(reflect.ValueOf(keyName), v) + } + } + + if val.CanAddr() { + val.Set(valMap) + } + + return nil +} + +func (d *Decoder) decodePtr(name string, data interface{}, val reflect.Value) (bool, error) { + // If the input data is nil, then we want to just set the output + // pointer to be nil as well. + isNil := data == nil + if !isNil { + switch v := reflect.Indirect(reflect.ValueOf(data)); v.Kind() { + case reflect.Chan, + reflect.Func, + reflect.Interface, + reflect.Map, + reflect.Ptr, + reflect.Slice: + isNil = v.IsNil() + } + } + if isNil { + if !val.IsNil() && val.CanSet() { + nilValue := reflect.New(val.Type()).Elem() + val.Set(nilValue) + } + + return true, nil + } + + // Create an element of the concrete (non pointer) type and decode + // into that. Then set the value of the pointer to this type. + valType := val.Type() + valElemType := valType.Elem() + if val.CanSet() { + realVal := val + if realVal.IsNil() || d.config.ZeroFields { + realVal = reflect.New(valElemType) + } + + if err := d.decode(name, data, reflect.Indirect(realVal)); err != nil { + return false, err + } + + val.Set(realVal) + } else { + if err := d.decode(name, data, reflect.Indirect(val)); err != nil { + return false, err + } + } + return false, nil +} + +func (d *Decoder) decodeFunc(name string, data interface{}, val reflect.Value) error { + // Create an element of the concrete (non pointer) type and decode + // into that. Then set the value of the pointer to this type. 
+ dataVal := reflect.Indirect(reflect.ValueOf(data)) + if val.Type() != dataVal.Type() { + return fmt.Errorf( + "'%s' expected type '%s', got unconvertible type '%s', value: '%v'", + name, val.Type(), dataVal.Type(), data) + } + val.Set(dataVal) + return nil +} + +func (d *Decoder) decodeSlice(name string, data interface{}, val reflect.Value) error { + dataVal := reflect.Indirect(reflect.ValueOf(data)) + dataValKind := dataVal.Kind() + valType := val.Type() + valElemType := valType.Elem() + sliceType := reflect.SliceOf(valElemType) + + // If we have a non array/slice type then we first attempt to convert. + if dataValKind != reflect.Array && dataValKind != reflect.Slice { + if d.config.WeaklyTypedInput { + switch { + // Slice and array we use the normal logic + case dataValKind == reflect.Slice, dataValKind == reflect.Array: + break + + // Empty maps turn into empty slices + case dataValKind == reflect.Map: + if dataVal.Len() == 0 { + val.Set(reflect.MakeSlice(sliceType, 0, 0)) + return nil + } + // Create slice of maps of other sizes + return d.decodeSlice(name, []interface{}{data}, val) + + case dataValKind == reflect.String && valElemType.Kind() == reflect.Uint8: + return d.decodeSlice(name, []byte(dataVal.String()), val) + + // All other types we try to convert to the slice type + // and "lift" it into it. i.e. a string becomes a string slice. + default: + // Just re-try this function with data as a slice. + return d.decodeSlice(name, []interface{}{data}, val) + } + } + + return fmt.Errorf( + "'%s': source data must be an array or slice, got %s", name, dataValKind) + } + + // If the input value is nil, then don't allocate since empty != nil + if dataVal.IsNil() { + return nil + } + + valSlice := val + if valSlice.IsNil() || d.config.ZeroFields { + // Make a new slice to hold our result, same size as the original data. 
+ valSlice = reflect.MakeSlice(sliceType, dataVal.Len(), dataVal.Len()) + } + + // Accumulate any errors + errors := make([]string, 0) + + for i := 0; i < dataVal.Len(); i++ { + currentData := dataVal.Index(i).Interface() + for valSlice.Len() <= i { + valSlice = reflect.Append(valSlice, reflect.Zero(valElemType)) + } + currentField := valSlice.Index(i) + + fieldName := name + "[" + strconv.Itoa(i) + "]" + if err := d.decode(fieldName, currentData, currentField); err != nil { + errors = appendErrors(errors, err) + } + } + + // Finally, set the value to the slice we built up + val.Set(valSlice) + + // If there were errors, we return those + if len(errors) > 0 { + return &Error{errors} + } + + return nil +} + +func (d *Decoder) decodeArray(name string, data interface{}, val reflect.Value) error { + dataVal := reflect.Indirect(reflect.ValueOf(data)) + dataValKind := dataVal.Kind() + valType := val.Type() + valElemType := valType.Elem() + arrayType := reflect.ArrayOf(valType.Len(), valElemType) + + valArray := val + + if valArray.Interface() == reflect.Zero(valArray.Type()).Interface() || d.config.ZeroFields { + // Check input type + if dataValKind != reflect.Array && dataValKind != reflect.Slice { + if d.config.WeaklyTypedInput { + switch { + // Empty maps turn into empty arrays + case dataValKind == reflect.Map: + if dataVal.Len() == 0 { + val.Set(reflect.Zero(arrayType)) + return nil + } + + // All other types we try to convert to the array type + // and "lift" it into it. i.e. a string becomes a string array. + default: + // Just re-try this function with data as a slice. 
+ return d.decodeArray(name, []interface{}{data}, val) + } + } + + return fmt.Errorf( + "'%s': source data must be an array or slice, got %s", name, dataValKind) + + } + if dataVal.Len() > arrayType.Len() { + return fmt.Errorf( + "'%s': expected source data to have length less or equal to %d, got %d", name, arrayType.Len(), dataVal.Len()) + + } + + // Make a new array to hold our result, same size as the original data. + valArray = reflect.New(arrayType).Elem() + } + + // Accumulate any errors + errors := make([]string, 0) + + for i := 0; i < dataVal.Len(); i++ { + currentData := dataVal.Index(i).Interface() + currentField := valArray.Index(i) + + fieldName := name + "[" + strconv.Itoa(i) + "]" + if err := d.decode(fieldName, currentData, currentField); err != nil { + errors = appendErrors(errors, err) + } + } + + // Finally, set the value to the array we built up + val.Set(valArray) + + // If there were errors, we return those + if len(errors) > 0 { + return &Error{errors} + } + + return nil +} + +func (d *Decoder) decodeStruct(name string, data interface{}, val reflect.Value) error { + dataVal := reflect.Indirect(reflect.ValueOf(data)) + + // If the type of the value to write to and the data match directly, + // then we just set it directly instead of recursing into the structure. + if dataVal.Type() == val.Type() { + val.Set(dataVal) + return nil + } + + dataValKind := dataVal.Kind() + switch dataValKind { + case reflect.Map: + return d.decodeStructFromMap(name, dataVal, val) + + case reflect.Struct: + // Not the most efficient way to do this but we can optimize later if + // we want to. To convert from struct to struct we go to map first + // as an intermediary. + + // Make a new map to hold our result + mapType := reflect.TypeOf((map[string]interface{})(nil)) + mval := reflect.MakeMap(mapType) + + // Creating a pointer to a map so that other methods can completely + // overwrite the map if need be (looking at you decodeMapFromMap). 
The + // indirection allows the underlying map to be settable (CanSet() == true) + // where as reflect.MakeMap returns an unsettable map. + addrVal := reflect.New(mval.Type()) + + reflect.Indirect(addrVal).Set(mval) + if err := d.decodeMapFromStruct(name, dataVal, reflect.Indirect(addrVal), mval); err != nil { + return err + } + + result := d.decodeStructFromMap(name, reflect.Indirect(addrVal), val) + return result + + default: + return fmt.Errorf("'%s' expected a map, got '%s'", name, dataVal.Kind()) + } +} + +func (d *Decoder) decodeStructFromMap(name string, dataVal, val reflect.Value) error { + dataValType := dataVal.Type() + if kind := dataValType.Key().Kind(); kind != reflect.String && kind != reflect.Interface { + return fmt.Errorf( + "'%s' needs a map with string keys, has '%s' keys", + name, dataValType.Key().Kind()) + } + + dataValKeys := make(map[reflect.Value]struct{}) + dataValKeysUnused := make(map[interface{}]struct{}) + for _, dataValKey := range dataVal.MapKeys() { + dataValKeys[dataValKey] = struct{}{} + dataValKeysUnused[dataValKey.Interface()] = struct{}{} + } + + errors := make([]string, 0) + + // This slice will keep track of all the structs we'll be decoding. + // There can be more than one struct if there are embedded structs + // that are squashed. + structs := make([]reflect.Value, 1, 5) + structs[0] = val + + // Compile the list of all the fields that we're going to be decoding + // from all the structs. + type field struct { + field reflect.StructField + val reflect.Value + } + + // remainField is set to a valid field set with the "remain" tag if + // we are keeping track of remaining values. 
+ var remainField *field + + fields := []field{} + for len(structs) > 0 { + structVal := structs[0] + structs = structs[1:] + + structType := structVal.Type() + + for i := 0; i < structType.NumField(); i++ { + fieldType := structType.Field(i) + fieldVal := structVal.Field(i) + if fieldVal.Kind() == reflect.Ptr && fieldVal.Elem().Kind() == reflect.Struct { + // Handle embedded struct pointers as embedded structs. + fieldVal = fieldVal.Elem() + } + + // If "squash" is specified in the tag, we squash the field down. + squash := d.config.Squash && fieldVal.Kind() == reflect.Struct && fieldType.Anonymous + remain := false + + // We always parse the tags cause we're looking for other tags too + tagParts := strings.Split(fieldType.Tag.Get(d.config.TagName), ",") + for _, tag := range tagParts[1:] { + if tag == "squash" { + squash = true + break + } + + if tag == "remain" { + remain = true + break + } + } + + if squash { + if fieldVal.Kind() != reflect.Struct { + errors = appendErrors(errors, + fmt.Errorf("%s: unsupported type for squash: %s", fieldType.Name, fieldVal.Kind())) + } else { + structs = append(structs, fieldVal) + } + continue + } + + // Build our field + if remain { + remainField = &field{fieldType, fieldVal} + } else { + // Normal struct field, store it away + fields = append(fields, field{fieldType, fieldVal}) + } + } + } + + // for fieldType, field := range fields { + for _, f := range fields { + field, fieldValue := f.field, f.val + fieldName := field.Name + + tagValue := field.Tag.Get(d.config.TagName) + tagValue = strings.SplitN(tagValue, ",", 2)[0] + if tagValue != "" { + fieldName = tagValue + } + + rawMapKey := reflect.ValueOf(fieldName) + rawMapVal := dataVal.MapIndex(rawMapKey) + if !rawMapVal.IsValid() { + // Do a slower search by iterating over each key and + // doing case-insensitive search. 
+ for dataValKey := range dataValKeys { + mK, ok := dataValKey.Interface().(string) + if !ok { + // Not a string key + continue + } + + if d.config.MatchName(mK, fieldName) { + rawMapKey = dataValKey + rawMapVal = dataVal.MapIndex(dataValKey) + break + } + } + + if !rawMapVal.IsValid() { + // There was no matching key in the map for the value in + // the struct. Just ignore. + continue + } + } + + if !fieldValue.IsValid() { + // This should never happen + panic("field is not valid") + } + + // If we can't set the field, then it is unexported or something, + // and we just continue onwards. + if !fieldValue.CanSet() { + continue + } + + // Delete the key we're using from the unused map so we stop tracking + delete(dataValKeysUnused, rawMapKey.Interface()) + + // If the name is empty string, then we're at the root, and we + // don't dot-join the fields. + if name != "" { + fieldName = name + "." + fieldName + } + + if err := d.decode(fieldName, rawMapVal.Interface(), fieldValue); err != nil { + errors = appendErrors(errors, err) + } + } + + // If we have a "remain"-tagged field and we have unused keys then + // we put the unused keys directly into the remain field. + if remainField != nil && len(dataValKeysUnused) > 0 { + // Build a map of only the unused values + remain := map[interface{}]interface{}{} + for key := range dataValKeysUnused { + remain[key] = dataVal.MapIndex(reflect.ValueOf(key)).Interface() + } + + // Decode it as-if we were just decoding this map onto our map. 
+ if err := d.decodeMap(name, remain, remainField.val); err != nil { + errors = appendErrors(errors, err) + } + + // Set the map to nil so we have none so that the next check will + // not error (ErrorUnused) + dataValKeysUnused = nil + } + + if d.config.ErrorUnused && len(dataValKeysUnused) > 0 { + keys := make([]string, 0, len(dataValKeysUnused)) + for rawKey := range dataValKeysUnused { + keys = append(keys, rawKey.(string)) + } + sort.Strings(keys) + + err := fmt.Errorf("'%s' has invalid keys: %s", name, strings.Join(keys, ", ")) + errors = appendErrors(errors, err) + } + + if len(errors) > 0 { + return &Error{errors} + } + + // Add the unused keys to the list of unused keys if we're tracking metadata + if d.config.Metadata != nil { + for rawKey := range dataValKeysUnused { + key := rawKey.(string) + if name != "" { + key = name + "." + key + } + + d.config.Metadata.Unused = append(d.config.Metadata.Unused, key) + } + } + + return nil +} + +func isEmptyValue(v reflect.Value) bool { + switch getKind(v) { + case reflect.Array, reflect.Map, reflect.Slice, reflect.String: + return v.Len() == 0 + case reflect.Bool: + return !v.Bool() + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return v.Int() == 0 + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return v.Uint() == 0 + case reflect.Float32, reflect.Float64: + return v.Float() == 0 + case reflect.Interface, reflect.Ptr: + return v.IsNil() + } + return false +} + +func getKind(val reflect.Value) reflect.Kind { + kind := val.Kind() + + switch { + case kind >= reflect.Int && kind <= reflect.Int64: + return reflect.Int + case kind >= reflect.Uint && kind <= reflect.Uint64: + return reflect.Uint + case kind >= reflect.Float32 && kind <= reflect.Float64: + return reflect.Float32 + default: + return kind + } +} diff --git a/vendor/github.com/xeipuuv/gojsonpointer/LICENSE-APACHE-2.0.txt 
b/vendor/github.com/xeipuuv/gojsonpointer/LICENSE-APACHE-2.0.txt new file mode 100644 index 0000000000..55ede8a42c --- /dev/null +++ b/vendor/github.com/xeipuuv/gojsonpointer/LICENSE-APACHE-2.0.txt @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). 
+ + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. 
Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative 
Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright 2015 xeipuuv + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/vendor/github.com/xeipuuv/gojsonpointer/README.md b/vendor/github.com/xeipuuv/gojsonpointer/README.md new file mode 100644 index 0000000000..00059242ca --- /dev/null +++ b/vendor/github.com/xeipuuv/gojsonpointer/README.md @@ -0,0 +1,41 @@ +# gojsonpointer +An implementation of JSON Pointer - Go language + +## Usage + jsonText := `{ + "name": "Bobby B", + "occupation": { + "title" : "King", + "years" : 15, + "heir" : "Joffrey B" + } + }` + + var jsonDocument map[string]interface{} + json.Unmarshal([]byte(jsonText), &jsonDocument) + + //create a JSON pointer + pointerString := "/occupation/title" + pointer, _ := NewJsonPointer(pointerString) + + //SET a new value for the "title" in the document + pointer.Set(jsonDocument, "Supreme Leader of Westeros") + + //GET the new "title" from the document + title, _, _ := pointer.Get(jsonDocument) + fmt.Println(title) //outputs "Supreme Leader of Westeros" + + //DELETE the "heir" from the document + deletePointer := NewJsonPointer("/occupation/heir") + deletePointer.Delete(jsonDocument) + + b, _ := json.Marshal(jsonDocument) + fmt.Println(string(b)) + //outputs `{"name":"Bobby B","occupation":{"title":"Supreme Leader of Westeros","years":15}}` + + +## References +http://tools.ietf.org/html/draft-ietf-appsawg-json-pointer-07 + +### Note +The 4.Evaluation part of the previous reference, starting with 'If the currently referenced value is a JSON array, the reference token MUST contain either...' is not implemented. diff --git a/vendor/github.com/xeipuuv/gojsonpointer/pointer.go b/vendor/github.com/xeipuuv/gojsonpointer/pointer.go new file mode 100644 index 0000000000..7faf5d7f94 --- /dev/null +++ b/vendor/github.com/xeipuuv/gojsonpointer/pointer.go @@ -0,0 +1,211 @@ +// Copyright 2015 xeipuuv ( https://github.com/xeipuuv ) +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// author xeipuuv +// author-github https://github.com/xeipuuv +// author-mail xeipuuv@gmail.com +// +// repository-name gojsonpointer +// repository-desc An implementation of JSON Pointer - Go language +// +// description Main and unique file. +// +// created 25-02-2013 + +package gojsonpointer + +import ( + "errors" + "fmt" + "reflect" + "strconv" + "strings" +) + +const ( + const_empty_pointer = `` + const_pointer_separator = `/` + + const_invalid_start = `JSON pointer must be empty or start with a "` + const_pointer_separator + `"` +) + +type implStruct struct { + mode string // "SET" or "GET" + + inDocument interface{} + + setInValue interface{} + + getOutNode interface{} + getOutKind reflect.Kind + outError error +} + +type JsonPointer struct { + referenceTokens []string +} + +// NewJsonPointer parses the given string JSON pointer and returns an object +func NewJsonPointer(jsonPointerString string) (p JsonPointer, err error) { + + // Pointer to the root of the document + if len(jsonPointerString) == 0 { + // Keep referenceTokens nil + return + } + if jsonPointerString[0] != '/' { + return p, errors.New(const_invalid_start) + } + + p.referenceTokens = strings.Split(jsonPointerString[1:], const_pointer_separator) + return +} + +// Uses the pointer to retrieve a value from a JSON document +func (p *JsonPointer) Get(document interface{}) (interface{}, reflect.Kind, error) { + + is := &implStruct{mode: "GET", inDocument: document} + p.implementation(is) + return is.getOutNode, is.getOutKind, is.outError + +} + +// Uses the pointer to update a 
value from a JSON document +func (p *JsonPointer) Set(document interface{}, value interface{}) (interface{}, error) { + + is := &implStruct{mode: "SET", inDocument: document, setInValue: value} + p.implementation(is) + return document, is.outError + +} + +// Uses the pointer to delete a value from a JSON document +func (p *JsonPointer) Delete(document interface{}) (interface{}, error) { + is := &implStruct{mode: "DEL", inDocument: document} + p.implementation(is) + return document, is.outError +} + +// Both Get and Set functions use the same implementation to avoid code duplication +func (p *JsonPointer) implementation(i *implStruct) { + + kind := reflect.Invalid + + // Full document when empty + if len(p.referenceTokens) == 0 { + i.getOutNode = i.inDocument + i.outError = nil + i.getOutKind = kind + i.outError = nil + return + } + + node := i.inDocument + + previousNodes := make([]interface{}, len(p.referenceTokens)) + previousTokens := make([]string, len(p.referenceTokens)) + + for ti, token := range p.referenceTokens { + + isLastToken := ti == len(p.referenceTokens)-1 + previousNodes[ti] = node + previousTokens[ti] = token + + switch v := node.(type) { + + case map[string]interface{}: + decodedToken := decodeReferenceToken(token) + if _, ok := v[decodedToken]; ok { + node = v[decodedToken] + if isLastToken && i.mode == "SET" { + v[decodedToken] = i.setInValue + } else if isLastToken && i.mode =="DEL" { + delete(v,decodedToken) + } + } else if (isLastToken && i.mode == "SET") { + v[decodedToken] = i.setInValue + } else { + i.outError = fmt.Errorf("Object has no key '%s'", decodedToken) + i.getOutKind = reflect.Map + i.getOutNode = nil + return + } + + case []interface{}: + tokenIndex, err := strconv.Atoi(token) + if err != nil { + i.outError = fmt.Errorf("Invalid array index '%s'", token) + i.getOutKind = reflect.Slice + i.getOutNode = nil + return + } + if tokenIndex < 0 || tokenIndex >= len(v) { + i.outError = fmt.Errorf("Out of bound array[0,%d] index '%d'", 
len(v), tokenIndex) + i.getOutKind = reflect.Slice + i.getOutNode = nil + return + } + + node = v[tokenIndex] + if isLastToken && i.mode == "SET" { + v[tokenIndex] = i.setInValue + } else if isLastToken && i.mode =="DEL" { + v[tokenIndex] = v[len(v)-1] + v[len(v)-1] = nil + v = v[:len(v)-1] + previousNodes[ti-1].(map[string]interface{})[previousTokens[ti-1]] = v + } + + default: + i.outError = fmt.Errorf("Invalid token reference '%s'", token) + i.getOutKind = reflect.ValueOf(node).Kind() + i.getOutNode = nil + return + } + + } + + i.getOutNode = node + i.getOutKind = reflect.ValueOf(node).Kind() + i.outError = nil +} + +// Pointer to string representation function +func (p *JsonPointer) String() string { + + if len(p.referenceTokens) == 0 { + return const_empty_pointer + } + + pointerString := const_pointer_separator + strings.Join(p.referenceTokens, const_pointer_separator) + + return pointerString +} + +// Specific JSON pointer encoding here +// ~0 => ~ +// ~1 => / +// ... and vice versa + +func decodeReferenceToken(token string) string { + step1 := strings.Replace(token, `~1`, `/`, -1) + step2 := strings.Replace(step1, `~0`, `~`, -1) + return step2 +} + +func encodeReferenceToken(token string) string { + step1 := strings.Replace(token, `~`, `~0`, -1) + step2 := strings.Replace(step1, `/`, `~1`, -1) + return step2 +} diff --git a/vendor/github.com/xeipuuv/gojsonreference/LICENSE-APACHE-2.0.txt b/vendor/github.com/xeipuuv/gojsonreference/LICENSE-APACHE-2.0.txt new file mode 100644 index 0000000000..55ede8a42c --- /dev/null +++ b/vendor/github.com/xeipuuv/gojsonreference/LICENSE-APACHE-2.0.txt @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. 
+ + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of 
the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright 2015 xeipuuv + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/vendor/github.com/xeipuuv/gojsonreference/README.md b/vendor/github.com/xeipuuv/gojsonreference/README.md new file mode 100644 index 0000000000..9ab6e1eb13 --- /dev/null +++ b/vendor/github.com/xeipuuv/gojsonreference/README.md @@ -0,0 +1,10 @@ +# gojsonreference +An implementation of JSON Reference - Go language + +## Dependencies +https://github.com/xeipuuv/gojsonpointer + +## References +http://tools.ietf.org/html/draft-ietf-appsawg-json-pointer-07 + +http://tools.ietf.org/html/draft-pbryan-zyp-json-ref-03 diff --git a/vendor/github.com/xeipuuv/gojsonreference/reference.go b/vendor/github.com/xeipuuv/gojsonreference/reference.go new file mode 100644 index 0000000000..6457291301 --- /dev/null +++ b/vendor/github.com/xeipuuv/gojsonreference/reference.go @@ -0,0 +1,147 @@ +// Copyright 2015 xeipuuv ( https://github.com/xeipuuv ) +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// author xeipuuv +// author-github https://github.com/xeipuuv +// author-mail xeipuuv@gmail.com +// +// repository-name gojsonreference +// repository-desc An implementation of JSON Reference - Go language +// +// description Main and unique file. 
+// +// created 26-02-2013 + +package gojsonreference + +import ( + "errors" + "net/url" + "path/filepath" + "runtime" + "strings" + + "github.com/xeipuuv/gojsonpointer" +) + +const ( + const_fragment_char = `#` +) + +func NewJsonReference(jsonReferenceString string) (JsonReference, error) { + + var r JsonReference + err := r.parse(jsonReferenceString) + return r, err + +} + +type JsonReference struct { + referenceUrl *url.URL + referencePointer gojsonpointer.JsonPointer + + HasFullUrl bool + HasUrlPathOnly bool + HasFragmentOnly bool + HasFileScheme bool + HasFullFilePath bool +} + +func (r *JsonReference) GetUrl() *url.URL { + return r.referenceUrl +} + +func (r *JsonReference) GetPointer() *gojsonpointer.JsonPointer { + return &r.referencePointer +} + +func (r *JsonReference) String() string { + + if r.referenceUrl != nil { + return r.referenceUrl.String() + } + + if r.HasFragmentOnly { + return const_fragment_char + r.referencePointer.String() + } + + return r.referencePointer.String() +} + +func (r *JsonReference) IsCanonical() bool { + return (r.HasFileScheme && r.HasFullFilePath) || (!r.HasFileScheme && r.HasFullUrl) +} + +// "Constructor", parses the given string JSON reference +func (r *JsonReference) parse(jsonReferenceString string) (err error) { + + r.referenceUrl, err = url.Parse(jsonReferenceString) + if err != nil { + return + } + refUrl := r.referenceUrl + + if refUrl.Scheme != "" && refUrl.Host != "" { + r.HasFullUrl = true + } else { + if refUrl.Path != "" { + r.HasUrlPathOnly = true + } else if refUrl.RawQuery == "" && refUrl.Fragment != "" { + r.HasFragmentOnly = true + } + } + + r.HasFileScheme = refUrl.Scheme == "file" + if runtime.GOOS == "windows" { + // on Windows, a file URL may have an extra leading slash, and if it + // doesn't then its first component will be treated as the host by the + // Go runtime + if refUrl.Host == "" && strings.HasPrefix(refUrl.Path, "/") { + r.HasFullFilePath = filepath.IsAbs(refUrl.Path[1:]) + } else { + 
r.HasFullFilePath = filepath.IsAbs(refUrl.Host + refUrl.Path) + } + } else { + r.HasFullFilePath = filepath.IsAbs(refUrl.Path) + } + + // invalid json-pointer error means url has no json-pointer fragment. simply ignore error + r.referencePointer, _ = gojsonpointer.NewJsonPointer(refUrl.Fragment) + + return +} + +// Creates a new reference from a parent and a child +// If the child cannot inherit from the parent, an error is returned +func (r *JsonReference) Inherits(child JsonReference) (*JsonReference, error) { + if child.GetUrl() == nil { + return nil, errors.New("childUrl is nil!") + } + + if r.GetUrl() == nil { + return nil, errors.New("parentUrl is nil!") + } + + // Get a copy of the parent url to make sure we do not modify the original. + // URL reference resolving fails if the fragment of the child is empty, but the parent's is not. + // The fragment of the child must be used, so the fragment of the parent is manually removed. + parentUrl := *r.GetUrl() + parentUrl.Fragment = "" + + ref, err := NewJsonReference(parentUrl.ResolveReference(child.GetUrl()).String()) + if err != nil { + return nil, err + } + return &ref, err +} diff --git a/vendor/github.com/xeipuuv/gojsonschema/.gitignore b/vendor/github.com/xeipuuv/gojsonschema/.gitignore new file mode 100644 index 0000000000..68e993ce3e --- /dev/null +++ b/vendor/github.com/xeipuuv/gojsonschema/.gitignore @@ -0,0 +1,3 @@ +*.sw[nop] +*.iml +.vscode/ diff --git a/vendor/github.com/xeipuuv/gojsonschema/.travis.yml b/vendor/github.com/xeipuuv/gojsonschema/.travis.yml new file mode 100644 index 0000000000..3289001cd1 --- /dev/null +++ b/vendor/github.com/xeipuuv/gojsonschema/.travis.yml @@ -0,0 +1,9 @@ +language: go +go: + - "1.11" + - "1.12" + - "1.13" +before_install: + - go get github.com/xeipuuv/gojsonreference + - go get github.com/xeipuuv/gojsonpointer + - go get github.com/stretchr/testify/assert diff --git a/vendor/github.com/xeipuuv/gojsonschema/LICENSE-APACHE-2.0.txt 
b/vendor/github.com/xeipuuv/gojsonschema/LICENSE-APACHE-2.0.txt new file mode 100644 index 0000000000..55ede8a42c --- /dev/null +++ b/vendor/github.com/xeipuuv/gojsonschema/LICENSE-APACHE-2.0.txt @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). 
+ + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. 
Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative 
Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright 2015 xeipuuv + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/xeipuuv/gojsonschema/README.md b/vendor/github.com/xeipuuv/gojsonschema/README.md new file mode 100644 index 0000000000..758f26df0f --- /dev/null +++ b/vendor/github.com/xeipuuv/gojsonschema/README.md @@ -0,0 +1,466 @@ +[![GoDoc](https://godoc.org/github.com/xeipuuv/gojsonschema?status.svg)](https://godoc.org/github.com/xeipuuv/gojsonschema) +[![Build Status](https://travis-ci.org/xeipuuv/gojsonschema.svg)](https://travis-ci.org/xeipuuv/gojsonschema) +[![Go Report Card](https://goreportcard.com/badge/github.com/xeipuuv/gojsonschema)](https://goreportcard.com/report/github.com/xeipuuv/gojsonschema) + +# gojsonschema + +## Description + +An implementation of JSON Schema for the Go programming language. Supports draft-04, draft-06 and draft-07. 
+ +References : + +* http://json-schema.org +* http://json-schema.org/latest/json-schema-core.html +* http://json-schema.org/latest/json-schema-validation.html + +## Installation + +``` +go get github.com/xeipuuv/gojsonschema +``` + +Dependencies : +* [github.com/xeipuuv/gojsonpointer](https://github.com/xeipuuv/gojsonpointer) +* [github.com/xeipuuv/gojsonreference](https://github.com/xeipuuv/gojsonreference) +* [github.com/stretchr/testify/assert](https://github.com/stretchr/testify#assert-package) + +## Usage + +### Example + +```go + +package main + +import ( + "fmt" + "github.com/xeipuuv/gojsonschema" +) + +func main() { + + schemaLoader := gojsonschema.NewReferenceLoader("file:///home/me/schema.json") + documentLoader := gojsonschema.NewReferenceLoader("file:///home/me/document.json") + + result, err := gojsonschema.Validate(schemaLoader, documentLoader) + if err != nil { + panic(err.Error()) + } + + if result.Valid() { + fmt.Printf("The document is valid\n") + } else { + fmt.Printf("The document is not valid. see errors :\n") + for _, desc := range result.Errors() { + fmt.Printf("- %s\n", desc) + } + } +} + + +``` + +#### Loaders + +There are various ways to load your JSON data. +In order to load your schemas and documents, +first declare an appropriate loader : + +* Web / HTTP, using a reference : + +```go +loader := gojsonschema.NewReferenceLoader("http://www.some_host.com/schema.json") +``` + +* Local file, using a reference : + +```go +loader := gojsonschema.NewReferenceLoader("file:///home/me/schema.json") +``` + +References use the URI scheme, the prefix (file://) and a full path to the file are required. + +* JSON strings : + +```go +loader := gojsonschema.NewStringLoader(`{"type": "string"}`) +``` + +* Custom Go types : + +```go +m := map[string]interface{}{"type": "string"} +loader := gojsonschema.NewGoLoader(m) +``` + +And + +```go +type Root struct { + Users []User `json:"users"` +} + +type User struct { + Name string `json:"name"` +} + +... 
+ +data := Root{} +data.Users = append(data.Users, User{"John"}) +data.Users = append(data.Users, User{"Sophia"}) +data.Users = append(data.Users, User{"Bill"}) + +loader := gojsonschema.NewGoLoader(data) +``` + +#### Validation + +Once the loaders are set, validation is easy : + +```go +result, err := gojsonschema.Validate(schemaLoader, documentLoader) +``` + +Alternatively, you might want to load a schema only once and process to multiple validations : + +```go +schema, err := gojsonschema.NewSchema(schemaLoader) +... +result1, err := schema.Validate(documentLoader1) +... +result2, err := schema.Validate(documentLoader2) +... +// etc ... +``` + +To check the result : + +```go + if result.Valid() { + fmt.Printf("The document is valid\n") + } else { + fmt.Printf("The document is not valid. see errors :\n") + for _, err := range result.Errors() { + // Err implements the ResultError interface + fmt.Printf("- %s\n", err) + } + } +``` + + +## Loading local schemas + +By default `file` and `http(s)` references to external schemas are loaded automatically via the file system or via http(s). An external schema can also be loaded using a `SchemaLoader`. + +```go + sl := gojsonschema.NewSchemaLoader() + loader1 := gojsonschema.NewStringLoader(`{ "type" : "string" }`) + err := sl.AddSchema("http://some_host.com/string.json", loader1) +``` + +Alternatively if your schema already has an `$id` you can use the `AddSchemas` function +```go + loader2 := gojsonschema.NewStringLoader(`{ + "$id" : "http://some_host.com/maxlength.json", + "maxLength" : 5 + }`) + err = sl.AddSchemas(loader2) +``` + +The main schema should be passed to the `Compile` function. This main schema can then directly reference the added schemas without needing to download them. 
+```go + loader3 := gojsonschema.NewStringLoader(`{ + "$id" : "http://some_host.com/main.json", + "allOf" : [ + { "$ref" : "http://some_host.com/string.json" }, + { "$ref" : "http://some_host.com/maxlength.json" } + ] + }`) + + schema, err := sl.Compile(loader3) + + documentLoader := gojsonschema.NewStringLoader(`"hello world"`) + + result, err := schema.Validate(documentLoader) +``` + +It's also possible to pass a `ReferenceLoader` to the `Compile` function that references a loaded schema. + +```go +err = sl.AddSchemas(loader3) +schema, err := sl.Compile(gojsonschema.NewReferenceLoader("http://some_host.com/main.json")) +``` + +Schemas added by `AddSchema` and `AddSchemas` are only validated when the entire schema is compiled, unless meta-schema validation is used. + +## Using a specific draft +By default `gojsonschema` will try to detect the draft of a schema by using the `$schema` keyword and parse it in a strict draft-04, draft-06 or draft-07 mode. If `$schema` is missing, or the draft version is not explicitely set, a hybrid mode is used which merges together functionality of all drafts into one mode. + +Autodectection can be turned off with the `AutoDetect` property. Specific draft versions can be specified with the `Draft` property. + +```go +sl := gojsonschema.NewSchemaLoader() +sl.Draft = gojsonschema.Draft7 +sl.AutoDetect = false +``` + +If autodetection is on (default), a draft-07 schema can savely reference draft-04 schemas and vice-versa, as long as `$schema` is specified in all schemas. + +## Meta-schema validation +Schemas that are added using the `AddSchema`, `AddSchemas` and `Compile` can be validated against their meta-schema by setting the `Validate` property. + +The following example will produce an error as `multipleOf` must be a number. If `Validate` is off (default), this error is only returned at the `Compile` step. 
+
+```go
+sl := gojsonschema.NewSchemaLoader()
+sl.Validate = true
+err := sl.AddSchemas(gojsonschema.NewStringLoader(`{
+     "$id" : "http://some_host.com/invalid.json",
+     "$schema": "http://json-schema.org/draft-07/schema#",
+     "multipleOf" : true
+}`))
+```
+
+Errors returned by meta-schema validation are more readable and contain more information, which helps significantly if you are developing a schema.
+
+Meta-schema validation also works with a custom `$schema`. In case `$schema` is missing, or `AutoDetect` is set to `false`, the meta-schema of the used draft is used.
+
+
+## Working with Errors
+
+The library handles string error codes which you can customize by creating your own gojsonschema.locale and setting it
+```go
+gojsonschema.Locale = YourCustomLocale{}
+```
+
+However, each error contains additional contextual information.
+
+Newer versions of `gojsonschema` may have new additional errors, so code that uses a custom locale will need to be updated when this happens.
+
+**err.Type()**: *string* Returns the "type" of error that occurred. Note you can also type check.
 See below
+
+Note: An error of RequiredType has an err.Type() return value of "required"
+
+    "required": RequiredError
+    "invalid_type": InvalidTypeError
+    "number_any_of": NumberAnyOfError
+    "number_one_of": NumberOneOfError
+    "number_all_of": NumberAllOfError
+    "number_not": NumberNotError
+    "missing_dependency": MissingDependencyError
+    "internal": InternalError
+    "const": ConstError
+    "enum": EnumError
+    "array_no_additional_items": ArrayNoAdditionalItemsError
+    "array_min_items": ArrayMinItemsError
+    "array_max_items": ArrayMaxItemsError
+    "unique": ItemsMustBeUniqueError
+    "contains" : ArrayContainsError
+    "array_min_properties": ArrayMinPropertiesError
+    "array_max_properties": ArrayMaxPropertiesError
+    "additional_property_not_allowed": AdditionalPropertyNotAllowedError
+    "invalid_property_pattern": InvalidPropertyPatternError
+    "invalid_property_name": InvalidPropertyNameError
+    "string_gte": StringLengthGTEError
+    "string_lte": StringLengthLTEError
+    "pattern": DoesNotMatchPatternError
+    "multiple_of": MultipleOfError
+    "number_gte": NumberGTEError
+    "number_gt": NumberGTError
+    "number_lte": NumberLTEError
+    "number_lt": NumberLTError
+    "condition_then" : ConditionThenError
+    "condition_else" : ConditionElseError
+
+**err.Value()**: *interface{}* Returns the value given
+
+**err.Context()**: *gojsonschema.JsonContext* Returns the context. This has a String() method that will print something like this: (root).firstName
+
+**err.Field()**: *string* Returns the fieldname in the format firstName, or for embedded properties, person.firstName. This returns the same as the String() method on *err.Context()* but removes the (root). prefix.
+
+**err.Description()**: *string* The error description. This is based on the locale you are using. See the beginning of this section for overwriting the locale with a custom implementation.
+
+**err.DescriptionFormat()**: *string* The error description format.
This is relevant if you are adding custom validation errors afterwards to the result. + +**err.Details()**: *gojsonschema.ErrorDetails* Returns a map[string]interface{} of additional error details specific to the error. For example, GTE errors will have a "min" value, LTE will have a "max" value. See errors.go for a full description of all the error details. Every error always contains a "field" key that holds the value of *err.Field()* + +Note in most cases, the err.Details() will be used to generate replacement strings in your locales, and not used directly. These strings follow the text/template format i.e. +``` +{{.field}} must be greater than or equal to {{.min}} +``` + +The library allows you to specify custom template functions, should you require more complex error message handling. +```go +gojsonschema.ErrorTemplateFuncs = map[string]interface{}{ + "allcaps": func(s string) string { + return strings.ToUpper(s) + }, +} +``` + +Given the above definition, you can use the custom function `"allcaps"` in your localization templates: +``` +{{allcaps .field}} must be greater than or equal to {{.min}} +``` + +The above error message would then be rendered with the `field` value in capital letters. For example: +``` +"PASSWORD must be greater than or equal to 8" +``` + +Learn more about what types of template functions you can use in `ErrorTemplateFuncs` by referring to Go's [text/template FuncMap](https://golang.org/pkg/text/template/#FuncMap) type. + +## Formats +JSON Schema allows for optional "format" property to validate instances against well-known formats. gojsonschema ships with all of the formats defined in the spec that you can use like this: + +````json +{"type": "string", "format": "email"} +```` + +Not all formats defined in draft-07 are available. Implemented formats are: + +* `date` +* `time` +* `date-time` +* `hostname`. 
Subdomains that start with a number are also supported, but this means that it doesn't strictly follow [RFC1034](http://tools.ietf.org/html/rfc1034#section-3.5) and has the implication that ipv4 addresses are also recognized as valid hostnames. +* `email`. Go's email parser deviates slightly from [RFC5322](https://tools.ietf.org/html/rfc5322). Includes unicode support. +* `idn-email`. Same caveat as `email`. +* `ipv4` +* `ipv6` +* `uri`. Includes unicode support. +* `uri-reference`. Includes unicode support. +* `iri` +* `iri-reference` +* `uri-template` +* `uuid` +* `regex`. Go uses the [RE2](https://github.com/google/re2/wiki/Syntax) engine and is not [ECMA262](http://www.ecma-international.org/publications/files/ECMA-ST/Ecma-262.pdf) compatible. +* `json-pointer` +* `relative-json-pointer` + +`email`, `uri` and `uri-reference` use the same validation code as their unicode counterparts `idn-email`, `iri` and `iri-reference`. If you rely on unicode support you should use the specific +unicode enabled formats for the sake of interoperability as other implementations might not support unicode in the regular formats. + +The validation code for `uri`, `idn-email` and their relatives use mostly standard library code. 
+ +For repetitive or more complex formats, you can create custom format checkers and add them to gojsonschema like this: + +```go +// Define the format checker +type RoleFormatChecker struct {} + +// Ensure it meets the gojsonschema.FormatChecker interface +func (f RoleFormatChecker) IsFormat(input interface{}) bool { + + asString, ok := input.(string) + if ok == false { + return false + } + + return strings.HasPrefix("ROLE_", asString) +} + +// Add it to the library +gojsonschema.FormatCheckers.Add("role", RoleFormatChecker{}) +```` + +Now to use in your json schema: +````json +{"type": "string", "format": "role"} +```` + +Another example would be to check if the provided integer matches an id on database: + +JSON schema: +```json +{"type": "integer", "format": "ValidUserId"} +``` + +```go +// Define the format checker +type ValidUserIdFormatChecker struct {} + +// Ensure it meets the gojsonschema.FormatChecker interface +func (f ValidUserIdFormatChecker) IsFormat(input interface{}) bool { + + asFloat64, ok := input.(float64) // Numbers are always float64 here + if ok == false { + return false + } + + // XXX + // do the magic on the database looking for the int(asFloat64) + + return true +} + +// Add it to the library +gojsonschema.FormatCheckers.Add("ValidUserId", ValidUserIdFormatChecker{}) +```` + +Formats can also be removed, for example if you want to override one of the formats that is defined by default. + +```go +gojsonschema.FormatCheckers.Remove("hostname") +``` + + +## Additional custom validation +After the validation has run and you have the results, you may add additional +errors using `Result.AddError`. This is useful to maintain the same format within the resultset instead +of having to add special exceptions for your own errors. Below is an example. 
+ +```go +type AnswerInvalidError struct { + gojsonschema.ResultErrorFields +} + +func newAnswerInvalidError(context *gojsonschema.JsonContext, value interface{}, details gojsonschema.ErrorDetails) *AnswerInvalidError { + err := AnswerInvalidError{} + err.SetContext(context) + err.SetType("custom_invalid_error") + // it is important to use SetDescriptionFormat() as this is used to call SetDescription() after it has been parsed + // using the description of err will be overridden by this. + err.SetDescriptionFormat("Answer to the Ultimate Question of Life, the Universe, and Everything is {{.answer}}") + err.SetValue(value) + err.SetDetails(details) + + return &err +} + +func main() { + // ... + schema, err := gojsonschema.NewSchema(schemaLoader) + result, err := gojsonschema.Validate(schemaLoader, documentLoader) + + if true { // some validation + jsonContext := gojsonschema.NewJsonContext("question", nil) + errDetail := gojsonschema.ErrorDetails{ + "answer": 42, + } + result.AddError( + newAnswerInvalidError( + gojsonschema.NewJsonContext("answer", jsonContext), + 52, + errDetail, + ), + errDetail, + ) + } + + return result, err + +} +``` + +This is especially useful if you want to add validation beyond what the +json schema drafts can provide such business specific logic. + +## Uses + +gojsonschema uses the following test suite : + +https://github.com/json-schema/JSON-Schema-Test-Suite diff --git a/vendor/github.com/xeipuuv/gojsonschema/draft.go b/vendor/github.com/xeipuuv/gojsonschema/draft.go new file mode 100644 index 0000000000..61298e7aa0 --- /dev/null +++ b/vendor/github.com/xeipuuv/gojsonschema/draft.go @@ -0,0 +1,125 @@ +// Copyright 2018 johandorland ( https://github.com/johandorland ) +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package gojsonschema + +import ( + "errors" + "math" + "reflect" + + "github.com/xeipuuv/gojsonreference" +) + +// Draft is a JSON-schema draft version +type Draft int + +// Supported Draft versions +const ( + Draft4 Draft = 4 + Draft6 Draft = 6 + Draft7 Draft = 7 + Hybrid Draft = math.MaxInt32 +) + +type draftConfig struct { + Version Draft + MetaSchemaURL string + MetaSchema string +} +type draftConfigs []draftConfig + +var drafts draftConfigs + +func init() { + drafts = []draftConfig{ + { + Version: Draft4, + MetaSchemaURL: "http://json-schema.org/draft-04/schema", + MetaSchema: `{"id":"http://json-schema.org/draft-04/schema#","$schema":"http://json-schema.org/draft-04/schema#","description":"Core schema 
meta-schema","definitions":{"schemaArray":{"type":"array","minItems":1,"items":{"$ref":"#"}},"positiveInteger":{"type":"integer","minimum":0},"positiveIntegerDefault0":{"allOf":[{"$ref":"#/definitions/positiveInteger"},{"default":0}]},"simpleTypes":{"enum":["array","boolean","integer","null","number","object","string"]},"stringArray":{"type":"array","items":{"type":"string"},"minItems":1,"uniqueItems":true}},"type":"object","properties":{"id":{"type":"string"},"$schema":{"type":"string"},"title":{"type":"string"},"description":{"type":"string"},"default":{},"multipleOf":{"type":"number","minimum":0,"exclusiveMinimum":true},"maximum":{"type":"number"},"exclusiveMaximum":{"type":"boolean","default":false},"minimum":{"type":"number"},"exclusiveMinimum":{"type":"boolean","default":false},"maxLength":{"$ref":"#/definitions/positiveInteger"},"minLength":{"$ref":"#/definitions/positiveIntegerDefault0"},"pattern":{"type":"string","format":"regex"},"additionalItems":{"anyOf":[{"type":"boolean"},{"$ref":"#"}],"default":{}},"items":{"anyOf":[{"$ref":"#"},{"$ref":"#/definitions/schemaArray"}],"default":{}},"maxItems":{"$ref":"#/definitions/positiveInteger"},"minItems":{"$ref":"#/definitions/positiveIntegerDefault0"},"uniqueItems":{"type":"boolean","default":false},"maxProperties":{"$ref":"#/definitions/positiveInteger"},"minProperties":{"$ref":"#/definitions/positiveIntegerDefault0"},"required":{"$ref":"#/definitions/stringArray"},"additionalProperties":{"anyOf":[{"type":"boolean"},{"$ref":"#"}],"default":{}},"definitions":{"type":"object","additionalProperties":{"$ref":"#"},"default":{}},"properties":{"type":"object","additionalProperties":{"$ref":"#"},"default":{}},"patternProperties":{"type":"object","additionalProperties":{"$ref":"#"},"default":{}},"dependencies":{"type":"object","additionalProperties":{"anyOf":[{"$ref":"#"},{"$ref":"#/definitions/stringArray"}]}},"enum":{"type":"array","minItems":1,"uniqueItems":true},"type":{"anyOf":[{"$ref":"#/definitions/simpleTypes"},{
"type":"array","items":{"$ref":"#/definitions/simpleTypes"},"minItems":1,"uniqueItems":true}]},"format":{"type":"string"},"allOf":{"$ref":"#/definitions/schemaArray"},"anyOf":{"$ref":"#/definitions/schemaArray"},"oneOf":{"$ref":"#/definitions/schemaArray"},"not":{"$ref":"#"}},"dependencies":{"exclusiveMaximum":["maximum"],"exclusiveMinimum":["minimum"]},"default":{}}`, + }, + { + Version: Draft6, + MetaSchemaURL: "http://json-schema.org/draft-06/schema", + MetaSchema: `{"$schema":"http://json-schema.org/draft-06/schema#","$id":"http://json-schema.org/draft-06/schema#","title":"Core schema meta-schema","definitions":{"schemaArray":{"type":"array","minItems":1,"items":{"$ref":"#"}},"nonNegativeInteger":{"type":"integer","minimum":0},"nonNegativeIntegerDefault0":{"allOf":[{"$ref":"#/definitions/nonNegativeInteger"},{"default":0}]},"simpleTypes":{"enum":["array","boolean","integer","null","number","object","string"]},"stringArray":{"type":"array","items":{"type":"string"},"uniqueItems":true,"default":[]}},"type":["object","boolean"],"properties":{"$id":{"type":"string","format":"uri-reference"},"$schema":{"type":"string","format":"uri"},"$ref":{"type":"string","format":"uri-reference"},"title":{"type":"string"},"description":{"type":"string"},"default":{},"examples":{"type":"array","items":{}},"multipleOf":{"type":"number","exclusiveMinimum":0},"maximum":{"type":"number"},"exclusiveMaximum":{"type":"number"},"minimum":{"type":"number"},"exclusiveMinimum":{"type":"number"},"maxLength":{"$ref":"#/definitions/nonNegativeInteger"},"minLength":{"$ref":"#/definitions/nonNegativeIntegerDefault0"},"pattern":{"type":"string","format":"regex"},"additionalItems":{"$ref":"#"},"items":{"anyOf":[{"$ref":"#"},{"$ref":"#/definitions/schemaArray"}],"default":{}},"maxItems":{"$ref":"#/definitions/nonNegativeInteger"},"minItems":{"$ref":"#/definitions/nonNegativeIntegerDefault0"},"uniqueItems":{"type":"boolean","default":false},"contains":{"$ref":"#"},"maxProperties":{"$ref":"#/definition
s/nonNegativeInteger"},"minProperties":{"$ref":"#/definitions/nonNegativeIntegerDefault0"},"required":{"$ref":"#/definitions/stringArray"},"additionalProperties":{"$ref":"#"},"definitions":{"type":"object","additionalProperties":{"$ref":"#"},"default":{}},"properties":{"type":"object","additionalProperties":{"$ref":"#"},"default":{}},"patternProperties":{"type":"object","additionalProperties":{"$ref":"#"},"default":{}},"dependencies":{"type":"object","additionalProperties":{"anyOf":[{"$ref":"#"},{"$ref":"#/definitions/stringArray"}]}},"propertyNames":{"$ref":"#"},"const":{},"enum":{"type":"array","minItems":1,"uniqueItems":true},"type":{"anyOf":[{"$ref":"#/definitions/simpleTypes"},{"type":"array","items":{"$ref":"#/definitions/simpleTypes"},"minItems":1,"uniqueItems":true}]},"format":{"type":"string"},"allOf":{"$ref":"#/definitions/schemaArray"},"anyOf":{"$ref":"#/definitions/schemaArray"},"oneOf":{"$ref":"#/definitions/schemaArray"},"not":{"$ref":"#"}},"default":{}}`, + }, + { + Version: Draft7, + MetaSchemaURL: "http://json-schema.org/draft-07/schema", + MetaSchema: `{"$schema":"http://json-schema.org/draft-07/schema#","$id":"http://json-schema.org/draft-07/schema#","title":"Core schema 
meta-schema","definitions":{"schemaArray":{"type":"array","minItems":1,"items":{"$ref":"#"}},"nonNegativeInteger":{"type":"integer","minimum":0},"nonNegativeIntegerDefault0":{"allOf":[{"$ref":"#/definitions/nonNegativeInteger"},{"default":0}]},"simpleTypes":{"enum":["array","boolean","integer","null","number","object","string"]},"stringArray":{"type":"array","items":{"type":"string"},"uniqueItems":true,"default":[]}},"type":["object","boolean"],"properties":{"$id":{"type":"string","format":"uri-reference"},"$schema":{"type":"string","format":"uri"},"$ref":{"type":"string","format":"uri-reference"},"$comment":{"type":"string"},"title":{"type":"string"},"description":{"type":"string"},"default":true,"readOnly":{"type":"boolean","default":false},"examples":{"type":"array","items":true},"multipleOf":{"type":"number","exclusiveMinimum":0},"maximum":{"type":"number"},"exclusiveMaximum":{"type":"number"},"minimum":{"type":"number"},"exclusiveMinimum":{"type":"number"},"maxLength":{"$ref":"#/definitions/nonNegativeInteger"},"minLength":{"$ref":"#/definitions/nonNegativeIntegerDefault0"},"pattern":{"type":"string","format":"regex"},"additionalItems":{"$ref":"#"},"items":{"anyOf":[{"$ref":"#"},{"$ref":"#/definitions/schemaArray"}],"default":true},"maxItems":{"$ref":"#/definitions/nonNegativeInteger"},"minItems":{"$ref":"#/definitions/nonNegativeIntegerDefault0"},"uniqueItems":{"type":"boolean","default":false},"contains":{"$ref":"#"},"maxProperties":{"$ref":"#/definitions/nonNegativeInteger"},"minProperties":{"$ref":"#/definitions/nonNegativeIntegerDefault0"},"required":{"$ref":"#/definitions/stringArray"},"additionalProperties":{"$ref":"#"},"definitions":{"type":"object","additionalProperties":{"$ref":"#"},"default":{}},"properties":{"type":"object","additionalProperties":{"$ref":"#"},"default":{}},"patternProperties":{"type":"object","additionalProperties":{"$ref":"#"},"propertyNames":{"format":"regex"},"default":{}},"dependencies":{"type":"object","additionalProperties":{"
anyOf":[{"$ref":"#"},{"$ref":"#/definitions/stringArray"}]}},"propertyNames":{"$ref":"#"},"const":true,"enum":{"type":"array","items":true,"minItems":1,"uniqueItems":true},"type":{"anyOf":[{"$ref":"#/definitions/simpleTypes"},{"type":"array","items":{"$ref":"#/definitions/simpleTypes"},"minItems":1,"uniqueItems":true}]},"format":{"type":"string"},"contentMediaType":{"type":"string"},"contentEncoding":{"type":"string"},"if":{"$ref":"#"},"then":{"$ref":"#"},"else":{"$ref":"#"},"allOf":{"$ref":"#/definitions/schemaArray"},"anyOf":{"$ref":"#/definitions/schemaArray"},"oneOf":{"$ref":"#/definitions/schemaArray"},"not":{"$ref":"#"}},"default":true}`, + }, + } +} + +func (dc draftConfigs) GetMetaSchema(url string) string { + for _, config := range dc { + if config.MetaSchemaURL == url { + return config.MetaSchema + } + } + return "" +} +func (dc draftConfigs) GetDraftVersion(url string) *Draft { + for _, config := range dc { + if config.MetaSchemaURL == url { + return &config.Version + } + } + return nil +} +func (dc draftConfigs) GetSchemaURL(draft Draft) string { + for _, config := range dc { + if config.Version == draft { + return config.MetaSchemaURL + } + } + return "" +} + +func parseSchemaURL(documentNode interface{}) (string, *Draft, error) { + + if isKind(documentNode, reflect.Bool) { + return "", nil, nil + } + + if !isKind(documentNode, reflect.Map) { + return "", nil, errors.New("schema is invalid") + } + + m := documentNode.(map[string]interface{}) + + if existsMapKey(m, KEY_SCHEMA) { + if !isKind(m[KEY_SCHEMA], reflect.String) { + return "", nil, errors.New(formatErrorDescription( + Locale.MustBeOfType(), + ErrorDetails{ + "key": KEY_SCHEMA, + "type": TYPE_STRING, + }, + )) + } + + schemaReference, err := gojsonreference.NewJsonReference(m[KEY_SCHEMA].(string)) + + if err != nil { + return "", nil, err + } + + schema := schemaReference.String() + + return schema, drafts.GetDraftVersion(schema), nil + } + + return "", nil, nil +} diff --git 
a/vendor/github.com/xeipuuv/gojsonschema/errors.go b/vendor/github.com/xeipuuv/gojsonschema/errors.go new file mode 100644 index 0000000000..e4e9814f31 --- /dev/null +++ b/vendor/github.com/xeipuuv/gojsonschema/errors.go @@ -0,0 +1,364 @@ +package gojsonschema + +import ( + "bytes" + "sync" + "text/template" +) + +var errorTemplates = errorTemplate{template.New("errors-new"), sync.RWMutex{}} + +// template.Template is not thread-safe for writing, so some locking is done +// sync.RWMutex is used for efficiently locking when new templates are created +type errorTemplate struct { + *template.Template + sync.RWMutex +} + +type ( + + // FalseError. ErrorDetails: - + FalseError struct { + ResultErrorFields + } + + // RequiredError indicates that a required field is missing + // ErrorDetails: property string + RequiredError struct { + ResultErrorFields + } + + // InvalidTypeError indicates that a field has the incorrect type + // ErrorDetails: expected, given + InvalidTypeError struct { + ResultErrorFields + } + + // NumberAnyOfError is produced in case of a failing "anyOf" validation + // ErrorDetails: - + NumberAnyOfError struct { + ResultErrorFields + } + + // NumberOneOfError is produced in case of a failing "oneOf" validation + // ErrorDetails: - + NumberOneOfError struct { + ResultErrorFields + } + + // NumberAllOfError is produced in case of a failing "allOf" validation + // ErrorDetails: - + NumberAllOfError struct { + ResultErrorFields + } + + // NumberNotError is produced if a "not" validation failed + // ErrorDetails: - + NumberNotError struct { + ResultErrorFields + } + + // MissingDependencyError is produced in case of a "missing dependency" problem + // ErrorDetails: dependency + MissingDependencyError struct { + ResultErrorFields + } + + // InternalError indicates an internal error + // ErrorDetails: error + InternalError struct { + ResultErrorFields + } + + // ConstError indicates a const error + // ErrorDetails: allowed + ConstError struct { + 
ResultErrorFields + } + + // EnumError indicates an enum error + // ErrorDetails: allowed + EnumError struct { + ResultErrorFields + } + + // ArrayNoAdditionalItemsError is produced if additional items were found, but not allowed + // ErrorDetails: - + ArrayNoAdditionalItemsError struct { + ResultErrorFields + } + + // ArrayMinItemsError is produced if an array contains less items than the allowed minimum + // ErrorDetails: min + ArrayMinItemsError struct { + ResultErrorFields + } + + // ArrayMaxItemsError is produced if an array contains more items than the allowed maximum + // ErrorDetails: max + ArrayMaxItemsError struct { + ResultErrorFields + } + + // ItemsMustBeUniqueError is produced if an array requires unique items, but contains non-unique items + // ErrorDetails: type, i, j + ItemsMustBeUniqueError struct { + ResultErrorFields + } + + // ArrayContainsError is produced if an array contains invalid items + // ErrorDetails: + ArrayContainsError struct { + ResultErrorFields + } + + // ArrayMinPropertiesError is produced if an object contains less properties than the allowed minimum + // ErrorDetails: min + ArrayMinPropertiesError struct { + ResultErrorFields + } + + // ArrayMaxPropertiesError is produced if an object contains more properties than the allowed maximum + // ErrorDetails: max + ArrayMaxPropertiesError struct { + ResultErrorFields + } + + // AdditionalPropertyNotAllowedError is produced if an object has additional properties, but not allowed + // ErrorDetails: property + AdditionalPropertyNotAllowedError struct { + ResultErrorFields + } + + // InvalidPropertyPatternError is produced if an pattern was found + // ErrorDetails: property, pattern + InvalidPropertyPatternError struct { + ResultErrorFields + } + + // InvalidPropertyNameError is produced if an invalid-named property was found + // ErrorDetails: property + InvalidPropertyNameError struct { + ResultErrorFields + } + + // StringLengthGTEError is produced if a string is shorter than the 
minimum required length + // ErrorDetails: min + StringLengthGTEError struct { + ResultErrorFields + } + + // StringLengthLTEError is produced if a string is longer than the maximum allowed length + // ErrorDetails: max + StringLengthLTEError struct { + ResultErrorFields + } + + // DoesNotMatchPatternError is produced if a string does not match the defined pattern + // ErrorDetails: pattern + DoesNotMatchPatternError struct { + ResultErrorFields + } + + // DoesNotMatchFormatError is produced if a string does not match the defined format + // ErrorDetails: format + DoesNotMatchFormatError struct { + ResultErrorFields + } + + // MultipleOfError is produced if a number is not a multiple of the defined multipleOf + // ErrorDetails: multiple + MultipleOfError struct { + ResultErrorFields + } + + // NumberGTEError is produced if a number is lower than the allowed minimum + // ErrorDetails: min + NumberGTEError struct { + ResultErrorFields + } + + // NumberGTError is produced if a number is lower than, or equal to the specified minimum, and exclusiveMinimum is set + // ErrorDetails: min + NumberGTError struct { + ResultErrorFields + } + + // NumberLTEError is produced if a number is higher than the allowed maximum + // ErrorDetails: max + NumberLTEError struct { + ResultErrorFields + } + + // NumberLTError is produced if a number is higher than, or equal to the specified maximum, and exclusiveMaximum is set + // ErrorDetails: max + NumberLTError struct { + ResultErrorFields + } + + // ConditionThenError is produced if a condition's "then" validation is invalid + // ErrorDetails: - + ConditionThenError struct { + ResultErrorFields + } + + // ConditionElseError is produced if a condition's "else" condition is invalid + // ErrorDetails: - + ConditionElseError struct { + ResultErrorFields + } +) + +// newError takes a ResultError type and sets the type, context, description, details, value, and field +func newError(err ResultError, context *JsonContext, value interface{}, 
locale locale, details ErrorDetails) { + var t string + var d string + switch err.(type) { + case *FalseError: + t = "false" + d = locale.False() + case *RequiredError: + t = "required" + d = locale.Required() + case *InvalidTypeError: + t = "invalid_type" + d = locale.InvalidType() + case *NumberAnyOfError: + t = "number_any_of" + d = locale.NumberAnyOf() + case *NumberOneOfError: + t = "number_one_of" + d = locale.NumberOneOf() + case *NumberAllOfError: + t = "number_all_of" + d = locale.NumberAllOf() + case *NumberNotError: + t = "number_not" + d = locale.NumberNot() + case *MissingDependencyError: + t = "missing_dependency" + d = locale.MissingDependency() + case *InternalError: + t = "internal" + d = locale.Internal() + case *ConstError: + t = "const" + d = locale.Const() + case *EnumError: + t = "enum" + d = locale.Enum() + case *ArrayNoAdditionalItemsError: + t = "array_no_additional_items" + d = locale.ArrayNoAdditionalItems() + case *ArrayMinItemsError: + t = "array_min_items" + d = locale.ArrayMinItems() + case *ArrayMaxItemsError: + t = "array_max_items" + d = locale.ArrayMaxItems() + case *ItemsMustBeUniqueError: + t = "unique" + d = locale.Unique() + case *ArrayContainsError: + t = "contains" + d = locale.ArrayContains() + case *ArrayMinPropertiesError: + t = "array_min_properties" + d = locale.ArrayMinProperties() + case *ArrayMaxPropertiesError: + t = "array_max_properties" + d = locale.ArrayMaxProperties() + case *AdditionalPropertyNotAllowedError: + t = "additional_property_not_allowed" + d = locale.AdditionalPropertyNotAllowed() + case *InvalidPropertyPatternError: + t = "invalid_property_pattern" + d = locale.InvalidPropertyPattern() + case *InvalidPropertyNameError: + t = "invalid_property_name" + d = locale.InvalidPropertyName() + case *StringLengthGTEError: + t = "string_gte" + d = locale.StringGTE() + case *StringLengthLTEError: + t = "string_lte" + d = locale.StringLTE() + case *DoesNotMatchPatternError: + t = "pattern" + d = 
locale.DoesNotMatchPattern() + case *DoesNotMatchFormatError: + t = "format" + d = locale.DoesNotMatchFormat() + case *MultipleOfError: + t = "multiple_of" + d = locale.MultipleOf() + case *NumberGTEError: + t = "number_gte" + d = locale.NumberGTE() + case *NumberGTError: + t = "number_gt" + d = locale.NumberGT() + case *NumberLTEError: + t = "number_lte" + d = locale.NumberLTE() + case *NumberLTError: + t = "number_lt" + d = locale.NumberLT() + case *ConditionThenError: + t = "condition_then" + d = locale.ConditionThen() + case *ConditionElseError: + t = "condition_else" + d = locale.ConditionElse() + } + + err.SetType(t) + err.SetContext(context) + err.SetValue(value) + err.SetDetails(details) + err.SetDescriptionFormat(d) + details["field"] = err.Field() + + if _, exists := details["context"]; !exists && context != nil { + details["context"] = context.String() + } + + err.SetDescription(formatErrorDescription(err.DescriptionFormat(), details)) +} + +// formatErrorDescription takes a string in the default text/template +// format and converts it to a string with replacements. The fields come +// from the ErrorDetails struct and vary for each type of error. 
+func formatErrorDescription(s string, details ErrorDetails) string { + + var tpl *template.Template + var descrAsBuffer bytes.Buffer + var err error + + errorTemplates.RLock() + tpl = errorTemplates.Lookup(s) + errorTemplates.RUnlock() + + if tpl == nil { + errorTemplates.Lock() + tpl = errorTemplates.New(s) + + if ErrorTemplateFuncs != nil { + tpl.Funcs(ErrorTemplateFuncs) + } + + tpl, err = tpl.Parse(s) + errorTemplates.Unlock() + + if err != nil { + return err.Error() + } + } + + err = tpl.Execute(&descrAsBuffer, details) + if err != nil { + return err.Error() + } + + return descrAsBuffer.String() +} diff --git a/vendor/github.com/xeipuuv/gojsonschema/format_checkers.go b/vendor/github.com/xeipuuv/gojsonschema/format_checkers.go new file mode 100644 index 0000000000..873ffc7d79 --- /dev/null +++ b/vendor/github.com/xeipuuv/gojsonschema/format_checkers.go @@ -0,0 +1,368 @@ +package gojsonschema + +import ( + "net" + "net/mail" + "net/url" + "regexp" + "strings" + "sync" + "time" +) + +type ( + // FormatChecker is the interface all formatters added to FormatCheckerChain must implement + FormatChecker interface { + // IsFormat checks if input has the correct format and type + IsFormat(input interface{}) bool + } + + // FormatCheckerChain holds the formatters + FormatCheckerChain struct { + formatters map[string]FormatChecker + } + + // EmailFormatChecker verifies email address formats + EmailFormatChecker struct{} + + // IPV4FormatChecker verifies IP addresses in the IPv4 format + IPV4FormatChecker struct{} + + // IPV6FormatChecker verifies IP addresses in the IPv6 format + IPV6FormatChecker struct{} + + // DateTimeFormatChecker verifies date/time formats per RFC3339 5.6 + // + // Valid formats: + // Partial Time: HH:MM:SS + // Full Date: YYYY-MM-DD + // Full Time: HH:MM:SSZ-07:00 + // Date Time: YYYY-MM-DDTHH:MM:SSZ-0700 + // + // Where + // YYYY = 4DIGIT year + // MM = 2DIGIT month ; 01-12 + // DD = 2DIGIT day-month ; 01-28, 01-29, 01-30, 01-31 based on 
month/year + // HH = 2DIGIT hour ; 00-23 + // MM = 2DIGIT ; 00-59 + // SS = 2DIGIT ; 00-58, 00-60 based on leap second rules + // T = Literal + // Z = Literal + // + // Note: Nanoseconds are also suported in all formats + // + // http://tools.ietf.org/html/rfc3339#section-5.6 + DateTimeFormatChecker struct{} + + // DateFormatChecker verifies date formats + // + // Valid format: + // Full Date: YYYY-MM-DD + // + // Where + // YYYY = 4DIGIT year + // MM = 2DIGIT month ; 01-12 + // DD = 2DIGIT day-month ; 01-28, 01-29, 01-30, 01-31 based on month/year + DateFormatChecker struct{} + + // TimeFormatChecker verifies time formats + // + // Valid formats: + // Partial Time: HH:MM:SS + // Full Time: HH:MM:SSZ-07:00 + // + // Where + // HH = 2DIGIT hour ; 00-23 + // MM = 2DIGIT ; 00-59 + // SS = 2DIGIT ; 00-58, 00-60 based on leap second rules + // T = Literal + // Z = Literal + TimeFormatChecker struct{} + + // URIFormatChecker validates a URI with a valid Scheme per RFC3986 + URIFormatChecker struct{} + + // URIReferenceFormatChecker validates a URI or relative-reference per RFC3986 + URIReferenceFormatChecker struct{} + + // URITemplateFormatChecker validates a URI template per RFC6570 + URITemplateFormatChecker struct{} + + // HostnameFormatChecker validates a hostname is in the correct format + HostnameFormatChecker struct{} + + // UUIDFormatChecker validates a UUID is in the correct format + UUIDFormatChecker struct{} + + // RegexFormatChecker validates a regex is in the correct format + RegexFormatChecker struct{} + + // JSONPointerFormatChecker validates a JSON Pointer per RFC6901 + JSONPointerFormatChecker struct{} + + // RelativeJSONPointerFormatChecker validates a relative JSON Pointer is in the correct format + RelativeJSONPointerFormatChecker struct{} +) + +var ( + // FormatCheckers holds the valid formatters, and is a public variable + // so library users can add custom formatters + FormatCheckers = FormatCheckerChain{ + formatters: map[string]FormatChecker{ + 
"date": DateFormatChecker{}, + "time": TimeFormatChecker{}, + "date-time": DateTimeFormatChecker{}, + "hostname": HostnameFormatChecker{}, + "email": EmailFormatChecker{}, + "idn-email": EmailFormatChecker{}, + "ipv4": IPV4FormatChecker{}, + "ipv6": IPV6FormatChecker{}, + "uri": URIFormatChecker{}, + "uri-reference": URIReferenceFormatChecker{}, + "iri": URIFormatChecker{}, + "iri-reference": URIReferenceFormatChecker{}, + "uri-template": URITemplateFormatChecker{}, + "uuid": UUIDFormatChecker{}, + "regex": RegexFormatChecker{}, + "json-pointer": JSONPointerFormatChecker{}, + "relative-json-pointer": RelativeJSONPointerFormatChecker{}, + }, + } + + // Regex credit: https://www.socketloop.com/tutorials/golang-validate-hostname + rxHostname = regexp.MustCompile(`^([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9\-]{0,61}[a-zA-Z0-9])(\.([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9\-]{0,61}[a-zA-Z0-9]))*$`) + + // Use a regex to make sure curly brackets are balanced properly after validating it as a AURI + rxURITemplate = regexp.MustCompile("^([^{]*({[^}]*})?)*$") + + rxUUID = regexp.MustCompile("^[a-f0-9]{8}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{12}$") + + rxJSONPointer = regexp.MustCompile("^(?:/(?:[^~/]|~0|~1)*)*$") + + rxRelJSONPointer = regexp.MustCompile("^(?:0|[1-9][0-9]*)(?:#|(?:/(?:[^~/]|~0|~1)*)*)$") + + lock = new(sync.RWMutex) +) + +// Add adds a FormatChecker to the FormatCheckerChain +// The name used will be the value used for the format key in your json schema +func (c *FormatCheckerChain) Add(name string, f FormatChecker) *FormatCheckerChain { + lock.Lock() + c.formatters[name] = f + lock.Unlock() + + return c +} + +// Remove deletes a FormatChecker from the FormatCheckerChain (if it exists) +func (c *FormatCheckerChain) Remove(name string) *FormatCheckerChain { + lock.Lock() + delete(c.formatters, name) + lock.Unlock() + + return c +} + +// Has checks to see if the FormatCheckerChain holds a FormatChecker with the given name +func (c *FormatCheckerChain) Has(name 
string) bool { + lock.RLock() + _, ok := c.formatters[name] + lock.RUnlock() + + return ok +} + +// IsFormat will check an input against a FormatChecker with the given name +// to see if it is the correct format +func (c *FormatCheckerChain) IsFormat(name string, input interface{}) bool { + lock.RLock() + f, ok := c.formatters[name] + lock.RUnlock() + + // If a format is unrecognized it should always pass validation + if !ok { + return true + } + + return f.IsFormat(input) +} + +// IsFormat checks if input is a correctly formatted e-mail address +func (f EmailFormatChecker) IsFormat(input interface{}) bool { + asString, ok := input.(string) + if !ok { + return false + } + + _, err := mail.ParseAddress(asString) + return err == nil +} + +// IsFormat checks if input is a correctly formatted IPv4-address +func (f IPV4FormatChecker) IsFormat(input interface{}) bool { + asString, ok := input.(string) + if !ok { + return false + } + + // Credit: https://github.com/asaskevich/govalidator + ip := net.ParseIP(asString) + return ip != nil && strings.Contains(asString, ".") +} + +// IsFormat checks if input is a correctly formatted IPv6=address +func (f IPV6FormatChecker) IsFormat(input interface{}) bool { + asString, ok := input.(string) + if !ok { + return false + } + + // Credit: https://github.com/asaskevich/govalidator + ip := net.ParseIP(asString) + return ip != nil && strings.Contains(asString, ":") +} + +// IsFormat checks if input is a correctly formatted date/time per RFC3339 5.6 +func (f DateTimeFormatChecker) IsFormat(input interface{}) bool { + asString, ok := input.(string) + if !ok { + return false + } + + formats := []string{ + "15:04:05", + "15:04:05Z07:00", + "2006-01-02", + time.RFC3339, + time.RFC3339Nano, + } + + for _, format := range formats { + if _, err := time.Parse(format, asString); err == nil { + return true + } + } + + return false +} + +// IsFormat checks if input is a correctly formatted date (YYYY-MM-DD) +func (f DateFormatChecker) 
IsFormat(input interface{}) bool { + asString, ok := input.(string) + if !ok { + return false + } + _, err := time.Parse("2006-01-02", asString) + return err == nil +} + +// IsFormat checks if input correctly formatted time (HH:MM:SS or HH:MM:SSZ-07:00) +func (f TimeFormatChecker) IsFormat(input interface{}) bool { + asString, ok := input.(string) + if !ok { + return false + } + + if _, err := time.Parse("15:04:05Z07:00", asString); err == nil { + return true + } + + _, err := time.Parse("15:04:05", asString) + return err == nil +} + +// IsFormat checks if input is correctly formatted URI with a valid Scheme per RFC3986 +func (f URIFormatChecker) IsFormat(input interface{}) bool { + asString, ok := input.(string) + if !ok { + return false + } + + u, err := url.Parse(asString) + + if err != nil || u.Scheme == "" { + return false + } + + return !strings.Contains(asString, `\`) +} + +// IsFormat checks if input is a correctly formatted URI or relative-reference per RFC3986 +func (f URIReferenceFormatChecker) IsFormat(input interface{}) bool { + asString, ok := input.(string) + if !ok { + return false + } + + _, err := url.Parse(asString) + return err == nil && !strings.Contains(asString, `\`) +} + +// IsFormat checks if input is a correctly formatted URI template per RFC6570 +func (f URITemplateFormatChecker) IsFormat(input interface{}) bool { + asString, ok := input.(string) + if !ok { + return false + } + + u, err := url.Parse(asString) + if err != nil || strings.Contains(asString, `\`) { + return false + } + + return rxURITemplate.MatchString(u.Path) +} + +// IsFormat checks if input is a correctly formatted hostname +func (f HostnameFormatChecker) IsFormat(input interface{}) bool { + asString, ok := input.(string) + if !ok { + return false + } + + return rxHostname.MatchString(asString) && len(asString) < 256 +} + +// IsFormat checks if input is a correctly formatted UUID +func (f UUIDFormatChecker) IsFormat(input interface{}) bool { + asString, ok := 
input.(string) + if !ok { + return false + } + + return rxUUID.MatchString(asString) +} + +// IsFormat checks if input is a correctly formatted regular expression +func (f RegexFormatChecker) IsFormat(input interface{}) bool { + asString, ok := input.(string) + if !ok { + return false + } + + if asString == "" { + return true + } + _, err := regexp.Compile(asString) + return err == nil +} + +// IsFormat checks if input is a correctly formatted JSON Pointer per RFC6901 +func (f JSONPointerFormatChecker) IsFormat(input interface{}) bool { + asString, ok := input.(string) + if !ok { + return false + } + + return rxJSONPointer.MatchString(asString) +} + +// IsFormat checks if input is a correctly formatted relative JSON Pointer +func (f RelativeJSONPointerFormatChecker) IsFormat(input interface{}) bool { + asString, ok := input.(string) + if !ok { + return false + } + + return rxRelJSONPointer.MatchString(asString) +} diff --git a/vendor/github.com/xeipuuv/gojsonschema/glide.yaml b/vendor/github.com/xeipuuv/gojsonschema/glide.yaml new file mode 100644 index 0000000000..ab6fb867c5 --- /dev/null +++ b/vendor/github.com/xeipuuv/gojsonschema/glide.yaml @@ -0,0 +1,13 @@ +package: github.com/xeipuuv/gojsonschema +license: Apache 2.0 +import: +- package: github.com/xeipuuv/gojsonschema + +- package: github.com/xeipuuv/gojsonpointer + +- package: github.com/xeipuuv/gojsonreference + +testImport: +- package: github.com/stretchr/testify + subpackages: + - assert diff --git a/vendor/github.com/xeipuuv/gojsonschema/go.mod b/vendor/github.com/xeipuuv/gojsonschema/go.mod new file mode 100644 index 0000000000..b709d7fcd6 --- /dev/null +++ b/vendor/github.com/xeipuuv/gojsonschema/go.mod @@ -0,0 +1,7 @@ +module github.com/xeipuuv/gojsonschema + +require ( + github.com/stretchr/testify v1.3.0 + github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f // indirect + github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 +) diff --git 
a/vendor/github.com/xeipuuv/gojsonschema/go.sum b/vendor/github.com/xeipuuv/gojsonschema/go.sum new file mode 100644 index 0000000000..0e865ac759 --- /dev/null +++ b/vendor/github.com/xeipuuv/gojsonschema/go.sum @@ -0,0 +1,11 @@ +github.com/davecgh/go-spew v1.1.0 h1:ZDRjVQ15GmhC3fiQ8ni8+OwkZQO4DARzQgrnXU1Liz8= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/testify v1.3.0 h1:TivCn/peBQ7UY8ooIcPgZFpTNSz0Q2U6UrFlUfqbe0Q= +github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f h1:J9EGpcZtP0E/raorCMxlFGSTBrsSlaDGf3jU/qvAE2c= +github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU= +github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 h1:EzJWgHovont7NscjpAxXsDA8S8BMYve8Y5+7cuRE7R0= +github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415/go.mod h1:GwrjFmJcFw6At/Gs6z4yjiIwzuJ1/+UwLxMQDVQXShQ= diff --git a/vendor/github.com/xeipuuv/gojsonschema/internalLog.go b/vendor/github.com/xeipuuv/gojsonschema/internalLog.go new file mode 100644 index 0000000000..4ef7a8d03e --- /dev/null +++ b/vendor/github.com/xeipuuv/gojsonschema/internalLog.go @@ -0,0 +1,37 @@ +// Copyright 2015 xeipuuv ( https://github.com/xeipuuv ) +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// author xeipuuv +// author-github https://github.com/xeipuuv +// author-mail xeipuuv@gmail.com +// +// repository-name gojsonschema +// repository-desc An implementation of JSON Schema, based on IETF's draft v4 - Go language. +// +// description Very simple log wrapper. +// Used for debugging/testing purposes. +// +// created 01-01-2015 + +package gojsonschema + +import ( + "log" +) + +const internalLogEnabled = false + +func internalLog(format string, v ...interface{}) { + log.Printf(format, v...) +} diff --git a/vendor/github.com/xeipuuv/gojsonschema/jsonContext.go b/vendor/github.com/xeipuuv/gojsonschema/jsonContext.go new file mode 100644 index 0000000000..0e979707b4 --- /dev/null +++ b/vendor/github.com/xeipuuv/gojsonschema/jsonContext.go @@ -0,0 +1,73 @@ +// Copyright 2013 MongoDB, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// author tolsen +// author-github https://github.com/tolsen +// +// repository-name gojsonschema +// repository-desc An implementation of JSON Schema, based on IETF's draft v4 - Go language. 
+// +// description Implements a persistent (immutable w/ shared structure) singly-linked list of strings for the purpose of storing a json context +// +// created 04-09-2013 + +package gojsonschema + +import "bytes" + +// JsonContext implements a persistent linked-list of strings +type JsonContext struct { + head string + tail *JsonContext +} + +// NewJsonContext creates a new JsonContext +func NewJsonContext(head string, tail *JsonContext) *JsonContext { + return &JsonContext{head, tail} +} + +// String displays the context in reverse. +// This plays well with the data structure's persistent nature with +// Cons and a json document's tree structure. +func (c *JsonContext) String(del ...string) string { + byteArr := make([]byte, 0, c.stringLen()) + buf := bytes.NewBuffer(byteArr) + c.writeStringToBuffer(buf, del) + + return buf.String() +} + +func (c *JsonContext) stringLen() int { + length := 0 + if c.tail != nil { + length = c.tail.stringLen() + 1 // add 1 for "." + } + + length += len(c.head) + return length +} + +func (c *JsonContext) writeStringToBuffer(buf *bytes.Buffer, del []string) { + if c.tail != nil { + c.tail.writeStringToBuffer(buf, del) + + if len(del) > 0 { + buf.WriteString(del[0]) + } else { + buf.WriteString(".") + } + } + + buf.WriteString(c.head) +} diff --git a/vendor/github.com/xeipuuv/gojsonschema/jsonLoader.go b/vendor/github.com/xeipuuv/gojsonschema/jsonLoader.go new file mode 100644 index 0000000000..5d88af263e --- /dev/null +++ b/vendor/github.com/xeipuuv/gojsonschema/jsonLoader.go @@ -0,0 +1,386 @@ +// Copyright 2015 xeipuuv ( https://github.com/xeipuuv ) +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// author xeipuuv +// author-github https://github.com/xeipuuv +// author-mail xeipuuv@gmail.com +// +// repository-name gojsonschema +// repository-desc An implementation of JSON Schema, based on IETF's draft v4 - Go language. +// +// description Different strategies to load JSON files. +// Includes References (file and HTTP), JSON strings and Go types. +// +// created 01-02-2015 + +package gojsonschema + +import ( + "bytes" + "encoding/json" + "errors" + "io" + "io/ioutil" + "net/http" + "net/url" + "os" + "path/filepath" + "runtime" + "strings" + + "github.com/xeipuuv/gojsonreference" +) + +var osFS = osFileSystem(os.Open) + +// JSONLoader defines the JSON loader interface +type JSONLoader interface { + JsonSource() interface{} + LoadJSON() (interface{}, error) + JsonReference() (gojsonreference.JsonReference, error) + LoaderFactory() JSONLoaderFactory +} + +// JSONLoaderFactory defines the JSON loader factory interface +type JSONLoaderFactory interface { + // New creates a new JSON loader for the given source + New(source string) JSONLoader +} + +// DefaultJSONLoaderFactory is the default JSON loader factory +type DefaultJSONLoaderFactory struct { +} + +// FileSystemJSONLoaderFactory is a JSON loader factory that uses http.FileSystem +type FileSystemJSONLoaderFactory struct { + fs http.FileSystem +} + +// New creates a new JSON loader for the given source +func (d DefaultJSONLoaderFactory) New(source string) JSONLoader { + return &jsonReferenceLoader{ + fs: osFS, + source: source, + } +} + +// New creates a new JSON loader for the given 
source +func (f FileSystemJSONLoaderFactory) New(source string) JSONLoader { + return &jsonReferenceLoader{ + fs: f.fs, + source: source, + } +} + +// osFileSystem is a functional wrapper for os.Open that implements http.FileSystem. +type osFileSystem func(string) (*os.File, error) + +// Opens a file with the given name +func (o osFileSystem) Open(name string) (http.File, error) { + return o(name) +} + +// JSON Reference loader +// references are used to load JSONs from files and HTTP + +type jsonReferenceLoader struct { + fs http.FileSystem + source string +} + +func (l *jsonReferenceLoader) JsonSource() interface{} { + return l.source +} + +func (l *jsonReferenceLoader) JsonReference() (gojsonreference.JsonReference, error) { + return gojsonreference.NewJsonReference(l.JsonSource().(string)) +} + +func (l *jsonReferenceLoader) LoaderFactory() JSONLoaderFactory { + return &FileSystemJSONLoaderFactory{ + fs: l.fs, + } +} + +// NewReferenceLoader returns a JSON reference loader using the given source and the local OS file system. +func NewReferenceLoader(source string) JSONLoader { + return &jsonReferenceLoader{ + fs: osFS, + source: source, + } +} + +// NewReferenceLoaderFileSystem returns a JSON reference loader using the given source and file system. 
+func NewReferenceLoaderFileSystem(source string, fs http.FileSystem) JSONLoader { + return &jsonReferenceLoader{ + fs: fs, + source: source, + } +} + +func (l *jsonReferenceLoader) LoadJSON() (interface{}, error) { + + var err error + + reference, err := gojsonreference.NewJsonReference(l.JsonSource().(string)) + if err != nil { + return nil, err + } + + refToURL := reference + refToURL.GetUrl().Fragment = "" + + var document interface{} + + if reference.HasFileScheme { + + filename := strings.TrimPrefix(refToURL.String(), "file://") + filename, err = url.QueryUnescape(filename) + + if err != nil { + return nil, err + } + + if runtime.GOOS == "windows" { + // on Windows, a file URL may have an extra leading slash, use slashes + // instead of backslashes, and have spaces escaped + filename = strings.TrimPrefix(filename, "/") + filename = filepath.FromSlash(filename) + } + + document, err = l.loadFromFile(filename) + if err != nil { + return nil, err + } + + } else { + + document, err = l.loadFromHTTP(refToURL.String()) + if err != nil { + return nil, err + } + + } + + return document, nil + +} + +func (l *jsonReferenceLoader) loadFromHTTP(address string) (interface{}, error) { + + // returned cached versions for metaschemas for drafts 4, 6 and 7 + // for performance and allow for easier offline use + if metaSchema := drafts.GetMetaSchema(address); metaSchema != "" { + return decodeJSONUsingNumber(strings.NewReader(metaSchema)) + } + + resp, err := http.Get(address) + if err != nil { + return nil, err + } + + // must return HTTP Status 200 OK + if resp.StatusCode != http.StatusOK { + return nil, errors.New(formatErrorDescription(Locale.HttpBadStatus(), ErrorDetails{"status": resp.Status})) + } + + bodyBuff, err := ioutil.ReadAll(resp.Body) + if err != nil { + return nil, err + } + + return decodeJSONUsingNumber(bytes.NewReader(bodyBuff)) +} + +func (l *jsonReferenceLoader) loadFromFile(path string) (interface{}, error) { + f, err := l.fs.Open(path) + if err != nil { 
+ return nil, err + } + defer f.Close() + + bodyBuff, err := ioutil.ReadAll(f) + if err != nil { + return nil, err + } + + return decodeJSONUsingNumber(bytes.NewReader(bodyBuff)) + +} + +// JSON string loader + +type jsonStringLoader struct { + source string +} + +func (l *jsonStringLoader) JsonSource() interface{} { + return l.source +} + +func (l *jsonStringLoader) JsonReference() (gojsonreference.JsonReference, error) { + return gojsonreference.NewJsonReference("#") +} + +func (l *jsonStringLoader) LoaderFactory() JSONLoaderFactory { + return &DefaultJSONLoaderFactory{} +} + +// NewStringLoader creates a new JSONLoader, taking a string as source +func NewStringLoader(source string) JSONLoader { + return &jsonStringLoader{source: source} +} + +func (l *jsonStringLoader) LoadJSON() (interface{}, error) { + + return decodeJSONUsingNumber(strings.NewReader(l.JsonSource().(string))) + +} + +// JSON bytes loader + +type jsonBytesLoader struct { + source []byte +} + +func (l *jsonBytesLoader) JsonSource() interface{} { + return l.source +} + +func (l *jsonBytesLoader) JsonReference() (gojsonreference.JsonReference, error) { + return gojsonreference.NewJsonReference("#") +} + +func (l *jsonBytesLoader) LoaderFactory() JSONLoaderFactory { + return &DefaultJSONLoaderFactory{} +} + +// NewBytesLoader creates a new JSONLoader, taking a `[]byte` as source +func NewBytesLoader(source []byte) JSONLoader { + return &jsonBytesLoader{source: source} +} + +func (l *jsonBytesLoader) LoadJSON() (interface{}, error) { + return decodeJSONUsingNumber(bytes.NewReader(l.JsonSource().([]byte))) +} + +// JSON Go (types) loader +// used to load JSONs from the code as maps, interface{}, structs ... 
+ +type jsonGoLoader struct { + source interface{} +} + +func (l *jsonGoLoader) JsonSource() interface{} { + return l.source +} + +func (l *jsonGoLoader) JsonReference() (gojsonreference.JsonReference, error) { + return gojsonreference.NewJsonReference("#") +} + +func (l *jsonGoLoader) LoaderFactory() JSONLoaderFactory { + return &DefaultJSONLoaderFactory{} +} + +// NewGoLoader creates a new JSONLoader from a given Go struct +func NewGoLoader(source interface{}) JSONLoader { + return &jsonGoLoader{source: source} +} + +func (l *jsonGoLoader) LoadJSON() (interface{}, error) { + + // convert it to a compliant JSON first to avoid types "mismatches" + + jsonBytes, err := json.Marshal(l.JsonSource()) + if err != nil { + return nil, err + } + + return decodeJSONUsingNumber(bytes.NewReader(jsonBytes)) + +} + +type jsonIOLoader struct { + buf *bytes.Buffer +} + +// NewReaderLoader creates a new JSON loader using the provided io.Reader +func NewReaderLoader(source io.Reader) (JSONLoader, io.Reader) { + buf := &bytes.Buffer{} + return &jsonIOLoader{buf: buf}, io.TeeReader(source, buf) +} + +// NewWriterLoader creates a new JSON loader using the provided io.Writer +func NewWriterLoader(source io.Writer) (JSONLoader, io.Writer) { + buf := &bytes.Buffer{} + return &jsonIOLoader{buf: buf}, io.MultiWriter(source, buf) +} + +func (l *jsonIOLoader) JsonSource() interface{} { + return l.buf.String() +} + +func (l *jsonIOLoader) LoadJSON() (interface{}, error) { + return decodeJSONUsingNumber(l.buf) +} + +func (l *jsonIOLoader) JsonReference() (gojsonreference.JsonReference, error) { + return gojsonreference.NewJsonReference("#") +} + +func (l *jsonIOLoader) LoaderFactory() JSONLoaderFactory { + return &DefaultJSONLoaderFactory{} +} + +// JSON raw loader +// In case the JSON is already marshalled to interface{} use this loader +// This is used for testing as otherwise there is no guarantee the JSON is marshalled +// "properly" by using 
https://golang.org/pkg/encoding/json/#Decoder.UseNumber +type jsonRawLoader struct { + source interface{} +} + +// NewRawLoader creates a new JSON raw loader for the given source +func NewRawLoader(source interface{}) JSONLoader { + return &jsonRawLoader{source: source} +} +func (l *jsonRawLoader) JsonSource() interface{} { + return l.source +} +func (l *jsonRawLoader) LoadJSON() (interface{}, error) { + return l.source, nil +} +func (l *jsonRawLoader) JsonReference() (gojsonreference.JsonReference, error) { + return gojsonreference.NewJsonReference("#") +} +func (l *jsonRawLoader) LoaderFactory() JSONLoaderFactory { + return &DefaultJSONLoaderFactory{} +} + +func decodeJSONUsingNumber(r io.Reader) (interface{}, error) { + + var document interface{} + + decoder := json.NewDecoder(r) + decoder.UseNumber() + + err := decoder.Decode(&document) + if err != nil { + return nil, err + } + + return document, nil + +} diff --git a/vendor/github.com/xeipuuv/gojsonschema/locales.go b/vendor/github.com/xeipuuv/gojsonschema/locales.go new file mode 100644 index 0000000000..a416225cdb --- /dev/null +++ b/vendor/github.com/xeipuuv/gojsonschema/locales.go @@ -0,0 +1,472 @@ +// Copyright 2015 xeipuuv ( https://github.com/xeipuuv ) +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +// author xeipuuv +// author-github https://github.com/xeipuuv +// author-mail xeipuuv@gmail.com +// +// repository-name gojsonschema +// repository-desc An implementation of JSON Schema, based on IETF's draft v4 - Go language. +// +// description Contains const string and messages. +// +// created 01-01-2015 + +package gojsonschema + +type ( + // locale is an interface for defining custom error strings + locale interface { + + // False returns a format-string for "false" schema validation errors + False() string + + // Required returns a format-string for "required" schema validation errors + Required() string + + // InvalidType returns a format-string for "invalid type" schema validation errors + InvalidType() string + + // NumberAnyOf returns a format-string for "anyOf" schema validation errors + NumberAnyOf() string + + // NumberOneOf returns a format-string for "oneOf" schema validation errors + NumberOneOf() string + + // NumberAllOf returns a format-string for "allOf" schema validation errors + NumberAllOf() string + + // NumberNot returns a format-string to format a NumberNotError + NumberNot() string + + // MissingDependency returns a format-string for "missing dependency" schema validation errors + MissingDependency() string + + // Internal returns a format-string for internal errors + Internal() string + + // Const returns a format-string to format a ConstError + Const() string + + // Enum returns a format-string to format an EnumError + Enum() string + + // ArrayNotEnoughItems returns a format-string to format an error for arrays having not enough items to match positional list of schema + ArrayNotEnoughItems() string + + // ArrayNoAdditionalItems returns a format-string to format an ArrayNoAdditionalItemsError + ArrayNoAdditionalItems() string + + // ArrayMinItems returns a format-string to format an ArrayMinItemsError + ArrayMinItems() string + + // ArrayMaxItems returns a format-string to format an ArrayMaxItemsError + ArrayMaxItems() string + + 
// Unique returns a format-string to format an ItemsMustBeUniqueError + Unique() string + + // ArrayContains returns a format-string to format an ArrayContainsError + ArrayContains() string + + // ArrayMinProperties returns a format-string to format an ArrayMinPropertiesError + ArrayMinProperties() string + + // ArrayMaxProperties returns a format-string to format an ArrayMaxPropertiesError + ArrayMaxProperties() string + + // AdditionalPropertyNotAllowed returns a format-string to format an AdditionalPropertyNotAllowedError + AdditionalPropertyNotAllowed() string + + // InvalidPropertyPattern returns a format-string to format an InvalidPropertyPatternError + InvalidPropertyPattern() string + + // InvalidPropertyName returns a format-string to format an InvalidPropertyNameError + InvalidPropertyName() string + + // StringGTE returns a format-string to format an StringLengthGTEError + StringGTE() string + + // StringLTE returns a format-string to format an StringLengthLTEError + StringLTE() string + + // DoesNotMatchPattern returns a format-string to format an DoesNotMatchPatternError + DoesNotMatchPattern() string + + // DoesNotMatchFormat returns a format-string to format an DoesNotMatchFormatError + DoesNotMatchFormat() string + + // MultipleOf returns a format-string to format an MultipleOfError + MultipleOf() string + + // NumberGTE returns a format-string to format an NumberGTEError + NumberGTE() string + + // NumberGT returns a format-string to format an NumberGTError + NumberGT() string + + // NumberLTE returns a format-string to format an NumberLTEError + NumberLTE() string + + // NumberLT returns a format-string to format an NumberLTError + NumberLT() string + + // Schema validations + + // RegexPattern returns a format-string to format a regex-pattern error + RegexPattern() string + + // GreaterThanZero returns a format-string to format an error where a number must be greater than zero + GreaterThanZero() string + + // MustBeOfA returns a format-string to 
format an error where a value is of the wrong type + MustBeOfA() string + + // MustBeOfAn returns a format-string to format an error where a value is of the wrong type + MustBeOfAn() string + + // CannotBeUsedWithout returns a format-string to format a "cannot be used without" error + CannotBeUsedWithout() string + + // CannotBeGT returns a format-string to format an error where a value are greater than allowed + CannotBeGT() string + + // MustBeOfType returns a format-string to format an error where a value does not match the required type + MustBeOfType() string + + // MustBeValidRegex returns a format-string to format an error where a regex is invalid + MustBeValidRegex() string + + // MustBeValidFormat returns a format-string to format an error where a value does not match the expected format + MustBeValidFormat() string + + // MustBeGTEZero returns a format-string to format an error where a value must be greater or equal than 0 + MustBeGTEZero() string + + // KeyCannotBeGreaterThan returns a format-string to format an error where a key is greater than the maximum allowed + KeyCannotBeGreaterThan() string + + // KeyItemsMustBeOfType returns a format-string to format an error where a key is of the wrong type + KeyItemsMustBeOfType() string + + // KeyItemsMustBeUnique returns a format-string to format an error where keys are not unique + KeyItemsMustBeUnique() string + + // ReferenceMustBeCanonical returns a format-string to format a "reference must be canonical" error + ReferenceMustBeCanonical() string + + // NotAValidType returns a format-string to format an invalid type error + NotAValidType() string + + // Duplicated returns a format-string to format an error where types are duplicated + Duplicated() string + + // HttpBadStatus returns a format-string for errors when loading a schema using HTTP + HttpBadStatus() string + + // ParseError returns a format-string for JSON parsing errors + ParseError() string + + // ConditionThen returns a format-string for 
ConditionThenError errors + ConditionThen() string + + // ConditionElse returns a format-string for ConditionElseError errors + ConditionElse() string + + // ErrorFormat returns a format string for errors + ErrorFormat() string + } + + // DefaultLocale is the default locale for this package + DefaultLocale struct{} +) + +// False returns a format-string for "false" schema validation errors +func (l DefaultLocale) False() string { + return "False always fails validation" +} + +// Required returns a format-string for "required" schema validation errors +func (l DefaultLocale) Required() string { + return `{{.property}} is required` +} + +// InvalidType returns a format-string for "invalid type" schema validation errors +func (l DefaultLocale) InvalidType() string { + return `Invalid type. Expected: {{.expected}}, given: {{.given}}` +} + +// NumberAnyOf returns a format-string for "anyOf" schema validation errors +func (l DefaultLocale) NumberAnyOf() string { + return `Must validate at least one schema (anyOf)` +} + +// NumberOneOf returns a format-string for "oneOf" schema validation errors +func (l DefaultLocale) NumberOneOf() string { + return `Must validate one and only one schema (oneOf)` +} + +// NumberAllOf returns a format-string for "allOf" schema validation errors +func (l DefaultLocale) NumberAllOf() string { + return `Must validate all the schemas (allOf)` +} + +// NumberNot returns a format-string to format a NumberNotError +func (l DefaultLocale) NumberNot() string { + return `Must not validate the schema (not)` +} + +// MissingDependency returns a format-string for "missing dependency" schema validation errors +func (l DefaultLocale) MissingDependency() string { + return `Has a dependency on {{.dependency}}` +} + +// Internal returns a format-string for internal errors +func (l DefaultLocale) Internal() string { + return `Internal Error {{.error}}` +} + +// Const returns a format-string to format a ConstError +func (l DefaultLocale) Const() string { + 
return `{{.field}} does not match: {{.allowed}}` +} + +// Enum returns a format-string to format an EnumError +func (l DefaultLocale) Enum() string { + return `{{.field}} must be one of the following: {{.allowed}}` +} + +// ArrayNoAdditionalItems returns a format-string to format an ArrayNoAdditionalItemsError +func (l DefaultLocale) ArrayNoAdditionalItems() string { + return `No additional items allowed on array` +} + +// ArrayNotEnoughItems returns a format-string to format an error for arrays having not enough items to match positional list of schema +func (l DefaultLocale) ArrayNotEnoughItems() string { + return `Not enough items on array to match positional list of schema` +} + +// ArrayMinItems returns a format-string to format an ArrayMinItemsError +func (l DefaultLocale) ArrayMinItems() string { + return `Array must have at least {{.min}} items` +} + +// ArrayMaxItems returns a format-string to format an ArrayMaxItemsError +func (l DefaultLocale) ArrayMaxItems() string { + return `Array must have at most {{.max}} items` +} + +// Unique returns a format-string to format an ItemsMustBeUniqueError +func (l DefaultLocale) Unique() string { + return `{{.type}} items[{{.i}},{{.j}}] must be unique` +} + +// ArrayContains returns a format-string to format an ArrayContainsError +func (l DefaultLocale) ArrayContains() string { + return `At least one of the items must match` +} + +// ArrayMinProperties returns a format-string to format an ArrayMinPropertiesError +func (l DefaultLocale) ArrayMinProperties() string { + return `Must have at least {{.min}} properties` +} + +// ArrayMaxProperties returns a format-string to format an ArrayMaxPropertiesError +func (l DefaultLocale) ArrayMaxProperties() string { + return `Must have at most {{.max}} properties` +} + +// AdditionalPropertyNotAllowed returns a format-string to format an AdditionalPropertyNotAllowedError +func (l DefaultLocale) AdditionalPropertyNotAllowed() string { + return `Additional property {{.property}} is 
not allowed` +} + +// InvalidPropertyPattern returns a format-string to format an InvalidPropertyPatternError +func (l DefaultLocale) InvalidPropertyPattern() string { + return `Property "{{.property}}" does not match pattern {{.pattern}}` +} + +// InvalidPropertyName returns a format-string to format an InvalidPropertyNameError +func (l DefaultLocale) InvalidPropertyName() string { + return `Property name of "{{.property}}" does not match` +} + +// StringGTE returns a format-string to format an StringLengthGTEError +func (l DefaultLocale) StringGTE() string { + return `String length must be greater than or equal to {{.min}}` +} + +// StringLTE returns a format-string to format an StringLengthLTEError +func (l DefaultLocale) StringLTE() string { + return `String length must be less than or equal to {{.max}}` +} + +// DoesNotMatchPattern returns a format-string to format an DoesNotMatchPatternError +func (l DefaultLocale) DoesNotMatchPattern() string { + return `Does not match pattern '{{.pattern}}'` +} + +// DoesNotMatchFormat returns a format-string to format an DoesNotMatchFormatError +func (l DefaultLocale) DoesNotMatchFormat() string { + return `Does not match format '{{.format}}'` +} + +// MultipleOf returns a format-string to format an MultipleOfError +func (l DefaultLocale) MultipleOf() string { + return `Must be a multiple of {{.multiple}}` +} + +// NumberGTE returns the format string to format a NumberGTEError +func (l DefaultLocale) NumberGTE() string { + return `Must be greater than or equal to {{.min}}` +} + +// NumberGT returns the format string to format a NumberGTError +func (l DefaultLocale) NumberGT() string { + return `Must be greater than {{.min}}` +} + +// NumberLTE returns the format string to format a NumberLTEError +func (l DefaultLocale) NumberLTE() string { + return `Must be less than or equal to {{.max}}` +} + +// NumberLT returns the format string to format a NumberLTError +func (l DefaultLocale) NumberLT() string { + return `Must be less 
than {{.max}}` +} + +// Schema validators + +// RegexPattern returns a format-string to format a regex-pattern error +func (l DefaultLocale) RegexPattern() string { + return `Invalid regex pattern '{{.pattern}}'` +} + +// GreaterThanZero returns a format-string to format an error where a number must be greater than zero +func (l DefaultLocale) GreaterThanZero() string { + return `{{.number}} must be strictly greater than 0` +} + +// MustBeOfA returns a format-string to format an error where a value is of the wrong type +func (l DefaultLocale) MustBeOfA() string { + return `{{.x}} must be of a {{.y}}` +} + +// MustBeOfAn returns a format-string to format an error where a value is of the wrong type +func (l DefaultLocale) MustBeOfAn() string { + return `{{.x}} must be of an {{.y}}` +} + +// CannotBeUsedWithout returns a format-string to format a "cannot be used without" error +func (l DefaultLocale) CannotBeUsedWithout() string { + return `{{.x}} cannot be used without {{.y}}` +} + +// CannotBeGT returns a format-string to format an error where a value are greater than allowed +func (l DefaultLocale) CannotBeGT() string { + return `{{.x}} cannot be greater than {{.y}}` +} + +// MustBeOfType returns a format-string to format an error where a value does not match the required type +func (l DefaultLocale) MustBeOfType() string { + return `{{.key}} must be of type {{.type}}` +} + +// MustBeValidRegex returns a format-string to format an error where a regex is invalid +func (l DefaultLocale) MustBeValidRegex() string { + return `{{.key}} must be a valid regex` +} + +// MustBeValidFormat returns a format-string to format an error where a value does not match the expected format +func (l DefaultLocale) MustBeValidFormat() string { + return `{{.key}} must be a valid format {{.given}}` +} + +// MustBeGTEZero returns a format-string to format an error where a value must be greater or equal than 0 +func (l DefaultLocale) MustBeGTEZero() string { + return `{{.key}} must be 
greater than or equal to 0` +} + +// KeyCannotBeGreaterThan returns a format-string to format an error where a value is greater than the maximum allowed +func (l DefaultLocale) KeyCannotBeGreaterThan() string { + return `{{.key}} cannot be greater than {{.y}}` +} + +// KeyItemsMustBeOfType returns a format-string to format an error where a key is of the wrong type +func (l DefaultLocale) KeyItemsMustBeOfType() string { + return `{{.key}} items must be {{.type}}` +} + +// KeyItemsMustBeUnique returns a format-string to format an error where keys are not unique +func (l DefaultLocale) KeyItemsMustBeUnique() string { + return `{{.key}} items must be unique` +} + +// ReferenceMustBeCanonical returns a format-string to format a "reference must be canonical" error +func (l DefaultLocale) ReferenceMustBeCanonical() string { + return `Reference {{.reference}} must be canonical` +} + +// NotAValidType returns a format-string to format an invalid type error +func (l DefaultLocale) NotAValidType() string { + return `has a primitive type that is NOT VALID -- given: {{.given}} Expected valid values are:{{.expected}}` +} + +// Duplicated returns a format-string to format an error where types are duplicated +func (l DefaultLocale) Duplicated() string { + return `{{.type}} type is duplicated` +} + +// HttpBadStatus returns a format-string for errors when loading a schema using HTTP +func (l DefaultLocale) HttpBadStatus() string { + return `Could not read schema from HTTP, response status is {{.status}}` +} + +// ErrorFormat returns a format string for errors +// Replacement options: field, description, context, value +func (l DefaultLocale) ErrorFormat() string { + return `{{.field}}: {{.description}}` +} + +// ParseError returns a format-string for JSON parsing errors +func (l DefaultLocale) ParseError() string { + return `Expected: {{.expected}}, given: Invalid JSON` +} + +// ConditionThen returns a format-string for ConditionThenError errors +// If/Else +func (l DefaultLocale) 
ConditionThen() string { + return `Must validate "then" as "if" was valid` +} + +// ConditionElse returns a format-string for ConditionElseError errors +func (l DefaultLocale) ConditionElse() string { + return `Must validate "else" as "if" was not valid` +} + +// constants +const ( + STRING_NUMBER = "number" + STRING_ARRAY_OF_STRINGS = "array of strings" + STRING_ARRAY_OF_SCHEMAS = "array of schemas" + STRING_SCHEMA = "valid schema" + STRING_SCHEMA_OR_ARRAY_OF_STRINGS = "schema or array of strings" + STRING_PROPERTIES = "properties" + STRING_DEPENDENCY = "dependency" + STRING_PROPERTY = "property" + STRING_UNDEFINED = "undefined" + STRING_CONTEXT_ROOT = "(root)" + STRING_ROOT_SCHEMA_PROPERTY = "(root)" +) diff --git a/vendor/github.com/xeipuuv/gojsonschema/result.go b/vendor/github.com/xeipuuv/gojsonschema/result.go new file mode 100644 index 0000000000..0a0179148b --- /dev/null +++ b/vendor/github.com/xeipuuv/gojsonschema/result.go @@ -0,0 +1,220 @@ +// Copyright 2015 xeipuuv ( https://github.com/xeipuuv ) +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// author xeipuuv +// author-github https://github.com/xeipuuv +// author-mail xeipuuv@gmail.com +// +// repository-name gojsonschema +// repository-desc An implementation of JSON Schema, based on IETF's draft v4 - Go language. +// +// description Result and ResultError implementations. 
+// +// created 01-01-2015 + +package gojsonschema + +import ( + "fmt" + "strings" +) + +type ( + // ErrorDetails is a map of details specific to each error. + // While the values will vary, every error will contain a "field" value + ErrorDetails map[string]interface{} + + // ResultError is the interface that library errors must implement + ResultError interface { + // Field returns the field name without the root context + // i.e. firstName or person.firstName instead of (root).firstName or (root).person.firstName + Field() string + // SetType sets the error-type + SetType(string) + // Type returns the error-type + Type() string + // SetContext sets the JSON-context for the error + SetContext(*JsonContext) + // Context returns the JSON-context of the error + Context() *JsonContext + // SetDescription sets a description for the error + SetDescription(string) + // Description returns the description of the error + Description() string + // SetDescriptionFormat sets the format for the description in the default text/template format + SetDescriptionFormat(string) + // DescriptionFormat returns the format for the description in the default text/template format + DescriptionFormat() string + // SetValue sets the value related to the error + SetValue(interface{}) + // Value returns the value related to the error + Value() interface{} + // SetDetails sets the details specific to the error + SetDetails(ErrorDetails) + // Details returns details about the error + Details() ErrorDetails + // String returns a string representation of the error + String() string + } + + // ResultErrorFields holds the fields for each ResultError implementation. + // ResultErrorFields implements the ResultError interface, so custom errors + // can be defined by just embedding this type + ResultErrorFields struct { + errorType string // A string with the type of error (i.e. invalid_type) + context *JsonContext // Tree like notation of the part that failed the validation. ex (root).a.b ... 
+ description string // A human readable error message + descriptionFormat string // A format for human readable error message + value interface{} // Value given by the JSON file that is the source of the error + details ErrorDetails + } + + // Result holds the result of a validation + Result struct { + errors []ResultError + // Scores how well the validation matched. Useful in generating + // better error messages for anyOf and oneOf. + score int + } +) + +// Field returns the field name without the root context +// i.e. firstName or person.firstName instead of (root).firstName or (root).person.firstName +func (v *ResultErrorFields) Field() string { + return strings.TrimPrefix(v.context.String(), STRING_ROOT_SCHEMA_PROPERTY+".") +} + +// SetType sets the error-type +func (v *ResultErrorFields) SetType(errorType string) { + v.errorType = errorType +} + +// Type returns the error-type +func (v *ResultErrorFields) Type() string { + return v.errorType +} + +// SetContext sets the JSON-context for the error +func (v *ResultErrorFields) SetContext(context *JsonContext) { + v.context = context +} + +// Context returns the JSON-context of the error +func (v *ResultErrorFields) Context() *JsonContext { + return v.context +} + +// SetDescription sets a description for the error +func (v *ResultErrorFields) SetDescription(description string) { + v.description = description +} + +// Description returns the description of the error +func (v *ResultErrorFields) Description() string { + return v.description +} + +// SetDescriptionFormat sets the format for the description in the default text/template format +func (v *ResultErrorFields) SetDescriptionFormat(descriptionFormat string) { + v.descriptionFormat = descriptionFormat +} + +// DescriptionFormat returns the format for the description in the default text/template format +func (v *ResultErrorFields) DescriptionFormat() string { + return v.descriptionFormat +} + +// SetValue sets the value related to the error +func (v 
*ResultErrorFields) SetValue(value interface{}) { + v.value = value +} + +// Value returns the value related to the error +func (v *ResultErrorFields) Value() interface{} { + return v.value +} + +// SetDetails sets the details specific to the error +func (v *ResultErrorFields) SetDetails(details ErrorDetails) { + v.details = details +} + +// Details returns details about the error +func (v *ResultErrorFields) Details() ErrorDetails { + return v.details +} + +// String returns a string representation of the error +func (v ResultErrorFields) String() string { + // as a fallback, the value is displayed go style + valueString := fmt.Sprintf("%v", v.value) + + // marshal the go value value to json + if v.value == nil { + valueString = TYPE_NULL + } else { + if vs, err := marshalToJSONString(v.value); err == nil { + if vs == nil { + valueString = TYPE_NULL + } else { + valueString = *vs + } + } + } + + return formatErrorDescription(Locale.ErrorFormat(), ErrorDetails{ + "context": v.context.String(), + "description": v.description, + "value": valueString, + "field": v.Field(), + }) +} + +// Valid indicates if no errors were found +func (v *Result) Valid() bool { + return len(v.errors) == 0 +} + +// Errors returns the errors that were found +func (v *Result) Errors() []ResultError { + return v.errors +} + +// AddError appends a fully filled error to the error set +// SetDescription() will be called with the result of the parsed err.DescriptionFormat() +func (v *Result) AddError(err ResultError, details ErrorDetails) { + if _, exists := details["context"]; !exists && err.Context() != nil { + details["context"] = err.Context().String() + } + + err.SetDescription(formatErrorDescription(err.DescriptionFormat(), details)) + + v.errors = append(v.errors, err) +} + +func (v *Result) addInternalError(err ResultError, context *JsonContext, value interface{}, details ErrorDetails) { + newError(err, context, value, Locale, details) + v.errors = append(v.errors, err) + v.score -= 2 // 
results in a net -1 when added to the +1 we get at the end of the validation function +} + +// Used to copy errors from a sub-schema to the main one +func (v *Result) mergeErrors(otherResult *Result) { + v.errors = append(v.errors, otherResult.Errors()...) + v.score += otherResult.score +} + +func (v *Result) incrementScore() { + v.score++ +} diff --git a/vendor/github.com/xeipuuv/gojsonschema/schema.go b/vendor/github.com/xeipuuv/gojsonschema/schema.go new file mode 100644 index 0000000000..9e93cd7955 --- /dev/null +++ b/vendor/github.com/xeipuuv/gojsonschema/schema.go @@ -0,0 +1,1087 @@ +// Copyright 2015 xeipuuv ( https://github.com/xeipuuv ) +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// author xeipuuv +// author-github https://github.com/xeipuuv +// author-mail xeipuuv@gmail.com +// +// repository-name gojsonschema +// repository-desc An implementation of JSON Schema, based on IETF's draft v4 - Go language. +// +// description Defines Schema, the main entry to every subSchema. +// Contains the parsing logic and error checking. +// +// created 26-02-2013 + +package gojsonschema + +import ( + "errors" + "math/big" + "reflect" + "regexp" + "text/template" + + "github.com/xeipuuv/gojsonreference" +) + +var ( + // Locale is the default locale to use + // Library users can overwrite with their own implementation + Locale locale = DefaultLocale{} + + // ErrorTemplateFuncs allows you to define custom template funcs for use in localization. 
+ ErrorTemplateFuncs template.FuncMap +) + +// NewSchema instances a schema using the given JSONLoader +func NewSchema(l JSONLoader) (*Schema, error) { + return NewSchemaLoader().Compile(l) +} + +// Schema holds a schema +type Schema struct { + documentReference gojsonreference.JsonReference + rootSchema *subSchema + pool *schemaPool + referencePool *schemaReferencePool +} + +func (d *Schema) parse(document interface{}, draft Draft) error { + d.rootSchema = &subSchema{property: STRING_ROOT_SCHEMA_PROPERTY, draft: &draft} + return d.parseSchema(document, d.rootSchema) +} + +// SetRootSchemaName sets the root-schema name +func (d *Schema) SetRootSchemaName(name string) { + d.rootSchema.property = name +} + +// Parses a subSchema +// +// Pretty long function ( sorry :) )... but pretty straight forward, repetitive and boring +// Not much magic involved here, most of the job is to validate the key names and their values, +// then the values are copied into subSchema struct +// +func (d *Schema) parseSchema(documentNode interface{}, currentSchema *subSchema) error { + + if currentSchema.draft == nil { + if currentSchema.parent == nil { + return errors.New("Draft not set") + } + currentSchema.draft = currentSchema.parent.draft + } + + // As of draft 6 "true" is equivalent to an empty schema "{}" and false equals "{"not":{}}" + if *currentSchema.draft >= Draft6 && isKind(documentNode, reflect.Bool) { + b := documentNode.(bool) + currentSchema.pass = &b + return nil + } + + if !isKind(documentNode, reflect.Map) { + return errors.New(formatErrorDescription( + Locale.ParseError(), + ErrorDetails{ + "expected": STRING_SCHEMA, + }, + )) + } + + m := documentNode.(map[string]interface{}) + + if currentSchema.parent == nil { + currentSchema.ref = &d.documentReference + currentSchema.id = &d.documentReference + } + + if currentSchema.id == nil && currentSchema.parent != nil { + currentSchema.id = currentSchema.parent.id + } + + // In draft 6 the id keyword was renamed to $id + // 
Hybrid mode uses the old id by default + var keyID string + + switch *currentSchema.draft { + case Draft4: + keyID = KEY_ID + case Hybrid: + keyID = KEY_ID_NEW + if existsMapKey(m, KEY_ID) { + keyID = KEY_ID + } + default: + keyID = KEY_ID_NEW + } + if existsMapKey(m, keyID) && !isKind(m[keyID], reflect.String) { + return errors.New(formatErrorDescription( + Locale.InvalidType(), + ErrorDetails{ + "expected": TYPE_STRING, + "given": keyID, + }, + )) + } + if k, ok := m[keyID].(string); ok { + jsonReference, err := gojsonreference.NewJsonReference(k) + if err != nil { + return err + } + if currentSchema == d.rootSchema { + currentSchema.id = &jsonReference + } else { + ref, err := currentSchema.parent.id.Inherits(jsonReference) + if err != nil { + return err + } + currentSchema.id = ref + } + } + + // definitions + if existsMapKey(m, KEY_DEFINITIONS) { + if isKind(m[KEY_DEFINITIONS], reflect.Map, reflect.Bool) { + for _, dv := range m[KEY_DEFINITIONS].(map[string]interface{}) { + if isKind(dv, reflect.Map, reflect.Bool) { + + newSchema := &subSchema{property: KEY_DEFINITIONS, parent: currentSchema} + + err := d.parseSchema(dv, newSchema) + + if err != nil { + return err + } + } else { + return errors.New(formatErrorDescription( + Locale.InvalidType(), + ErrorDetails{ + "expected": STRING_ARRAY_OF_SCHEMAS, + "given": KEY_DEFINITIONS, + }, + )) + } + } + } else { + return errors.New(formatErrorDescription( + Locale.InvalidType(), + ErrorDetails{ + "expected": STRING_ARRAY_OF_SCHEMAS, + "given": KEY_DEFINITIONS, + }, + )) + } + + } + + // title + if existsMapKey(m, KEY_TITLE) && !isKind(m[KEY_TITLE], reflect.String) { + return errors.New(formatErrorDescription( + Locale.InvalidType(), + ErrorDetails{ + "expected": TYPE_STRING, + "given": KEY_TITLE, + }, + )) + } + if k, ok := m[KEY_TITLE].(string); ok { + currentSchema.title = &k + } + + // description + if existsMapKey(m, KEY_DESCRIPTION) && !isKind(m[KEY_DESCRIPTION], reflect.String) { + return 
errors.New(formatErrorDescription( + Locale.InvalidType(), + ErrorDetails{ + "expected": TYPE_STRING, + "given": KEY_DESCRIPTION, + }, + )) + } + if k, ok := m[KEY_DESCRIPTION].(string); ok { + currentSchema.description = &k + } + + // $ref + if existsMapKey(m, KEY_REF) && !isKind(m[KEY_REF], reflect.String) { + return errors.New(formatErrorDescription( + Locale.InvalidType(), + ErrorDetails{ + "expected": TYPE_STRING, + "given": KEY_REF, + }, + )) + } + + if k, ok := m[KEY_REF].(string); ok { + + jsonReference, err := gojsonreference.NewJsonReference(k) + if err != nil { + return err + } + + currentSchema.ref = &jsonReference + + if sch, ok := d.referencePool.Get(currentSchema.ref.String()); ok { + currentSchema.refSchema = sch + } else { + err := d.parseReference(documentNode, currentSchema) + + if err != nil { + return err + } + + return nil + } + } + + // type + if existsMapKey(m, KEY_TYPE) { + if isKind(m[KEY_TYPE], reflect.String) { + if k, ok := m[KEY_TYPE].(string); ok { + err := currentSchema.types.Add(k) + if err != nil { + return err + } + } + } else { + if isKind(m[KEY_TYPE], reflect.Slice) { + arrayOfTypes := m[KEY_TYPE].([]interface{}) + for _, typeInArray := range arrayOfTypes { + if reflect.ValueOf(typeInArray).Kind() != reflect.String { + return errors.New(formatErrorDescription( + Locale.InvalidType(), + ErrorDetails{ + "expected": TYPE_STRING + "/" + STRING_ARRAY_OF_STRINGS, + "given": KEY_TYPE, + }, + )) + } + if err := currentSchema.types.Add(typeInArray.(string)); err != nil { + return err + } + } + + } else { + return errors.New(formatErrorDescription( + Locale.InvalidType(), + ErrorDetails{ + "expected": TYPE_STRING + "/" + STRING_ARRAY_OF_STRINGS, + "given": KEY_TYPE, + }, + )) + } + } + } + + // properties + if existsMapKey(m, KEY_PROPERTIES) { + err := d.parseProperties(m[KEY_PROPERTIES], currentSchema) + if err != nil { + return err + } + } + + // additionalProperties + if existsMapKey(m, KEY_ADDITIONAL_PROPERTIES) { + if 
isKind(m[KEY_ADDITIONAL_PROPERTIES], reflect.Bool) { + currentSchema.additionalProperties = m[KEY_ADDITIONAL_PROPERTIES].(bool) + } else if isKind(m[KEY_ADDITIONAL_PROPERTIES], reflect.Map) { + newSchema := &subSchema{property: KEY_ADDITIONAL_PROPERTIES, parent: currentSchema, ref: currentSchema.ref} + currentSchema.additionalProperties = newSchema + err := d.parseSchema(m[KEY_ADDITIONAL_PROPERTIES], newSchema) + if err != nil { + return errors.New(err.Error()) + } + } else { + return errors.New(formatErrorDescription( + Locale.InvalidType(), + ErrorDetails{ + "expected": TYPE_BOOLEAN + "/" + STRING_SCHEMA, + "given": KEY_ADDITIONAL_PROPERTIES, + }, + )) + } + } + + // patternProperties + if existsMapKey(m, KEY_PATTERN_PROPERTIES) { + if isKind(m[KEY_PATTERN_PROPERTIES], reflect.Map) { + patternPropertiesMap := m[KEY_PATTERN_PROPERTIES].(map[string]interface{}) + if len(patternPropertiesMap) > 0 { + currentSchema.patternProperties = make(map[string]*subSchema) + for k, v := range patternPropertiesMap { + _, err := regexp.MatchString(k, "") + if err != nil { + return errors.New(formatErrorDescription( + Locale.RegexPattern(), + ErrorDetails{"pattern": k}, + )) + } + newSchema := &subSchema{property: k, parent: currentSchema, ref: currentSchema.ref} + err = d.parseSchema(v, newSchema) + if err != nil { + return errors.New(err.Error()) + } + currentSchema.patternProperties[k] = newSchema + } + } + } else { + return errors.New(formatErrorDescription( + Locale.InvalidType(), + ErrorDetails{ + "expected": STRING_SCHEMA, + "given": KEY_PATTERN_PROPERTIES, + }, + )) + } + } + + // propertyNames + if existsMapKey(m, KEY_PROPERTY_NAMES) && *currentSchema.draft >= Draft6 { + if isKind(m[KEY_PROPERTY_NAMES], reflect.Map, reflect.Bool) { + newSchema := &subSchema{property: KEY_PROPERTY_NAMES, parent: currentSchema, ref: currentSchema.ref} + currentSchema.propertyNames = newSchema + err := d.parseSchema(m[KEY_PROPERTY_NAMES], newSchema) + if err != nil { + return err + } + } 
else { + return errors.New(formatErrorDescription( + Locale.InvalidType(), + ErrorDetails{ + "expected": STRING_SCHEMA, + "given": KEY_PATTERN_PROPERTIES, + }, + )) + } + } + + // dependencies + if existsMapKey(m, KEY_DEPENDENCIES) { + err := d.parseDependencies(m[KEY_DEPENDENCIES], currentSchema) + if err != nil { + return err + } + } + + // items + if existsMapKey(m, KEY_ITEMS) { + if isKind(m[KEY_ITEMS], reflect.Slice) { + for _, itemElement := range m[KEY_ITEMS].([]interface{}) { + if isKind(itemElement, reflect.Map, reflect.Bool) { + newSchema := &subSchema{parent: currentSchema, property: KEY_ITEMS} + newSchema.ref = currentSchema.ref + currentSchema.itemsChildren = append(currentSchema.itemsChildren, newSchema) + err := d.parseSchema(itemElement, newSchema) + if err != nil { + return err + } + } else { + return errors.New(formatErrorDescription( + Locale.InvalidType(), + ErrorDetails{ + "expected": STRING_SCHEMA + "/" + STRING_ARRAY_OF_SCHEMAS, + "given": KEY_ITEMS, + }, + )) + } + currentSchema.itemsChildrenIsSingleSchema = false + } + } else if isKind(m[KEY_ITEMS], reflect.Map, reflect.Bool) { + newSchema := &subSchema{parent: currentSchema, property: KEY_ITEMS} + newSchema.ref = currentSchema.ref + currentSchema.itemsChildren = append(currentSchema.itemsChildren, newSchema) + err := d.parseSchema(m[KEY_ITEMS], newSchema) + if err != nil { + return err + } + currentSchema.itemsChildrenIsSingleSchema = true + } else { + return errors.New(formatErrorDescription( + Locale.InvalidType(), + ErrorDetails{ + "expected": STRING_SCHEMA + "/" + STRING_ARRAY_OF_SCHEMAS, + "given": KEY_ITEMS, + }, + )) + } + } + + // additionalItems + if existsMapKey(m, KEY_ADDITIONAL_ITEMS) { + if isKind(m[KEY_ADDITIONAL_ITEMS], reflect.Bool) { + currentSchema.additionalItems = m[KEY_ADDITIONAL_ITEMS].(bool) + } else if isKind(m[KEY_ADDITIONAL_ITEMS], reflect.Map) { + newSchema := &subSchema{property: KEY_ADDITIONAL_ITEMS, parent: currentSchema, ref: currentSchema.ref} + 
currentSchema.additionalItems = newSchema + err := d.parseSchema(m[KEY_ADDITIONAL_ITEMS], newSchema) + if err != nil { + return errors.New(err.Error()) + } + } else { + return errors.New(formatErrorDescription( + Locale.InvalidType(), + ErrorDetails{ + "expected": TYPE_BOOLEAN + "/" + STRING_SCHEMA, + "given": KEY_ADDITIONAL_ITEMS, + }, + )) + } + } + + // validation : number / integer + + if existsMapKey(m, KEY_MULTIPLE_OF) { + multipleOfValue := mustBeNumber(m[KEY_MULTIPLE_OF]) + if multipleOfValue == nil { + return errors.New(formatErrorDescription( + Locale.InvalidType(), + ErrorDetails{ + "expected": STRING_NUMBER, + "given": KEY_MULTIPLE_OF, + }, + )) + } + if multipleOfValue.Cmp(big.NewRat(0, 1)) <= 0 { + return errors.New(formatErrorDescription( + Locale.GreaterThanZero(), + ErrorDetails{"number": KEY_MULTIPLE_OF}, + )) + } + currentSchema.multipleOf = multipleOfValue + } + + if existsMapKey(m, KEY_MINIMUM) { + minimumValue := mustBeNumber(m[KEY_MINIMUM]) + if minimumValue == nil { + return errors.New(formatErrorDescription( + Locale.MustBeOfA(), + ErrorDetails{"x": KEY_MINIMUM, "y": STRING_NUMBER}, + )) + } + currentSchema.minimum = minimumValue + } + + if existsMapKey(m, KEY_EXCLUSIVE_MINIMUM) { + switch *currentSchema.draft { + case Draft4: + if !isKind(m[KEY_EXCLUSIVE_MINIMUM], reflect.Bool) { + return errors.New(formatErrorDescription( + Locale.InvalidType(), + ErrorDetails{ + "expected": TYPE_BOOLEAN, + "given": KEY_EXCLUSIVE_MINIMUM, + }, + )) + } + if currentSchema.minimum == nil { + return errors.New(formatErrorDescription( + Locale.CannotBeUsedWithout(), + ErrorDetails{"x": KEY_EXCLUSIVE_MINIMUM, "y": KEY_MINIMUM}, + )) + } + if m[KEY_EXCLUSIVE_MINIMUM].(bool) { + currentSchema.exclusiveMinimum = currentSchema.minimum + currentSchema.minimum = nil + } + case Hybrid: + if isKind(m[KEY_EXCLUSIVE_MINIMUM], reflect.Bool) { + if currentSchema.minimum == nil { + return errors.New(formatErrorDescription( + Locale.CannotBeUsedWithout(), + 
ErrorDetails{"x": KEY_EXCLUSIVE_MINIMUM, "y": KEY_MINIMUM}, + )) + } + if m[KEY_EXCLUSIVE_MINIMUM].(bool) { + currentSchema.exclusiveMinimum = currentSchema.minimum + currentSchema.minimum = nil + } + } else if isJSONNumber(m[KEY_EXCLUSIVE_MINIMUM]) { + currentSchema.exclusiveMinimum = mustBeNumber(m[KEY_EXCLUSIVE_MINIMUM]) + } else { + return errors.New(formatErrorDescription( + Locale.InvalidType(), + ErrorDetails{ + "expected": TYPE_BOOLEAN + "/" + TYPE_NUMBER, + "given": KEY_EXCLUSIVE_MINIMUM, + }, + )) + } + default: + if isJSONNumber(m[KEY_EXCLUSIVE_MINIMUM]) { + currentSchema.exclusiveMinimum = mustBeNumber(m[KEY_EXCLUSIVE_MINIMUM]) + } else { + return errors.New(formatErrorDescription( + Locale.InvalidType(), + ErrorDetails{ + "expected": TYPE_NUMBER, + "given": KEY_EXCLUSIVE_MINIMUM, + }, + )) + } + } + } + + if existsMapKey(m, KEY_MAXIMUM) { + maximumValue := mustBeNumber(m[KEY_MAXIMUM]) + if maximumValue == nil { + return errors.New(formatErrorDescription( + Locale.MustBeOfA(), + ErrorDetails{"x": KEY_MAXIMUM, "y": STRING_NUMBER}, + )) + } + currentSchema.maximum = maximumValue + } + + if existsMapKey(m, KEY_EXCLUSIVE_MAXIMUM) { + switch *currentSchema.draft { + case Draft4: + if !isKind(m[KEY_EXCLUSIVE_MAXIMUM], reflect.Bool) { + return errors.New(formatErrorDescription( + Locale.InvalidType(), + ErrorDetails{ + "expected": TYPE_BOOLEAN, + "given": KEY_EXCLUSIVE_MAXIMUM, + }, + )) + } + if currentSchema.maximum == nil { + return errors.New(formatErrorDescription( + Locale.CannotBeUsedWithout(), + ErrorDetails{"x": KEY_EXCLUSIVE_MAXIMUM, "y": KEY_MAXIMUM}, + )) + } + if m[KEY_EXCLUSIVE_MAXIMUM].(bool) { + currentSchema.exclusiveMaximum = currentSchema.maximum + currentSchema.maximum = nil + } + case Hybrid: + if isKind(m[KEY_EXCLUSIVE_MAXIMUM], reflect.Bool) { + if currentSchema.maximum == nil { + return errors.New(formatErrorDescription( + Locale.CannotBeUsedWithout(), + ErrorDetails{"x": KEY_EXCLUSIVE_MAXIMUM, "y": KEY_MAXIMUM}, + )) + } + if 
m[KEY_EXCLUSIVE_MAXIMUM].(bool) { + currentSchema.exclusiveMaximum = currentSchema.maximum + currentSchema.maximum = nil + } + } else if isJSONNumber(m[KEY_EXCLUSIVE_MAXIMUM]) { + currentSchema.exclusiveMaximum = mustBeNumber(m[KEY_EXCLUSIVE_MAXIMUM]) + } else { + return errors.New(formatErrorDescription( + Locale.InvalidType(), + ErrorDetails{ + "expected": TYPE_BOOLEAN + "/" + TYPE_NUMBER, + "given": KEY_EXCLUSIVE_MAXIMUM, + }, + )) + } + default: + if isJSONNumber(m[KEY_EXCLUSIVE_MAXIMUM]) { + currentSchema.exclusiveMaximum = mustBeNumber(m[KEY_EXCLUSIVE_MAXIMUM]) + } else { + return errors.New(formatErrorDescription( + Locale.InvalidType(), + ErrorDetails{ + "expected": TYPE_NUMBER, + "given": KEY_EXCLUSIVE_MAXIMUM, + }, + )) + } + } + } + + // validation : string + + if existsMapKey(m, KEY_MIN_LENGTH) { + minLengthIntegerValue := mustBeInteger(m[KEY_MIN_LENGTH]) + if minLengthIntegerValue == nil { + return errors.New(formatErrorDescription( + Locale.MustBeOfAn(), + ErrorDetails{"x": KEY_MIN_LENGTH, "y": TYPE_INTEGER}, + )) + } + if *minLengthIntegerValue < 0 { + return errors.New(formatErrorDescription( + Locale.MustBeGTEZero(), + ErrorDetails{"key": KEY_MIN_LENGTH}, + )) + } + currentSchema.minLength = minLengthIntegerValue + } + + if existsMapKey(m, KEY_MAX_LENGTH) { + maxLengthIntegerValue := mustBeInteger(m[KEY_MAX_LENGTH]) + if maxLengthIntegerValue == nil { + return errors.New(formatErrorDescription( + Locale.MustBeOfAn(), + ErrorDetails{"x": KEY_MAX_LENGTH, "y": TYPE_INTEGER}, + )) + } + if *maxLengthIntegerValue < 0 { + return errors.New(formatErrorDescription( + Locale.MustBeGTEZero(), + ErrorDetails{"key": KEY_MAX_LENGTH}, + )) + } + currentSchema.maxLength = maxLengthIntegerValue + } + + if currentSchema.minLength != nil && currentSchema.maxLength != nil { + if *currentSchema.minLength > *currentSchema.maxLength { + return errors.New(formatErrorDescription( + Locale.CannotBeGT(), + ErrorDetails{"x": KEY_MIN_LENGTH, "y": KEY_MAX_LENGTH}, + )) + } + } 
+ + if existsMapKey(m, KEY_PATTERN) { + if isKind(m[KEY_PATTERN], reflect.String) { + regexpObject, err := regexp.Compile(m[KEY_PATTERN].(string)) + if err != nil { + return errors.New(formatErrorDescription( + Locale.MustBeValidRegex(), + ErrorDetails{"key": KEY_PATTERN}, + )) + } + currentSchema.pattern = regexpObject + } else { + return errors.New(formatErrorDescription( + Locale.MustBeOfA(), + ErrorDetails{"x": KEY_PATTERN, "y": TYPE_STRING}, + )) + } + } + + if existsMapKey(m, KEY_FORMAT) { + formatString, ok := m[KEY_FORMAT].(string) + if !ok { + return errors.New(formatErrorDescription( + Locale.MustBeOfType(), + ErrorDetails{"key": KEY_FORMAT, "type": TYPE_STRING}, + )) + } + currentSchema.format = formatString + } + + // validation : object + + if existsMapKey(m, KEY_MIN_PROPERTIES) { + minPropertiesIntegerValue := mustBeInteger(m[KEY_MIN_PROPERTIES]) + if minPropertiesIntegerValue == nil { + return errors.New(formatErrorDescription( + Locale.MustBeOfAn(), + ErrorDetails{"x": KEY_MIN_PROPERTIES, "y": TYPE_INTEGER}, + )) + } + if *minPropertiesIntegerValue < 0 { + return errors.New(formatErrorDescription( + Locale.MustBeGTEZero(), + ErrorDetails{"key": KEY_MIN_PROPERTIES}, + )) + } + currentSchema.minProperties = minPropertiesIntegerValue + } + + if existsMapKey(m, KEY_MAX_PROPERTIES) { + maxPropertiesIntegerValue := mustBeInteger(m[KEY_MAX_PROPERTIES]) + if maxPropertiesIntegerValue == nil { + return errors.New(formatErrorDescription( + Locale.MustBeOfAn(), + ErrorDetails{"x": KEY_MAX_PROPERTIES, "y": TYPE_INTEGER}, + )) + } + if *maxPropertiesIntegerValue < 0 { + return errors.New(formatErrorDescription( + Locale.MustBeGTEZero(), + ErrorDetails{"key": KEY_MAX_PROPERTIES}, + )) + } + currentSchema.maxProperties = maxPropertiesIntegerValue + } + + if currentSchema.minProperties != nil && currentSchema.maxProperties != nil { + if *currentSchema.minProperties > *currentSchema.maxProperties { + return errors.New(formatErrorDescription( + 
Locale.KeyCannotBeGreaterThan(), + ErrorDetails{"key": KEY_MIN_PROPERTIES, "y": KEY_MAX_PROPERTIES}, + )) + } + } + + if existsMapKey(m, KEY_REQUIRED) { + if isKind(m[KEY_REQUIRED], reflect.Slice) { + requiredValues := m[KEY_REQUIRED].([]interface{}) + for _, requiredValue := range requiredValues { + if isKind(requiredValue, reflect.String) { + if isStringInSlice(currentSchema.required, requiredValue.(string)) { + return errors.New(formatErrorDescription( + Locale.KeyItemsMustBeUnique(), + ErrorDetails{"key": KEY_REQUIRED}, + )) + } + currentSchema.required = append(currentSchema.required, requiredValue.(string)) + } else { + return errors.New(formatErrorDescription( + Locale.KeyItemsMustBeOfType(), + ErrorDetails{"key": KEY_REQUIRED, "type": TYPE_STRING}, + )) + } + } + } else { + return errors.New(formatErrorDescription( + Locale.MustBeOfAn(), + ErrorDetails{"x": KEY_REQUIRED, "y": TYPE_ARRAY}, + )) + } + } + + // validation : array + + if existsMapKey(m, KEY_MIN_ITEMS) { + minItemsIntegerValue := mustBeInteger(m[KEY_MIN_ITEMS]) + if minItemsIntegerValue == nil { + return errors.New(formatErrorDescription( + Locale.MustBeOfAn(), + ErrorDetails{"x": KEY_MIN_ITEMS, "y": TYPE_INTEGER}, + )) + } + if *minItemsIntegerValue < 0 { + return errors.New(formatErrorDescription( + Locale.MustBeGTEZero(), + ErrorDetails{"key": KEY_MIN_ITEMS}, + )) + } + currentSchema.minItems = minItemsIntegerValue + } + + if existsMapKey(m, KEY_MAX_ITEMS) { + maxItemsIntegerValue := mustBeInteger(m[KEY_MAX_ITEMS]) + if maxItemsIntegerValue == nil { + return errors.New(formatErrorDescription( + Locale.MustBeOfAn(), + ErrorDetails{"x": KEY_MAX_ITEMS, "y": TYPE_INTEGER}, + )) + } + if *maxItemsIntegerValue < 0 { + return errors.New(formatErrorDescription( + Locale.MustBeGTEZero(), + ErrorDetails{"key": KEY_MAX_ITEMS}, + )) + } + currentSchema.maxItems = maxItemsIntegerValue + } + + if existsMapKey(m, KEY_UNIQUE_ITEMS) { + if isKind(m[KEY_UNIQUE_ITEMS], reflect.Bool) { + 
currentSchema.uniqueItems = m[KEY_UNIQUE_ITEMS].(bool) + } else { + return errors.New(formatErrorDescription( + Locale.MustBeOfA(), + ErrorDetails{"x": KEY_UNIQUE_ITEMS, "y": TYPE_BOOLEAN}, + )) + } + } + + if existsMapKey(m, KEY_CONTAINS) && *currentSchema.draft >= Draft6 { + newSchema := &subSchema{property: KEY_CONTAINS, parent: currentSchema, ref: currentSchema.ref} + currentSchema.contains = newSchema + err := d.parseSchema(m[KEY_CONTAINS], newSchema) + if err != nil { + return err + } + } + + // validation : all + + if existsMapKey(m, KEY_CONST) && *currentSchema.draft >= Draft6 { + is, err := marshalWithoutNumber(m[KEY_CONST]) + if err != nil { + return err + } + currentSchema._const = is + } + + if existsMapKey(m, KEY_ENUM) { + if isKind(m[KEY_ENUM], reflect.Slice) { + for _, v := range m[KEY_ENUM].([]interface{}) { + is, err := marshalWithoutNumber(v) + if err != nil { + return err + } + if isStringInSlice(currentSchema.enum, *is) { + return errors.New(formatErrorDescription( + Locale.KeyItemsMustBeUnique(), + ErrorDetails{"key": KEY_ENUM}, + )) + } + currentSchema.enum = append(currentSchema.enum, *is) + } + } else { + return errors.New(formatErrorDescription( + Locale.MustBeOfAn(), + ErrorDetails{"x": KEY_ENUM, "y": TYPE_ARRAY}, + )) + } + } + + // validation : subSchema + + if existsMapKey(m, KEY_ONE_OF) { + if isKind(m[KEY_ONE_OF], reflect.Slice) { + for _, v := range m[KEY_ONE_OF].([]interface{}) { + newSchema := &subSchema{property: KEY_ONE_OF, parent: currentSchema, ref: currentSchema.ref} + currentSchema.oneOf = append(currentSchema.oneOf, newSchema) + err := d.parseSchema(v, newSchema) + if err != nil { + return err + } + } + } else { + return errors.New(formatErrorDescription( + Locale.MustBeOfAn(), + ErrorDetails{"x": KEY_ONE_OF, "y": TYPE_ARRAY}, + )) + } + } + + if existsMapKey(m, KEY_ANY_OF) { + if isKind(m[KEY_ANY_OF], reflect.Slice) { + for _, v := range m[KEY_ANY_OF].([]interface{}) { + newSchema := &subSchema{property: KEY_ANY_OF, parent: 
currentSchema, ref: currentSchema.ref} + currentSchema.anyOf = append(currentSchema.anyOf, newSchema) + err := d.parseSchema(v, newSchema) + if err != nil { + return err + } + } + } else { + return errors.New(formatErrorDescription( + Locale.MustBeOfAn(), + ErrorDetails{"x": KEY_ANY_OF, "y": TYPE_ARRAY}, + )) + } + } + + if existsMapKey(m, KEY_ALL_OF) { + if isKind(m[KEY_ALL_OF], reflect.Slice) { + for _, v := range m[KEY_ALL_OF].([]interface{}) { + newSchema := &subSchema{property: KEY_ALL_OF, parent: currentSchema, ref: currentSchema.ref} + currentSchema.allOf = append(currentSchema.allOf, newSchema) + err := d.parseSchema(v, newSchema) + if err != nil { + return err + } + } + } else { + return errors.New(formatErrorDescription( + Locale.MustBeOfAn(), + ErrorDetails{"x": KEY_ANY_OF, "y": TYPE_ARRAY}, + )) + } + } + + if existsMapKey(m, KEY_NOT) { + if isKind(m[KEY_NOT], reflect.Map, reflect.Bool) { + newSchema := &subSchema{property: KEY_NOT, parent: currentSchema, ref: currentSchema.ref} + currentSchema.not = newSchema + err := d.parseSchema(m[KEY_NOT], newSchema) + if err != nil { + return err + } + } else { + return errors.New(formatErrorDescription( + Locale.MustBeOfAn(), + ErrorDetails{"x": KEY_NOT, "y": TYPE_OBJECT}, + )) + } + } + + if *currentSchema.draft >= Draft7 { + if existsMapKey(m, KEY_IF) { + if isKind(m[KEY_IF], reflect.Map, reflect.Bool) { + newSchema := &subSchema{property: KEY_IF, parent: currentSchema, ref: currentSchema.ref} + currentSchema._if = newSchema + err := d.parseSchema(m[KEY_IF], newSchema) + if err != nil { + return err + } + } else { + return errors.New(formatErrorDescription( + Locale.MustBeOfAn(), + ErrorDetails{"x": KEY_IF, "y": TYPE_OBJECT}, + )) + } + } + + if existsMapKey(m, KEY_THEN) { + if isKind(m[KEY_THEN], reflect.Map, reflect.Bool) { + newSchema := &subSchema{property: KEY_THEN, parent: currentSchema, ref: currentSchema.ref} + currentSchema._then = newSchema + err := d.parseSchema(m[KEY_THEN], newSchema) + if err != 
nil { + return err + } + } else { + return errors.New(formatErrorDescription( + Locale.MustBeOfAn(), + ErrorDetails{"x": KEY_THEN, "y": TYPE_OBJECT}, + )) + } + } + + if existsMapKey(m, KEY_ELSE) { + if isKind(m[KEY_ELSE], reflect.Map, reflect.Bool) { + newSchema := &subSchema{property: KEY_ELSE, parent: currentSchema, ref: currentSchema.ref} + currentSchema._else = newSchema + err := d.parseSchema(m[KEY_ELSE], newSchema) + if err != nil { + return err + } + } else { + return errors.New(formatErrorDescription( + Locale.MustBeOfAn(), + ErrorDetails{"x": KEY_ELSE, "y": TYPE_OBJECT}, + )) + } + } + } + + return nil +} + +func (d *Schema) parseReference(documentNode interface{}, currentSchema *subSchema) error { + var ( + refdDocumentNode interface{} + dsp *schemaPoolDocument + err error + ) + + newSchema := &subSchema{property: KEY_REF, parent: currentSchema, ref: currentSchema.ref} + + d.referencePool.Add(currentSchema.ref.String(), newSchema) + + dsp, err = d.pool.GetDocument(*currentSchema.ref) + if err != nil { + return err + } + newSchema.id = currentSchema.ref + + refdDocumentNode = dsp.Document + newSchema.draft = dsp.Draft + + if err != nil { + return err + } + + if !isKind(refdDocumentNode, reflect.Map, reflect.Bool) { + return errors.New(formatErrorDescription( + Locale.MustBeOfType(), + ErrorDetails{"key": STRING_SCHEMA, "type": TYPE_OBJECT}, + )) + } + + err = d.parseSchema(refdDocumentNode, newSchema) + if err != nil { + return err + } + + currentSchema.refSchema = newSchema + + return nil + +} + +func (d *Schema) parseProperties(documentNode interface{}, currentSchema *subSchema) error { + + if !isKind(documentNode, reflect.Map) { + return errors.New(formatErrorDescription( + Locale.MustBeOfType(), + ErrorDetails{"key": STRING_PROPERTIES, "type": TYPE_OBJECT}, + )) + } + + m := documentNode.(map[string]interface{}) + for k := range m { + schemaProperty := k + newSchema := &subSchema{property: schemaProperty, parent: currentSchema, ref: currentSchema.ref} 
+ currentSchema.propertiesChildren = append(currentSchema.propertiesChildren, newSchema) + err := d.parseSchema(m[k], newSchema) + if err != nil { + return err + } + } + + return nil +} + +func (d *Schema) parseDependencies(documentNode interface{}, currentSchema *subSchema) error { + + if !isKind(documentNode, reflect.Map) { + return errors.New(formatErrorDescription( + Locale.MustBeOfType(), + ErrorDetails{"key": KEY_DEPENDENCIES, "type": TYPE_OBJECT}, + )) + } + + m := documentNode.(map[string]interface{}) + currentSchema.dependencies = make(map[string]interface{}) + + for k := range m { + switch reflect.ValueOf(m[k]).Kind() { + + case reflect.Slice: + values := m[k].([]interface{}) + var valuesToRegister []string + + for _, value := range values { + if !isKind(value, reflect.String) { + return errors.New(formatErrorDescription( + Locale.MustBeOfType(), + ErrorDetails{ + "key": STRING_DEPENDENCY, + "type": STRING_SCHEMA_OR_ARRAY_OF_STRINGS, + }, + )) + } + valuesToRegister = append(valuesToRegister, value.(string)) + currentSchema.dependencies[k] = valuesToRegister + } + + case reflect.Map, reflect.Bool: + depSchema := &subSchema{property: k, parent: currentSchema, ref: currentSchema.ref} + err := d.parseSchema(m[k], depSchema) + if err != nil { + return err + } + currentSchema.dependencies[k] = depSchema + + default: + return errors.New(formatErrorDescription( + Locale.MustBeOfType(), + ErrorDetails{ + "key": STRING_DEPENDENCY, + "type": STRING_SCHEMA_OR_ARRAY_OF_STRINGS, + }, + )) + } + + } + + return nil +} diff --git a/vendor/github.com/xeipuuv/gojsonschema/schemaLoader.go b/vendor/github.com/xeipuuv/gojsonschema/schemaLoader.go new file mode 100644 index 0000000000..20db0c1f99 --- /dev/null +++ b/vendor/github.com/xeipuuv/gojsonschema/schemaLoader.go @@ -0,0 +1,206 @@ +// Copyright 2018 johandorland ( https://github.com/johandorland ) +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance 
with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package gojsonschema + +import ( + "bytes" + "errors" + + "github.com/xeipuuv/gojsonreference" +) + +// SchemaLoader is used to load schemas +type SchemaLoader struct { + pool *schemaPool + AutoDetect bool + Validate bool + Draft Draft +} + +// NewSchemaLoader creates a new NewSchemaLoader +func NewSchemaLoader() *SchemaLoader { + + ps := &SchemaLoader{ + pool: &schemaPool{ + schemaPoolDocuments: make(map[string]*schemaPoolDocument), + }, + AutoDetect: true, + Validate: false, + Draft: Hybrid, + } + ps.pool.autoDetect = &ps.AutoDetect + + return ps +} + +func (sl *SchemaLoader) validateMetaschema(documentNode interface{}) error { + + var ( + schema string + err error + ) + if sl.AutoDetect { + schema, _, err = parseSchemaURL(documentNode) + if err != nil { + return err + } + } + + // If no explicit "$schema" is used, use the default metaschema associated with the draft used + if schema == "" { + if sl.Draft == Hybrid { + return nil + } + schema = drafts.GetSchemaURL(sl.Draft) + } + + //Disable validation when loading the metaschema to prevent an infinite recursive loop + sl.Validate = false + + metaSchema, err := sl.Compile(NewReferenceLoader(schema)) + + if err != nil { + return err + } + + sl.Validate = true + + result := metaSchema.validateDocument(documentNode) + + if !result.Valid() { + var res bytes.Buffer + for _, err := range result.Errors() { + res.WriteString(err.String()) + res.WriteString("\n") + } + return errors.New(res.String()) + } + + return nil +} + +// AddSchemas adds an arbritrary amount of schemas to the 
schema cache. As this function does not require +// an explicit URL, every schema should contain an $id, so that it can be referenced by the main schema +func (sl *SchemaLoader) AddSchemas(loaders ...JSONLoader) error { + emptyRef, _ := gojsonreference.NewJsonReference("") + + for _, loader := range loaders { + doc, err := loader.LoadJSON() + + if err != nil { + return err + } + + if sl.Validate { + if err := sl.validateMetaschema(doc); err != nil { + return err + } + } + + // Directly use the Recursive function, so that it get only added to the schema pool by $id + // and not by the ref of the document as it's empty + if err = sl.pool.parseReferences(doc, emptyRef, false); err != nil { + return err + } + } + + return nil +} + +//AddSchema adds a schema under the provided URL to the schema cache +func (sl *SchemaLoader) AddSchema(url string, loader JSONLoader) error { + + ref, err := gojsonreference.NewJsonReference(url) + + if err != nil { + return err + } + + doc, err := loader.LoadJSON() + + if err != nil { + return err + } + + if sl.Validate { + if err := sl.validateMetaschema(doc); err != nil { + return err + } + } + + return sl.pool.parseReferences(doc, ref, true) +} + +// Compile loads and compiles a schema +func (sl *SchemaLoader) Compile(rootSchema JSONLoader) (*Schema, error) { + + ref, err := rootSchema.JsonReference() + + if err != nil { + return nil, err + } + + d := Schema{} + d.pool = sl.pool + d.pool.jsonLoaderFactory = rootSchema.LoaderFactory() + d.documentReference = ref + d.referencePool = newSchemaReferencePool() + + var doc interface{} + if ref.String() != "" { + // Get document from schema pool + spd, err := d.pool.GetDocument(d.documentReference) + if err != nil { + return nil, err + } + doc = spd.Document + } else { + // Load JSON directly + doc, err = rootSchema.LoadJSON() + if err != nil { + return nil, err + } + // References need only be parsed if loading JSON directly + // as pool.GetDocument already does this for us if loading by 
reference + err = sl.pool.parseReferences(doc, ref, true) + if err != nil { + return nil, err + } + } + + if sl.Validate { + if err := sl.validateMetaschema(doc); err != nil { + return nil, err + } + } + + draft := sl.Draft + if sl.AutoDetect { + _, detectedDraft, err := parseSchemaURL(doc) + if err != nil { + return nil, err + } + if detectedDraft != nil { + draft = *detectedDraft + } + } + + err = d.parse(doc, draft) + if err != nil { + return nil, err + } + + return &d, nil +} diff --git a/vendor/github.com/xeipuuv/gojsonschema/schemaPool.go b/vendor/github.com/xeipuuv/gojsonschema/schemaPool.go new file mode 100644 index 0000000000..35b1cc6306 --- /dev/null +++ b/vendor/github.com/xeipuuv/gojsonschema/schemaPool.go @@ -0,0 +1,215 @@ +// Copyright 2015 xeipuuv ( https://github.com/xeipuuv ) +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// author xeipuuv +// author-github https://github.com/xeipuuv +// author-mail xeipuuv@gmail.com +// +// repository-name gojsonschema +// repository-desc An implementation of JSON Schema, based on IETF's draft v4 - Go language. +// +// description Defines resources pooling. +// Eases referencing and avoids downloading the same resource twice. 
+// +// created 26-02-2013 + +package gojsonschema + +import ( + "errors" + "fmt" + "reflect" + + "github.com/xeipuuv/gojsonreference" +) + +type schemaPoolDocument struct { + Document interface{} + Draft *Draft +} + +type schemaPool struct { + schemaPoolDocuments map[string]*schemaPoolDocument + jsonLoaderFactory JSONLoaderFactory + autoDetect *bool +} + +func (p *schemaPool) parseReferences(document interface{}, ref gojsonreference.JsonReference, pooled bool) error { + + var ( + draft *Draft + err error + reference = ref.String() + ) + // Only the root document should be added to the schema pool if pooled is true + if _, ok := p.schemaPoolDocuments[reference]; pooled && ok { + return fmt.Errorf("Reference already exists: \"%s\"", reference) + } + + if *p.autoDetect { + _, draft, err = parseSchemaURL(document) + if err != nil { + return err + } + } + + err = p.parseReferencesRecursive(document, ref, draft) + + if pooled { + p.schemaPoolDocuments[reference] = &schemaPoolDocument{Document: document, Draft: draft} + } + + return err +} + +func (p *schemaPool) parseReferencesRecursive(document interface{}, ref gojsonreference.JsonReference, draft *Draft) error { + // parseReferencesRecursive parses a JSON document and resolves all $id and $ref references. + // For $ref references it takes into account the $id scope it is in and replaces + // the reference by the absolute resolved reference + + // When encountering errors it fails silently. Error handling is done when the schema + // is syntactically parsed and any error encountered here should also come up there. 
+ switch m := document.(type) { + case []interface{}: + for _, v := range m { + p.parseReferencesRecursive(v, ref, draft) + } + case map[string]interface{}: + localRef := &ref + + keyID := KEY_ID_NEW + if existsMapKey(m, KEY_ID) { + keyID = KEY_ID + } + if existsMapKey(m, keyID) && isKind(m[keyID], reflect.String) { + jsonReference, err := gojsonreference.NewJsonReference(m[keyID].(string)) + if err == nil { + localRef, err = ref.Inherits(jsonReference) + if err == nil { + if _, ok := p.schemaPoolDocuments[localRef.String()]; ok { + return fmt.Errorf("Reference already exists: \"%s\"", localRef.String()) + } + p.schemaPoolDocuments[localRef.String()] = &schemaPoolDocument{Document: document, Draft: draft} + } + } + } + + if existsMapKey(m, KEY_REF) && isKind(m[KEY_REF], reflect.String) { + jsonReference, err := gojsonreference.NewJsonReference(m[KEY_REF].(string)) + if err == nil { + absoluteRef, err := localRef.Inherits(jsonReference) + if err == nil { + m[KEY_REF] = absoluteRef.String() + } + } + } + + for k, v := range m { + // const and enums should be interpreted literally, so ignore them + if k == KEY_CONST || k == KEY_ENUM { + continue + } + // Something like a property or a dependency is not a valid schema, as it might describe properties named "$ref", "$id" or "const", etc + // Therefore don't treat it like a schema. 
+ if k == KEY_PROPERTIES || k == KEY_DEPENDENCIES || k == KEY_PATTERN_PROPERTIES { + if child, ok := v.(map[string]interface{}); ok { + for _, v := range child { + p.parseReferencesRecursive(v, *localRef, draft) + } + } + } else { + p.parseReferencesRecursive(v, *localRef, draft) + } + } + } + return nil +} + +func (p *schemaPool) GetDocument(reference gojsonreference.JsonReference) (*schemaPoolDocument, error) { + + var ( + spd *schemaPoolDocument + draft *Draft + ok bool + err error + ) + + if internalLogEnabled { + internalLog("Get Document ( %s )", reference.String()) + } + + // Create a deep copy, so we can remove the fragment part later on without altering the original + refToURL, _ := gojsonreference.NewJsonReference(reference.String()) + + // First check if the given fragment is a location independent identifier + // http://json-schema.org/latest/json-schema-core.html#rfc.section.8.2.3 + + if spd, ok = p.schemaPoolDocuments[refToURL.String()]; ok { + if internalLogEnabled { + internalLog(" From pool") + } + return spd, nil + } + + // If the given reference is not a location independent identifier, + // strip the fragment and look for a document with it's base URI + + refToURL.GetUrl().Fragment = "" + + if cachedSpd, ok := p.schemaPoolDocuments[refToURL.String()]; ok { + document, _, err := reference.GetPointer().Get(cachedSpd.Document) + + if err != nil { + return nil, err + } + + if internalLogEnabled { + internalLog(" From pool") + } + + spd = &schemaPoolDocument{Document: document, Draft: cachedSpd.Draft} + p.schemaPoolDocuments[reference.String()] = spd + + return spd, nil + } + + // It is not possible to load anything remotely that is not canonical... 
+ if !reference.IsCanonical() { + return nil, errors.New(formatErrorDescription( + Locale.ReferenceMustBeCanonical(), + ErrorDetails{"reference": reference.String()}, + )) + } + + jsonReferenceLoader := p.jsonLoaderFactory.New(reference.String()) + document, err := jsonReferenceLoader.LoadJSON() + + if err != nil { + return nil, err + } + + // add the whole document to the pool for potential re-use + p.parseReferences(document, refToURL, true) + + _, draft, _ = parseSchemaURL(document) + + // resolve the potential fragment and also cache it + document, _, err = reference.GetPointer().Get(document) + + if err != nil { + return nil, err + } + + return &schemaPoolDocument{Document: document, Draft: draft}, nil +} diff --git a/vendor/github.com/xeipuuv/gojsonschema/schemaReferencePool.go b/vendor/github.com/xeipuuv/gojsonschema/schemaReferencePool.go new file mode 100644 index 0000000000..6e5e1b5cdb --- /dev/null +++ b/vendor/github.com/xeipuuv/gojsonschema/schemaReferencePool.go @@ -0,0 +1,68 @@ +// Copyright 2015 xeipuuv ( https://github.com/xeipuuv ) +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// author xeipuuv +// author-github https://github.com/xeipuuv +// author-mail xeipuuv@gmail.com +// +// repository-name gojsonschema +// repository-desc An implementation of JSON Schema, based on IETF's draft v4 - Go language. +// +// description Pool of referenced schemas. 
+// +// created 25-06-2013 + +package gojsonschema + +import ( + "fmt" +) + +type schemaReferencePool struct { + documents map[string]*subSchema +} + +func newSchemaReferencePool() *schemaReferencePool { + + p := &schemaReferencePool{} + p.documents = make(map[string]*subSchema) + + return p +} + +func (p *schemaReferencePool) Get(ref string) (r *subSchema, o bool) { + + if internalLogEnabled { + internalLog(fmt.Sprintf("Schema Reference ( %s )", ref)) + } + + if sch, ok := p.documents[ref]; ok { + if internalLogEnabled { + internalLog(fmt.Sprintf(" From pool")) + } + return sch, true + } + + return nil, false +} + +func (p *schemaReferencePool) Add(ref string, sch *subSchema) { + + if internalLogEnabled { + internalLog(fmt.Sprintf("Add Schema Reference %s to pool", ref)) + } + if _, ok := p.documents[ref]; !ok { + p.documents[ref] = sch + } +} diff --git a/vendor/github.com/xeipuuv/gojsonschema/schemaType.go b/vendor/github.com/xeipuuv/gojsonschema/schemaType.go new file mode 100644 index 0000000000..36b447a291 --- /dev/null +++ b/vendor/github.com/xeipuuv/gojsonschema/schemaType.go @@ -0,0 +1,83 @@ +// Copyright 2015 xeipuuv ( https://github.com/xeipuuv ) +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// author xeipuuv +// author-github https://github.com/xeipuuv +// author-mail xeipuuv@gmail.com +// +// repository-name gojsonschema +// repository-desc An implementation of JSON Schema, based on IETF's draft v4 - Go language. 
+// +// description Helper structure to handle schema types, and the combination of them. +// +// created 28-02-2013 + +package gojsonschema + +import ( + "errors" + "fmt" + "strings" +) + +type jsonSchemaType struct { + types []string +} + +// Is the schema typed ? that is containing at least one type +// When not typed, the schema does not need any type validation +func (t *jsonSchemaType) IsTyped() bool { + return len(t.types) > 0 +} + +func (t *jsonSchemaType) Add(etype string) error { + + if !isStringInSlice(JSON_TYPES, etype) { + return errors.New(formatErrorDescription(Locale.NotAValidType(), ErrorDetails{"given": "/" + etype + "/", "expected": JSON_TYPES})) + } + + if t.Contains(etype) { + return errors.New(formatErrorDescription(Locale.Duplicated(), ErrorDetails{"type": etype})) + } + + t.types = append(t.types, etype) + + return nil +} + +func (t *jsonSchemaType) Contains(etype string) bool { + + for _, v := range t.types { + if v == etype { + return true + } + } + + return false +} + +func (t *jsonSchemaType) String() string { + + if len(t.types) == 0 { + return STRING_UNDEFINED // should never happen + } + + // Displayed as a list [type1,type2,...] + if len(t.types) > 1 { + return fmt.Sprintf("[%s]", strings.Join(t.types, ",")) + } + + // Only one type: name only + return t.types[0] +} diff --git a/vendor/github.com/xeipuuv/gojsonschema/subSchema.go b/vendor/github.com/xeipuuv/gojsonschema/subSchema.go new file mode 100644 index 0000000000..ec779812c3 --- /dev/null +++ b/vendor/github.com/xeipuuv/gojsonschema/subSchema.go @@ -0,0 +1,149 @@ +// Copyright 2015 xeipuuv ( https://github.com/xeipuuv ) +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// author xeipuuv +// author-github https://github.com/xeipuuv +// author-mail xeipuuv@gmail.com +// +// repository-name gojsonschema +// repository-desc An implementation of JSON Schema, based on IETF's draft v4 - Go language. +// +// description Defines the structure of a sub-subSchema. +// A sub-subSchema can contain other sub-schemas. +// +// created 27-02-2013 + +package gojsonschema + +import ( + "github.com/xeipuuv/gojsonreference" + "math/big" + "regexp" +) + +// Constants +const ( + KEY_SCHEMA = "$schema" + KEY_ID = "id" + KEY_ID_NEW = "$id" + KEY_REF = "$ref" + KEY_TITLE = "title" + KEY_DESCRIPTION = "description" + KEY_TYPE = "type" + KEY_ITEMS = "items" + KEY_ADDITIONAL_ITEMS = "additionalItems" + KEY_PROPERTIES = "properties" + KEY_PATTERN_PROPERTIES = "patternProperties" + KEY_ADDITIONAL_PROPERTIES = "additionalProperties" + KEY_PROPERTY_NAMES = "propertyNames" + KEY_DEFINITIONS = "definitions" + KEY_MULTIPLE_OF = "multipleOf" + KEY_MINIMUM = "minimum" + KEY_MAXIMUM = "maximum" + KEY_EXCLUSIVE_MINIMUM = "exclusiveMinimum" + KEY_EXCLUSIVE_MAXIMUM = "exclusiveMaximum" + KEY_MIN_LENGTH = "minLength" + KEY_MAX_LENGTH = "maxLength" + KEY_PATTERN = "pattern" + KEY_FORMAT = "format" + KEY_MIN_PROPERTIES = "minProperties" + KEY_MAX_PROPERTIES = "maxProperties" + KEY_DEPENDENCIES = "dependencies" + KEY_REQUIRED = "required" + KEY_MIN_ITEMS = "minItems" + KEY_MAX_ITEMS = "maxItems" + KEY_UNIQUE_ITEMS = "uniqueItems" + KEY_CONTAINS = "contains" + KEY_CONST = "const" + KEY_ENUM = "enum" + KEY_ONE_OF = "oneOf" + KEY_ANY_OF = "anyOf" + 
KEY_ALL_OF = "allOf" + KEY_NOT = "not" + KEY_IF = "if" + KEY_THEN = "then" + KEY_ELSE = "else" +) + +type subSchema struct { + draft *Draft + + // basic subSchema meta properties + id *gojsonreference.JsonReference + title *string + description *string + + property string + + // Quick pass/fail for boolean schemas + pass *bool + + // Types associated with the subSchema + types jsonSchemaType + + // Reference url + ref *gojsonreference.JsonReference + // Schema referenced + refSchema *subSchema + + // hierarchy + parent *subSchema + itemsChildren []*subSchema + itemsChildrenIsSingleSchema bool + propertiesChildren []*subSchema + + // validation : number / integer + multipleOf *big.Rat + maximum *big.Rat + exclusiveMaximum *big.Rat + minimum *big.Rat + exclusiveMinimum *big.Rat + + // validation : string + minLength *int + maxLength *int + pattern *regexp.Regexp + format string + + // validation : object + minProperties *int + maxProperties *int + required []string + + dependencies map[string]interface{} + additionalProperties interface{} + patternProperties map[string]*subSchema + propertyNames *subSchema + + // validation : array + minItems *int + maxItems *int + uniqueItems bool + contains *subSchema + + additionalItems interface{} + + // validation : all + _const *string //const is a golang keyword + enum []string + + // validation : subSchema + oneOf []*subSchema + anyOf []*subSchema + allOf []*subSchema + not *subSchema + _if *subSchema // if/else are golang keywords + _then *subSchema + _else *subSchema +} diff --git a/vendor/github.com/xeipuuv/gojsonschema/types.go b/vendor/github.com/xeipuuv/gojsonschema/types.go new file mode 100644 index 0000000000..0e6fd51735 --- /dev/null +++ b/vendor/github.com/xeipuuv/gojsonschema/types.go @@ -0,0 +1,62 @@ +// Copyright 2015 xeipuuv ( https://github.com/xeipuuv ) +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// author xeipuuv +// author-github https://github.com/xeipuuv +// author-mail xeipuuv@gmail.com +// +// repository-name gojsonschema +// repository-desc An implementation of JSON Schema, based on IETF's draft v4 - Go language. +// +// description Contains const types for schema and JSON. +// +// created 28-02-2013 + +package gojsonschema + +// Type constants +const ( + TYPE_ARRAY = `array` + TYPE_BOOLEAN = `boolean` + TYPE_INTEGER = `integer` + TYPE_NUMBER = `number` + TYPE_NULL = `null` + TYPE_OBJECT = `object` + TYPE_STRING = `string` +) + +// JSON_TYPES hosts the list of type that are supported in JSON +var JSON_TYPES []string + +// SCHEMA_TYPES hosts the list of type that are supported in schemas +var SCHEMA_TYPES []string + +func init() { + JSON_TYPES = []string{ + TYPE_ARRAY, + TYPE_BOOLEAN, + TYPE_INTEGER, + TYPE_NUMBER, + TYPE_NULL, + TYPE_OBJECT, + TYPE_STRING} + + SCHEMA_TYPES = []string{ + TYPE_ARRAY, + TYPE_BOOLEAN, + TYPE_INTEGER, + TYPE_NUMBER, + TYPE_OBJECT, + TYPE_STRING} +} diff --git a/vendor/github.com/xeipuuv/gojsonschema/utils.go b/vendor/github.com/xeipuuv/gojsonschema/utils.go new file mode 100644 index 0000000000..a17d22e3bd --- /dev/null +++ b/vendor/github.com/xeipuuv/gojsonschema/utils.go @@ -0,0 +1,197 @@ +// Copyright 2015 xeipuuv ( https://github.com/xeipuuv ) +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// author xeipuuv +// author-github https://github.com/xeipuuv +// author-mail xeipuuv@gmail.com +// +// repository-name gojsonschema +// repository-desc An implementation of JSON Schema, based on IETF's draft v4 - Go language. +// +// description Various utility functions. +// +// created 26-02-2013 + +package gojsonschema + +import ( + "encoding/json" + "math/big" + "reflect" +) + +func isKind(what interface{}, kinds ...reflect.Kind) bool { + target := what + if isJSONNumber(what) { + // JSON Numbers are strings! + target = *mustBeNumber(what) + } + targetKind := reflect.ValueOf(target).Kind() + for _, kind := range kinds { + if targetKind == kind { + return true + } + } + return false +} + +func existsMapKey(m map[string]interface{}, k string) bool { + _, ok := m[k] + return ok +} + +func isStringInSlice(s []string, what string) bool { + for i := range s { + if s[i] == what { + return true + } + } + return false +} + +// indexStringInSlice returns the index of the first instance of 'what' in s or -1 if it is not found in s. 
+func indexStringInSlice(s []string, what string) int { + for i := range s { + if s[i] == what { + return i + } + } + return -1 +} + +func marshalToJSONString(value interface{}) (*string, error) { + + mBytes, err := json.Marshal(value) + if err != nil { + return nil, err + } + + sBytes := string(mBytes) + return &sBytes, nil +} + +func marshalWithoutNumber(value interface{}) (*string, error) { + + // The JSON is decoded using https://golang.org/pkg/encoding/json/#Decoder.UseNumber + // This means the numbers are internally still represented as strings and therefore 1.00 is unequal to 1 + // One way to eliminate these differences is to decode and encode the JSON one more time without Decoder.UseNumber + // so that these differences in representation are removed + + jsonString, err := marshalToJSONString(value) + if err != nil { + return nil, err + } + + var document interface{} + + err = json.Unmarshal([]byte(*jsonString), &document) + if err != nil { + return nil, err + } + + return marshalToJSONString(document) +} + +func isJSONNumber(what interface{}) bool { + + switch what.(type) { + + case json.Number: + return true + } + + return false +} + +func checkJSONInteger(what interface{}) (isInt bool) { + + jsonNumber := what.(json.Number) + + bigFloat, isValidNumber := new(big.Rat).SetString(string(jsonNumber)) + + return isValidNumber && bigFloat.IsInt() + +} + +// same as ECMA Number.MAX_SAFE_INTEGER and Number.MIN_SAFE_INTEGER +const ( + maxJSONFloat = float64(1<<53 - 1) // 9007199254740991.0 2^53 - 1 + minJSONFloat = -float64(1<<53 - 1) //-9007199254740991.0 -2^53 - 1 +) + +func mustBeInteger(what interface{}) *int { + + if isJSONNumber(what) { + + number := what.(json.Number) + + isInt := checkJSONInteger(number) + + if isInt { + + int64Value, err := number.Int64() + if err != nil { + return nil + } + + int32Value := int(int64Value) + return &int32Value + } + + } + + return nil +} + +func mustBeNumber(what interface{}) *big.Rat { + + if isJSONNumber(what) { + 
number := what.(json.Number) + float64Value, success := new(big.Rat).SetString(string(number)) + if success { + return float64Value + } + } + + return nil + +} + +func convertDocumentNode(val interface{}) interface{} { + + if lval, ok := val.([]interface{}); ok { + + res := []interface{}{} + for _, v := range lval { + res = append(res, convertDocumentNode(v)) + } + + return res + + } + + if mval, ok := val.(map[interface{}]interface{}); ok { + + res := map[string]interface{}{} + + for k, v := range mval { + res[k.(string)] = convertDocumentNode(v) + } + + return res + + } + + return val +} diff --git a/vendor/github.com/xeipuuv/gojsonschema/validation.go b/vendor/github.com/xeipuuv/gojsonschema/validation.go new file mode 100644 index 0000000000..74091bca19 --- /dev/null +++ b/vendor/github.com/xeipuuv/gojsonschema/validation.go @@ -0,0 +1,858 @@ +// Copyright 2015 xeipuuv ( https://github.com/xeipuuv ) +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// author xeipuuv +// author-github https://github.com/xeipuuv +// author-mail xeipuuv@gmail.com +// +// repository-name gojsonschema +// repository-desc An implementation of JSON Schema, based on IETF's draft v4 - Go language. +// +// description Extends Schema and subSchema, implements the validation phase. 
+// +// created 28-02-2013 + +package gojsonschema + +import ( + "encoding/json" + "math/big" + "reflect" + "regexp" + "strconv" + "strings" + "unicode/utf8" +) + +// Validate loads and validates a JSON schema +func Validate(ls JSONLoader, ld JSONLoader) (*Result, error) { + // load schema + schema, err := NewSchema(ls) + if err != nil { + return nil, err + } + return schema.Validate(ld) +} + +// Validate loads and validates a JSON document +func (v *Schema) Validate(l JSONLoader) (*Result, error) { + root, err := l.LoadJSON() + if err != nil { + return nil, err + } + return v.validateDocument(root), nil +} + +func (v *Schema) validateDocument(root interface{}) *Result { + result := &Result{} + context := NewJsonContext(STRING_CONTEXT_ROOT, nil) + v.rootSchema.validateRecursive(v.rootSchema, root, result, context) + return result +} + +func (v *subSchema) subValidateWithContext(document interface{}, context *JsonContext) *Result { + result := &Result{} + v.validateRecursive(v, document, result, context) + return result +} + +// Walker function to validate the json recursively against the subSchema +func (v *subSchema) validateRecursive(currentSubSchema *subSchema, currentNode interface{}, result *Result, context *JsonContext) { + + if internalLogEnabled { + internalLog("validateRecursive %s", context.String()) + internalLog(" %v", currentNode) + } + + // Handle true/false schema as early as possible as all other fields will be nil + if currentSubSchema.pass != nil { + if !*currentSubSchema.pass { + result.addInternalError( + new(FalseError), + context, + currentNode, + ErrorDetails{}, + ) + } + return + } + + // Handle referenced schemas, returns directly when a $ref is found + if currentSubSchema.refSchema != nil { + v.validateRecursive(currentSubSchema.refSchema, currentNode, result, context) + return + } + + // Check for null value + if currentNode == nil { + if currentSubSchema.types.IsTyped() && !currentSubSchema.types.Contains(TYPE_NULL) { + 
result.addInternalError( + new(InvalidTypeError), + context, + currentNode, + ErrorDetails{ + "expected": currentSubSchema.types.String(), + "given": TYPE_NULL, + }, + ) + return + } + + currentSubSchema.validateSchema(currentSubSchema, currentNode, result, context) + v.validateCommon(currentSubSchema, currentNode, result, context) + + } else { // Not a null value + + if isJSONNumber(currentNode) { + + value := currentNode.(json.Number) + + isInt := checkJSONInteger(value) + + validType := currentSubSchema.types.Contains(TYPE_NUMBER) || (isInt && currentSubSchema.types.Contains(TYPE_INTEGER)) + + if currentSubSchema.types.IsTyped() && !validType { + + givenType := TYPE_INTEGER + if !isInt { + givenType = TYPE_NUMBER + } + + result.addInternalError( + new(InvalidTypeError), + context, + currentNode, + ErrorDetails{ + "expected": currentSubSchema.types.String(), + "given": givenType, + }, + ) + return + } + + currentSubSchema.validateSchema(currentSubSchema, value, result, context) + v.validateNumber(currentSubSchema, value, result, context) + v.validateCommon(currentSubSchema, value, result, context) + v.validateString(currentSubSchema, value, result, context) + + } else { + + rValue := reflect.ValueOf(currentNode) + rKind := rValue.Kind() + + switch rKind { + + // Slice => JSON array + + case reflect.Slice: + + if currentSubSchema.types.IsTyped() && !currentSubSchema.types.Contains(TYPE_ARRAY) { + result.addInternalError( + new(InvalidTypeError), + context, + currentNode, + ErrorDetails{ + "expected": currentSubSchema.types.String(), + "given": TYPE_ARRAY, + }, + ) + return + } + + castCurrentNode := currentNode.([]interface{}) + + currentSubSchema.validateSchema(currentSubSchema, castCurrentNode, result, context) + + v.validateArray(currentSubSchema, castCurrentNode, result, context) + v.validateCommon(currentSubSchema, castCurrentNode, result, context) + + // Map => JSON object + + case reflect.Map: + if currentSubSchema.types.IsTyped() && 
!currentSubSchema.types.Contains(TYPE_OBJECT) { + result.addInternalError( + new(InvalidTypeError), + context, + currentNode, + ErrorDetails{ + "expected": currentSubSchema.types.String(), + "given": TYPE_OBJECT, + }, + ) + return + } + + castCurrentNode, ok := currentNode.(map[string]interface{}) + if !ok { + castCurrentNode = convertDocumentNode(currentNode).(map[string]interface{}) + } + + currentSubSchema.validateSchema(currentSubSchema, castCurrentNode, result, context) + + v.validateObject(currentSubSchema, castCurrentNode, result, context) + v.validateCommon(currentSubSchema, castCurrentNode, result, context) + + for _, pSchema := range currentSubSchema.propertiesChildren { + nextNode, ok := castCurrentNode[pSchema.property] + if ok { + subContext := NewJsonContext(pSchema.property, context) + v.validateRecursive(pSchema, nextNode, result, subContext) + } + } + + // Simple JSON values : string, number, boolean + + case reflect.Bool: + + if currentSubSchema.types.IsTyped() && !currentSubSchema.types.Contains(TYPE_BOOLEAN) { + result.addInternalError( + new(InvalidTypeError), + context, + currentNode, + ErrorDetails{ + "expected": currentSubSchema.types.String(), + "given": TYPE_BOOLEAN, + }, + ) + return + } + + value := currentNode.(bool) + + currentSubSchema.validateSchema(currentSubSchema, value, result, context) + v.validateNumber(currentSubSchema, value, result, context) + v.validateCommon(currentSubSchema, value, result, context) + v.validateString(currentSubSchema, value, result, context) + + case reflect.String: + + if currentSubSchema.types.IsTyped() && !currentSubSchema.types.Contains(TYPE_STRING) { + result.addInternalError( + new(InvalidTypeError), + context, + currentNode, + ErrorDetails{ + "expected": currentSubSchema.types.String(), + "given": TYPE_STRING, + }, + ) + return + } + + value := currentNode.(string) + + currentSubSchema.validateSchema(currentSubSchema, value, result, context) + v.validateNumber(currentSubSchema, value, result, 
context) + v.validateCommon(currentSubSchema, value, result, context) + v.validateString(currentSubSchema, value, result, context) + + } + + } + + } + + result.incrementScore() +} + +// Different kinds of validation there, subSchema / common / array / object / string... +func (v *subSchema) validateSchema(currentSubSchema *subSchema, currentNode interface{}, result *Result, context *JsonContext) { + + if internalLogEnabled { + internalLog("validateSchema %s", context.String()) + internalLog(" %v", currentNode) + } + + if len(currentSubSchema.anyOf) > 0 { + + validatedAnyOf := false + var bestValidationResult *Result + + for _, anyOfSchema := range currentSubSchema.anyOf { + if !validatedAnyOf { + validationResult := anyOfSchema.subValidateWithContext(currentNode, context) + validatedAnyOf = validationResult.Valid() + + if !validatedAnyOf && (bestValidationResult == nil || validationResult.score > bestValidationResult.score) { + bestValidationResult = validationResult + } + } + } + if !validatedAnyOf { + + result.addInternalError(new(NumberAnyOfError), context, currentNode, ErrorDetails{}) + + if bestValidationResult != nil { + // add error messages of closest matching subSchema as + // that's probably the one the user was trying to match + result.mergeErrors(bestValidationResult) + } + } + } + + if len(currentSubSchema.oneOf) > 0 { + + nbValidated := 0 + var bestValidationResult *Result + + for _, oneOfSchema := range currentSubSchema.oneOf { + validationResult := oneOfSchema.subValidateWithContext(currentNode, context) + if validationResult.Valid() { + nbValidated++ + } else if nbValidated == 0 && (bestValidationResult == nil || validationResult.score > bestValidationResult.score) { + bestValidationResult = validationResult + } + } + + if nbValidated != 1 { + + result.addInternalError(new(NumberOneOfError), context, currentNode, ErrorDetails{}) + + if nbValidated == 0 { + // add error messages of closest matching subSchema as + // that's probably the one the user 
was trying to match + result.mergeErrors(bestValidationResult) + } + } + + } + + if len(currentSubSchema.allOf) > 0 { + nbValidated := 0 + + for _, allOfSchema := range currentSubSchema.allOf { + validationResult := allOfSchema.subValidateWithContext(currentNode, context) + if validationResult.Valid() { + nbValidated++ + } + result.mergeErrors(validationResult) + } + + if nbValidated != len(currentSubSchema.allOf) { + result.addInternalError(new(NumberAllOfError), context, currentNode, ErrorDetails{}) + } + } + + if currentSubSchema.not != nil { + validationResult := currentSubSchema.not.subValidateWithContext(currentNode, context) + if validationResult.Valid() { + result.addInternalError(new(NumberNotError), context, currentNode, ErrorDetails{}) + } + } + + if currentSubSchema.dependencies != nil && len(currentSubSchema.dependencies) > 0 { + if isKind(currentNode, reflect.Map) { + for elementKey := range currentNode.(map[string]interface{}) { + if dependency, ok := currentSubSchema.dependencies[elementKey]; ok { + switch dependency := dependency.(type) { + + case []string: + for _, dependOnKey := range dependency { + if _, dependencyResolved := currentNode.(map[string]interface{})[dependOnKey]; !dependencyResolved { + result.addInternalError( + new(MissingDependencyError), + context, + currentNode, + ErrorDetails{"dependency": dependOnKey}, + ) + } + } + + case *subSchema: + dependency.validateRecursive(dependency, currentNode, result, context) + } + } + } + } + } + + if currentSubSchema._if != nil { + validationResultIf := currentSubSchema._if.subValidateWithContext(currentNode, context) + if currentSubSchema._then != nil && validationResultIf.Valid() { + validationResultThen := currentSubSchema._then.subValidateWithContext(currentNode, context) + if !validationResultThen.Valid() { + result.addInternalError(new(ConditionThenError), context, currentNode, ErrorDetails{}) + result.mergeErrors(validationResultThen) + } + } + if currentSubSchema._else != nil && 
!validationResultIf.Valid() { + validationResultElse := currentSubSchema._else.subValidateWithContext(currentNode, context) + if !validationResultElse.Valid() { + result.addInternalError(new(ConditionElseError), context, currentNode, ErrorDetails{}) + result.mergeErrors(validationResultElse) + } + } + } + + result.incrementScore() +} + +func (v *subSchema) validateCommon(currentSubSchema *subSchema, value interface{}, result *Result, context *JsonContext) { + + if internalLogEnabled { + internalLog("validateCommon %s", context.String()) + internalLog(" %v", value) + } + + // const: + if currentSubSchema._const != nil { + vString, err := marshalWithoutNumber(value) + if err != nil { + result.addInternalError(new(InternalError), context, value, ErrorDetails{"error": err}) + } + if *vString != *currentSubSchema._const { + result.addInternalError(new(ConstError), + context, + value, + ErrorDetails{ + "allowed": *currentSubSchema._const, + }, + ) + } + } + + // enum: + if len(currentSubSchema.enum) > 0 { + vString, err := marshalWithoutNumber(value) + if err != nil { + result.addInternalError(new(InternalError), context, value, ErrorDetails{"error": err}) + } + if !isStringInSlice(currentSubSchema.enum, *vString) { + result.addInternalError( + new(EnumError), + context, + value, + ErrorDetails{ + "allowed": strings.Join(currentSubSchema.enum, ", "), + }, + ) + } + } + + result.incrementScore() +} + +func (v *subSchema) validateArray(currentSubSchema *subSchema, value []interface{}, result *Result, context *JsonContext) { + + if internalLogEnabled { + internalLog("validateArray %s", context.String()) + internalLog(" %v", value) + } + + nbValues := len(value) + + // TODO explain + if currentSubSchema.itemsChildrenIsSingleSchema { + for i := range value { + subContext := NewJsonContext(strconv.Itoa(i), context) + validationResult := currentSubSchema.itemsChildren[0].subValidateWithContext(value[i], subContext) + result.mergeErrors(validationResult) + } + } else { + if 
currentSubSchema.itemsChildren != nil && len(currentSubSchema.itemsChildren) > 0 { + + nbItems := len(currentSubSchema.itemsChildren) + + // while we have both schemas and values, check them against each other + for i := 0; i != nbItems && i != nbValues; i++ { + subContext := NewJsonContext(strconv.Itoa(i), context) + validationResult := currentSubSchema.itemsChildren[i].subValidateWithContext(value[i], subContext) + result.mergeErrors(validationResult) + } + + if nbItems < nbValues { + // we have less schemas than elements in the instance array, + // but that might be ok if "additionalItems" is specified. + + switch currentSubSchema.additionalItems.(type) { + case bool: + if !currentSubSchema.additionalItems.(bool) { + result.addInternalError(new(ArrayNoAdditionalItemsError), context, value, ErrorDetails{}) + } + case *subSchema: + additionalItemSchema := currentSubSchema.additionalItems.(*subSchema) + for i := nbItems; i != nbValues; i++ { + subContext := NewJsonContext(strconv.Itoa(i), context) + validationResult := additionalItemSchema.subValidateWithContext(value[i], subContext) + result.mergeErrors(validationResult) + } + } + } + } + } + + // minItems & maxItems + if currentSubSchema.minItems != nil { + if nbValues < int(*currentSubSchema.minItems) { + result.addInternalError( + new(ArrayMinItemsError), + context, + value, + ErrorDetails{"min": *currentSubSchema.minItems}, + ) + } + } + if currentSubSchema.maxItems != nil { + if nbValues > int(*currentSubSchema.maxItems) { + result.addInternalError( + new(ArrayMaxItemsError), + context, + value, + ErrorDetails{"max": *currentSubSchema.maxItems}, + ) + } + } + + // uniqueItems: + if currentSubSchema.uniqueItems { + var stringifiedItems = make(map[string]int) + for j, v := range value { + vString, err := marshalWithoutNumber(v) + if err != nil { + result.addInternalError(new(InternalError), context, value, ErrorDetails{"err": err}) + } + if i, ok := stringifiedItems[*vString]; ok { + result.addInternalError( + 
new(ItemsMustBeUniqueError), + context, + value, + ErrorDetails{"type": TYPE_ARRAY, "i": i, "j": j}, + ) + } + stringifiedItems[*vString] = j + } + } + + // contains: + + if currentSubSchema.contains != nil { + validatedOne := false + var bestValidationResult *Result + + for i, v := range value { + subContext := NewJsonContext(strconv.Itoa(i), context) + + validationResult := currentSubSchema.contains.subValidateWithContext(v, subContext) + if validationResult.Valid() { + validatedOne = true + break + } else { + if bestValidationResult == nil || validationResult.score > bestValidationResult.score { + bestValidationResult = validationResult + } + } + } + if !validatedOne { + result.addInternalError( + new(ArrayContainsError), + context, + value, + ErrorDetails{}, + ) + if bestValidationResult != nil { + result.mergeErrors(bestValidationResult) + } + } + } + + result.incrementScore() +} + +func (v *subSchema) validateObject(currentSubSchema *subSchema, value map[string]interface{}, result *Result, context *JsonContext) { + + if internalLogEnabled { + internalLog("validateObject %s", context.String()) + internalLog(" %v", value) + } + + // minProperties & maxProperties: + if currentSubSchema.minProperties != nil { + if len(value) < int(*currentSubSchema.minProperties) { + result.addInternalError( + new(ArrayMinPropertiesError), + context, + value, + ErrorDetails{"min": *currentSubSchema.minProperties}, + ) + } + } + if currentSubSchema.maxProperties != nil { + if len(value) > int(*currentSubSchema.maxProperties) { + result.addInternalError( + new(ArrayMaxPropertiesError), + context, + value, + ErrorDetails{"max": *currentSubSchema.maxProperties}, + ) + } + } + + // required: + for _, requiredProperty := range currentSubSchema.required { + _, ok := value[requiredProperty] + if ok { + result.incrementScore() + } else { + result.addInternalError( + new(RequiredError), + context, + value, + ErrorDetails{"property": requiredProperty}, + ) + } + } + + // additionalProperty 
& patternProperty: + for pk := range value { + + // Check whether this property is described by "properties" + found := false + for _, spValue := range currentSubSchema.propertiesChildren { + if pk == spValue.property { + found = true + } + } + + // Check whether this property is described by "patternProperties" + ppMatch := v.validatePatternProperty(currentSubSchema, pk, value[pk], result, context) + + // If it is not described by neither "properties" nor "patternProperties" it must pass "additionalProperties" + if !found && !ppMatch { + switch ap := currentSubSchema.additionalProperties.(type) { + case bool: + // Handle the boolean case separately as it's cleaner to return a specific error than failing to pass the false schema + if !ap { + result.addInternalError( + new(AdditionalPropertyNotAllowedError), + context, + value[pk], + ErrorDetails{"property": pk}, + ) + + } + case *subSchema: + validationResult := ap.subValidateWithContext(value[pk], NewJsonContext(pk, context)) + result.mergeErrors(validationResult) + } + } + } + + // propertyNames: + if currentSubSchema.propertyNames != nil { + for pk := range value { + validationResult := currentSubSchema.propertyNames.subValidateWithContext(pk, context) + if !validationResult.Valid() { + result.addInternalError(new(InvalidPropertyNameError), + context, + value, ErrorDetails{ + "property": pk, + }) + result.mergeErrors(validationResult) + } + } + } + + result.incrementScore() +} + +func (v *subSchema) validatePatternProperty(currentSubSchema *subSchema, key string, value interface{}, result *Result, context *JsonContext) bool { + + if internalLogEnabled { + internalLog("validatePatternProperty %s", context.String()) + internalLog(" %s %v", key, value) + } + + validated := false + + for pk, pv := range currentSubSchema.patternProperties { + if matches, _ := regexp.MatchString(pk, key); matches { + validated = true + subContext := NewJsonContext(key, context) + validationResult := pv.subValidateWithContext(value, 
subContext) + result.mergeErrors(validationResult) + } + } + + if !validated { + return false + } + + result.incrementScore() + return true +} + +func (v *subSchema) validateString(currentSubSchema *subSchema, value interface{}, result *Result, context *JsonContext) { + + // Ignore JSON numbers + if isJSONNumber(value) { + return + } + + // Ignore non strings + if !isKind(value, reflect.String) { + return + } + + if internalLogEnabled { + internalLog("validateString %s", context.String()) + internalLog(" %v", value) + } + + stringValue := value.(string) + + // minLength & maxLength: + if currentSubSchema.minLength != nil { + if utf8.RuneCount([]byte(stringValue)) < int(*currentSubSchema.minLength) { + result.addInternalError( + new(StringLengthGTEError), + context, + value, + ErrorDetails{"min": *currentSubSchema.minLength}, + ) + } + } + if currentSubSchema.maxLength != nil { + if utf8.RuneCount([]byte(stringValue)) > int(*currentSubSchema.maxLength) { + result.addInternalError( + new(StringLengthLTEError), + context, + value, + ErrorDetails{"max": *currentSubSchema.maxLength}, + ) + } + } + + // pattern: + if currentSubSchema.pattern != nil { + if !currentSubSchema.pattern.MatchString(stringValue) { + result.addInternalError( + new(DoesNotMatchPatternError), + context, + value, + ErrorDetails{"pattern": currentSubSchema.pattern}, + ) + + } + } + + // format + if currentSubSchema.format != "" { + if !FormatCheckers.IsFormat(currentSubSchema.format, stringValue) { + result.addInternalError( + new(DoesNotMatchFormatError), + context, + value, + ErrorDetails{"format": currentSubSchema.format}, + ) + } + } + + result.incrementScore() +} + +func (v *subSchema) validateNumber(currentSubSchema *subSchema, value interface{}, result *Result, context *JsonContext) { + + // Ignore non numbers + if !isJSONNumber(value) { + return + } + + if internalLogEnabled { + internalLog("validateNumber %s", context.String()) + internalLog(" %v", value) + } + + number := 
value.(json.Number) + float64Value, _ := new(big.Rat).SetString(string(number)) + + // multipleOf: + if currentSubSchema.multipleOf != nil { + if q := new(big.Rat).Quo(float64Value, currentSubSchema.multipleOf); !q.IsInt() { + result.addInternalError( + new(MultipleOfError), + context, + number, + ErrorDetails{ + "multiple": new(big.Float).SetRat(currentSubSchema.multipleOf), + }, + ) + } + } + + //maximum & exclusiveMaximum: + if currentSubSchema.maximum != nil { + if float64Value.Cmp(currentSubSchema.maximum) == 1 { + result.addInternalError( + new(NumberLTEError), + context, + number, + ErrorDetails{ + "max": new(big.Float).SetRat(currentSubSchema.maximum), + }, + ) + } + } + if currentSubSchema.exclusiveMaximum != nil { + if float64Value.Cmp(currentSubSchema.exclusiveMaximum) >= 0 { + result.addInternalError( + new(NumberLTError), + context, + number, + ErrorDetails{ + "max": new(big.Float).SetRat(currentSubSchema.exclusiveMaximum), + }, + ) + } + } + + //minimum & exclusiveMinimum: + if currentSubSchema.minimum != nil { + if float64Value.Cmp(currentSubSchema.minimum) == -1 { + result.addInternalError( + new(NumberGTEError), + context, + number, + ErrorDetails{ + "min": new(big.Float).SetRat(currentSubSchema.minimum), + }, + ) + } + } + if currentSubSchema.exclusiveMinimum != nil { + if float64Value.Cmp(currentSubSchema.exclusiveMinimum) <= 0 { + result.addInternalError( + new(NumberGTError), + context, + number, + ErrorDetails{ + "min": new(big.Float).SetRat(currentSubSchema.exclusiveMinimum), + }, + ) + } + } + + // format + if currentSubSchema.format != "" { + if !FormatCheckers.IsFormat(currentSubSchema.format, float64Value) { + result.addInternalError( + new(DoesNotMatchFormatError), + context, + value, + ErrorDetails{"format": currentSubSchema.format}, + ) + } + } + + result.incrementScore() +} diff --git a/vendor/modules.txt b/vendor/modules.txt index 815f2e8a79..215a78629b 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -60,6 +60,16 @@ 
github.com/blang/semver github.com/bmatcuk/doublestar # github.com/cespare/xxhash/v2 v2.1.1 github.com/cespare/xxhash/v2 +# github.com/compose-spec/compose-go v1.2.2 +## explicit +github.com/compose-spec/compose-go/consts +github.com/compose-spec/compose-go/dotenv +github.com/compose-spec/compose-go/errdefs +github.com/compose-spec/compose-go/interpolation +github.com/compose-spec/compose-go/loader +github.com/compose-spec/compose-go/schema +github.com/compose-spec/compose-go/template +github.com/compose-spec/compose-go/types # github.com/containerd/cgroups v0.0.0-20200710171044-318312a37340 github.com/containerd/cgroups/stats/v1 # github.com/containerd/containerd v1.4.1-0.20201117152358-0edc412565dc @@ -77,6 +87,9 @@ github.com/cpuguy83/go-md2man/v2/md2man github.com/creack/pty # github.com/davecgh/go-spew v1.1.1 github.com/davecgh/go-spew/spew +# github.com/distribution/distribution/v3 v3.0.0-20210316161203-a01c71e2477e +github.com/distribution/distribution/v3/digestset +github.com/distribution/distribution/v3/reference # github.com/docker/cli v20.10.0-beta1.0.20201029214301-1d20b15adc38+incompatible ## explicit github.com/docker/cli/cli/command/image/build @@ -144,7 +157,6 @@ github.com/docker/go-connections/nat github.com/docker/go-connections/sockets github.com/docker/go-connections/tlsconfig # github.com/docker/go-metrics v0.0.1 -## explicit github.com/docker/go-metrics # github.com/docker/go-units v0.4.0 github.com/docker/go-units @@ -215,7 +227,6 @@ github.com/gorilla/mux ## explicit github.com/gorilla/websocket # github.com/imdario/mergo v0.3.12 -## explicit github.com/imdario/mergo # github.com/inconshreveable/go-update v0.0.0-20160112193335-8152e7eb6ccf ## explicit @@ -254,6 +265,8 @@ github.com/loft-sh/notify github.com/mattn/go-colorable # github.com/mattn/go-isatty v0.0.12 github.com/mattn/go-isatty +# github.com/mattn/go-shellwords v1.0.12 +github.com/mattn/go-shellwords # github.com/matttproud/golang_protobuf_extensions 
v1.0.2-0.20181231171920-c182affec369 github.com/matttproud/golang_protobuf_extensions/pbutil # github.com/mgutz/ansi v0.0.0-20170206155736-9520e82c474b @@ -264,6 +277,8 @@ github.com/mgutz/ansi github.com/mitchellh/go-homedir # github.com/mitchellh/go-wordwrap v1.0.0 github.com/mitchellh/go-wordwrap +# github.com/mitchellh/mapstructure v1.4.3 +github.com/mitchellh/mapstructure # github.com/moby/buildkit v0.8.2 ## explicit github.com/moby/buildkit/frontend/dockerfile/dockerignore @@ -408,6 +423,12 @@ github.com/ulikunitz/xz/lzma github.com/vmware-labs/yaml-jsonpath/pkg/yamlpath # github.com/xanzy/ssh-agent v0.2.1 github.com/xanzy/ssh-agent +# github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f +github.com/xeipuuv/gojsonpointer +# github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 +github.com/xeipuuv/gojsonreference +# github.com/xeipuuv/gojsonschema v1.2.0 +github.com/xeipuuv/gojsonschema # go.opencensus.io v0.23.0 go.opencensus.io go.opencensus.io/internal From 4e43653bd1f73b25645fcaa9b580c69362f8d994 Mon Sep 17 00:00:00 2001 From: Russell Centanni Date: Tue, 5 Apr 2022 18:36:11 -0400 Subject: [PATCH 9/9] test: re-enable init e2e tests --- cmd/init.go | 8 +- e2e/tests/init/init.go | 623 +++++++++++++++-------------- pkg/devspace/compose/dependency.go | 2 +- pkg/devspace/configure/image.go | 2 +- 4 files changed, 322 insertions(+), 313 deletions(-) diff --git a/cmd/init.go b/cmd/init.go index 83569198b4..8a829b0c05 100644 --- a/cmd/init.go +++ b/cmd/init.go @@ -61,6 +61,8 @@ const ( // InitCmd is a struct that defines a command call for "init" type InitCmd struct { + *flags.GlobalFlags + // Flags Reconfigure bool Dockerfile string @@ -72,7 +74,8 @@ type InitCmd struct { // NewInitCmd creates a new init command func NewInitCmd(f factory.Factory) *cobra.Command { cmd := &InitCmd{ - log: f.GetLog(), + log: f.GetLog(), + GlobalFlags: globalFlags, } initCmd := &cobra.Command{ @@ -201,7 +204,7 @@ func (cmd *InitCmd) initDevspace(f 
factory.Factory, configLoader loader.ConfigLo var config *latest.Config // create kubectl client - client, err := f.NewKubeClientFromContext(globalFlags.KubeContext, globalFlags.Namespace) + client, err := f.NewKubeClientFromContext(cmd.GlobalFlags.KubeContext, cmd.GlobalFlags.Namespace) if err == nil { configInterface, err := configLoader.Load(context.TODO(), client, &loader.ConfigOptions{}, cmd.log) if err == nil { @@ -728,6 +731,7 @@ func (cmd *InitCmd) render(f factory.Factory, config *latest.Config) (string, er Silent: true, ConfigPath: renderPath, }, + Pipeline: "deploy", SkipPush: true, SkipBuild: true, Render: true, diff --git a/e2e/tests/init/init.go b/e2e/tests/init/init.go index d1ce4da2fd..c88bff4278 100644 --- a/e2e/tests/init/init.go +++ b/e2e/tests/init/init.go @@ -1,311 +1,316 @@ package init -// import ( -// "bytes" -// "os" -// "path/filepath" -// "strings" - -// "github.com/loft-sh/devspace/pkg/devspace/config/loader/variable" - -// "github.com/loft-sh/devspace/cmd" -// "github.com/loft-sh/devspace/cmd/flags" -// "github.com/loft-sh/devspace/e2e/framework" -// "github.com/loft-sh/devspace/e2e/kube" -// "github.com/loft-sh/devspace/pkg/devspace/config/versions/latest" -// "github.com/loft-sh/devspace/pkg/util/survey" -// "github.com/onsi/ginkgo" -// "gopkg.in/yaml.v3" -// ) - -// var _ = DevSpaceDescribe("init", func() { -// initialDir, err := os.Getwd() -// if err != nil { -// panic(err) -// } - -// // create a new factory -// var ( -// f *framework.DefaultFactory -// kubeClient *kube.KubeHelper -// ) - -// ginkgo.BeforeEach(func() { -// f = framework.NewDefaultFactory() - -// kubeClient, err = kube.NewKubeHelper() -// framework.ExpectNoError(err) -// }) - -// ginkgo.It("should create devspace.yml without registry details", func() { -// tempDir, err := framework.CopyToTempDir("tests/init/testdata/new") -// framework.ExpectNoError(err) -// defer framework.CleanupTempDir(initialDir, tempDir) - -// // set the question answer func here -// 
f.SetAnswerFunc(func(params *survey.QuestionOptions) (string, error) { -// if strings.Contains(params.Question, "Which registry would you want to use to push images to?") { -// return "Skip Registry", nil -// } - -// return params.DefaultValue, nil -// }) - -// initCmd := &cmd.InitCmd{} -// err = initCmd.Run(f) -// framework.ExpectNoError(err) - -// config, _, err := framework.LoadConfig(f, kubeClient.Client(), filepath.Join(tempDir, "devspace.yaml")) -// framework.ExpectNoError(err) - -// framework.ExpectEqual(len(config.Variables()), 1+len(variable.AlwaysResolvePredefinedVars)) -// framework.ExpectEqual(config.Variables()["IMAGE"], "username/app") - -// ns, err := kubeClient.CreateNamespace("init") -// framework.ExpectNoError(err) -// defer framework.ExpectDeleteNamespace(kubeClient, ns) - -// done := make(chan error) -// go func() { -// devCmd := &cmd.DevCmd{ -// GlobalFlags: &flags.GlobalFlags{ -// NoWarn: true, -// Namespace: ns, -// }, -// } -// done <- devCmd.Run(f, []string{"sh", "-c", "exit 0"}) -// }() - -// err = <-done -// framework.ExpectNoError(err) -// }) - -// ginkgo.It("should create devspace.yml without registry details and manifests deploy", func() { -// tempDir, err := framework.CopyToTempDir("tests/init/testdata/new") -// framework.ExpectNoError(err) -// defer framework.CleanupTempDir(initialDir, tempDir) - -// // set the question answer func here -// f.SetAnswerFunc(func(params *survey.QuestionOptions) (string, error) { -// if strings.Contains(params.Question, "Which registry would you want to use to push images to?") { -// return "Skip Registry", nil -// } - -// if strings.Contains(params.Question, "How do you want to deploy this project?") { -// return cmd.ManifestsOption, nil -// } - -// if strings.Contains(params.Question, "Please enter the paths to your Kubernetes manifests") { -// return "manifests/**", nil -// } - -// return params.DefaultValue, nil -// }) - -// initCmd := &cmd.InitCmd{} -// err = initCmd.Run(f) -// 
framework.ExpectNoError(err) - -// config, _, err := framework.LoadConfig(f, kubeClient.Client(), filepath.Join(tempDir, "devspace.yaml")) -// framework.ExpectNoError(err) - -// framework.ExpectEqual(len(config.Variables()), 1+len(variable.AlwaysResolvePredefinedVars)) -// framework.ExpectEqual(config.Variables()["IMAGE"], "username/app") - -// ns, err := kubeClient.CreateNamespace("init") -// framework.ExpectNoError(err) -// defer framework.ExpectDeleteNamespace(kubeClient, ns) - -// done := make(chan error) -// go func() { -// devCmd := &cmd.DevCmd{ -// GlobalFlags: &flags.GlobalFlags{ -// NoWarn: true, -// Namespace: ns, -// }, -// } -// done <- devCmd.Run(f, []string{"sh", "-c", "exit 0"}) -// }() - -// err = <-done -// framework.ExpectNoError(err) -// }) - -// ginkgo.It("should create devspace.yml without registry details and kustomize deploy", func() { -// tempDir, err := framework.CopyToTempDir("tests/init/testdata/new") -// framework.ExpectNoError(err) -// defer framework.CleanupTempDir(initialDir, tempDir) - -// // set the question answer func here -// f.SetAnswerFunc(func(params *survey.QuestionOptions) (string, error) { -// if strings.Contains(params.Question, "Which registry would you want to use to push images to?") { -// return "Skip Registry", nil -// } - -// if strings.Contains(params.Question, "How do you want to deploy this project?") { -// return cmd.KustomizeOption, nil -// } - -// if strings.Contains(params.Question, "Please enter path to your Kustomization folder") { -// return "./kustomization", nil -// } - -// return params.DefaultValue, nil -// }) - -// initCmd := &cmd.InitCmd{} -// err = initCmd.Run(f) -// framework.ExpectNoError(err) - -// config, _, err := framework.LoadConfig(f, kubeClient.Client(), filepath.Join(tempDir, "devspace.yaml")) -// framework.ExpectNoError(err) - -// framework.ExpectEqual(len(config.Variables()), 1+len(variable.AlwaysResolvePredefinedVars)) -// framework.ExpectEqual(config.Variables()["IMAGE"], 
"username/app") - -// ns, err := kubeClient.CreateNamespace("init") -// framework.ExpectNoError(err) -// defer framework.ExpectDeleteNamespace(kubeClient, ns) - -// done := make(chan error) -// go func() { -// devCmd := &cmd.DevCmd{ -// GlobalFlags: &flags.GlobalFlags{ -// NoWarn: true, -// Namespace: ns, -// }, -// } -// done <- devCmd.Run(f, []string{"sh", "-c", "exit 0"}) -// }() - -// err = <-done -// framework.ExpectNoError(err) -// }) - -// ginkgo.It("should create devspace.yml without registry details and local helm chart deploy", func() { -// tempDir, err := framework.CopyToTempDir("tests/init/testdata/new") -// framework.ExpectNoError(err) -// defer framework.CleanupTempDir(initialDir, tempDir) - -// // set the question answer func here -// f.SetAnswerFunc(func(params *survey.QuestionOptions) (string, error) { -// if strings.Contains(params.Question, "Which registry would you want to use to push images to?") { -// return "Skip Registry", nil -// } - -// if strings.Contains(params.Question, "How do you want to deploy this project?") { -// return cmd.HelmChartOption, nil -// } - -// if strings.Contains(params.Question, "Which Helm chart do you want to use?") { -// return `Use a local Helm chart (e.g. 
./helm/chart/)`, nil -// } - -// if strings.Contains(params.Question, "Please enter the relative path to your local Helm chart") { -// return "./chart", nil -// } - -// return params.DefaultValue, nil -// }) - -// initCmd := &cmd.InitCmd{} -// err = initCmd.Run(f) -// framework.ExpectNoError(err) - -// config, _, err := framework.LoadConfig(f, kubeClient.Client(), filepath.Join(tempDir, "devspace.yaml")) -// framework.ExpectNoError(err) - -// framework.ExpectEqual(len(config.Variables()), 1+len(variable.AlwaysResolvePredefinedVars)) -// framework.ExpectEqual(config.Variables()["IMAGE"], "username/app") - -// ns, err := kubeClient.CreateNamespace("init") -// framework.ExpectNoError(err) -// defer framework.ExpectDeleteNamespace(kubeClient, ns) - -// done := make(chan error) -// go func() { -// devCmd := &cmd.DevCmd{ -// GlobalFlags: &flags.GlobalFlags{ -// NoWarn: true, -// Namespace: ns, -// }, -// } -// done <- devCmd.Run(f, []string{"sh", "-c", "exit 0"}) -// }() - -// err = <-done -// framework.ExpectNoError(err) -// }) - -// ginkgo.It("should create devspace.yml from docker-compose.yaml", func() { -// tempDir, err := framework.CopyToTempDir("tests/init/testdata/docker-compose") -// framework.ExpectNoError(err) -// defer framework.CleanupTempDir(initialDir, tempDir) - -// ns, err := kubeClient.CreateNamespace("init") -// framework.ExpectNoError(err) -// defer func() { -// err := kubeClient.DeleteNamespace(ns) -// framework.ExpectNoError(err) -// }() - -// // Answer all questions with the default -// f.SetAnswerFunc(func(params *survey.QuestionOptions) (string, error) { -// return params.DefaultValue, nil -// }) - -// initCmd := &cmd.InitCmd{ -// Reconfigure: true, -// } -// err = initCmd.Run(f) -// framework.ExpectNoError(err) - -// // Created a devspace.yaml -// _, _, err = framework.LoadConfig(f, kubeClient.Client(), filepath.Join(tempDir, "devspace.yaml")) -// framework.ExpectNoError(err) - -// // Created a .gitignore -// _, err = 
os.Stat(filepath.Join(tempDir, ".gitignore")) -// framework.ExpectNoError(err) - -// // Created a .devspace/generated.yaml -// _, err = os.Stat(filepath.Join(tempDir, ".devspace", "generated.yaml")) -// framework.ExpectNoError(err) - -// // Print the config to verify the expected deployment -// var configBuffer bytes.Buffer -// printCmd := &cmd.PrintCmd{ -// GlobalFlags: &flags.GlobalFlags{ -// NoWarn: true, -// Debug: true, -// }, -// Out: &configBuffer, -// } - -// err = printCmd.Run(f) -// framework.ExpectNoError(err) - -// generatedConfig := &latest.Config{} -// err = yaml.Unmarshal(configBuffer.Bytes(), generatedConfig) -// framework.ExpectNoError(err) - -// // validate config -// framework.ExpectEqual(len(generatedConfig.Deployments), 1) -// framework.ExpectEqual(generatedConfig.Deployments["db"], "db") - -// // ensure valid configuration by deploying the application -// deployCmd := &cmd.DeployCmd{ -// GlobalFlags: &flags.GlobalFlags{ -// NoWarn: true, -// Debug: true, -// Namespace: ns, -// }, -// SkipPush: true, -// Wait: true, -// Timeout: 120, -// } - -// err = deployCmd.Run(f) -// framework.ExpectNoError(err) -// }, 120) -// }) +import ( + "bytes" + "fmt" + "os" + "path/filepath" + "strings" + + "github.com/loft-sh/devspace/pkg/devspace/config/loader/variable" + + "github.com/loft-sh/devspace/cmd" + "github.com/loft-sh/devspace/cmd/flags" + "github.com/loft-sh/devspace/e2e/framework" + "github.com/loft-sh/devspace/e2e/kube" + "github.com/loft-sh/devspace/pkg/devspace/config/versions/latest" + "github.com/loft-sh/devspace/pkg/util/survey" + "github.com/onsi/ginkgo" + "gopkg.in/yaml.v3" +) + +var _ = DevSpaceDescribe("init", func() { + initialDir, err := os.Getwd() + if err != nil { + panic(err) + } + + // create a new factory + var ( + f *framework.DefaultFactory + kubeClient *kube.KubeHelper + ) + + ginkgo.BeforeEach(func() { + f = framework.NewDefaultFactory() + + kubeClient, err = kube.NewKubeHelper() + framework.ExpectNoError(err) + }) + + 
ginkgo.It("should create devspace.yml without registry details", func() { + tempDir, err := framework.CopyToTempDir("tests/init/testdata/new") + framework.ExpectNoError(err) + defer framework.CleanupTempDir(initialDir, tempDir) + + // set the question answer func here + f.SetAnswerFunc(func(params *survey.QuestionOptions) (string, error) { + if strings.Contains(params.Question, "How do you want to deploy this project?") { + return cmd.DeployOptionHelm, nil + } + + if strings.Contains(params.Question, "If you were to push any images, which container registry would you want to push to?") { + return "Skip Registry", nil + } + + if strings.Contains(params.Question, "How should DevSpace build the container image for this project?") { + return "Skip / I don't know", nil + } + + fmt.Println(params.DefaultValue) + return params.DefaultValue, nil + }) + + initCmd := &cmd.InitCmd{GlobalFlags: &flags.GlobalFlags{}} + err = initCmd.Run(f) + framework.ExpectNoError(err) + + config, _, err := framework.LoadConfig(f, kubeClient.Client(), filepath.Join(tempDir, "devspace.yaml")) + framework.ExpectNoError(err) + + framework.ExpectEqual(len(config.Variables()), len(variable.AlwaysResolvePredefinedVars)) + + ns, err := kubeClient.CreateNamespace("init") + framework.ExpectNoError(err) + defer framework.ExpectDeleteNamespace(kubeClient, ns) + + done := make(chan error) + go func() { + devCmd := &cmd.RunPipelineCmd{ + GlobalFlags: &flags.GlobalFlags{ + NoWarn: true, + Namespace: ns, + }, + Pipeline: "dev", + } + done <- devCmd.Run(nil, []string{"sh", "-c", "exit 0"}, f, "dev", "devCommand") + }() + + err = <-done + framework.ExpectNoError(err) + }) + + ginkgo.It("should create devspace.yml without registry details and manifests deploy", func() { + tempDir, err := framework.CopyToTempDir("tests/init/testdata/new") + framework.ExpectNoError(err) + defer framework.CleanupTempDir(initialDir, tempDir) + + // set the question answer func here + f.SetAnswerFunc(func(params 
*survey.QuestionOptions) (string, error) { + if strings.Contains(params.Question, "Which registry would you want to use to push images to?") { + return "Skip Registry", nil + } + + if strings.Contains(params.Question, "How do you want to deploy this project?") { + return cmd.DeployOptionKubectl, nil + } + + if strings.Contains(params.Question, "Please enter the paths to your Kubernetes manifests") { + return "manifests/**", nil + } + + return params.DefaultValue, nil + }) + + initCmd := &cmd.InitCmd{GlobalFlags: &flags.GlobalFlags{}} + err = initCmd.Run(f) + framework.ExpectNoError(err) + + config, _, err := framework.LoadConfig(f, kubeClient.Client(), filepath.Join(tempDir, "devspace.yaml")) + framework.ExpectNoError(err) + + framework.ExpectEqual(len(config.Variables()), len(variable.AlwaysResolvePredefinedVars)) + + ns, err := kubeClient.CreateNamespace("init") + framework.ExpectNoError(err) + defer framework.ExpectDeleteNamespace(kubeClient, ns) + + done := make(chan error) + go func() { + devCmd := &cmd.RunPipelineCmd{ + GlobalFlags: &flags.GlobalFlags{ + NoWarn: true, + Namespace: ns, + }, + Pipeline: "dev", + } + done <- devCmd.Run(nil, []string{"sh", "-c", "exit 0"}, f, "dev", "devCommand") + }() + + err = <-done + framework.ExpectNoError(err) + }) + + ginkgo.It("should create devspace.yml without registry details and kustomize deploy", func() { + tempDir, err := framework.CopyToTempDir("tests/init/testdata/new") + framework.ExpectNoError(err) + defer framework.CleanupTempDir(initialDir, tempDir) + + // set the question answer func here + f.SetAnswerFunc(func(params *survey.QuestionOptions) (string, error) { + if strings.Contains(params.Question, "Which registry would you want to use to push images to?") { + return "Skip Registry", nil + } + + if strings.Contains(params.Question, "How do you want to deploy this project?") { + return cmd.DeployOptionKustomize, nil + } + + if strings.Contains(params.Question, "Please enter path to your Kustomization folder") 
{ + return "./kustomization", nil + } + + return params.DefaultValue, nil + }) + + initCmd := &cmd.InitCmd{GlobalFlags: &flags.GlobalFlags{}} + err = initCmd.Run(f) + framework.ExpectNoError(err) + + config, _, err := framework.LoadConfig(f, kubeClient.Client(), filepath.Join(tempDir, "devspace.yaml")) + framework.ExpectNoError(err) + + framework.ExpectEqual(len(config.Variables()), len(variable.AlwaysResolvePredefinedVars)) + + ns, err := kubeClient.CreateNamespace("init") + framework.ExpectNoError(err) + defer framework.ExpectDeleteNamespace(kubeClient, ns) + + done := make(chan error) + go func() { + devCmd := &cmd.RunPipelineCmd{ + GlobalFlags: &flags.GlobalFlags{ + NoWarn: true, + Namespace: ns, + }, + Pipeline: "dev", + } + done <- devCmd.Run(nil, []string{"sh", "-c", "exit 0"}, f, "dev", "devCommand") + }() + + err = <-done + framework.ExpectNoError(err) + }) + + ginkgo.It("should create devspace.yml without registry details and local helm chart deploy", func() { + tempDir, err := framework.CopyToTempDir("tests/init/testdata/new") + framework.ExpectNoError(err) + defer framework.CleanupTempDir(initialDir, tempDir) + + // set the question answer func here + f.SetAnswerFunc(func(params *survey.QuestionOptions) (string, error) { + if strings.Contains(params.Question, "Which registry would you want to use to push images to?") { + return "Skip Registry", nil + } + + if strings.Contains(params.Question, "How do you want to deploy this project?") { + return cmd.DeployOptionHelm, nil + } + + if strings.Contains(params.Question, "Which Helm chart do you want to use?") { + return `Use a local Helm chart (e.g. 
./helm/chart/)`, nil + } + + if strings.Contains(params.Question, "Please enter the relative path to your local Helm chart") { + return "./chart", nil + } + + return params.DefaultValue, nil + }) + + initCmd := &cmd.InitCmd{GlobalFlags: &flags.GlobalFlags{}} + err = initCmd.Run(f) + framework.ExpectNoError(err) + + config, _, err := framework.LoadConfig(f, kubeClient.Client(), filepath.Join(tempDir, "devspace.yaml")) + framework.ExpectNoError(err) + + framework.ExpectEqual(len(config.Variables()), len(variable.AlwaysResolvePredefinedVars)) + + ns, err := kubeClient.CreateNamespace("init") + framework.ExpectNoError(err) + defer framework.ExpectDeleteNamespace(kubeClient, ns) + + done := make(chan error) + go func() { + devCmd := &cmd.RunPipelineCmd{ + GlobalFlags: &flags.GlobalFlags{ + NoWarn: true, + Namespace: ns, + }, + Pipeline: "dev", + } + done <- devCmd.Run(nil, []string{"sh", "-c", "exit 0"}, f, "dev", "devCommand") + }() + + err = <-done + framework.ExpectNoError(err) + }) + + ginkgo.It("should create devspace.yml from docker-compose.yaml", func() { + tempDir, err := framework.CopyToTempDir("tests/init/testdata/docker-compose") + framework.ExpectNoError(err) + defer framework.CleanupTempDir(initialDir, tempDir) + + ns, err := kubeClient.CreateNamespace("init") + framework.ExpectNoError(err) + defer func() { + err := kubeClient.DeleteNamespace(ns) + framework.ExpectNoError(err) + }() + + // Answer all questions with the default + f.SetAnswerFunc(func(params *survey.QuestionOptions) (string, error) { + return params.DefaultValue, nil + }) + + initCmd := &cmd.InitCmd{ + Reconfigure: true, + } + err = initCmd.Run(f) + framework.ExpectNoError(err) + + // Created a devspace.yaml + _, _, err = framework.LoadConfig(f, kubeClient.Client(), filepath.Join(tempDir, "devspace.yaml")) + framework.ExpectNoError(err) + + // Created a .gitignore + _, err = os.Stat(filepath.Join(tempDir, ".gitignore")) + framework.ExpectNoError(err) + + // Print the config to verify the 
expected deployment + var configBuffer bytes.Buffer + printCmd := &cmd.PrintCmd{ + GlobalFlags: &flags.GlobalFlags{ + NoWarn: true, + Debug: true, + }, + Out: &configBuffer, + } + + err = printCmd.Run(f) + framework.ExpectNoError(err) + + generatedConfig := &latest.Config{} + err = yaml.Unmarshal(configBuffer.Bytes(), generatedConfig) + framework.ExpectNoError(err) + + // validate config + framework.ExpectEqual(len(generatedConfig.Deployments), 1) + framework.ExpectHaveKey(generatedConfig.Deployments, "db") + + // ensure valid configuration by deploying the application + deployCmd := &cmd.RunPipelineCmd{ + GlobalFlags: &flags.GlobalFlags{ + NoWarn: true, + Debug: true, + Namespace: ns, + }, + Pipeline: "deploy", + SkipPush: true, + } + err = deployCmd.RunDefault(f) + + framework.ExpectNoError(err) + }, 120) +}) diff --git a/pkg/devspace/compose/dependency.go b/pkg/devspace/compose/dependency.go index 38692cbe92..a5b61fc52a 100644 --- a/pkg/devspace/compose/dependency.go +++ b/pkg/devspace/compose/dependency.go @@ -42,7 +42,7 @@ func (cb *configBuilder) AddDependencies(dockerCompose *composetypes.Project, se cb.config.Dependencies[depName] = &latest.DependencyConfig{ Source: &latest.SourceConfig{ - Path: filepath.Join(relativePath, fileName), + Path: filepath.ToSlash(filepath.Join(relativePath, fileName)), }, } } diff --git a/pkg/devspace/configure/image.go b/pkg/devspace/configure/image.go index bb01216b8c..096f9e56c0 100644 --- a/pkg/devspace/configure/image.go +++ b/pkg/devspace/configure/image.go @@ -34,7 +34,7 @@ func (m *manager) AddImage(imageName, image, projectNamespace, dockerfile string registryUsernameHint = " => you are logged in as %s" rootLevelDockerfile = "Use this existing Dockerfile: " + dockerfile differentDockerfile = "Use a different Dockerfile (e.g. 
./backend/Dockerfile)" - subPathDockerfile = "Use an existing Dockerfile within in this project" + subPathDockerfile = "Use an existing Dockerfile within this project" customBuild = "Use alternative build tool (e.g. jib, bazel)" skip = "Skip / I don't know" err error