Add --expose flag to podman play kube
Kubernetes deployments and pods allow specifying ports for each container in the
spec. Podman exposed these ports on the host by default, but this causes
problems when multiple replicas are involved, because podman would then try to
bind multiple containers to the same host port.

This commit adds the new flag `--expose` to `podman play kube`, which explicitly
tells podman to publish ports on the host (off by default). Additionally, when
multiple replicas are involved, podman overrides the hostPort and picks a random
port on the host to avoid clashes.
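
A brief usage sketch of the resulting behavior (illustrative only: the pod name `test_pod`, the file `testpod.yaml`, and port 8080 mirror the system test added below, and the inspect output is shown approximately):

```
# default: containerPorts are no longer published on the host
podman kube play testpod.yaml
podman pod inspect test_pod --format '{{.InfraConfig.PortBindings}}'   # => map[]
podman kube down testpod.yaml

# with --expose: the hostPort from the spec is bound on the host
podman kube play --expose testpod.yaml
podman pod inspect test_pod --format '{{.InfraConfig.PortBindings}}'   # => map[8080/tcp:[{ 8080}]]
podman kube down testpod.yaml
```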

This fixes containers#16765

Signed-off-by: Dan Čermák <dcermak@suse.com>
dcermak committed Dec 8, 2022
1 parent 6e2e9ab commit ddb2fdc
Showing 7 changed files with 104 additions and 30 deletions.
1 change: 1 addition & 0 deletions cmd/podman/kube/play.go
@@ -139,6 +139,7 @@ func playFlags(cmd *cobra.Command) {
flags.BoolVar(&playOptions.TLSVerifyCLI, "tls-verify", true, "Require HTTPS and verify certificates when contacting registries")
flags.BoolVar(&playOptions.StartCLI, "start", true, "Start the pod after creating it")
flags.BoolVar(&playOptions.Force, "force", false, "Remove volumes as part of --down")
flags.BoolVar(&playOptions.Expose, "expose", false, "Expose containerPorts on the host")

authfileFlagName := "authfile"
flags.StringVar(&playOptions.Authfile, authfileFlagName, auth.GetDefaultAuthFile(), "Path of the authentication file. Use REGISTRY_AUTH_FILE environment variable to override")
6 changes: 6 additions & 0 deletions docs/source/markdown/podman-kube-play.1.md.in
@@ -138,6 +138,12 @@ Use *path* as the build context directory for each image. Requires --build optio

@@option creds

#### **--expose**

Set this flag to expose the `containerPorts` defined in the containers on the host. If the workload defines multiple replicas, podman chooses a random port on the host for each replica instead of the value from `hostPort`.

By default no ports are exposed on the host, matching the behavior of Kubernetes.
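
For example, with a Deployment named `test_pod` that sets `replicas: 3` and declares `containerPort: 8080` (a sketch; the file name `deployment.yaml` and the generated pod names follow the system test added in this change):

```
podman kube play --expose deployment.yaml
# each replica's pod binds containerPort 8080 to a different, randomly chosen host port
podman pod inspect test_pod-pod-0 --format '{{.InfraConfig.PortBindings}}'
```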

#### **--force**

Tear down the volumes linked to the PersistentVolumeClaims as part of --down
2 changes: 2 additions & 0 deletions pkg/domain/entities/play.go
@@ -21,6 +21,8 @@ type PlayKubeOptions struct {
// Down indicates whether to bring contents of a yaml file "down"
// as in stop
Down bool
// Expose indicates whether containerPorts should be exposed on the host
Expose bool
// Replace indicates whether to delete and recreate a yaml file
Replace bool
// Do not create /etc/hosts within the pod's containers,
35 changes: 19 additions & 16 deletions pkg/domain/infra/abi/play.go
@@ -204,6 +204,7 @@ func (ic *ContainerEngine) PlayKube(ctx context.Context, body io.Reader, options
case "Pod":
var podYAML v1.Pod
var podTemplateSpec v1.PodTemplateSpec
var deploymentSpec v1apps.DeploymentSpec

if err := yaml.Unmarshal(document, &podYAML); err != nil {
return nil, fmt.Errorf("unable to read YAML as Kube Pod: %w", err)
@@ -223,7 +224,9 @@ func (ic *ContainerEngine) PlayKube(ctx context.Context, body io.Reader, options
podYAML.Annotations[name] = val
}

r, proxies, err := ic.playKubePod(ctx, podTemplateSpec.ObjectMeta.Name, &podTemplateSpec, options, &ipIndex, podYAML.Annotations, configMaps, serviceContainer)
deploymentSpec.Template = podTemplateSpec

r, proxies, err := ic.playKubePod(ctx, podTemplateSpec.ObjectMeta.Name, &deploymentSpec, options, &ipIndex, podYAML.Annotations, configMaps, serviceContainer)
if err != nil {
return nil, err
}
@@ -325,7 +328,7 @@ func (ic *ContainerEngine) PlayKube(ctx context.Context, body io.Reader, options
func (ic *ContainerEngine) playKubeDeployment(ctx context.Context, deploymentYAML *v1apps.Deployment, options entities.PlayKubeOptions, ipIndex *int, configMaps []v1.ConfigMap, serviceContainer *libpod.Container) (*entities.PlayKubeReport, []*notifyproxy.NotifyProxy, error) {
var (
deploymentName string
podSpec v1.PodTemplateSpec
podSpec v1apps.DeploymentSpec
numReplicas int32
i int32
report entities.PlayKubeReport
@@ -339,7 +342,7 @@ func (ic *ContainerEngine) playKubeDeployment(ctx context.Context, deploymentYAM
if deploymentYAML.Spec.Replicas != nil {
numReplicas = *deploymentYAML.Spec.Replicas
}
podSpec = deploymentYAML.Spec.Template
podSpec = deploymentYAML.Spec

// create "replicas" number of pods
var notifyProxies []*notifyproxy.NotifyProxy
@@ -355,7 +358,7 @@ func (ic *ContainerEngine) playKubeDeployment(ctx context.Context, deploymentYAM
return &report, notifyProxies, nil
}

func (ic *ContainerEngine) playKubePod(ctx context.Context, podName string, podYAML *v1.PodTemplateSpec, options entities.PlayKubeOptions, ipIndex *int, annotations map[string]string, configMaps []v1.ConfigMap, serviceContainer *libpod.Container) (*entities.PlayKubeReport, []*notifyproxy.NotifyProxy, error) {
func (ic *ContainerEngine) playKubePod(ctx context.Context, podName string, podYAML *v1apps.DeploymentSpec, options entities.PlayKubeOptions, ipIndex *int, annotations map[string]string, configMaps []v1.ConfigMap, serviceContainer *libpod.Container) (*entities.PlayKubeReport, []*notifyproxy.NotifyProxy, error) {
var (
writer io.Writer
playKubePod entities.PlayKubePod
@@ -383,7 +386,7 @@ func (ic *ContainerEngine) playKubePod(ctx context.Context, podName string, podY
Net: &entities.NetOptions{NoHosts: options.NoHosts},
ExitPolicy: string(config.PodExitPolicyStop),
}
podOpt, err = kube.ToPodOpt(ctx, podName, podOpt, podYAML)
podOpt, err = kube.ToPodOpt(ctx, podName, podOpt, podYAML, options.Expose)
if err != nil {
return nil, nil, err
}
@@ -413,10 +416,10 @@ func (ic *ContainerEngine) playKubePod(ctx context.Context, podName string, podY

if options.Userns == "" {
options.Userns = "host"
if podYAML.Spec.HostUsers != nil && !*podYAML.Spec.HostUsers {
if podYAML.Template.Spec.HostUsers != nil && !*podYAML.Template.Spec.HostUsers {
options.Userns = "auto"
}
} else if podYAML.Spec.HostUsers != nil {
} else if podYAML.Template.Spec.HostUsers != nil {
logrus.Info("overriding the user namespace mode in the pod spec")
}

@@ -493,7 +496,7 @@ func (ic *ContainerEngine) playKubePod(ctx context.Context, podName string, podY
configMaps = append(configMaps, cm)
}

volumes, err := kube.InitializeVolumes(podYAML.Spec.Volumes, configMaps, secretsManager)
volumes, err := kube.InitializeVolumes(podYAML.Template.Spec.Volumes, configMaps, secretsManager)
if err != nil {
return nil, nil, err
}
@@ -535,13 +538,13 @@ func (ic *ContainerEngine) playKubePod(ctx context.Context, podName string, podY
}
}

seccompPaths, err := kube.InitializeSeccompPaths(podYAML.ObjectMeta.Annotations, options.SeccompProfileRoot)
seccompPaths, err := kube.InitializeSeccompPaths(podYAML.Template.ObjectMeta.Annotations, options.SeccompProfileRoot)
if err != nil {
return nil, nil, err
}

var ctrRestartPolicy string
switch podYAML.Spec.RestartPolicy {
switch podYAML.Template.Spec.RestartPolicy {
case v1.RestartPolicyAlways:
ctrRestartPolicy = define.RestartPolicyAlways
case v1.RestartPolicyOnFailure:
@@ -588,8 +591,8 @@ func (ic *ContainerEngine) playKubePod(ctx context.Context, podName string, podY
writer = os.Stderr
}

containers := make([]*libpod.Container, 0, len(podYAML.Spec.Containers))
initContainers := make([]*libpod.Container, 0, len(podYAML.Spec.InitContainers))
containers := make([]*libpod.Container, 0, len(podYAML.Template.Spec.Containers))
initContainers := make([]*libpod.Container, 0, len(podYAML.Template.Spec.InitContainers))

var cwd string
if options.ContextDir != "" {
@@ -602,7 +605,7 @@ func (ic *ContainerEngine) playKubePod(ctx context.Context, podName string, podY
}

ctrNames := make(map[string]string)
for _, initCtr := range podYAML.Spec.InitContainers {
for _, initCtr := range podYAML.Template.Spec.InitContainers {
// Error out if same name is used for more than one container
if _, ok := ctrNames[initCtr.Name]; ok {
return nil, nil, fmt.Errorf("the pod %q is invalid; duplicate container name %q detected", podName, initCtr.Name)
@@ -638,7 +641,7 @@ func (ic *ContainerEngine) playKubePod(ctx context.Context, podName string, podY
PodID: pod.ID(),
PodInfraID: podInfraID,
PodName: podName,
PodSecurityContext: podYAML.Spec.SecurityContext,
PodSecurityContext: podYAML.Template.Spec.SecurityContext,
RestartPolicy: ctrRestartPolicy,
SeccompPaths: seccompPaths,
SecretsManager: secretsManager,
@@ -665,7 +668,7 @@ func (ic *ContainerEngine) playKubePod(ctx context.Context, podName string, podY

var sdNotifyProxies []*notifyproxy.NotifyProxy // containers' sd-notify proxies

for _, container := range podYAML.Spec.Containers {
for _, container := range podYAML.Template.Spec.Containers {
// Error out if the same name is used for more than one container
if _, ok := ctrNames[container.Name]; ok {
return nil, nil, fmt.Errorf("the pod %q is invalid; duplicate container name %q detected", podName, container.Name)
@@ -692,7 +695,7 @@ func (ic *ContainerEngine) playKubePod(ctx context.Context, podName string, podY
PodID: pod.ID(),
PodInfraID: podInfraID,
PodName: podName,
PodSecurityContext: podYAML.Spec.SecurityContext,
PodSecurityContext: podYAML.Template.Spec.SecurityContext,
RestartPolicy: ctrRestartPolicy,
SeccompPaths: seccompPaths,
SecretsManager: secretsManager,
45 changes: 33 additions & 12 deletions pkg/specgen/generate/kube/kube.go
@@ -25,55 +25,67 @@ import (
"github.com/containers/podman/v4/libpod/define"
ann "github.com/containers/podman/v4/pkg/annotations"
"github.com/containers/podman/v4/pkg/domain/entities"
v1apps "github.com/containers/podman/v4/pkg/k8s.io/api/apps/v1"
v1 "github.com/containers/podman/v4/pkg/k8s.io/api/core/v1"
"github.com/containers/podman/v4/pkg/k8s.io/apimachinery/pkg/api/resource"
"github.com/containers/podman/v4/pkg/specgen"
"github.com/containers/podman/v4/pkg/specgen/generate"
systemdDefine "github.com/containers/podman/v4/pkg/systemd/define"
"github.com/containers/podman/v4/pkg/util"
"github.com/containers/podman/v4/utils"
"github.com/docker/docker/pkg/system"
"github.com/docker/go-units"
spec "github.com/opencontainers/runtime-spec/specs-go"
"github.com/sirupsen/logrus"
)

func ToPodOpt(ctx context.Context, podName string, p entities.PodCreateOptions, podYAML *v1.PodTemplateSpec) (entities.PodCreateOptions, error) {
func ToPodOpt(ctx context.Context, podName string, p entities.PodCreateOptions, podYAML *v1apps.DeploymentSpec, exposeHostPort bool) (entities.PodCreateOptions, error) {
p.Net = &entities.NetOptions{NoHosts: p.Net.NoHosts}

template := podYAML.Template

p.Name = podName
p.Labels = podYAML.ObjectMeta.Labels
p.Labels = template.ObjectMeta.Labels
// Kube pods must share {ipc, net, uts} by default
p.Share = append(p.Share, "ipc")
p.Share = append(p.Share, "net")
p.Share = append(p.Share, "uts")
// TODO we only configure Process namespace. We also need to account for Host{IPC,Network,PID}
// which is not currently possible with pod create
if podYAML.Spec.ShareProcessNamespace != nil && *podYAML.Spec.ShareProcessNamespace {
if template.Spec.ShareProcessNamespace != nil && *template.Spec.ShareProcessNamespace {
p.Share = append(p.Share, "pid")
}
p.Hostname = podYAML.Spec.Hostname
p.Hostname = template.Spec.Hostname
if p.Hostname == "" {
p.Hostname = podName
}
if podYAML.Spec.HostNetwork {
if template.Spec.HostNetwork {
p.Net.Network = specgen.Namespace{NSMode: "host"}
}
if podYAML.Spec.HostAliases != nil {
if template.Spec.HostAliases != nil {
if p.Net.NoHosts {
return p, errors.New("HostAliases in yaml file will not work with --no-hosts")
}
hosts := make([]string, 0, len(podYAML.Spec.HostAliases))
for _, hostAlias := range podYAML.Spec.HostAliases {
hosts := make([]string, 0, len(template.Spec.HostAliases))
for _, hostAlias := range template.Spec.HostAliases {
for _, host := range hostAlias.Hostnames {
hosts = append(hosts, host+":"+hostAlias.IP)
}
}
p.Net.AddHosts = hosts
}
podPorts := getPodPorts(podYAML.Spec.Containers)

replicas := int32(1)
if podYAML.Replicas != nil && *podYAML.Replicas > 1 {
replicas = *podYAML.Replicas
}
podPorts, err := getPodPorts(template.Spec.Containers, exposeHostPort, replicas)
if err != nil {
return entities.PodCreateOptions{}, err
}
p.Net.PublishPorts = podPorts

if dnsConfig := podYAML.Spec.DNSConfig; dnsConfig != nil {
if dnsConfig := template.Spec.DNSConfig; dnsConfig != nil {
// name servers
if dnsServers := dnsConfig.Nameservers; len(dnsServers) > 0 {
servers := make([]net.IP, 0)
@@ -934,7 +946,7 @@ func getContainerResources(container v1.Container) (v1.ResourceRequirements, err

// getPodPorts converts a slice of kube container descriptions to an
// array of portmapping
func getPodPorts(containers []v1.Container) []types.PortMapping {
func getPodPorts(containers []v1.Container, bindToHostPort bool, replicas int32) ([]types.PortMapping, error) {
var infraPorts []types.PortMapping
for _, container := range containers {
for _, p := range container.Ports {
@@ -947,6 +959,15 @@ func getPodPorts(containers []v1.Container) []types.PortMapping {
if p.Protocol == "" {
p.Protocol = "tcp"
}
if !bindToHostPort {
p.HostPort = 0
} else if replicas > 1 {
port, err := utils.GetRandomPort()
if err != nil {
return nil, err
}
p.HostPort = int32(port)
}
portBinding := types.PortMapping{
HostPort: uint16(p.HostPort),
ContainerPort: uint16(p.ContainerPort),
@@ -960,5 +981,5 @@ func getPodPorts(containers []v1.Container, bindToHostPort bool, replicas int32
}
}
}
return infraPorts
return infraPorts, nil
}
2 changes: 1 addition & 1 deletion test/e2e/play_kube_test.go
@@ -2685,7 +2685,7 @@ spec:
err := generateKubeYaml("pod", pod, kubeYaml)
Expect(err).ToNot(HaveOccurred())

kube := podmanTest.Podman([]string{"play", "kube", kubeYaml})
kube := podmanTest.Podman([]string{"play", "kube", "--expose", kubeYaml})
kube.WaitWithDefaultTimeout()
Expect(kube).Should(Exit(0))

43 changes: 42 additions & 1 deletion test/system/700-play.bats
@@ -454,11 +454,52 @@ spec:
containerPort: $HOST_PORT
" > $PODMAN_TMPDIR/testpod.yaml

run_podman kube play $PODMAN_TMPDIR/testpod.yaml
run_podman kube play --expose $PODMAN_TMPDIR/testpod.yaml
run_podman pod inspect test_pod --format "{{.InfraConfig.PortBindings}}"
assert "$output" = "map[$HOST_PORT/tcp:[{ $HOST_PORT}]]"
run_podman kube down $PODMAN_TMPDIR/testpod.yaml

run_podman kube play $PODMAN_TMPDIR/testpod.yaml
run_podman pod inspect test_pod --format "{{.InfraConfig.PortBindings}}"
assert "$output" = "map[]"
run_podman kube down $PODMAN_TMPDIR/testpod.yaml

run_podman pod rm -a -f
run_podman rm -a -f
}

@test "podman kube play - hostport and replicas" {
echo "
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: test_pod
spec:
replicas: 3
selector:
matchLabels:
app: test
template:
metadata:
labels:
app: test
spec:
containers:
- name: server
image: $IMAGE
ports:
- name: hostp
containerPort: 8080
" > "$PODMAN_TMPDIR/testpod.yaml"

run_podman kube play --expose "$PODMAN_TMPDIR/testpod.yaml"
for i in $(seq 0 2); do
run_podman pod inspect "test_pod-pod-$i" --format '{{ index .InfraConfig.PortBindings "8080/tcp" |len}}'
assert "$output" = "1" "Expected port bindings from 8080 to exactly one container port"
done
run_podman kube down "$PODMAN_TMPDIR/testpod.yaml"

run_podman pod rm -a -f
run_podman rm -a -f
}
