From 81e413c0198d751a85276aeb354b694824096d7d Mon Sep 17 00:00:00 2001 From: Patrick Dillon Date: Mon, 2 Mar 2026 16:57:36 -0500 Subject: [PATCH 1/3] bootkube: enable konnectivity Enables kube-apiserver running on the bootstrap node to access the pod network, specifically to enable access to webhooks running in the cluster. Changes: * Adds a new static Konnectivity server pod running on the bootstrap node * Configures the bootstrap KAS to use its local Konnectivity server for outbound cluster traffic * Adds a daemonset deployed into the cluster to run Konnectivity agent on every cluster node * Removes daemonset automatically in bootstrap teardown Co-authored-by: Matthew Booth --- .../opt/openshift/egress-selector-config.yaml | 15 ++++ .../konnectivity-agent-certs-secret.yaml | 13 ++++ .../konnectivity-agent-daemonset.yaml | 58 +++++++++++++++ .../konnectivity-config-override.yaml | 5 ++ .../opt/openshift/konnectivity-namespace.yaml | 6 ++ .../openshift/konnectivity-server-pod.yaml | 49 +++++++++++++ .../files/usr/local/bin/bootkube.sh.template | 13 +++- .../files/usr/local/bin/konnectivity-certs.sh | 55 ++++++++++++++ .../usr/local/bin/konnectivity.sh.template | 71 +++++++++++++++++++ 9 files changed, 284 insertions(+), 1 deletion(-) create mode 100644 data/data/bootstrap/files/opt/openshift/egress-selector-config.yaml create mode 100644 data/data/bootstrap/files/opt/openshift/konnectivity-agent-certs-secret.yaml create mode 100644 data/data/bootstrap/files/opt/openshift/konnectivity-agent-daemonset.yaml create mode 100644 data/data/bootstrap/files/opt/openshift/konnectivity-config-override.yaml create mode 100644 data/data/bootstrap/files/opt/openshift/konnectivity-namespace.yaml create mode 100644 data/data/bootstrap/files/opt/openshift/konnectivity-server-pod.yaml create mode 100644 data/data/bootstrap/files/usr/local/bin/konnectivity-certs.sh create mode 100644 data/data/bootstrap/files/usr/local/bin/konnectivity.sh.template diff --git 
a/data/data/bootstrap/files/opt/openshift/egress-selector-config.yaml b/data/data/bootstrap/files/opt/openshift/egress-selector-config.yaml new file mode 100644 index 00000000000..49332e9b47b --- /dev/null +++ b/data/data/bootstrap/files/opt/openshift/egress-selector-config.yaml @@ -0,0 +1,15 @@ +apiVersion: apiserver.k8s.io/v1beta1 +kind: EgressSelectorConfiguration +egressSelections: +- name: "cluster" + connection: + proxyProtocol: "HTTPConnect" + transport: + uds: + udsName: "/etc/kubernetes/config/konnectivity-server.socket" +- name: "controlplane" + connection: + proxyProtocol: "Direct" +- name: "etcd" + connection: + proxyProtocol: "Direct" diff --git a/data/data/bootstrap/files/opt/openshift/konnectivity-agent-certs-secret.yaml b/data/data/bootstrap/files/opt/openshift/konnectivity-agent-certs-secret.yaml new file mode 100644 index 00000000000..4fe0d702b5e --- /dev/null +++ b/data/data/bootstrap/files/opt/openshift/konnectivity-agent-certs-secret.yaml @@ -0,0 +1,13 @@ +apiVersion: v1 +kind: Secret +metadata: + name: konnectivity-agent-certs + namespace: openshift-bootstrap-konnectivity + labels: + app: konnectivity-agent + openshift.io/bootstrap-only: "true" +type: Opaque +data: + tls.crt: ${KONNECTIVITY_AGENT_CERT_BASE64} + tls.key: ${KONNECTIVITY_AGENT_KEY_BASE64} + ca.crt: ${KONNECTIVITY_CA_CERT_BASE64} diff --git a/data/data/bootstrap/files/opt/openshift/konnectivity-agent-daemonset.yaml b/data/data/bootstrap/files/opt/openshift/konnectivity-agent-daemonset.yaml new file mode 100644 index 00000000000..99aa5e37080 --- /dev/null +++ b/data/data/bootstrap/files/opt/openshift/konnectivity-agent-daemonset.yaml @@ -0,0 +1,58 @@ +apiVersion: apps/v1 +kind: DaemonSet +metadata: + name: konnectivity-agent + namespace: openshift-bootstrap-konnectivity + labels: + app: konnectivity-agent + openshift.io/bootstrap-only: "true" +spec: + selector: + matchLabels: + app: konnectivity-agent + updateStrategy: + type: RollingUpdate + rollingUpdate: + maxUnavailable: 10% + 
template: + metadata: + labels: + app: konnectivity-agent + spec: + hostNetwork: true + dnsPolicy: Default + priorityClassName: system-node-critical + tolerations: + - operator: Exists + containers: + - name: konnectivity-agent + image: ${KONNECTIVITY_IMAGE} + command: + - /usr/bin/proxy-agent + args: + - --logtostderr=true + - --ca-cert=/etc/konnectivity/ca.crt + - --agent-cert=/etc/konnectivity/tls.crt + - --agent-key=/etc/konnectivity/tls.key + - --proxy-server-host=${BOOTSTRAP_NODE_IP} + - --proxy-server-port=8091 + - --health-server-port=2041 + - --agent-identifiers=default-route=true + - --keepalive-time=30s + - --probe-interval=5s + - --sync-interval=5s + - --sync-interval-cap=30s + livenessProbe: + httpGet: + path: /healthz + port: 2041 + initialDelaySeconds: 10 + periodSeconds: 10 + volumeMounts: + - name: konnectivity-certs + mountPath: /etc/konnectivity + readOnly: true + volumes: + - name: konnectivity-certs + secret: + secretName: konnectivity-agent-certs diff --git a/data/data/bootstrap/files/opt/openshift/konnectivity-config-override.yaml b/data/data/bootstrap/files/opt/openshift/konnectivity-config-override.yaml new file mode 100644 index 00000000000..034779e03f1 --- /dev/null +++ b/data/data/bootstrap/files/opt/openshift/konnectivity-config-override.yaml @@ -0,0 +1,5 @@ +apiVersion: kubecontrolplane.config.openshift.io/v1 +kind: KubeAPIServerConfig +apiServerArguments: + egress-selector-config-file: + - "/etc/kubernetes/config/egress-selector-config.yaml" diff --git a/data/data/bootstrap/files/opt/openshift/konnectivity-namespace.yaml b/data/data/bootstrap/files/opt/openshift/konnectivity-namespace.yaml new file mode 100644 index 00000000000..cc668ac3364 --- /dev/null +++ b/data/data/bootstrap/files/opt/openshift/konnectivity-namespace.yaml @@ -0,0 +1,6 @@ +apiVersion: v1 +kind: Namespace +metadata: + name: openshift-bootstrap-konnectivity + labels: + openshift.io/bootstrap-only: "true" diff --git 
a/data/data/bootstrap/files/opt/openshift/konnectivity-server-pod.yaml b/data/data/bootstrap/files/opt/openshift/konnectivity-server-pod.yaml new file mode 100644 index 00000000000..00d063b0150 --- /dev/null +++ b/data/data/bootstrap/files/opt/openshift/konnectivity-server-pod.yaml @@ -0,0 +1,49 @@ +apiVersion: v1 +kind: Pod +metadata: + name: konnectivity-server + namespace: kube-system + labels: + app: konnectivity-server +spec: + hostNetwork: true + priorityClassName: system-node-critical + containers: + - name: konnectivity-server + image: ${KONNECTIVITY_IMAGE} + command: + - /usr/bin/proxy-server + args: + - --logtostderr=true + - --cluster-cert=/etc/konnectivity/server.crt + - --cluster-key=/etc/konnectivity/server.key + - --cluster-ca-cert=/etc/konnectivity/ca.crt + - --uds-name=/etc/kubernetes/bootstrap-configs/konnectivity-server.socket + - --server-port=0 + - --agent-port=8091 + - --health-port=2041 + - --mode=http-connect + - --proxy-strategies=destHost,defaultRoute + - --keepalive-time=30s + - --frontend-keepalive-time=30s + livenessProbe: + httpGet: + path: /healthz + port: 2041 + initialDelaySeconds: 10 + periodSeconds: 10 + volumeMounts: + - name: config-dir + mountPath: /etc/kubernetes/bootstrap-configs + - name: konnectivity-certs + mountPath: /etc/konnectivity + readOnly: true + volumes: + - name: config-dir + hostPath: + path: /etc/kubernetes/bootstrap-configs + type: DirectoryOrCreate + - name: konnectivity-certs + hostPath: + path: /opt/openshift/tls/konnectivity + type: Directory diff --git a/data/data/bootstrap/files/usr/local/bin/bootkube.sh.template b/data/data/bootstrap/files/usr/local/bin/bootkube.sh.template index 6aa5d7e253d..7191d0bc1a1 100755 --- a/data/data/bootstrap/files/usr/local/bin/bootkube.sh.template +++ b/data/data/bootstrap/files/usr/local/bin/bootkube.sh.template @@ -10,6 +10,8 @@ set -euoE pipefail ## -E option will cause functions to inherit trap . 
/usr/local/bin/bootstrap-cluster-gather.sh # shellcheck source=bootstrap-verify-api-server-urls.sh . /usr/local/bin/bootstrap-verify-api-server-urls.sh +# shellcheck source=konnectivity.sh.template +. /usr/local/bin/konnectivity.sh mkdir --parents /etc/kubernetes/{manifests,bootstrap-configs,bootstrap-manifests} @@ -245,6 +247,8 @@ then record_service_stage_success fi +konnectivity_setup + if [ ! -f kube-apiserver-bootstrap.done ] then record_service_stage_start "kube-apiserver-bootstrap" @@ -269,9 +273,12 @@ then --infra-config-file=/assets/manifests/cluster-infrastructure-02-config.yml \ --rendered-manifest-files=/assets/manifests \ --payload-version=$VERSION \ - --operand-kubernetes-version="${KUBERNETES_VERSION}" + --operand-kubernetes-version="${KUBERNETES_VERSION}" \ + --config-override-files=/assets/konnectivity-config-override.yaml cp kube-apiserver-bootstrap/config /etc/kubernetes/bootstrap-configs/kube-apiserver-config.yaml + # Copy egress selector config to bootstrap-configs where KAS can read it + cp /opt/openshift/egress-selector-config.yaml /etc/kubernetes/bootstrap-configs/egress-selector-config.yaml cp kube-apiserver-bootstrap/bootstrap-manifests/* bootstrap-manifests/ cp kube-apiserver-bootstrap/manifests/* manifests/ @@ -566,6 +573,8 @@ then record_service_stage_success fi +konnectivity_manifests + REQUIRED_PODS="openshift-kube-apiserver/kube-apiserver,openshift-kube-scheduler/openshift-kube-scheduler,openshift-kube-controller-manager/kube-controller-manager,openshift-cluster-version/cluster-version-operator" if [ "$BOOTSTRAP_INPLACE" = true ] then @@ -651,6 +660,8 @@ if [ ! 
-f api-int-dns-check.done ]; then fi fi +konnectivity_cleanup + # Workaround for https://github.com/opencontainers/runc/pull/1807 touch /opt/openshift/.bootkube.done echo "bootkube.service complete" diff --git a/data/data/bootstrap/files/usr/local/bin/konnectivity-certs.sh b/data/data/bootstrap/files/usr/local/bin/konnectivity-certs.sh new file mode 100644 index 00000000000..e71a299174e --- /dev/null +++ b/data/data/bootstrap/files/usr/local/bin/konnectivity-certs.sh @@ -0,0 +1,55 @@ +#!/usr/bin/env bash +set -euo pipefail + +# Generate Konnectivity certificates with a self-signed CA (1-day validity). +# These are needed for mTLS between the Konnectivity server and agents +# during the bootstrap phase. +# +# Usage: konnectivity-certs.sh <bootstrap-node-ip> + +BOOTSTRAP_NODE_IP="${1:?Usage: konnectivity-certs.sh <bootstrap-node-ip>}" + +KONNECTIVITY_CERT_DIR=/opt/openshift/tls/konnectivity +mkdir -p "${KONNECTIVITY_CERT_DIR}" + +echo "Generating Konnectivity certificates in ${KONNECTIVITY_CERT_DIR}..." + +# Generate self-signed Konnectivity CA +openssl req -x509 -newkey rsa:2048 -nodes \ -keyout "${KONNECTIVITY_CERT_DIR}/ca.key" \ -out "${KONNECTIVITY_CERT_DIR}/ca.crt" \ -days 1 \ -subj "/CN=konnectivity-signer/O=openshift" + +# Server certificate for agent endpoint (needs bootstrap IP as SAN) +openssl req -new -newkey rsa:2048 -nodes \ -keyout "${KONNECTIVITY_CERT_DIR}/server.key" \ -out "${KONNECTIVITY_CERT_DIR}/server.csr" \ -subj "/CN=konnectivity-server/O=openshift" + +openssl x509 -req -in "${KONNECTIVITY_CERT_DIR}/server.csr" \ -CA "${KONNECTIVITY_CERT_DIR}/ca.crt" \ -CAkey "${KONNECTIVITY_CERT_DIR}/ca.key" \ -CAcreateserial \ -out "${KONNECTIVITY_CERT_DIR}/server.crt" \ -days 1 \ -extfile <(printf "extendedKeyUsage=serverAuth\nsubjectAltName=IP:%s" "${BOOTSTRAP_NODE_IP}") + +# Agent client certificate (shared by all agents) +openssl req -new -newkey rsa:2048 -nodes \ -keyout "${KONNECTIVITY_CERT_DIR}/agent.key" \ -out "${KONNECTIVITY_CERT_DIR}/agent.csr" \ -subj 
"/CN=konnectivity-agent/O=openshift" + +openssl x509 -req -in "${KONNECTIVITY_CERT_DIR}/agent.csr" \ + -CA "${KONNECTIVITY_CERT_DIR}/ca.crt" \ + -CAkey "${KONNECTIVITY_CERT_DIR}/ca.key" \ + -CAcreateserial \ + -out "${KONNECTIVITY_CERT_DIR}/agent.crt" \ + -days 1 \ + -extfile <(printf "extendedKeyUsage=clientAuth") + +# Clean up CSR files +rm -f "${KONNECTIVITY_CERT_DIR}"/*.csr + +echo "Konnectivity certificates generated successfully." diff --git a/data/data/bootstrap/files/usr/local/bin/konnectivity.sh.template b/data/data/bootstrap/files/usr/local/bin/konnectivity.sh.template new file mode 100644 index 00000000000..cf5458d7464 --- /dev/null +++ b/data/data/bootstrap/files/usr/local/bin/konnectivity.sh.template @@ -0,0 +1,71 @@ +#!/usr/bin/env bash +# Konnectivity bootstrap functions. +# Sourced by bootkube.sh — do not execute directly. + +# konnectivity_setup detects the bootstrap node IP, generates certificates, +# and creates the konnectivity server static pod manifest. +konnectivity_setup() { + # Detect bootstrap node IP at runtime using the default route source address. + # Konnectivity agents use this to connect back to the bootstrap server. +{{- if .UseIPv6ForNodeIP }} + BOOTSTRAP_NODE_IP=$(ip -6 -j route get 2001:4860:4860::8888 | jq -r '.[0].prefsrc') +{{- else }} + BOOTSTRAP_NODE_IP=$(ip -j route get 1.1.1.1 | jq -r '.[0].prefsrc') +{{- end }} + echo "Detected bootstrap node IP: ${BOOTSTRAP_NODE_IP}" + + if [ ! -f konnectivity-certs.done ]; then + record_service_stage_start "konnectivity-certs" + /usr/local/bin/konnectivity-certs.sh "${BOOTSTRAP_NODE_IP}" + touch konnectivity-certs.done + record_service_stage_success + fi + + if [ ! -f konnectivity-server-bootstrap.done ]; then + record_service_stage_start "konnectivity-server-bootstrap" + echo "Creating Konnectivity server static pod manifest..." 
+ export KONNECTIVITY_IMAGE=$(image_for apiserver-network-proxy) + envsubst < /opt/openshift/konnectivity-server-pod.yaml > /etc/kubernetes/manifests/konnectivity-server-pod.yaml + touch konnectivity-server-bootstrap.done + record_service_stage_success + fi +} + +# konnectivity_manifests creates the agent namespace, secret, and daemonset +# manifests for cluster deployment. +konnectivity_manifests() { + if [ ! -f konnectivity-agent-manifest.done ]; then + record_service_stage_start "konnectivity-agent-manifest" + echo "Creating Konnectivity agent manifests..." + + KONNECTIVITY_CERT_DIR=/opt/openshift/tls/konnectivity + + cp /opt/openshift/konnectivity-namespace.yaml manifests/konnectivity-namespace.yaml + + export KONNECTIVITY_AGENT_CERT_BASE64=$(base64 -w0 "${KONNECTIVITY_CERT_DIR}/agent.crt") + export KONNECTIVITY_AGENT_KEY_BASE64=$(base64 -w0 "${KONNECTIVITY_CERT_DIR}/agent.key") + export KONNECTIVITY_CA_CERT_BASE64=$(base64 -w0 "${KONNECTIVITY_CERT_DIR}/ca.crt") + envsubst < /opt/openshift/konnectivity-agent-certs-secret.yaml > manifests/konnectivity-agent-certs.yaml + + export BOOTSTRAP_NODE_IP + envsubst < /opt/openshift/konnectivity-agent-daemonset.yaml > manifests/konnectivity-agent-daemonset.yaml + + touch konnectivity-agent-manifest.done + record_service_stage_success + fi +} + +# konnectivity_cleanup removes bootstrap konnectivity resources by deleting +# the namespace (cascading to DaemonSet and Secret) and the server static pod. +konnectivity_cleanup() { + if [ ! -f konnectivity-cleanup.done ]; then + record_service_stage_start "konnectivity-cleanup" + echo "Cleaning up bootstrap konnectivity resources..." 
+ oc delete namespace openshift-bootstrap-konnectivity \ + --kubeconfig=/opt/openshift/auth/kubeconfig \ + --ignore-not-found=true || true + rm -f /etc/kubernetes/manifests/konnectivity-server-pod.yaml + touch konnectivity-cleanup.done + record_service_stage_success + fi +} From de4ebdeee4cbb7f88d441c252665140a8ab13a83 Mon Sep 17 00:00:00 2001 From: Patrick Dillon Date: Mon, 2 Mar 2026 17:00:31 -0500 Subject: [PATCH 2/3] pkg/gather: analyze konnectivity failures Adds error handling to report konnectivity specific failures when running gather bootstrap or analyze. --- pkg/gather/service/analyze.go | 18 ++++++++++-------- 1 file changed, 10 insertions(+), 8 deletions(-) diff --git a/pkg/gather/service/analyze.go b/pkg/gather/service/analyze.go index 680dc5b6247..4a5f85b89cb 100644 --- a/pkg/gather/service/analyze.go +++ b/pkg/gather/service/analyze.go @@ -114,18 +114,20 @@ func checkReleaseImageDownload(a analysis) bool { return false } -// bootstrap-verify-api-servel-urls.sh is currently running as part of the bootkube service. -// And the verification of the API and API-Int URLs are the only stage where a failure is -// currently reported. So, here we are able to conclude that a failure corresponds to a -// failure to resolve either the API URL or API-Int URL or both. If that changes and if -// any other stage in the bootkube service starts reporting a failure, we need to revisit -// this. At that point verification of the URLs could be moved to its own service. func checkBootkubeService(a analysis) bool { if a.successful { return true } - // Note: Even when there is a stage failure, we are not returning false here. That is - // intentional because we donot want to report this as an error in the "analyze" output. 
+ switch a.failingStage { + case "konnectivity-certs": + logrus.Error("The bootstrap machine failed to generate konnectivity certificates") + case "konnectivity-server-bootstrap": + logrus.Error("The bootstrap machine failed to start the konnectivity server") + case "konnectivity-agent-manifest": + logrus.Error("The bootstrap machine failed to create konnectivity agent manifests") + case "konnectivity-cleanup": + logrus.Error("The bootstrap machine failed to clean up konnectivity resources") + } a.logLastError() return true } From 836e8d26938bd394df7791f3db994acc468dd9af Mon Sep 17 00:00:00 2001 From: Patrick Dillon Date: Mon, 2 Mar 2026 17:01:18 -0500 Subject: [PATCH 3/3] Open konnectivity port This updates all platforms to open the konnectivity port. Baremetal and on-prem platforms have user-provisioned networks, so that will need to be handled up front. --- pkg/asset/manifests/aws/cluster.go | 7 +++++++ pkg/asset/manifests/azure/cluster.go | 11 +++++++++++ pkg/asset/manifests/ibmcloud/securitygroups.go | 18 ++++++++++++++++++ pkg/asset/manifests/powervs/securitygroups.go | 17 +++++++++++++++++ .../gcp/clusterapi/firewallrules.go | 6 ++++++ .../openstack/preprovision/securitygroups.go | 2 ++ 6 files changed, 61 insertions(+) diff --git a/pkg/asset/manifests/aws/cluster.go b/pkg/asset/manifests/aws/cluster.go index 6be52b16748..c7b611631bc 100644 --- a/pkg/asset/manifests/aws/cluster.go +++ b/pkg/asset/manifests/aws/cluster.go @@ -141,6 +141,13 @@ func GenerateClusterAssets(ic *installconfig.InstallConfig, clusterID *installco ToPort: 10259, SourceSecurityGroupRoles: []capa.SecurityGroupRole{"controlplane", "node"}, }, + { + Description: "Konnectivity agent traffic from cluster nodes", + Protocol: capa.SecurityGroupProtocolTCP, + FromPort: 8091, + ToPort: 8091, + SourceSecurityGroupRoles: []capa.SecurityGroupRole{"controlplane", "node"}, + }, { Description: BootstrapSSHDescription, Protocol: capa.SecurityGroupProtocolTCP, diff --git 
a/pkg/asset/manifests/azure/cluster.go b/pkg/asset/manifests/azure/cluster.go index fb9a8ab3ffd..9a6a273c2ee 100644 --- a/pkg/asset/manifests/azure/cluster.go +++ b/pkg/asset/manifests/azure/cluster.go @@ -93,6 +93,17 @@ func GenerateClusterAssets(installConfig *installconfig.InstallConfig, clusterID Destination: ptr.To("*"), Action: capz.SecurityRuleActionAllow, }, + { + Name: "konnectivity_in", + Protocol: capz.SecurityGroupProtocolTCP, + Direction: capz.SecurityRuleDirectionInbound, + Priority: 103, + SourcePorts: ptr.To("*"), + DestinationPorts: ptr.To("8091"), + Source: ptr.To(source), + Destination: ptr.To("*"), + Action: capz.SecurityRuleActionAllow, + }, { Name: fmt.Sprintf("%s_ssh_in", clusterID.InfraID), Protocol: capz.SecurityGroupProtocolTCP, diff --git a/pkg/asset/manifests/ibmcloud/securitygroups.go b/pkg/asset/manifests/ibmcloud/securitygroups.go index 24f9a020f2e..abfa9bdb706 100644 --- a/pkg/asset/manifests/ibmcloud/securitygroups.go +++ b/pkg/asset/manifests/ibmcloud/securitygroups.go @@ -421,6 +421,24 @@ func buildControlPlaneSecurityGroup(infraID string) capibmcloud.VPCSecurityGroup }, }, }, + { + // Konnectivity + Action: capibmcloud.VPCSecurityGroupRuleActionAllow, + Direction: capibmcloud.VPCSecurityGroupRuleDirectionInbound, + Source: &capibmcloud.VPCSecurityGroupRulePrototype{ + PortRange: &capibmcloud.VPCSecurityGroupPortRange{ + MaximumPort: 8091, + MinimumPort: 8091, + }, + Protocol: capibmcloud.VPCSecurityGroupRuleProtocolTCP, + Remotes: []capibmcloud.VPCSecurityGroupRuleRemote{ + { + RemoteType: capibmcloud.VPCSecurityGroupRuleRemoteTypeSG, + SecurityGroupName: clusterWideSGNamePtr, + }, + }, + }, + }, }, } } diff --git a/pkg/asset/manifests/powervs/securitygroups.go b/pkg/asset/manifests/powervs/securitygroups.go index bf474d3f28a..69bd0067e11 100644 --- a/pkg/asset/manifests/powervs/securitygroups.go +++ b/pkg/asset/manifests/powervs/securitygroups.go @@ -50,6 +50,23 @@ func buildControlPlaneSecurityGroup(infraID string) 
capibmcloud.VPCSecurityGroup }, }, }, + { + // Konnectivity + Action: capibmcloud.VPCSecurityGroupRuleActionAllow, + Direction: capibmcloud.VPCSecurityGroupRuleDirectionInbound, + Source: &capibmcloud.VPCSecurityGroupRulePrototype{ + PortRange: &capibmcloud.VPCSecurityGroupPortRange{ + MaximumPort: 8091, + MinimumPort: 8091, + }, + Protocol: capibmcloud.VPCSecurityGroupRuleProtocolTCP, + Remotes: []capibmcloud.VPCSecurityGroupRuleRemote{ + { + RemoteType: capibmcloud.VPCSecurityGroupRuleRemoteTypeAny, + }, + }, + }, + }, { Action: capibmcloud.VPCSecurityGroupRuleActionAllow, Direction: capibmcloud.VPCSecurityGroupRuleDirectionInbound, diff --git a/pkg/infrastructure/gcp/clusterapi/firewallrules.go b/pkg/infrastructure/gcp/clusterapi/firewallrules.go index f5b2952ec33..0900f73ed9a 100644 --- a/pkg/infrastructure/gcp/clusterapi/firewallrules.go +++ b/pkg/infrastructure/gcp/clusterapi/firewallrules.go @@ -58,6 +58,12 @@ func getControlPlanePorts() []*compute.FirewallAllowed { "10259", // Kube scheduler }, }, + { + IPProtocol: "tcp", + Ports: []string{ + "8091", // Konnectivity + }, + }, } } diff --git a/pkg/infrastructure/openstack/preprovision/securitygroups.go b/pkg/infrastructure/openstack/preprovision/securitygroups.go index efbf14c0eff..2e7ab0d236f 100644 --- a/pkg/infrastructure/openstack/preprovision/securitygroups.go +++ b/pkg/infrastructure/openstack/preprovision/securitygroups.go @@ -138,6 +138,7 @@ func SecurityGroups(ctx context.Context, installConfig *installconfig.InstallCon serviceIKENat = service{udp, 4500, 4500} serviceInternal = service{tcp | udp, 9000, 9999} serviceKCM = service{tcp, 10257, 10257} + serviceKonnectivity = service{tcp, 8091, 8091} serviceKubeScheduler = service{tcp, 10259, 10259} serviceKubelet = service{tcp, 10250, 10250} serviceMCS = service{tcp, 22623, 22623} @@ -234,6 +235,7 @@ func SecurityGroups(ctx context.Context, installConfig *installconfig.InstallCon addMasterRules(serviceDNS, ipVersion, CIDRs) addMasterRules(serviceETCD, 
ipVersion, CIDRs) addMasterRules(serviceKCM, ipVersion, CIDRs) + addMasterRules(serviceKonnectivity, ipVersion, CIDRs) addMasterRules(serviceKubeScheduler, ipVersion, CIDRs) addMasterRules(serviceMCS, ipVersion, CIDRs) addMasterRules(serviceOVNDB, ipVersion, CIDRs)