Bug 1881534: Use internal load balancer instead of localhost 4.5 #455

Merged 2 commits on Sep 26, 2020
6 changes: 3 additions & 3 deletions bindata/v4.1.0/kube-controller-manager/kubeconfig-cm.yaml
@@ -9,11 +9,11 @@ data:
clusters:
- cluster:
certificate-authority: /etc/kubernetes/static-pod-resources/configmaps/serviceaccount-ca/ca-bundle.crt
server: https://localhost:6443
name: loopback
server: $LB_INT_URL
name: lb-int
contexts:
- context:
cluster: loopback
cluster: lb-int
user: kube-controller-manager
name: kube-controller-manager
current-context: kube-controller-manager
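
For reference, a sketch of how this stanza reads once the operator's manageControllerManagerKubeconfig (added below in targetconfigcontroller.go) substitutes $LB_INT_URL with the cluster's APIServerInternalURL; the api-int URL shown is a hypothetical placeholder, not a value from this PR:

clusters:
- cluster:
    certificate-authority: /etc/kubernetes/static-pod-resources/configmaps/serviceaccount-ca/ca-bundle.crt
    server: https://api-int.example.com:6443   # hypothetical; filled at runtime from infrastructure.Status.APIServerInternalURL
  name: lb-int
contexts:
- context:
    cluster: lb-int
    user: kube-controller-manager
  name: kube-controller-manager
current-context: kube-controller-manager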
2 changes: 1 addition & 1 deletion pkg/operator/starter.go
@@ -79,7 +79,6 @@ func RunOperator(ctx context.Context, cc *controllercmd.ControllerContext) error
[]string{
"v4.1.0/kube-controller-manager/ns.yaml",
"v4.1.0/kube-controller-manager/kubeconfig-cert-syncer.yaml",
"v4.1.0/kube-controller-manager/kubeconfig-cm.yaml",
"v4.1.0/kube-controller-manager/leader-election-rolebinding.yaml",
"v4.1.0/kube-controller-manager/leader-election-cluster-policy-controller-role.yaml",
"v4.1.0/kube-controller-manager/leader-election-cluster-policy-controller-rolebinding.yaml",
@@ -108,6 +107,7 @@ func RunOperator(ctx context.Context, cc *controllercmd.ControllerContext) error
kubeInformersForNamespaces,
operatorClient,
kubeClient,
configInformers.Config().V1().Infrastructures(),
cc.EventRecorder,
)

53 changes: 44 additions & 9 deletions pkg/operator/targetconfigcontroller/targetconfigcontroller.go
@@ -31,9 +31,12 @@ import (
kubecontrolplanev1 "github.com/openshift/api/kubecontrolplane/v1"
openshiftcontrolplanev1 "github.com/openshift/api/openshiftcontrolplane/v1"
operatorv1 "github.com/openshift/api/operator/v1"
configv1informers "github.com/openshift/client-go/config/informers/externalversions/config/v1"
configv1listers "github.com/openshift/client-go/config/listers/config/v1"
"github.com/openshift/cluster-kube-controller-manager-operator/pkg/operator/operatorclient"
"github.com/openshift/cluster-kube-controller-manager-operator/pkg/operator/v411_00_assets"
"github.com/openshift/cluster-kube-controller-manager-operator/pkg/version"

"github.com/openshift/library-go/pkg/crypto"
"github.com/openshift/library-go/pkg/operator/events"
"github.com/openshift/library-go/pkg/operator/resource/resourceapply"
@@ -53,10 +56,11 @@ type TargetConfigController struct {

operatorClient v1helpers.StaticPodOperatorClient

kubeClient kubernetes.Interface
configMapLister corev1listers.ConfigMapLister
secretLister corev1listers.SecretLister
eventRecorder events.Recorder
kubeClient kubernetes.Interface
configMapLister corev1listers.ConfigMapLister
secretLister corev1listers.SecretLister
infrastuctureLister configv1listers.InfrastructureLister
eventRecorder events.Recorder

// queue only ever has one item, but it has nice error handling backoff/retry semantics
queue workqueue.RateLimitingInterface
@@ -68,6 +72,7 @@ func NewTargetConfigController(
kubeInformersForNamespaces v1helpers.KubeInformersForNamespaces,
operatorClient v1helpers.StaticPodOperatorClient,
kubeClient kubernetes.Interface,
infrastuctureInformer configv1informers.InfrastructureInformer,
eventRecorder events.Recorder,
) *TargetConfigController {
c := &TargetConfigController{
@@ -76,18 +81,22 @@
operatorImagePullSpec: operatorImagePullSpec,
clusterPolicyControllerPullSpec: clusterPolicyControllerPullSpec,

configMapLister: kubeInformersForNamespaces.ConfigMapLister(),
secretLister: kubeInformersForNamespaces.SecretLister(),
operatorClient: operatorClient,
kubeClient: kubeClient,
eventRecorder: eventRecorder.WithComponentSuffix("target-config-controller"),
configMapLister: kubeInformersForNamespaces.ConfigMapLister(),
secretLister: kubeInformersForNamespaces.SecretLister(),
infrastuctureLister: infrastuctureInformer.Lister(),
operatorClient: operatorClient,
kubeClient: kubeClient,
eventRecorder: eventRecorder.WithComponentSuffix("target-config-controller"),

queue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "TargetConfigController"),
}

// this is for our general configuration input and our status output in case another actor changes it
operatorClient.Informer().AddEventHandler(c.eventHandler())

// We use infrastuctureInformer for observing load balancer URL
infrastuctureInformer.Informer().AddEventHandler(c.eventHandler())

// these are for watching our outputs in case someone changes them
kubeInformersForNamespaces.InformersFor(operatorclient.TargetNamespace).Core().V1().ConfigMaps().Informer().AddEventHandler(c.eventHandler())
kubeInformersForNamespaces.InformersFor(operatorclient.TargetNamespace).Core().V1().Secrets().Informer().AddEventHandler(c.eventHandler())
@@ -210,6 +219,10 @@ func createTargetConfigController(ctx context.Context, c TargetConfigController,
if err != nil {
errors = append(errors, fmt.Errorf("%q: %v", "serviceaccount/localhost-recovery-client", err))
}
_, _, err = manageControllerManagerKubeconfig(ctx, c.kubeClient.CoreV1(), c.infrastuctureLister, recorder)
if err != nil {
errors = append(errors, fmt.Errorf("%q: %v", "configmap/controller-manager-kubeconfig", err))
}

// Allow the addition of the service ca to token secrets to be enabled by setting an
// UnsupportedConfigOverride field named
@@ -369,6 +382,28 @@ func ensureLocalhostRecoverySAToken(ctx context.Context, client corev1client.Cor
return err
}

func manageControllerManagerKubeconfig(ctx context.Context, client corev1client.CoreV1Interface, infrastructureLister configv1listers.InfrastructureLister, recorder events.Recorder) (*corev1.ConfigMap, bool, error) {
cmString := string(v411_00_assets.MustAsset("v4.1.0/kube-controller-manager/kubeconfig-cm.yaml"))

infrastructure, err := infrastructureLister.Get("cluster")
if err != nil {
return nil, false, err
}
apiServerInternalURL := infrastructure.Status.APIServerInternalURL
if len(apiServerInternalURL) == 0 {
return nil, false, fmt.Errorf("infrastucture/cluster: missing APIServerInternalURL")
}

for pattern, value := range map[string]string{
"$LB_INT_URL": apiServerInternalURL,
} {
cmString = strings.ReplaceAll(cmString, pattern, value)
}

requiredCM := resourceread.ReadConfigMapV1OrDie([]byte(cmString))
return resourceapply.ApplyConfigMap(client, recorder, requiredCM)
}

func managePod(ctx context.Context, configMapsGetter corev1client.ConfigMapsGetter, secretsGetter corev1client.SecretsGetter, recorder events.Recorder, operatorSpec *operatorv1.StaticPodOperatorSpec, imagePullSpec, operatorImagePullSpec, clusterPolicyControllerPullSpec string, addServingServiceCAToTokenSecrets bool) (*corev1.ConfigMap, bool, error) {
required := resourceread.ReadPodV1OrDie(v411_00_assets.MustAsset("v4.1.0/kube-controller-manager/pod.yaml"))
// TODO: If the image pull spec is not specified, the "${IMAGE}" will be used as value and the pod will fail to start.
6 changes: 3 additions & 3 deletions pkg/operator/v411_00_assets/bindata.go

Some generated files are not rendered by default.