separate RootCAConfigMap from BoundServiceAccountTokenVolume #96197

Merged · 1 commit · Nov 5, 2020
2 changes: 1 addition & 1 deletion cmd/kube-controller-manager/app/certificates.go
@@ -193,7 +193,7 @@ func startCSRCleanerController(ctx ControllerContext) (http.Handler, bool, error)
}

func startRootCACertPublisher(ctx ControllerContext) (http.Handler, bool, error) {
-if !utilfeature.DefaultFeatureGate.Enabled(features.BoundServiceAccountTokenVolume) {
+if !utilfeature.DefaultFeatureGate.Enabled(features.RootCAConfigMap) {
return nil, false, nil
}

2 changes: 2 additions & 0 deletions pkg/controller/certificates/rootcacertpublisher/publisher.go
@@ -200,6 +200,8 @@ func (c *Publisher) syncNamespace(ns string) error {
return nil
}

// copy so we don't modify the cache's instance of the configmap
cm = cm.DeepCopy()
cm.Data = data

_, err = c.client.CoreV1().ConfigMaps(ns).Update(context.TODO(), cm, metav1.UpdateOptions{})
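The one-line DeepCopy above is the standard client-go cache-safety rule: a lister returns pointers into the shared informer cache, so a controller must deep-copy before writing any field, or the mutation silently corrupts the cache for every other consumer. A minimal sketch of the whole pattern, assuming a typical ConfigMap lister and clientset (the names updateRootCA, lister, client, ns, and data are illustrative, not from this PR):

	package publisher

	import (
		"context"

		metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
		"k8s.io/client-go/kubernetes"
		listersv1 "k8s.io/client-go/listers/core/v1"
	)

	// updateRootCA reads from the lister, deep-copies, mutates only the copy,
	// and writes the copy back through the clientset.
	func updateRootCA(client kubernetes.Interface, lister listersv1.ConfigMapLister, ns string, data map[string]string) error {
		cm, err := lister.ConfigMaps(ns).Get("kube-root-ca.crt")
		if err != nil {
			return err
		}
		cm = cm.DeepCopy() // never mutate the lister's instance directly
		cm.Data = data
		_, err = client.CoreV1().ConfigMaps(ns).Update(context.TODO(), cm, metav1.UpdateOptions{})
		return err
	}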
9 changes: 9 additions & 0 deletions pkg/features/kube_features.go
@@ -643,6 +643,14 @@ const (
// Add support for the HPA to scale based on metrics from individual containers
// in target pods
HPAContainerMetrics featuregate.Feature = "HPAContainerMetrics"

// owner: @zshihang
// alpha: v1.13
// beta: v1.20
//
// Allows kube-controller-manager to publish kube-root-ca.crt configmap to
// every namespace. This feature is a prerequisite of BoundServiceAccountTokenVolume.
RootCAConfigMap featuregate.Feature = "RootCAConfigMap"
)

func init() {
@@ -740,6 +748,7 @@ var defaultKubernetesFeatureGates = map[featuregate.Feature]featuregate.FeatureSpec{
WinDSR: {Default: false, PreRelease: featuregate.Alpha},
DisableAcceleratorUsageMetrics: {Default: true, PreRelease: featuregate.Beta},
HPAContainerMetrics: {Default: false, PreRelease: featuregate.Alpha},
RootCAConfigMap: {Default: true, PreRelease: featuregate.Beta},

// inherited features from generic apiserver, relisted here to get a conflict if it is changed
// unintentionally on either side:
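The constant added to kube_features.go is a key into the feature-gate registry, and the map entry above supplies its default (on) and maturity (Beta). In case the indirection is unfamiliar, here is a standalone sketch of that registry API from k8s.io/component-base/featuregate — the gate name mirrors this PR, everything else is illustrative:

	package main

	import (
		"fmt"

		"k8s.io/component-base/featuregate"
	)

	const RootCAConfigMap featuregate.Feature = "RootCAConfigMap"

	func main() {
		gate := featuregate.NewFeatureGate()
		if err := gate.Add(map[featuregate.Feature]featuregate.FeatureSpec{
			RootCAConfigMap: {Default: true, PreRelease: featuregate.Beta},
		}); err != nil {
			panic(err)
		}
		// Operators flip gates with --feature-gates=RootCAConfigMap=false;
		// component-base parses that flag value via Set.
		if err := gate.Set("RootCAConfigMap=false"); err != nil {
			panic(err)
		}
		fmt.Println(gate.Enabled(RootCAConfigMap)) // prints: false
	}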
3 changes: 3 additions & 0 deletions pkg/kubeapiserver/options/authentication.go
@@ -199,6 +199,9 @@ func (o *BuiltInAuthenticationOptions) Validate() []error {
}
}
if o.ServiceAccounts != nil && utilfeature.DefaultFeatureGate.Enabled(features.BoundServiceAccountTokenVolume) {
if !utilfeature.DefaultFeatureGate.Enabled(features.RootCAConfigMap) {
allErrors = append(allErrors, errors.New("BoundServiceAccountTokenVolume feature depends on RootCAConfigMap feature, but RootCAConfigMap feature is not enabled"))
}
if len(o.ServiceAccounts.Issuer) == 0 {
allErrors = append(allErrors, errors.New("service-account-issuer is a required flag when BoundServiceAccountTokenVolume is enabled"))
}
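Since the validation above makes BoundServiceAccountTokenVolume require RootCAConfigMap, any test exercising the bound-token path has to enable both gates together. A sketch using the component-base testing helper as it existed in the v1.20 era (it returns a restore func to defer); the test body is a placeholder:

	package auth_test

	import (
		"testing"

		utilfeature "k8s.io/apiserver/pkg/util/feature"
		featuregatetesting "k8s.io/component-base/featuregate/testing"
		"k8s.io/kubernetes/pkg/features"
	)

	func TestBoundTokenVolume(t *testing.T) {
		// Enable the prerequisite gate first, then the dependent one.
		defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.RootCAConfigMap, true)()
		defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.BoundServiceAccountTokenVolume, true)()
		// ... exercise bound service account token volume behavior here ...
	}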
@@ -402,7 +402,7 @@ func buildControllerRoles() ([]rbacv1.ClusterRole, []rbacv1.ClusterRoleBinding)
})
}

-if utilfeature.DefaultFeatureGate.Enabled(features.BoundServiceAccountTokenVolume) {
+if utilfeature.DefaultFeatureGate.Enabled(features.RootCAConfigMap) {
addControllerRole(&controllerRoles, &controllerRoleBindings, rbacv1.ClusterRole{
ObjectMeta: metav1.ObjectMeta{Name: saRolePrefix + "root-ca-cert-publisher"},
Rules: []rbacv1.PolicyRule{
@@ -391,6 +391,23 @@ items:
  - kind: ServiceAccount
    name: resourcequota-controller
    namespace: kube-system
- apiVersion: rbac.authorization.k8s.io/v1
  kind: ClusterRoleBinding
  metadata:
    annotations:
      rbac.authorization.kubernetes.io/autoupdate: "true"
    creationTimestamp: null
    labels:
      kubernetes.io/bootstrapping: rbac-defaults
    name: system:controller:root-ca-cert-publisher
  roleRef:
    apiGroup: rbac.authorization.k8s.io
    kind: ClusterRole
    name: system:controller:root-ca-cert-publisher
  subjects:
  - kind: ServiceAccount
    name: root-ca-cert-publisher
    namespace: kube-system
- apiVersion: rbac.authorization.k8s.io/v1
  kind: ClusterRoleBinding
  metadata:
@@ -1195,6 +1195,32 @@ items:
    - create
    - patch
    - update
- apiVersion: rbac.authorization.k8s.io/v1
  kind: ClusterRole
  metadata:
    annotations:
      rbac.authorization.kubernetes.io/autoupdate: "true"
    creationTimestamp: null
    labels:
      kubernetes.io/bootstrapping: rbac-defaults
    name: system:controller:root-ca-cert-publisher
  rules:
  - apiGroups:
    - ""
    resources:
    - configmaps
    verbs:
    - create
    - update
  - apiGroups:
    - ""
    - events.k8s.io
    resources:
    - events
    verbs:
    - create
    - patch
Review comment (Member): does rootcacertpublisher need "patch" for events?

Reply (Member): yes, that's part of the standard event permissions used by the event recorder:

	func eventsRule() rbacv1.PolicyRule {
		return rbacv1helpers.NewRule("create", "update", "patch").Groups(legacyGroup, eventsGroup).Resources("events").RuleOrDie()
	}
    - update
- apiVersion: rbac.authorization.k8s.io/v1
  kind: ClusterRole
  metadata:
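For context on the exchange above: eventsRule is the shared bootstrap-policy rule granted to controllers that record events, and its create/patch/update verbs line up with what the client-go event recorder's sink actually issues — patch is used when the recorder aggregates a repeated event by bumping its count rather than creating a new object. A sketch of the standard recorder wiring (names illustrative):

	package main

	import (
		v1 "k8s.io/api/core/v1"
		"k8s.io/client-go/kubernetes"
		"k8s.io/client-go/kubernetes/scheme"
		typedcorev1 "k8s.io/client-go/kubernetes/typed/core/v1"
		"k8s.io/client-go/tools/record"
	)

	// newRecorder wires an event recorder to a sink backed by the events API;
	// the sink is what performs the create/patch/update requests that the
	// RBAC rule above must permit.
	func newRecorder(client kubernetes.Interface) record.EventRecorder {
		broadcaster := record.NewBroadcaster()
		broadcaster.StartRecordingToSink(&typedcorev1.EventSinkImpl{
			Interface: client.CoreV1().Events(""),
		})
		return broadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: "root-ca-cert-publisher"})
	}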
4 changes: 2 additions & 2 deletions test/cmd/apply.sh
@@ -310,8 +310,8 @@ __EOF__
kubectl delete -f hack/testdata/multi-resource-1.yaml "${kube_flags[@]:?}"

## kubectl apply multiple resources with one failure during builder phase.
-# Pre-Condition: No configmaps
-kube::test::get_object_assert configmaps "{{range.items}}{{${id_field:?}}}:{{end}}" ''
+# Pre-Condition: No configmaps with name=foo
+kube::test::get_object_assert 'configmaps --field-selector=metadata.name=foo' "{{range.items}}{{${id_field:?}}}:{{end}}" ''
# Apply a configmap and a bogus custom resource.
output_message=$(! kubectl apply -f hack/testdata/multi-resource-2.yaml 2>&1 "${kube_flags[@]:?}")
# Should be error message from bogus custom resource.
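These test updates are knock-on effects of the publisher: once kube-root-ca.crt is auto-created in every namespace, a bare "no configmaps exist" precondition can never hold, so the assertions are scoped to the names the tests actually create — the same fix recurs in create.sh, delete.sh (with a label selector), and get.sh below. The client-go equivalent of such a scoped assertion, as a sketch with illustrative names:

	package main

	import (
		"context"
		"fmt"

		metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
		"k8s.io/client-go/kubernetes"
	)

	// assertNoConfigMapNamed lists only configmaps matching one name instead
	// of asserting the namespace is empty, which kube-root-ca.crt now prevents.
	func assertNoConfigMapNamed(client kubernetes.Interface, ns, name string) error {
		list, err := client.CoreV1().ConfigMaps(ns).List(context.TODO(), metav1.ListOptions{
			FieldSelector: "metadata.name=" + name,
		})
		if err != nil {
			return err
		}
		if len(list.Items) != 0 {
			return fmt.Errorf("expected no configmap %q in %q, found %d", name, ns, len(list.Items))
		}
		return nil
	}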
4 changes: 2 additions & 2 deletions test/cmd/create.sh
@@ -134,8 +134,8 @@ run_kubectl_create_kustomization_directory_tests() {
set -o errexit

## kubectl create -k <dir> for kustomization directory
-# Pre-condition: no ConfigMap, Deployment, Service exist
-kube::test::get_object_assert configmaps "{{range.items}}{{$id_field}}:{{end}}" ''
+# Pre-Condition: No configmaps with name=test-the-map, no Deployment, Service exist
+kube::test::get_object_assert 'configmaps --field-selector=metadata.name=test-the-map' "{{range.items}}{{${id_field:?}}}:{{end}}" ''
kube::test::get_object_assert deployment "{{range.items}}{{$id_field}}:{{end}}" ''
kube::test::get_object_assert services "{{range.items}}{{$id_field}}:{{end}}" ''
# Command
10 changes: 5 additions & 5 deletions test/cmd/delete.sh
@@ -37,17 +37,17 @@ run_kubectl_delete_allnamespaces_tests() {
kubectl delete configmap --dry-run=client -l deletetest=true --all-namespaces
kubectl delete configmap --dry-run=server -l deletetest=true --all-namespaces
kubectl config set-context "${CONTEXT}" --namespace="${ns_one}"
-kube::test::get_object_assert configmap "{{range.items}}{{${id_field:?}}}:{{end}}" 'one:'
+kube::test::get_object_assert 'configmap -l deletetest' "{{range.items}}{{${id_field:?}}}:{{end}}" 'one:'
kubectl config set-context "${CONTEXT}" --namespace="${ns_two}"
-kube::test::get_object_assert configmap "{{range.items}}{{${id_field:?}}}:{{end}}" 'two:'
+kube::test::get_object_assert 'configmap -l deletetest' "{{range.items}}{{${id_field:?}}}:{{end}}" 'two:'

kubectl delete configmap -l deletetest=true --all-namespaces

-# no configmaps should be in either of those namespaces
+# no configmaps should be in either of those namespaces with label deletetest
kubectl config set-context "${CONTEXT}" --namespace="${ns_one}"
-kube::test::get_object_assert configmap "{{range.items}}{{${id_field:?}}}:{{end}}" ''
+kube::test::get_object_assert 'configmap -l deletetest' "{{range.items}}{{${id_field:?}}}:{{end}}" ''
kubectl config set-context "${CONTEXT}" --namespace="${ns_two}"
-kube::test::get_object_assert configmap "{{range.items}}{{${id_field:?}}}:{{end}}" ''
+kube::test::get_object_assert 'configmap -l deletetest' "{{range.items}}{{${id_field:?}}}:{{end}}" ''

set +o nounset
set +o errexit
6 changes: 3 additions & 3 deletions test/cmd/get.sh
@@ -206,8 +206,8 @@ run_kubectl_get_tests() {
kubectl delete pods redis-master valid-pod "${kube_flags[@]}"

### Test 'kubectl get -k <dir>' prints all the items built from a kustomization directory
-# Pre-condition: no ConfigMap, Deployment, Service exist
-kube::test::get_object_assert configmaps "{{range.items}}{{$id_field}}:{{end}}" ''
+# Pre-Condition: No configmaps with name=test-the-map, no Deployment, Service exist
+kube::test::get_object_assert 'configmaps --field-selector=metadata.name=test-the-map' "{{range.items}}{{${id_field:?}}}:{{end}}" ''
kube::test::get_object_assert deployment "{{range.items}}{{$id_field}}:{{end}}" ''
kube::test::get_object_assert services "{{range.items}}{{$id_field}}:{{end}}" ''
# Command
@@ -224,7 +224,7 @@ run_kubectl_get_tests() {
kubectl delete -k hack/testdata/kustomize

# Check that all items in the list are deleted
-kube::test::get_object_assert configmaps "{{range.items}}{{$id_field}}:{{end}}" ''
+kube::test::get_object_assert 'configmaps --field-selector=metadata.name=test-the-map' "{{range.items}}{{${id_field:?}}}:{{end}}" ''
kube::test::get_object_assert deployment "{{range.items}}{{$id_field}}:{{end}}" ''
kube::test::get_object_assert services "{{range.items}}{{$id_field}}:{{end}}" ''

79 changes: 64 additions & 15 deletions test/e2e/auth/service_accounts.go
@@ -582,20 +582,6 @@ var _ = SIGDescribe("ServiceAccounts", func() {
})

ginkgo.It("should support InClusterConfig with token rotation [Slow]", func() {
-cfg, err := framework.LoadConfig()
-framework.ExpectNoError(err)
-
-if _, err := f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(context.TODO(), &v1.ConfigMap{
-ObjectMeta: metav1.ObjectMeta{
-Name: "kube-root-ca.crt",
-},
-Data: map[string]string{
-"ca.crt": string(cfg.TLSClientConfig.CAData),
-},
-}, metav1.CreateOptions{}); err != nil && !apierrors.IsAlreadyExists(err) {
-framework.Failf("Unexpected err creating kube-ca-crt: %v", err)
-}
-
tenMin := int64(10 * 60)
pod := &v1.Pod{
ObjectMeta: metav1.ObjectMeta{Name: "inclusterclient"},
@@ -655,7 +641,7 @@ var _ = SIGDescribe("ServiceAccounts", func() {
}},
},
}
-pod, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(context.TODO(), pod, metav1.CreateOptions{})
+pod, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(context.TODO(), pod, metav1.CreateOptions{})
framework.ExpectNoError(err)

framework.Logf("created pod")
@@ -870,6 +856,69 @@ var _ = SIGDescribe("ServiceAccounts", func() {
}
framework.ExpectEqual(eventFound, true, "failed to find %v event", watch.Deleted)
})

ginkgo.It("should guarantee kube-root-ca.crt exist in any namespace", func() {
const rootCAConfigMapName = "kube-root-ca.crt"

framework.ExpectNoError(wait.PollImmediate(500*time.Millisecond, wait.ForeverTestTimeout, func() (bool, error) {
_, err := f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Get(context.TODO(), rootCAConfigMapName, metav1.GetOptions{})
if err == nil {
return true, nil
}
if apierrors.IsNotFound(err) {
ginkgo.By("root ca configmap not found, retrying")
return false, nil
}
return false, err
}))
framework.Logf("Got root ca configmap in namespace %q", f.Namespace.Name)

framework.ExpectNoError(f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Delete(context.TODO(), rootCAConfigMapName, metav1.DeleteOptions{GracePeriodSeconds: utilptr.Int64Ptr(0)}))
framework.Logf("Deleted root ca configmap in namespace %q", f.Namespace.Name)

framework.ExpectNoError(wait.Poll(500*time.Millisecond, wait.ForeverTestTimeout, func() (bool, error) {
ginkgo.By("waiting for a new root ca configmap created")
_, err := f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Get(context.TODO(), rootCAConfigMapName, metav1.GetOptions{})
if err == nil {
return true, nil
}
if apierrors.IsNotFound(err) {
ginkgo.By("root ca configmap not found, retrying")
return false, nil
}
return false, err
}))
framework.Logf("Recreated root ca configmap in namespace %q", f.Namespace.Name)

_, err := f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Update(context.TODO(), &v1.ConfigMap{
ObjectMeta: metav1.ObjectMeta{
Name: rootCAConfigMapName,
},
Data: map[string]string{
"ca.crt": "",
},
}, metav1.UpdateOptions{})
framework.ExpectNoError(err)
framework.Logf("Updated root ca configmap in namespace %q", f.Namespace.Name)

framework.ExpectNoError(wait.Poll(500*time.Millisecond, wait.ForeverTestTimeout, func() (bool, error) {
ginkgo.By("waiting for the root ca configmap reconciled")
cm, err := f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Get(context.TODO(), rootCAConfigMapName, metav1.GetOptions{})
if err != nil {
if apierrors.IsNotFound(err) {
ginkgo.By("root ca configmap not found, retrying")
return false, nil
}
return false, err
}
if value, ok := cm.Data["ca.crt"]; !ok || value == "" {
ginkgo.By("root ca configmap is not reconciled yet, retrying")
return false, nil
}
return true, nil
}))
framework.Logf("Reconciled root ca configmap in namespace %q", f.Namespace.Name)
})
})

var reportLogsParser = regexp.MustCompile("([a-zA-Z0-9-_]*)=([a-zA-Z0-9-_]*)$")
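For context on why the publisher must guarantee kube-root-ca.crt in every namespace: BoundServiceAccountTokenVolume replaces secret-based token mounts with a projected volume that takes the API server CA from that configmap instead of from a token secret. A sketch of such a projected volume built from the core/v1 types — an approximation of what the feature injects, with illustrative values, not code from this PR:

	package main

	import (
		v1 "k8s.io/api/core/v1"
	)

	// kubeAPIAccessVolume combines a bound (expiring) service account token
	// with the cluster CA published as kube-root-ca.crt.
	func kubeAPIAccessVolume() v1.Volume {
		expiry := int64(3607)
		return v1.Volume{
			Name: "kube-api-access",
			VolumeSource: v1.VolumeSource{
				Projected: &v1.ProjectedVolumeSource{
					Sources: []v1.VolumeProjection{
						{ServiceAccountToken: &v1.ServiceAccountTokenProjection{
							Path:              "token",
							ExpirationSeconds: &expiry,
						}},
						{ConfigMap: &v1.ConfigMapProjection{
							LocalObjectReference: v1.LocalObjectReference{Name: "kube-root-ca.crt"},
							Items:                []v1.KeyToPath{{Key: "ca.crt", Path: "ca.crt"}},
						}},
					},
				},
			},
		}
	}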