Migrate cluster service account tokens to secrets
cmurphy authored and jakefhyde committed Jun 22, 2022
1 parent a8ae635 commit 05fab40
Showing 9 changed files with 151 additions and 83 deletions.
5 changes: 4 additions & 1 deletion pkg/api/norman/customization/project/project_store.go
@@ -14,6 +14,7 @@ import (
v32 "github.com/rancher/rancher/pkg/apis/management.cattle.io/v3"
mgmtclient "github.com/rancher/rancher/pkg/client/generated/management/v3"
"github.com/rancher/rancher/pkg/clustermanager"
v1 "github.com/rancher/rancher/pkg/generated/norman/core/v1"
v3 "github.com/rancher/rancher/pkg/generated/norman/management.cattle.io/v3"
"github.com/rancher/rancher/pkg/resourcequota"
mgmtschema "github.com/rancher/rancher/pkg/schemas/management.cattle.io/v3"
@@ -33,6 +34,7 @@ type projectStore struct {
roleTemplateLister v3.RoleTemplateLister
scaledContext *config.ScaledContext
clusterLister v3.ClusterLister
secretLister v1.SecretLister
}

func SetProjectStore(schema *types.Schema, mgmt *config.ScaledContext) {
@@ -42,6 +44,7 @@ func SetProjectStore(schema *types.Schema, mgmt *config.ScaledContext) {
roleTemplateLister: mgmt.Management.RoleTemplates("").Controller().Lister(),
scaledContext: mgmt,
clusterLister: mgmt.Management.Clusters("").Controller().Lister(),
secretLister: mgmt.Core.Secrets("").Controller().Lister(),
}
schema.Store = store
}
@@ -281,7 +284,7 @@ func (s *projectStore) getNamespacesCount(apiContext *types.APIContext, project
return 0, err
}

kubeConfig, err := clustermanager.ToRESTConfig(cluster, s.scaledContext)
kubeConfig, err := clustermanager.ToRESTConfig(cluster, s.scaledContext, s.secretLister)
if kubeConfig == nil || err != nil {
return 0, err
}
24 changes: 13 additions & 11 deletions pkg/apis/management.cattle.io/v3/cluster_types.go
@@ -61,17 +61,18 @@ const (
ClusterConditionDefaultNamespaceAssigned condition.Cond = "DefaultNamespaceAssigned"
// Deprecated: ClusterConditionSystemNamespacesAssigned true when cluster's system namespaces has been initially assigned to
// a system project
ClusterConditionSystemNamespacesAssigned condition.Cond = "SystemNamespacesAssigned"
ClusterConditionAddonDeploy condition.Cond = "AddonDeploy"
ClusterConditionSystemAccountCreated condition.Cond = "SystemAccountCreated"
ClusterConditionAgentDeployed condition.Cond = "AgentDeployed"
ClusterConditionGlobalAdminsSynced condition.Cond = "GlobalAdminsSynced"
ClusterConditionInitialRolesPopulated condition.Cond = "InitialRolesPopulated"
ClusterConditionServiceAccountMigrated condition.Cond = "ServiceAccountMigrated"
ClusterConditionPrometheusOperatorDeployed condition.Cond = "PrometheusOperatorDeployed"
ClusterConditionMonitoringEnabled condition.Cond = "MonitoringEnabled"
ClusterConditionAlertingEnabled condition.Cond = "AlertingEnabled"
ClusterConditionSecretsMigrated condition.Cond = "SecretsMigrated"
ClusterConditionSystemNamespacesAssigned condition.Cond = "SystemNamespacesAssigned"
ClusterConditionAddonDeploy condition.Cond = "AddonDeploy"
ClusterConditionSystemAccountCreated condition.Cond = "SystemAccountCreated"
ClusterConditionAgentDeployed condition.Cond = "AgentDeployed"
ClusterConditionGlobalAdminsSynced condition.Cond = "GlobalAdminsSynced"
ClusterConditionInitialRolesPopulated condition.Cond = "InitialRolesPopulated"
ClusterConditionServiceAccountMigrated condition.Cond = "ServiceAccountMigrated"
ClusterConditionPrometheusOperatorDeployed condition.Cond = "PrometheusOperatorDeployed"
ClusterConditionMonitoringEnabled condition.Cond = "MonitoringEnabled"
ClusterConditionAlertingEnabled condition.Cond = "AlertingEnabled"
ClusterConditionSecretsMigrated condition.Cond = "SecretsMigrated"
ClusterConditionServiceAccountSecretsMigrated condition.Cond = "ServiceAccountSecretsMigrated"

ClusterDriverImported = "imported"
ClusterDriverLocal = "local"
@@ -160,6 +161,7 @@ type ClusterStatus struct {
ComponentStatuses []ClusterComponentStatus `json:"componentStatuses,omitempty"`
APIEndpoint string `json:"apiEndpoint,omitempty"`
ServiceAccountToken string `json:"serviceAccountToken,omitempty"`
ServiceAccountTokenSecret string `json:"serviceAccountTokenSecret,omitempty"`
CACert string `json:"caCert,omitempty"`
Capacity v1.ResourceList `json:"capacity,omitempty"`
Allocatable v1.ResourceList `json:"allocatable,omitempty"`
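
This file only declares the new ServiceAccountTokenSecret status field and the ServiceAccountSecretsMigrated condition; the controller that performs the migration for existing clusters is among the collapsed files below. A hypothetical sketch of such a handler, using only names that appear in this commit (the handler itself and its wiring are assumptions, not part of the shown hunks):

package example

import (
	apimgmtv3 "github.com/rancher/rancher/pkg/apis/management.cattle.io/v3"
	"github.com/rancher/rancher/pkg/controllers/management/secretmigrator"
	corev1 "github.com/rancher/rancher/pkg/generated/norman/core/v1"
	v3 "github.com/rancher/rancher/pkg/generated/norman/management.cattle.io/v3"
)

// migrateServiceAccountToken is a hypothetical handler showing how the new
// condition and status field could gate a one-time move of the plaintext
// token into a secret; the real handler lives in the collapsed files.
func migrateServiceAccountToken(secretLister corev1.SecretLister, secrets corev1.SecretInterface, clusters v3.ClusterInterface, cluster *v3.Cluster) (*v3.Cluster, error) {
	if apimgmtv3.ClusterConditionServiceAccountSecretsMigrated.IsTrue(cluster) {
		return cluster, nil // already migrated, nothing to do
	}
	cluster = cluster.DeepCopy()
	secret, err := secretmigrator.NewMigrator(secretLister, secrets).CreateOrUpdateServiceAccountTokenSecret(
		cluster.Status.ServiceAccountTokenSecret, // reuse an existing secret if one was already created
		cluster.Status.ServiceAccountToken,       // plaintext token currently stored on the status
		cluster,
	)
	if err != nil {
		return cluster, err
	}
	cluster.Status.ServiceAccountTokenSecret = secret.Name
	cluster.Status.ServiceAccountToken = ""
	apimgmtv3.ClusterConditionServiceAccountSecretsMigrated.True(cluster)
	return clusters.Update(cluster)
}
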
5 changes: 4 additions & 1 deletion pkg/auth/providers/publicapi/login.go
@@ -29,6 +29,7 @@ import (
client "github.com/rancher/rancher/pkg/client/generated/management/v3public"
"github.com/rancher/rancher/pkg/clustermanager"
"github.com/rancher/rancher/pkg/controllers/managementuser/clusterauthtoken/common"
v1 "github.com/rancher/rancher/pkg/generated/norman/core/v1"
v3 "github.com/rancher/rancher/pkg/generated/norman/management.cattle.io/v3"
schema "github.com/rancher/rancher/pkg/schemas/management.cattle.io/v3public"
"github.com/rancher/rancher/pkg/types/config"
@@ -49,6 +50,7 @@ func newLoginHandler(ctx context.Context, mgmt *config.ScaledContext) *loginHand
userMGR: mgmt.UserManager,
tokenMGR: tokens.NewManager(ctx, mgmt),
clusterLister: mgmt.Management.Clusters("").Controller().Lister(),
secretLister: mgmt.Core.Secrets("").Controller().Lister(),
}
}

@@ -57,6 +59,7 @@ type loginHandler struct {
userMGR user.Manager
tokenMGR *tokens.Manager
clusterLister v3.ClusterLister
secretLister v1.SecretLister
}

func (h *loginHandler) login(actionName string, action *types.Action, request *types.APIContext) error {
@@ -247,7 +250,7 @@ func (h *loginHandler) createClusterAuthTokenIfNeeded(token *v3.Token, tokenValu
if !cluster.Spec.LocalClusterAuthEndpoint.Enabled {
return nil
}
clusterConfig, err := clustermanager.ToRESTConfig(cluster, h.scaledContext)
clusterConfig, err := clustermanager.ToRESTConfig(cluster, h.scaledContext, h.secretLister)
if err != nil {
return err
}
26 changes: 17 additions & 9 deletions pkg/clustermanager/manager.go
@@ -18,7 +18,9 @@ import (
"github.com/rancher/norman/types"
v32 "github.com/rancher/rancher/pkg/apis/management.cattle.io/v3"
"github.com/rancher/rancher/pkg/clusterrouter"
"github.com/rancher/rancher/pkg/controllers/management/secretmigrator"
clusterController "github.com/rancher/rancher/pkg/controllers/managementuser"
v1 "github.com/rancher/rancher/pkg/generated/norman/core/v1"
v3 "github.com/rancher/rancher/pkg/generated/norman/management.cattle.io/v3"
"github.com/rancher/rancher/pkg/kontainer-engine/drivers/gke"
"github.com/rancher/rancher/pkg/rbac"
@@ -33,7 +35,7 @@ import (
"golang.org/x/sync/semaphore"
"k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset"
apierrors "k8s.io/apimachinery/pkg/api/errors"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
authv1 "k8s.io/client-go/kubernetes/typed/authorization/v1"
"k8s.io/client-go/rest"
clientcmdapi "k8s.io/client-go/tools/clientcmd/api"
@@ -44,6 +46,7 @@ type Manager struct {
ScaledContext *config.ScaledContext
clusterLister v3.ClusterLister
clusters v3.ClusterInterface
secretLister v1.SecretLister
controllers sync.Map
accessControl types.AccessControl
rbac rbacv1.Interface
@@ -69,6 +72,7 @@ func NewManager(httpsPort int, context *config.ScaledContext, asl accesscontrol.
accessControl: rbac.NewAccessControlWithASL("", asl),
clusterLister: context.Management.Clusters("").Controller().Lister(),
clusters: context.Management.Clusters(""),
secretLister: context.Core.Secrets("").Controller().Lister(),
startSem: semaphore.NewWeighted(int64(settings.ClusterControllerStartCount.GetInt())),
}
}
@@ -107,7 +111,7 @@ func (m *Manager) RESTConfig(cluster *v3.Cluster) (rest.Config, error) {
}

func (m *Manager) markUnavailable(clusterName string) {
if cluster, err := m.clusters.Get(clusterName, v1.GetOptions{}); err == nil {
if cluster, err := m.clusters.Get(clusterName, metav1.GetOptions{}); err == nil {
if !v32.ClusterConditionReady.IsFalse(cluster) {
v32.ClusterConditionReady.False(cluster)
m.clusters.Update(cluster)
@@ -170,7 +174,7 @@ func (m *Manager) startController(r *record, controllers, clusterOwner bool) err
func (m *Manager) changed(r *record, cluster *v3.Cluster, controllers, clusterOwner bool) bool {
existing := r.clusterRec
if existing.Status.APIEndpoint != cluster.Status.APIEndpoint ||
existing.Status.ServiceAccountToken != cluster.Status.ServiceAccountToken ||
existing.Status.ServiceAccountTokenSecret != cluster.Status.ServiceAccountTokenSecret ||
existing.Status.CACert != cluster.Status.CACert ||
existing.Status.AppliedSpec.LocalClusterAuthEndpoint.Enabled != cluster.Status.AppliedSpec.LocalClusterAuthEndpoint.Enabled {
return true
@@ -194,7 +198,7 @@ func (m *Manager) doStart(rec *record, clusterOwner bool) (exit error) {
// Prior to k8s v1.14, we simply did a DiscoveryClient.Version() check to see if the user cluster is alive
// As of k8s v1.14, kubeapi returns a successful version response even if etcd is not available.
// To work around this, now we try to get a namespace from the API, even if not found, it means the API is up.
if _, err := rec.cluster.K8sClient.CoreV1().Namespaces().Get(rec.ctx, "kube-system", v1.GetOptions{}); err != nil && !apierrors.IsNotFound(err) {
if _, err := rec.cluster.K8sClient.CoreV1().Namespaces().Get(rec.ctx, "kube-system", metav1.GetOptions{}); err != nil && !apierrors.IsNotFound(err) {
if i == 2 {
m.markUnavailable(rec.cluster.ClusterName)
}
@@ -252,7 +256,7 @@ func (m *Manager) doStart(rec *record, clusterOwner bool) (exit error) {
}
}

func ToRESTConfig(cluster *v3.Cluster, context *config.ScaledContext) (*rest.Config, error) {
func ToRESTConfig(cluster *v3.Cluster, context *config.ScaledContext, secretLister v1.SecretLister) (*rest.Config, error) {
if cluster == nil {
return nil, nil
}
@@ -261,7 +265,7 @@ func ToRESTConfig(cluster *v3.Cluster, context *config.ScaledContext) (*rest.Con
return &context.RESTConfig, nil
}

if cluster.Status.APIEndpoint == "" || cluster.Status.CACert == "" || cluster.Status.ServiceAccountToken == "" {
if cluster.Status.APIEndpoint == "" || cluster.Status.CACert == "" || cluster.Status.ServiceAccountTokenSecret == "" {
return nil, nil
}

@@ -292,11 +296,15 @@ func ToRESTConfig(cluster *v3.Cluster, context *config.ScaledContext) (*rest.Con
}
}

secret, err := secretLister.Get(secretmigrator.SecretNamespace, cluster.Status.ServiceAccountTokenSecret)
if err != nil {
return nil, err
}
// adding suffix to make tlsConfig hashkey unique
suffix := []byte("\n" + cluster.Name)
rc := &rest.Config{
Host: u.String(),
BearerToken: cluster.Status.ServiceAccountToken,
BearerToken: string(secret.Data[secretmigrator.SecretKey]),
TLSClientConfig: rest.TLSClientConfig{
CAData: append(caBytes, suffix...),
NextProtos: []string{"http/1.1"},
@@ -397,7 +405,7 @@ func VerifyIgnoreDNSName(caCertsPEM []byte) (func(rawCerts [][]byte, verifiedCha
}

func (m *Manager) toRecord(ctx context.Context, cluster *v3.Cluster) (*record, error) {
kubeConfig, err := ToRESTConfig(cluster, m.ScaledContext)
kubeConfig, err := ToRESTConfig(cluster, m.ScaledContext, m.secretLister)
if kubeConfig == nil || err != nil {
return nil, err
}
@@ -484,7 +492,7 @@ func (m *Manager) UserContext(clusterName string) (*config.UserContext, error) {
// UserContextFromCluster accepts a pointer to a Cluster and returns a client
// for that cluster. It does not start any controllers.
func (m *Manager) UserContextFromCluster(cluster *v3.Cluster) (*config.UserContext, error) {
kubeConfig, err := ToRESTConfig(cluster, m.ScaledContext)
kubeConfig, err := ToRESTConfig(cluster, m.ScaledContext, m.secretLister)
if err != nil {
return nil, err
}
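
With this change, ToRESTConfig resolves the bearer token from the secret named in Status.ServiceAccountTokenSecret rather than reading Status.ServiceAccountToken directly, which is why every caller now passes a secret lister. A minimal sketch of the new calling convention, assuming a ScaledContext is already available (buildClusterClient is illustrative, not part of this commit):

package example

import (
	"github.com/rancher/rancher/pkg/clustermanager"
	v3 "github.com/rancher/rancher/pkg/generated/norman/management.cattle.io/v3"
	"github.com/rancher/rancher/pkg/types/config"
	"k8s.io/client-go/kubernetes"
)

// buildClusterClient shows the post-change calling convention: the secret
// lister lets ToRESTConfig look up the bearer token from the service account
// token secret instead of Status.ServiceAccountToken.
func buildClusterClient(cluster *v3.Cluster, scaledContext *config.ScaledContext) (kubernetes.Interface, error) {
	secretLister := scaledContext.Core.Secrets("").Controller().Lister()
	restCfg, err := clustermanager.ToRESTConfig(cluster, scaledContext, secretLister)
	if restCfg == nil || err != nil {
		// A nil config means the cluster is not ready yet (missing API endpoint,
		// CA cert, or service account token secret); callers treat it as a no-op.
		return nil, err
	}
	return kubernetes.NewForConfig(restCfg)
}
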
16 changes: 14 additions & 2 deletions pkg/controllers/management/clusterprovisioner/provisioner.go
@@ -20,6 +20,7 @@ import (
util "github.com/rancher/rancher/pkg/cluster"
"github.com/rancher/rancher/pkg/controllers/management/imported"
kd "github.com/rancher/rancher/pkg/controllers/management/kontainerdrivermetadata"
"github.com/rancher/rancher/pkg/controllers/management/secretmigrator"
v1 "github.com/rancher/rancher/pkg/generated/norman/apps/v1"
corev1 "github.com/rancher/rancher/pkg/generated/norman/core/v1"
v3 "github.com/rancher/rancher/pkg/generated/norman/management.cattle.io/v3"
@@ -62,6 +63,7 @@ type Provisioner struct {
RKESystemImages v3.RkeK8sSystemImageInterface
RKESystemImagesLister v3.RkeK8sSystemImageLister
SecretLister corev1.SecretLister
Secrets corev1.SecretInterface
}

func Register(ctx context.Context, management *config.ManagementContext) {
@@ -80,6 +82,7 @@ func Register(ctx context.Context, management *config.ManagementContext) {
RKESystemImages: management.Management.RkeK8sSystemImages(""),
DaemonsetLister: management.Apps.DaemonSets("").Controller().Lister(),
SecretLister: management.Core.Secrets("").Controller().Lister(),
Secrets: management.Core.Secrets(""),
}
// Add handlers
p.Clusters.AddLifecycle(ctx, "cluster-provisioner-controller", p)
@@ -494,7 +497,11 @@ func (p *Provisioner) reconcileCluster(cluster *v3.Cluster, create bool) (*v3.Cl
return nil, err
}

cluster.Status.ServiceAccountToken = serviceAccountToken
secret, err := secretmigrator.NewMigrator(p.SecretLister, p.Secrets).CreateOrUpdateServiceAccountTokenSecret(cluster.Status.ServiceAccountTokenSecret, serviceAccountToken, cluster)
if err != nil {
return nil, err
}
cluster.Status.ServiceAccountTokenSecret = secret.Name
apimgmtv3.ClusterConditionServiceAccountMigrated.True(cluster)

// Update the cluster in k8s
@@ -585,6 +592,11 @@ func (p *Provisioner) reconcileCluster(cluster *v3.Cluster, create bool) (*v3.Cl

apimgmtv3.ClusterConditionServiceAccountMigrated.True(cluster)

secret, err := secretmigrator.NewMigrator(p.SecretLister, p.Secrets).CreateOrUpdateServiceAccountTokenSecret(cluster.Status.ServiceAccountTokenSecret, serviceAccountToken, cluster)
if err != nil {
return nil, err
}

saved := false
for i := 0; i < 20; i++ {
cluster, err = p.Clusters.Get(cluster.Name, metav1.GetOptions{})
@@ -599,7 +611,7 @@ func (p *Provisioner) reconcileCluster(cluster *v3.Cluster, create bool) (*v3.Cl

cluster.Status.AppliedSpec = censoredSpec
cluster.Status.APIEndpoint = apiEndpoint
cluster.Status.ServiceAccountToken = serviceAccountToken
cluster.Status.ServiceAccountTokenSecret = secret.Name
cluster.Status.CACert = caCert
resetRkeConfigFlags(cluster, updateTriggered)

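
The secretmigrator changes that define CreateOrUpdateServiceAccountTokenSecret are among the collapsed files below; the calls above only rely on it returning a secret whose name is recorded in the cluster status. A plausible stand-in, assuming the token is stored under secretmigrator.SecretKey in secretmigrator.SecretNamespace (matching how manager.go reads it back); the free-function form, the naming scheme, and the omission of any owner/cleanup bookkeeping are assumptions:

package example

import (
	"github.com/rancher/rancher/pkg/controllers/management/secretmigrator"
	corev1 "github.com/rancher/rancher/pkg/generated/norman/core/v1"
	v3 "github.com/rancher/rancher/pkg/generated/norman/management.cattle.io/v3"
	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// createOrUpdateServiceAccountTokenSecret is a hypothetical stand-in for the
// secretmigrator method used in reconcileCluster: reuse the named secret when
// one already exists, otherwise create a new one holding the token.
func createOrUpdateServiceAccountTokenSecret(lister corev1.SecretLister, secrets corev1.SecretInterface, existingName, token string, cluster *v3.Cluster) (*v1.Secret, error) {
	if existingName != "" {
		existing, err := lister.Get(secretmigrator.SecretNamespace, existingName)
		if err != nil {
			return nil, err
		}
		existing = existing.DeepCopy()
		if existing.Data == nil {
			existing.Data = map[string][]byte{}
		}
		existing.Data[secretmigrator.SecretKey] = []byte(token)
		return secrets.Update(existing)
	}
	return secrets.Create(&v1.Secret{
		ObjectMeta: metav1.ObjectMeta{
			// The generated name ties the secret to the cluster for readability;
			// the real naming scheme is an assumption here.
			GenerateName: cluster.Name + "-serviceaccounttoken-",
			Namespace:    secretmigrator.SecretNamespace,
		},
		Data: map[string][]byte{secretmigrator.SecretKey: []byte(token)},
	})
}
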
(The remaining 4 changed files in this commit are collapsed and not shown here.)
