diff --git a/cmd/preupgradechecks/checks.go b/cmd/preupgradechecks/checks.go
index f3474b9ec5..f01692fccd 100644
--- a/cmd/preupgradechecks/checks.go
+++ b/cmd/preupgradechecks/checks.go
@@ -123,7 +123,7 @@ func (client *PreUpgradeTaskClient) VerifyFunctionSpecReferences(ctx context.Con
 	var fList *fv1.FunctionList
 	errs := &multierror.Error{}
 
-	for _, namespace := range utils.GetNamespaces() {
+	for _, namespace := range utils.DefaultNSResolver().FissionResourceNS {
 		for i := 0; i < maxRetries; i++ {
 			fList, err = client.fissionClient.CoreV1().Functions(namespace).List(ctx, metav1.ListOptions{})
 			if err == nil {
diff --git a/pkg/executor/executor.go b/pkg/executor/executor.go
index 5ac425e5ad..f677bb0eb5 100644
--- a/pkg/executor/executor.go
+++ b/pkg/executor/executor.go
@@ -290,7 +290,7 @@ func StartExecutor(ctx context.Context, logger *zap.Logger, port int) error {
 	envInformer := make(map[string]finformerv1.EnvironmentInformer, 0)
 	pkgInformer := make(map[string]finformerv1.PackageInformer, 0)
 
-	for _, ns := range utils.GetNamespaces() {
+	for _, ns := range utils.DefaultNSResolver().FissionResourceNS {
 		factory := genInformer.NewFilteredSharedInformerFactory(fissionClient, time.Minute*30, ns, nil)
 		funcInformer[ns] = factory.Core().V1().Functions()
 		envInformer[ns] = factory.Core().V1().Environments()
@@ -369,7 +369,7 @@ func StartExecutor(ctx context.Context, logger *zap.Logger, port int) error {
 	configMapInformer := make(map[string]k8sInformersv1.ConfigMapInformer, 0)
 	secretInformer := make(map[string]k8sInformersv1.SecretInformer, 0)
 
-	for _, ns := range utils.GetNamespaces() {
+	for _, ns := range utils.DefaultNSResolver().FissionResourceNS {
 		factory := k8sInformers.NewFilteredSharedInformerFactory(kubernetesClient, time.Minute*30, ns, nil)
 		configMapInformer[ns] = factory.Core().V1().ConfigMaps()
 		secretInformer[ns] = factory.Core().V1().Secrets()
diff --git a/pkg/executor/executortype/container/containermgr.go b/pkg/executor/executortype/container/containermgr.go
index d84b260f86..dd815231d3 100644
--- a/pkg/executor/executortype/container/containermgr.go
+++ b/pkg/executor/executortype/container/containermgr.go
@@ -242,7 +242,8 @@ func (caaf *Container) RefreshFuncPods(ctx context.Context, logger *zap.Logger,
 
 	funcLabels := caaf.getDeployLabels(f.ObjectMeta)
 
-	dep, err := caaf.kubernetesClient.AppsV1().Deployments(metav1.NamespaceAll).List(ctx, metav1.ListOptions{
+	nsResolver := utils.DefaultNSResolver()
+	dep, err := caaf.kubernetesClient.AppsV1().Deployments(nsResolver.GetFunctionNS(f.ObjectMeta.Namespace)).List(ctx, metav1.ListOptions{
 		LabelSelector: labels.Set(funcLabels).AsSelector().String(),
 	})
 	if err != nil {
@@ -273,7 +274,7 @@ func (caaf *Container) RefreshFuncPods(ctx context.Context, logger *zap.Logger,
 func (caaf *Container) AdoptExistingResources(ctx context.Context) {
 	wg := &sync.WaitGroup{}
 
-	for _, namepsace := range utils.GetNamespaces() {
+	for _, namepsace := range utils.DefaultNSResolver().FissionResourceNS {
 		fnList, err := caaf.fissionClient.CoreV1().Functions(namepsace).List(ctx, metav1.ListOptions{})
 		if err != nil {
 			caaf.logger.Error("error getting function list", zap.Error(err))
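Note: the RefreshFuncPods changes above and below stop listing deployments across all namespaces and instead ask the namespace resolver where a function's deployment lives. A minimal sketch of the resolution rule this assumes (simplified and hypothetical; the real GetFunctionNS lives in pkg/utils/namespace.go):

    // Sketch only: assumed behavior of GetFunctionNS. If a dedicated function
    // namespace is configured, objects for functions created in the default
    // namespace are placed there; otherwise the function's own namespace is used.
    func (nsr *NamespaceResolver) GetFunctionNS(namespace string) string {
        if nsr.FunctionNamespace == "" {
            return namespace
        }
        if namespace == metav1.NamespaceDefault {
            return nsr.FunctionNamespace
        }
        return namespace
    }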
diff --git a/pkg/executor/executortype/newdeploy/newdeploymgr.go b/pkg/executor/executortype/newdeploy/newdeploymgr.go
index cf5f7971f1..3d706840aa 100644
--- a/pkg/executor/executortype/newdeploy/newdeploymgr.go
+++ b/pkg/executor/executortype/newdeploy/newdeploymgr.go
@@ -260,7 +260,7 @@ func (deploy *NewDeploy) RefreshFuncPods(ctx context.Context, logger *zap.Logger
 		UID:        env.ObjectMeta.UID,
 	})
 
-	dep, err := deploy.kubernetesClient.AppsV1().Deployments(metav1.NamespaceAll).List(ctx, metav1.ListOptions{
+	dep, err := deploy.kubernetesClient.AppsV1().Deployments(deploy.nsResolver.GetFunctionNS(f.ObjectMeta.Namespace)).List(ctx, metav1.ListOptions{
 		LabelSelector: labels.Set(funcLabels).AsSelector().String(),
 	})
 
@@ -292,7 +292,7 @@ func (deploy *NewDeploy) RefreshFuncPods(ctx context.Context, logger *zap.Logger
 func (deploy *NewDeploy) AdoptExistingResources(ctx context.Context) {
 	wg := &sync.WaitGroup{}
 
-	for _, namepsace := range utils.GetNamespaces() {
+	for _, namepsace := range utils.DefaultNSResolver().FissionResourceNS {
 		fnList, err := deploy.fissionClient.CoreV1().Functions(namepsace).List(ctx, metav1.ListOptions{})
 		if err != nil {
 			deploy.logger.Error("error getting function list", zap.Error(err))
@@ -769,7 +769,7 @@ func (deploy *NewDeploy) idleObjectReaper(ctx context.Context) {
 func (deploy *NewDeploy) doIdleObjectReaper(ctx context.Context) {
 	envList := make(map[k8sTypes.UID]struct{})
 
-	for _, namespace := range utils.GetNamespaces() {
+	for _, namespace := range utils.DefaultNSResolver().FissionResourceNS {
 		envs, err := deploy.fissionClient.CoreV1().Environments(namespace).List(ctx, metav1.ListOptions{})
 		if err != nil {
 			deploy.logger.Fatal("failed to get environment list", zap.Error(err), zap.String("namespace", namespace))
diff --git a/pkg/executor/executortype/newdeploy/newdeploymgr_test.go b/pkg/executor/executortype/newdeploy/newdeploymgr_test.go
index 1a1a0059b2..4bef44763a 100644
--- a/pkg/executor/executortype/newdeploy/newdeploymgr_test.go
+++ b/pkg/executor/executortype/newdeploy/newdeploymgr_test.go
@@ -92,6 +92,7 @@ func TestRefreshFuncPods(t *testing.T) {
 	nsResolver := utils.NamespaceResolver{
 		FunctionNamespace: functionNamespace,
 		BuiderNamespace:   builderNamespace,
+		DefaultNamespace:  defaultNamespace,
 	}
 	ndm.nsResolver = &nsResolver
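Note: every `for _, ns := range utils.GetNamespaces()` loop (over a `[]string`) becomes a range over `utils.DefaultNSResolver().FissionResourceNS`, a `map[string]string` keyed and valued by namespace name, so each loop variable still receives a namespace name; only map iteration order is non-deterministic, which these call sites do not rely on. The consuming pattern, sketched with a hypothetical client and logger assumed to be in scope:

    // Sketch: per-namespace listing, as in the AdoptExistingResources changes above.
    for _, ns := range utils.DefaultNSResolver().FissionResourceNS {
        fnList, err := fissionClient.CoreV1().Functions(ns).List(ctx, metav1.ListOptions{})
        if err != nil {
            logger.Error("error getting function list", zap.Error(err), zap.String("namespace", ns))
            continue
        }
        _ = fnList // process the functions found in this namespace
    }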
zap.String("namespace", namespace)) @@ -601,7 +601,7 @@ func (gpm *GenericPoolManager) doIdleObjectReaper(ctx context.Context) { } fnList := make(map[k8sTypes.UID]fv1.Function) - for _, namespace := range utils.GetNamespaces() { + for _, namespace := range utils.DefaultNSResolver().FissionResourceNS { fns, err := gpm.fissionClient.CoreV1().Functions(namespace).List(ctx, metav1.ListOptions{}) if err != nil { gpm.logger.Error("failed to get environment list", zap.Error(err), zap.String("namespace", namespace)) diff --git a/pkg/executor/reaper/reaper.go b/pkg/executor/reaper/reaper.go index 08c5211783..564a92f4e0 100644 --- a/pkg/executor/reaper/reaper.go +++ b/pkg/executor/reaper/reaper.go @@ -71,26 +71,34 @@ func CleanupKubeObject(ctx context.Context, logger *zap.Logger, kubeClient kuber // CleanupDeployments deletes deployment(s) for a given instanceID func CleanupDeployments(ctx context.Context, logger *zap.Logger, client kubernetes.Interface, instanceID string, listOps metav1.ListOptions) error { - deploymentList, err := client.AppsV1().Deployments(metav1.NamespaceAll).List(ctx, listOps) - if err != nil { - return err - } - for _, dep := range deploymentList.Items { - id, ok := dep.ObjectMeta.Annotations[fv1.EXECUTOR_INSTANCEID_LABEL] - if !ok { - // Backward compatibility with older label name - id, ok = dep.ObjectMeta.Labels[fv1.EXECUTOR_INSTANCEID_LABEL] + cleanupDeployments := func(namespace string) error { + deploymentList, err := client.AppsV1().Deployments(namespace).List(ctx, listOps) + if err != nil { + return err } - if ok && id != instanceID { - logger.Info("cleaning up deployment", zap.String("deployment", dep.ObjectMeta.Name)) - err := client.AppsV1().Deployments(dep.ObjectMeta.Namespace).Delete(ctx, dep.ObjectMeta.Name, delOpt) - if err != nil { - logger.Error("error cleaning up deployment", - zap.Error(err), - zap.String("deployment_name", dep.ObjectMeta.Name), - zap.String("deployment_namespace", dep.ObjectMeta.Namespace)) + for _, dep := range deploymentList.Items { + id, ok := dep.ObjectMeta.Annotations[fv1.EXECUTOR_INSTANCEID_LABEL] + if !ok { + // Backward compatibility with older label name + id, ok = dep.ObjectMeta.Labels[fv1.EXECUTOR_INSTANCEID_LABEL] } - // ignore err + if ok && id != instanceID { + logger.Info("cleaning up deployment", zap.String("deployment", dep.ObjectMeta.Name)) + err := client.AppsV1().Deployments(dep.ObjectMeta.Namespace).Delete(ctx, dep.ObjectMeta.Name, delOpt) + if err != nil { + logger.Error("error cleaning up deployment", + zap.Error(err), + zap.String("deployment_name", dep.ObjectMeta.Name), + zap.String("deployment_namespace", dep.ObjectMeta.Namespace)) + } + // ignore err + } + } + return nil + } + for _, namespace := range GetReaperNamespace() { + if err := cleanupDeployments(namespace); err != nil { + return err } } @@ -99,26 +107,35 @@ func CleanupDeployments(ctx context.Context, logger *zap.Logger, client kubernet // CleanupPods deletes pod(s) for a given instanceID func CleanupPods(ctx context.Context, logger *zap.Logger, client kubernetes.Interface, instanceID string, listOps metav1.ListOptions) error { - podList, err := client.CoreV1().Pods(metav1.NamespaceAll).List(ctx, listOps) - if err != nil { - return err - } - for _, pod := range podList.Items { - id, ok := pod.ObjectMeta.Annotations[fv1.EXECUTOR_INSTANCEID_LABEL] - if !ok { - // Backward compatibility with older label name - id, ok = pod.ObjectMeta.Labels[fv1.EXECUTOR_INSTANCEID_LABEL] + cleanupPods := func(namespace string) error { + podList, err := 
@@ -99,26 +107,35 @@ func CleanupDeployments(ctx context.Context, logger *zap.Logger, client kubernet
 
 // CleanupPods deletes pod(s) for a given instanceID
 func CleanupPods(ctx context.Context, logger *zap.Logger, client kubernetes.Interface, instanceID string, listOps metav1.ListOptions) error {
-	podList, err := client.CoreV1().Pods(metav1.NamespaceAll).List(ctx, listOps)
-	if err != nil {
-		return err
-	}
-	for _, pod := range podList.Items {
-		id, ok := pod.ObjectMeta.Annotations[fv1.EXECUTOR_INSTANCEID_LABEL]
-		if !ok {
-			// Backward compatibility with older label name
-			id, ok = pod.ObjectMeta.Labels[fv1.EXECUTOR_INSTANCEID_LABEL]
+	cleanupPods := func(namespace string) error {
+		podList, err := client.CoreV1().Pods(namespace).List(ctx, listOps)
+		if err != nil {
+			return err
 		}
-		if ok && id != instanceID {
-			logger.Info("cleaning up pod", zap.String("pod", pod.ObjectMeta.Name))
-			err := client.CoreV1().Pods(pod.ObjectMeta.Namespace).Delete(ctx, pod.ObjectMeta.Name, metav1.DeleteOptions{})
-			if err != nil {
-				logger.Error("error cleaning up pod",
-					zap.Error(err),
-					zap.String("pod_name", pod.ObjectMeta.Name),
-					zap.String("pod_namespace", pod.ObjectMeta.Namespace))
+		for _, pod := range podList.Items {
+			id, ok := pod.ObjectMeta.Annotations[fv1.EXECUTOR_INSTANCEID_LABEL]
+			if !ok {
+				// Backward compatibility with older label name
+				id, ok = pod.ObjectMeta.Labels[fv1.EXECUTOR_INSTANCEID_LABEL]
 			}
-			// ignore err
+			if ok && id != instanceID {
+				logger.Info("cleaning up pod", zap.String("pod", pod.ObjectMeta.Name))
+				err := client.CoreV1().Pods(pod.ObjectMeta.Namespace).Delete(ctx, pod.ObjectMeta.Name, metav1.DeleteOptions{})
+				if err != nil {
+					logger.Error("error cleaning up pod",
+						zap.Error(err),
+						zap.String("pod_name", pod.ObjectMeta.Name),
+						zap.String("pod_namespace", pod.ObjectMeta.Namespace))
+				}
+				// ignore err
+			}
+		}
+		return nil
+	}
+
+	for _, namespace := range GetReaperNamespace() {
+		if err := cleanupPods(namespace); err != nil {
+			return err
 		}
 	}
@@ -127,26 +144,35 @@ func CleanupPods(ctx context.Context, logger *zap.Logger, client kubernetes.Inte
 
 // CleanupServices deletes service(s) for a given instanceID
 func CleanupServices(ctx context.Context, logger *zap.Logger, client kubernetes.Interface, instanceID string, listOps metav1.ListOptions) error {
-	svcList, err := client.CoreV1().Services(metav1.NamespaceAll).List(ctx, listOps)
-	if err != nil {
-		return err
-	}
-	for _, svc := range svcList.Items {
-		id, ok := svc.ObjectMeta.Annotations[fv1.EXECUTOR_INSTANCEID_LABEL]
-		if !ok {
-			// Backward compatibility with older label name
-			id, ok = svc.ObjectMeta.Labels[fv1.EXECUTOR_INSTANCEID_LABEL]
+	cleanupServices := func(namespace string) error {
+		svcList, err := client.CoreV1().Services(namespace).List(ctx, listOps)
+		if err != nil {
+			return err
 		}
-		if ok && id != instanceID {
-			logger.Info("cleaning up service", zap.String("service", svc.ObjectMeta.Name))
-			err := client.CoreV1().Services(svc.ObjectMeta.Namespace).Delete(ctx, svc.ObjectMeta.Name, metav1.DeleteOptions{})
-			if err != nil {
-				logger.Error("error cleaning up service",
-					zap.Error(err),
-					zap.String("service_name", svc.ObjectMeta.Name),
-					zap.String("service_namespace", svc.ObjectMeta.Namespace))
+		for _, svc := range svcList.Items {
+			id, ok := svc.ObjectMeta.Annotations[fv1.EXECUTOR_INSTANCEID_LABEL]
+			if !ok {
+				// Backward compatibility with older label name
+				id, ok = svc.ObjectMeta.Labels[fv1.EXECUTOR_INSTANCEID_LABEL]
 			}
-			// ignore err
+			if ok && id != instanceID {
+				logger.Info("cleaning up service", zap.String("service", svc.ObjectMeta.Name))
+				err := client.CoreV1().Services(svc.ObjectMeta.Namespace).Delete(ctx, svc.ObjectMeta.Name, metav1.DeleteOptions{})
+				if err != nil {
+					logger.Error("error cleaning up service",
+						zap.Error(err),
+						zap.String("service_name", svc.ObjectMeta.Name),
+						zap.String("service_namespace", svc.ObjectMeta.Namespace))
+				}
+				// ignore err
+			}
+		}
+		return nil
+	}
+
+	for _, namespace := range GetReaperNamespace() {
+		if err := cleanupServices(namespace); err != nil {
+			return err
 		}
 	}
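Note: CleanupDeployments, CleanupPods, and CleanupServices now all share the same shape: the old NamespaceAll body is wrapped in a closure taking a namespace, and that closure is driven once per reaper namespace. The shared shape, extracted into a hypothetical helper for illustration (the diff deliberately inlines it in each function):

    // Sketch: the per-namespace fan-out common to the cleanup functions above.
    func forEachReaperNamespace(cleanup func(namespace string) error) error {
        for _, namespace := range GetReaperNamespace() {
            if err := cleanup(namespace); err != nil {
                return err
            }
        }
        return nil
    }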
@@ -155,27 +181,36 @@ func CleanupServices(ctx context.Context, logger *zap.Logger, client kubernetes.
 
 // CleanupHpa deletes horizontal pod autoscaler(s) for a given instanceID
 func CleanupHpa(ctx context.Context, logger *zap.Logger, client kubernetes.Interface, instanceID string, listOps metav1.ListOptions) error {
-	hpaList, err := client.AutoscalingV2beta2().HorizontalPodAutoscalers(metav1.NamespaceAll).List(ctx, listOps)
-	if err != nil {
-		return err
-	}
-
-	for _, hpa := range hpaList.Items {
-		id, ok := hpa.ObjectMeta.Annotations[fv1.EXECUTOR_INSTANCEID_LABEL]
-		if !ok {
-			// Backward compatibility with older label name
-			id, ok = hpa.ObjectMeta.Labels[fv1.EXECUTOR_INSTANCEID_LABEL]
+	cleanupHpa := func(namespace string) error {
+		hpaList, err := client.AutoscalingV2beta2().HorizontalPodAutoscalers(namespace).List(ctx, listOps)
+		if err != nil {
+			return err
 		}
-		if ok && id != instanceID {
-			logger.Info("cleaning up HPA", zap.String("hpa", hpa.ObjectMeta.Name))
-			err := client.AutoscalingV2beta2().HorizontalPodAutoscalers(hpa.ObjectMeta.Namespace).Delete(ctx, hpa.ObjectMeta.Name, metav1.DeleteOptions{})
-			if err != nil {
-				logger.Error("error cleaning up HPA",
-					zap.Error(err),
-					zap.String("hpa_name", hpa.ObjectMeta.Name),
-					zap.String("hpa_namespace", hpa.ObjectMeta.Namespace))
+
+		for _, hpa := range hpaList.Items {
+			id, ok := hpa.ObjectMeta.Annotations[fv1.EXECUTOR_INSTANCEID_LABEL]
+			if !ok {
+				// Backward compatibility with older label name
+				id, ok = hpa.ObjectMeta.Labels[fv1.EXECUTOR_INSTANCEID_LABEL]
+			}
+			if ok && id != instanceID {
+				logger.Info("cleaning up HPA", zap.String("hpa", hpa.ObjectMeta.Name))
+				err := client.AutoscalingV2beta2().HorizontalPodAutoscalers(hpa.ObjectMeta.Namespace).Delete(ctx, hpa.ObjectMeta.Name, metav1.DeleteOptions{})
+				if err != nil {
+					logger.Error("error cleaning up HPA",
+						zap.Error(err),
+						zap.String("hpa_name", hpa.ObjectMeta.Name),
+						zap.String("hpa_namespace", hpa.ObjectMeta.Namespace))
+				}
+				// ignore err
 			}
-			// ignore err
+		}
+		return nil
+	}
+
+	for _, namespace := range GetReaperNamespace() {
+		if err := cleanupHpa(namespace); err != nil {
+			return err
 		}
 	}
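Note: each cleanup closure keeps the original staleness guard: read the executor instance ID from the object's annotations, fall back to the legacy label, and delete only objects stamped by a different (older) executor instance. The guard in isolation, as a hypothetical helper:

    // Sketch: true if the object was created by a previous executor instance.
    func isStale(meta metav1.ObjectMeta, instanceID string) bool {
        id, ok := meta.Annotations[fv1.EXECUTOR_INSTANCEID_LABEL]
        if !ok {
            // Backward compatibility with older label name
            id, ok = meta.Labels[fv1.EXECUTOR_INSTANCEID_LABEL]
        }
        return ok && id != instanceID
    }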
@@ -192,137 +227,151 @@ func CleanupRoleBindings(ctx context.Context, logger *zap.Logger, client kuberne
 
 		logger.Debug("starting cleanupRoleBindings cycle")
-		// get all rolebindings ( just to be efficient, one call to kubernetes )
-		rbList, err := client.RbacV1().RoleBindings(metav1.NamespaceAll).List(ctx, metav1.ListOptions{})
-		if err != nil {
-			// something wrong, but next iteration hopefully succeeds
-			logger.Error("error listing role bindings in all namespaces", zap.Error(err))
-			continue
-		}
-
-		// go through each role-binding object and do the cleanup necessary
-		for _, roleBinding := range rbList.Items {
-			// ignore role-bindings in kube-system namespace
-			if roleBinding.Namespace == "kube-system" {
-				continue
-			}
-
-			// ignore role-bindings not created by fission
-			if roleBinding.Name != fv1.PackageGetterRB && roleBinding.Name != fv1.SecretConfigMapGetterRB {
-				continue
-			}
-
-			// in order to find out if there are any functions that need this role-binding in role-binding namespace,
-			// we can list the functions once per role-binding.
-			funcList, err := fissionClient.CoreV1().Functions(roleBinding.Namespace).List(ctx, metav1.ListOptions{})
-			if err != nil {
-				logger.Error("error fetching function list in namespace", zap.Error(err), zap.String("namespace", roleBinding.Namespace))
-				continue
-			}
-
-			// final map of service accounts that can be removed from this roleBinding object
-			// using a map here instead of a list so the code in RemoveSAFromRoleBindingWithRetries is efficient.
-			saToRemove := make(map[string]bool)
-
-			// the following flags are needed to decide if any of the service accounts can be removed from role-bindings depending on the functions that need them.
-			// ndmFunc denotes if there's at least one function that has executor type New deploy Manager
-			// funcEnvReference denotes if there's at least one function that has reference to an environment in the SA Namespace for the SA in question
-			var ndmFunc, funcEnvReference bool
-
-			// iterate through each subject in the role-binding and check if there are any references to them
-			for _, subj := range roleBinding.Subjects {
-				ndmFunc = false
-				funcEnvReference = false
-
-				// this is the reverse of what we're doing in setting up of role-bindings. if objects are created in default ns,
-				// the SA namespace will have the value of "fission-function"/"fission-builder" depending on the SA.
-				// so now we need to look for the objects in default namespace.
-				saNs := subj.Namespace
-				isInReservedNS := false
-				if subj.Namespace == nsResolver.FunctionNamespace ||
-					subj.Namespace == nsResolver.BuiderNamespace {
-					saNs = metav1.NamespaceDefault
-					isInReservedNS = true
-				}
-
-				// go through each function and find out if there's either at least one function with env reference in the same namespace as the Service Account in this iteration
-				// or at least one function using ndm executor in the role-binding namespace and set the corresponding flags
-				for _, fn := range funcList.Items {
-					if fn.Spec.Environment.Namespace == saNs ||
-						// For the case that the environment is created in the reserved namespace.
-						(isInReservedNS && (fn.Spec.Environment.Namespace == nsResolver.FunctionNamespace ||
-							fn.Spec.Environment.Namespace == nsResolver.BuiderNamespace)) {
-						funcEnvReference = true
-						break
-					}
-
-					if fn.Spec.InvokeStrategy.ExecutionStrategy.ExecutorType == fv1.ExecutorTypeNewdeploy {
-						ndmFunc = true
-						break
-					}
-				}
-
-				// if its a package-getter-rb, we have 2 kinds of SAs and each of them is handled differently
-				// else if its a secret-configmap-rb, we have only one SA which is fission-fetcher
-				if roleBinding.Name == fv1.PackageGetterRB {
-					// check if there is an env obj in saNs
-					envList, err := fissionClient.CoreV1().Environments(saNs).List(ctx, metav1.ListOptions{})
-					if err != nil {
-						logger.Error("error fetching environment list in service account namespace", zap.Error(err), zap.String("namespace", saNs))
-						continue
-					}
-
-					// if the SA in this iteration is fission-builder, then we need to only check
-					// if either there's at least one env object in the SA's namespace, or,
-					// if there's at least one function in the role-binding namespace with env reference
-					// to the SA's namespace.
-					// if neither, then we can remove this SA from this role-binding
-					if subj.Name == fv1.FissionBuilderSA {
-						if len(envList.Items) == 0 && !funcEnvReference {
-							saToRemove[utils.MakeSAMapKey(subj.Name, subj.Namespace)] = true
-						}
-					}
-
-					// if the SA in this iteration is fission-fetcher, then in addition to above checks,
-					// we also need to check if there's at least one function with executor type New deploy
-					// in the rolebinding's namespace.
-					// if none of them are true, then remove this SA from this role-binding
-					if subj.Name == fv1.FissionFetcherSA {
-						if len(envList.Items) == 0 && !ndmFunc && !funcEnvReference {
-							// remove SA from rolebinding
-							saToRemove[utils.MakeSAMapKey(subj.Name, subj.Namespace)] = true
-						}
-					}
-				} else if roleBinding.Name == fv1.SecretConfigMapGetterRB {
-					// if there's not even one function in the role-binding's namespace and there's not even
-					// one function with env reference to the SA's namespace, then remove that SA
-					// from this role-binding
-					if !ndmFunc && !funcEnvReference {
-						saToRemove[utils.MakeSAMapKey(subj.Name, subj.Namespace)] = true
-					}
-				}
-			}
-
-			// finally, make a call to RemoveSAFromRoleBindingWithRetries for all the service accounts that need to be removed
-			// for the role-binding in this iteration
-			if len(saToRemove) != 0 {
-				logger.Debug("removing service accounts from role binding",
-					zap.Any("service_accounts", saToRemove),
-					zap.String("role_binding_name", roleBinding.Name),
-					zap.String("role_binding_namespace", roleBinding.Namespace))
-
-				// call this once in the end for each role-binding
-				err = utils.RemoveSAFromRoleBindingWithRetries(ctx, logger, client, roleBinding.Name, roleBinding.Namespace, saToRemove)
-				if err != nil {
-					// if there's an error, we just log it and proceed with the next role-binding, hoping that this role-binding
-					// will be processed in next iteration.
-					logger.Debug("error removing service account from role binding",
-						zap.Error(err),
-						zap.Any("service_accounts", saToRemove),
-						zap.String("role_binding_name", roleBinding.Name),
-						zap.String("role_binding_namespace", roleBinding.Namespace))
-				}
-			}
-		}
+		cleanupRoleBindings := func(namespace string) error {
+			// get the rolebindings in this namespace (one list call per namespace)
+			rbList, err := client.RbacV1().RoleBindings(namespace).List(ctx, metav1.ListOptions{})
+			if err != nil {
+				// something wrong, but next iteration hopefully succeeds
+				logger.Error("error listing role bindings in namespace", zap.Error(err))
+				return err
+			}
+
+			// go through each role-binding object and do the cleanup necessary
+			for _, roleBinding := range rbList.Items {
+				// ignore role-bindings in kube-system namespace
+				if roleBinding.Namespace == "kube-system" {
+					continue
+				}
+
+				// ignore role-bindings not created by fission
+				if roleBinding.Name != fv1.PackageGetterRB && roleBinding.Name != fv1.SecretConfigMapGetterRB {
+					continue
+				}
+
+				// in order to find out if there are any functions that need this role-binding in role-binding namespace,
+				// we can list the functions once per role-binding.
+				funcList, err := fissionClient.CoreV1().Functions(roleBinding.Namespace).List(ctx, metav1.ListOptions{})
+				if err != nil {
+					logger.Error("error fetching function list in namespace", zap.Error(err), zap.String("namespace", roleBinding.Namespace))
+					continue
+				}
+
+				// final map of service accounts that can be removed from this roleBinding object
+				// using a map here instead of a list so the code in RemoveSAFromRoleBindingWithRetries is efficient.
+				saToRemove := make(map[string]bool)
+
+				// the following flags are needed to decide if any of the service accounts can be removed from role-bindings depending on the functions that need them.
+				// ndmFunc denotes if there's at least one function that has executor type New deploy Manager
+				// funcEnvReference denotes if there's at least one function that has reference to an environment in the SA Namespace for the SA in question
+				var ndmFunc, funcEnvReference bool
+
+				// iterate through each subject in the role-binding and check if there are any references to them
+				for _, subj := range roleBinding.Subjects {
+					ndmFunc = false
+					funcEnvReference = false
+
+					// this is the reverse of what we're doing in setting up of role-bindings. if objects are created in default ns,
+					// the SA namespace will have the value of "fission-function"/"fission-builder" depending on the SA.
+					// so now we need to look for the objects in default namespace.
+					saNs := subj.Namespace
+					isInReservedNS := false
+					if subj.Namespace == nsResolver.FunctionNamespace ||
+						subj.Namespace == nsResolver.BuiderNamespace {
+						saNs = metav1.NamespaceDefault
+						isInReservedNS = true
+					}
+
+					// go through each function and find out if there's either at least one function with env reference in the same namespace as the Service Account in this iteration
+					// or at least one function using ndm executor in the role-binding namespace and set the corresponding flags
+					for _, fn := range funcList.Items {
+						if fn.Spec.Environment.Namespace == saNs ||
+							// For the case that the environment is created in the reserved namespace.
+							(isInReservedNS && (fn.Spec.Environment.Namespace == nsResolver.FunctionNamespace ||
+								fn.Spec.Environment.Namespace == nsResolver.BuiderNamespace)) {
+							funcEnvReference = true
+							break
+						}
+
+						if fn.Spec.InvokeStrategy.ExecutionStrategy.ExecutorType == fv1.ExecutorTypeNewdeploy {
+							ndmFunc = true
+							break
+						}
+					}
+
+					// if its a package-getter-rb, we have 2 kinds of SAs and each of them is handled differently
+					// else if its a secret-configmap-rb, we have only one SA which is fission-fetcher
+					if roleBinding.Name == fv1.PackageGetterRB {
+						// check if there is an env obj in saNs
+						envList, err := fissionClient.CoreV1().Environments(saNs).List(ctx, metav1.ListOptions{})
+						if err != nil {
+							logger.Error("error fetching environment list in service account namespace", zap.Error(err), zap.String("namespace", saNs))
+							continue
+						}
+
+						// if the SA in this iteration is fission-builder, then we need to only check
+						// if either there's at least one env object in the SA's namespace, or,
+						// if there's at least one function in the role-binding namespace with env reference
+						// to the SA's namespace.
+						// if neither, then we can remove this SA from this role-binding
+						if subj.Name == fv1.FissionBuilderSA {
+							if len(envList.Items) == 0 && !funcEnvReference {
+								saToRemove[utils.MakeSAMapKey(subj.Name, subj.Namespace)] = true
+							}
+						}
+
+						// if the SA in this iteration is fission-fetcher, then in addition to above checks,
+						// we also need to check if there's at least one function with executor type New deploy
+						// in the rolebinding's namespace.
+						// if none of them are true, then remove this SA from this role-binding
+						if subj.Name == fv1.FissionFetcherSA {
+							if len(envList.Items) == 0 && !ndmFunc && !funcEnvReference {
+								// remove SA from rolebinding
+								saToRemove[utils.MakeSAMapKey(subj.Name, subj.Namespace)] = true
+							}
+						}
+					} else if roleBinding.Name == fv1.SecretConfigMapGetterRB {
+						// if there's not even one function in the role-binding's namespace and there's not even
+						// one function with env reference to the SA's namespace, then remove that SA
+						// from this role-binding
+						if !ndmFunc && !funcEnvReference {
+							saToRemove[utils.MakeSAMapKey(subj.Name, subj.Namespace)] = true
+						}
+					}
+				}
+
+				// finally, make a call to RemoveSAFromRoleBindingWithRetries for all the service accounts that need to be removed
+				// for the role-binding in this iteration
+				if len(saToRemove) != 0 {
+					logger.Debug("removing service accounts from role binding",
+						zap.Any("service_accounts", saToRemove),
+						zap.String("role_binding_name", roleBinding.Name),
+						zap.String("role_binding_namespace", roleBinding.Namespace))
+
+					// call this once in the end for each role-binding
+					err = utils.RemoveSAFromRoleBindingWithRetries(ctx, logger, client, roleBinding.Name, roleBinding.Namespace, saToRemove)
+					if err != nil {
+						// if there's an error, we just log it and proceed with the next role-binding, hoping that this role-binding
+						// will be processed in next iteration.
+						logger.Debug("error removing service account from role binding",
+							zap.Error(err),
+							zap.Any("service_accounts", saToRemove),
+							zap.String("role_binding_name", roleBinding.Name),
+							zap.String("role_binding_namespace", roleBinding.Namespace))
+					}
+				}
+			}
+			return nil
+		}
+		for _, namespace := range GetReaperNamespace() {
+			// ignore error
+			cleanupRoleBindings(namespace) //nolint errcheck
+		}
 	}
 }
+
+func GetReaperNamespace() map[string]string {
+	ns := utils.DefaultNSResolver()
+	// to support backward compatibility, we need to clean up deployments and rolebindings created in the function, builder, and default namespaces as well
+	fissionResourceNs := ns.FissionNSWithOptions(utils.WithBuilderNs(), utils.WithFunctionNs(), utils.WithDefaultNs())
+	return fissionResourceNs
+}
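Note: GetReaperNamespace deliberately sweeps more namespaces than FissionResourceNS alone: FissionNSWithOptions merges in the builder, function, and default namespaces so resources created by releases that predate multi-namespace support are still reaped. For a typical install the merged map might look like this (values are illustrative and depend on the env configuration):

    // Sketch: FISSION_RESOURCE_NAMESPACES=ns1,ns2 plus the classic reserved
    // namespaces yields a merged map like the following.
    var reaperNS = map[string]string{
        "ns1":              "ns1",
        "ns2":              "ns2",
        "fission-function": "fission-function", // via utils.WithFunctionNs()
        "fission-builder":  "fission-builder",  // via utils.WithBuilderNs()
        "default":          "default",          // via utils.WithDefaultNs()
    }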
diff --git a/pkg/storagesvc/archivePruner.go b/pkg/storagesvc/archivePruner.go
index a08ebdd724..1c450c9951 100644
--- a/pkg/storagesvc/archivePruner.go
+++ b/pkg/storagesvc/archivePruner.go
@@ -83,7 +83,7 @@ func (pruner *ArchivePruner) getOrphanArchives(ctx context.Context) {
 	var archiveID string
 
 	// get all pkgs from kubernetes
-	for _, namespace := range utils.GetNamespaces() {
+	for _, namespace := range utils.DefaultNSResolver().FissionResourceNS {
 		pkgList, err := pruner.crdClient.CoreV1().Packages(namespace).List(ctx, metav1.ListOptions{})
 		if err != nil {
 			pruner.logger.Error("error getting package list from kubernetes", zap.Error(err))
diff --git a/pkg/utils/informer.go b/pkg/utils/informer.go
index 2c779423a3..a0ee8f9fc0 100644
--- a/pkg/utils/informer.go
+++ b/pkg/utils/informer.go
@@ -1,8 +1,6 @@
 package utils
 
 import (
-	"os"
-	"strings"
 	"time"
 
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -13,37 +11,14 @@ import (
 	"k8s.io/client-go/tools/cache"
 	metricsapi "k8s.io/metrics/pkg/apis/metrics"
 
-	v1 "github.com/fission/fission/pkg/apis/core/v1"
 	fv1 "github.com/fission/fission/pkg/apis/core/v1"
 	"github.com/fission/fission/pkg/generated/clientset/versioned"
 	genInformer "github.com/fission/fission/pkg/generated/informers/externalversions"
 )
 
-const additionalNamespaces string = "FISSION_RESOURCE_NAMESPACES"
-
-func GetNamespaces() []string {
-	envValue := os.Getenv(additionalNamespaces)
-	if len(envValue) == 0 {
-		return []string{
-			metav1.NamespaceAll,
-		}
-	}
-
-	informerNS := make([]string, 0)
-	lstNamespaces := strings.Split(envValue, ",")
-	for _, namespace := range lstNamespaces {
-		//check to handle string with additional comma at the end of string. eg- ns1,ns2,
-		if namespace != "" {
-			informerNS = append(informerNS, namespace)
-		}
-	}
-	return informerNS
-}
-
 func GetInformersForNamespaces(client versioned.Interface, defaultSync time.Duration, kind string) map[string]cache.SharedIndexInformer {
 	informers := make(map[string]cache.SharedIndexInformer)
-	for _, ns := range GetNamespaces() {
+	for _, ns := range DefaultNSResolver().FissionResourceNS {
 		factory := genInformer.NewFilteredSharedInformerFactory(client, defaultSync, ns, nil).Core().V1()
 		switch kind {
 		case fv1.CanaryConfigResource:
@@ -79,8 +54,8 @@ func GetInformerFactoryByReadyPod(client kubernetes.Interface, namespace string,
 	return informerFactory, nil
 }
 
-func GetInformerFactoryByExecutor(client kubernetes.Interface, executorType v1.ExecutorType, defaultResync time.Duration) (k8sInformers.SharedInformerFactory, error) {
-	executorLabel, err := labels.NewRequirement(v1.EXECUTOR_TYPE, selection.DoubleEquals, []string{string(executorType)})
+func GetInformerFactoryByExecutor(client kubernetes.Interface, executorType fv1.ExecutorType, defaultResync time.Duration) (k8sInformers.SharedInformerFactory, error) {
+	executorLabel, err := labels.NewRequirement(fv1.EXECUTOR_TYPE, selection.DoubleEquals, []string{string(executorType)})
 	if err != nil {
 		return nil, err
 	}
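Note: GetInformersForNamespaces now builds one filtered shared informer per resolved namespace rather than a single cluster-wide one, which is what lets components watch only the configured namespaces. A trimmed sketch for a single resource kind (the real switch statement covers several kinds):

    // Sketch: one informer per configured namespace, keyed by namespace.
    func functionInformers(client versioned.Interface, defaultSync time.Duration) map[string]cache.SharedIndexInformer {
        informers := make(map[string]cache.SharedIndexInformer)
        for _, ns := range DefaultNSResolver().FissionResourceNS {
            factory := genInformer.NewFilteredSharedInformerFactory(client, defaultSync, ns, nil).Core().V1()
            informers[ns] = factory.Functions().Informer()
        }
        return informers
    }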
diff --git a/pkg/utils/namespace.go b/pkg/utils/namespace.go
index 9c27a9f500..73a8988992 100644
--- a/pkg/utils/namespace.go
+++ b/pkg/utils/namespace.go
@@ -2,21 +2,38 @@ package utils
 
 import (
 	"os"
+	"strings"
 
+	"go.uber.org/zap"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+
+	"github.com/fission/fission/pkg/utils/loggerfactory"
 )
 
 const (
-	ENV_FUNCTION_NAMESPACE string = "FISSION_FUNCTION_NAMESPACE"
-	ENV_BUILDER_NAMESPACE  string = "FISSION_BUILDER_NAMESPACE"
-	ENV_DEFAULT_NAMESPACE  string = "FISSION_DEFAULT_NAMESPACE"
+	ENV_FUNCTION_NAMESPACE   string = "FISSION_FUNCTION_NAMESPACE"
+	ENV_BUILDER_NAMESPACE    string = "FISSION_BUILDER_NAMESPACE"
+	ENV_DEFAULT_NAMESPACE    string = "FISSION_DEFAULT_NAMESPACE"
+	ENV_ADDITIONAL_NAMESPACE string = "FISSION_RESOURCE_NAMESPACES"
 )
 
-type NamespaceResolver struct {
-	FunctionNamespace string
-	BuiderNamespace   string
-	DefaultNamespace  string
-}
+type (
+	NamespaceResolver struct {
+		FunctionNamespace string
+		BuiderNamespace   string
+		DefaultNamespace  string
+		FissionResourceNS map[string]string
+		Logger            *zap.Logger
+	}
+
+	options struct {
+		functionNS bool
+		builderNS  bool
+		defaultNs  bool
+	}
+
+	option func(options *options) *options
+)
 
 var nsResolver *NamespaceResolver
 
@@ -25,7 +42,87 @@ func init() {
 		FunctionNamespace: os.Getenv(ENV_FUNCTION_NAMESPACE),
 		BuiderNamespace:   os.Getenv(ENV_BUILDER_NAMESPACE),
 		DefaultNamespace:  os.Getenv(ENV_DEFAULT_NAMESPACE),
+		FissionResourceNS: getNamespaces(),
+		Logger:            loggerfactory.GetLogger(),
+	}
+
+	nsResolver.Logger.Debug("namespaces", zap.String("function_namespace", nsResolver.FunctionNamespace),
+		zap.String("builder_namespace", nsResolver.BuiderNamespace),
+		zap.String("default_namespace", nsResolver.DefaultNamespace),
+		zap.Any("fission_resource_namespace", listNamespaces(nsResolver.FissionResourceNS)))
+}
+
+// listNamespaces converts the namespaces map to a slice
+func listNamespaces(namespaces map[string]string) []string {
+	ns := make([]string, 0)
+	for _, namespace := range namespaces {
+		ns = append(ns, namespace)
+	}
+	return ns
+}
+
+func WithBuilderNs() option {
+	return func(options *options) *options {
+		options.builderNS = true
+		return options
+	}
+}
+
+func WithFunctionNs() option {
+	return func(options *options) *options {
+		options.functionNS = true
+		return options
+	}
+}
+
+func WithDefaultNs() option {
+	return func(options *options) *options {
+		options.defaultNs = true
+		return options
+	}
+}
+
+func (nsr *NamespaceResolver) FissionNSWithOptions(option ...option) map[string]string {
+	var options options
+	for _, opt := range option {
+		options = *opt(&options)
+	}
+
+	fissionResourceNS := make(map[string]string)
+	for k, v := range nsr.FissionResourceNS {
+		fissionResourceNS[k] = v
+	}
+
+	if options.functionNS && nsr.FunctionNamespace != "" {
+		fissionResourceNS[nsr.FunctionNamespace] = nsr.FunctionNamespace
+	}
+	if options.builderNS && nsr.BuiderNamespace != "" {
+		fissionResourceNS[nsr.BuiderNamespace] = nsr.BuiderNamespace
+	}
+	if options.defaultNs && nsr.DefaultNamespace != "" {
+		fissionResourceNS[nsr.DefaultNamespace] = nsr.DefaultNamespace
+	}
+	nsr.Logger.Debug("fission resource namespaces", zap.Any("namespaces", listNamespaces(fissionResourceNS)))
+	return fissionResourceNS
+}
+
+func getNamespaces() map[string]string {
+	envValue := os.Getenv(ENV_ADDITIONAL_NAMESPACE)
+	if len(envValue) == 0 {
+		return map[string]string{
+			metav1.NamespaceDefault: metav1.NamespaceDefault,
+		}
+	}
+
+	lstNamespaces := strings.Split(envValue, ",")
+	namespaces := make(map[string]string, len(lstNamespaces))
+	for _, namespace := range lstNamespaces {
+		// skip empty entries caused by a trailing comma, e.g. ns1,ns2,
+		if namespace != "" {
+			namespaces[namespace] = namespace
+		}
 	}
+	return namespaces
 }
 
 func (nsr *NamespaceResolver) GetBuilderNS(namespace string) string {
diff --git a/pkg/utils/namespace_test.go b/pkg/utils/namespace_test.go
index ad150dd549..61b55984cf 100644
--- a/pkg/utils/namespace_test.go
+++ b/pkg/utils/namespace_test.go
@@ -1,6 +1,8 @@
 package utils
 
-import "testing"
+import (
+	"testing"
+)
 
 func TestNamespaceResolver(t *testing.T) {
 	t.Run("GetBuilderNS", func(t *testing.T) {
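Note: the option/With* declarations above are a functional-options pattern: FissionNSWithOptions copies FissionResourceNS and merges in only the reserved namespaces the caller opts into, so existing callers keep the narrow set by default. A usage sketch (results depend on the env vars read in init; the wrapper function is hypothetical):

    // Sketch: opting into extra namespaces, as GetReaperNamespace does.
    func example() {
        nsResolver := utils.DefaultNSResolver()
        base := nsResolver.FissionResourceNS // from FISSION_RESOURCE_NAMESPACES only
        widened := nsResolver.FissionNSWithOptions(
            utils.WithBuilderNs(), utils.WithFunctionNs(), utils.WithDefaultNs())
        _, _ = base, widened // widened also contains the reserved namespaces
    }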