diff --git a/cmd/preupgradechecks/checks.go b/cmd/preupgradechecks/checks.go index 36756dc5ac..ee76d68cda 100644 --- a/cmd/preupgradechecks/checks.go +++ b/cmd/preupgradechecks/checks.go @@ -33,6 +33,7 @@ import ( fv1 "github.com/fission/fission/pkg/apis/core/v1" "github.com/fission/fission/pkg/crd" "github.com/fission/fission/pkg/generated/clientset/versioned" + "github.com/fission/fission/pkg/utils" ) type ( @@ -124,40 +125,41 @@ func (client *PreUpgradeTaskClient) VerifyFunctionSpecReferences(ctx context.Con var err error var fList *fv1.FunctionList + errs := &multierror.Error{} - for i := 0; i < maxRetries; i++ { - fList, err = client.fissionClient.CoreV1().Functions(metav1.NamespaceAll).List(ctx, metav1.ListOptions{}) - if err == nil { - break + for _, namespace := range utils.GetNamespaces() { + for i := 0; i < maxRetries; i++ { + fList, err = client.fissionClient.CoreV1().Functions(namespace).List(ctx, metav1.ListOptions{}) + if err == nil { + break + } } - } - - if err != nil { - client.logger.Fatal("error listing functions after max retries", - zap.Error(err), - zap.Int("max_retries", maxRetries)) - } - errs := &multierror.Error{} + if err != nil { + client.logger.Fatal("error listing functions after max retries", + zap.Error(err), + zap.Int("max_retries", maxRetries)) + } - // check that all secrets, configmaps, packages are in the same namespace - for _, fn := range fList.Items { - secrets := fn.Spec.Secrets - for _, secret := range secrets { - if secret.Namespace != "" && secret.Namespace != fn.ObjectMeta.Namespace { - errs = multierror.Append(errs, fmt.Errorf("function : %s.%s cannot reference a secret : %s in namespace : %s", fn.ObjectMeta.Name, fn.ObjectMeta.Namespace, secret.Name, secret.Namespace)) + // check that all secrets, configmaps, packages are in the same namespace + for _, fn := range fList.Items { + secrets := fn.Spec.Secrets + for _, secret := range secrets { + if secret.Namespace != "" && secret.Namespace != fn.ObjectMeta.Namespace { + 
errs = multierror.Append(errs, fmt.Errorf("function : %s.%s cannot reference a secret : %s in namespace : %s", fn.ObjectMeta.Name, fn.ObjectMeta.Namespace, secret.Name, secret.Namespace)) + } } - } - configmaps := fn.Spec.ConfigMaps - for _, configmap := range configmaps { - if configmap.Namespace != "" && configmap.Namespace != fn.ObjectMeta.Namespace { - errs = multierror.Append(errs, fmt.Errorf("function : %s.%s cannot reference a configmap : %s in namespace : %s", fn.ObjectMeta.Name, fn.ObjectMeta.Namespace, configmap.Name, configmap.Namespace)) + configmaps := fn.Spec.ConfigMaps + for _, configmap := range configmaps { + if configmap.Namespace != "" && configmap.Namespace != fn.ObjectMeta.Namespace { + errs = multierror.Append(errs, fmt.Errorf("function : %s.%s cannot reference a configmap : %s in namespace : %s", fn.ObjectMeta.Name, fn.ObjectMeta.Namespace, configmap.Name, configmap.Namespace)) + } } - } - if fn.Spec.Package.PackageRef.Namespace != "" && fn.Spec.Package.PackageRef.Namespace != fn.ObjectMeta.Namespace { - errs = multierror.Append(errs, fmt.Errorf("function : %s.%s cannot reference a package : %s in namespace : %s", fn.ObjectMeta.Name, fn.ObjectMeta.Namespace, fn.Spec.Package.PackageRef.Name, fn.Spec.Package.PackageRef.Namespace)) + if fn.Spec.Package.PackageRef.Namespace != "" && fn.Spec.Package.PackageRef.Namespace != fn.ObjectMeta.Namespace { + errs = multierror.Append(errs, fmt.Errorf("function : %s.%s cannot reference a package : %s in namespace : %s", fn.ObjectMeta.Name, fn.ObjectMeta.Namespace, fn.Spec.Package.PackageRef.Name, fn.Spec.Package.PackageRef.Namespace)) + } } } diff --git a/pkg/buildermgr/pkgwatcher.go b/pkg/buildermgr/pkgwatcher.go index 732c5ac4ee..8d4d0e5a65 100644 --- a/pkg/buildermgr/pkgwatcher.go +++ b/pkg/buildermgr/pkgwatcher.go @@ -200,7 +200,7 @@ func (pkgw *packageWatcher) build(ctx context.Context, srcpkg *fv1.Package) { pkgw.logger.Info("starting package info update", zap.String("package_name", 
pkg.ObjectMeta.Name)) fnList, err := pkgw.fissionClient.CoreV1(). - Functions(metav1.NamespaceAll).List(ctx, metav1.ListOptions{}) + Functions(pkg.Namespace).List(ctx, metav1.ListOptions{}) if err != nil { e := "error getting function list" pkgw.logger.Error(e, zap.Error(err)) diff --git a/pkg/executor/executortype/container/containermgr.go b/pkg/executor/executortype/container/containermgr.go index 341e3591d6..692a08ba0c 100644 --- a/pkg/executor/executortype/container/containermgr.go +++ b/pkg/executor/executortype/container/containermgr.go @@ -269,28 +269,30 @@ func (caaf *Container) RefreshFuncPods(ctx context.Context, logger *zap.Logger, // AdoptExistingResources attempts to adopt resources for functions in all namespaces. func (caaf *Container) AdoptExistingResources(ctx context.Context) { - fnList, err := caaf.fissionClient.CoreV1().Functions(metav1.NamespaceAll).List(ctx, metav1.ListOptions{}) - if err != nil { - caaf.logger.Error("error getting function list", zap.Error(err)) - return - } - wg := &sync.WaitGroup{} - for i := range fnList.Items { - fn := &fnList.Items[i] - if fn.Spec.InvokeStrategy.ExecutionStrategy.ExecutorType == fv1.ExecutorTypeContainer { - wg.Add(1) - go func() { - defer wg.Done() - - _, err = caaf.fnCreate(ctx, fn) - if err != nil { - caaf.logger.Warn("failed to adopt resources for function", zap.Error(err)) - return - } - caaf.logger.Info("adopt resources for function", zap.String("function", fn.ObjectMeta.Name)) - }() + for _, namespace := range utils.GetNamespaces() { + fnList, err := caaf.fissionClient.CoreV1().Functions(namespace).List(ctx, metav1.ListOptions{}) + if err != nil { + caaf.logger.Error("error getting function list", zap.Error(err)) + return + } + + for i := range fnList.Items { + fn := &fnList.Items[i] + if fn.Spec.InvokeStrategy.ExecutionStrategy.ExecutorType == fv1.ExecutorTypeContainer { + wg.Add(1) + go func() { + defer wg.Done() + + _, err = caaf.fnCreate(ctx, fn) + if err != nil { + caaf.logger.Warn("failed to 
adopt resources for function", zap.Error(err)) + return + } + caaf.logger.Info("adopt resources for function", zap.String("function", fn.ObjectMeta.Name)) + }() + } } } diff --git a/pkg/executor/executortype/newdeploy/newdeploymgr.go b/pkg/executor/executortype/newdeploy/newdeploymgr.go index f25036bfea..e8e5038402 100644 --- a/pkg/executor/executortype/newdeploy/newdeploymgr.go +++ b/pkg/executor/executortype/newdeploy/newdeploymgr.go @@ -290,28 +290,30 @@ func (deploy *NewDeploy) RefreshFuncPods(ctx context.Context, logger *zap.Logger // AdoptExistingResources attempts to adopt resources for functions in all namespaces. func (deploy *NewDeploy) AdoptExistingResources(ctx context.Context) { - fnList, err := deploy.fissionClient.CoreV1().Functions(metav1.NamespaceAll).List(ctx, metav1.ListOptions{}) - if err != nil { - deploy.logger.Error("error getting function list", zap.Error(err)) - return - } - wg := &sync.WaitGroup{} - for i := range fnList.Items { - fn := &fnList.Items[i] - if fn.Spec.InvokeStrategy.ExecutionStrategy.ExecutorType == fv1.ExecutorTypeNewdeploy { - wg.Add(1) - go func() { - defer wg.Done() - - _, err = deploy.fnCreate(ctx, fn) - if err != nil { - deploy.logger.Warn("failed to adopt resources for function", zap.Error(err)) - return - } - deploy.logger.Info("adopt resources for function", zap.String("function", fn.ObjectMeta.Name)) - }() + for _, namespace := range utils.GetNamespaces() { + fnList, err := deploy.fissionClient.CoreV1().Functions(namespace).List(ctx, metav1.ListOptions{}) + if err != nil { + deploy.logger.Error("error getting function list", zap.Error(err)) + return + } + + for i := range fnList.Items { + fn := &fnList.Items[i] + if fn.Spec.InvokeStrategy.ExecutionStrategy.ExecutorType == fv1.ExecutorTypeNewdeploy { + wg.Add(1) + go func() { + defer wg.Done() + + _, err = deploy.fnCreate(ctx, fn) + if err != nil { + deploy.logger.Warn("failed to adopt resources for function", zap.Error(err)) + return + } + deploy.logger.Info("adopt 
resources for function", zap.String("function", fn.ObjectMeta.Name)) + }() + } } } @@ -349,7 +351,7 @@ func (deploy *NewDeploy) CleanupOldExecutorObjects(ctx context.Context) { } func (deploy *NewDeploy) getEnvFunctions(ctx context.Context, m *metav1.ObjectMeta) []fv1.Function { - funcList, err := deploy.fissionClient.CoreV1().Functions(metav1.NamespaceAll).List(ctx, metav1.ListOptions{}) + funcList, err := deploy.fissionClient.CoreV1().Functions(m.Namespace).List(ctx, metav1.ListOptions{}) if err != nil { deploy.logger.Error("Error getting functions for env", zap.Error(err), zap.Any("environment", m)) } diff --git a/pkg/executor/executortype/poolmgr/gpm.go b/pkg/executor/executortype/poolmgr/gpm.go index fa779c86f5..901f4095eb 100644 --- a/pkg/executor/executortype/poolmgr/gpm.go +++ b/pkg/executor/executortype/poolmgr/gpm.go @@ -289,7 +289,7 @@ func (gpm *GenericPoolManager) RefreshFuncPods(ctx context.Context, logger *zap. funcLabels := gp.labelsForFunction(&f.ObjectMeta) - podList, err := gpm.kubernetesClient.CoreV1().Pods(metav1.NamespaceAll).List(ctx, metav1.ListOptions{ + podList, err := gpm.kubernetesClient.CoreV1().Pods(f.Spec.Environment.Namespace).List(ctx, metav1.ListOptions{ LabelSelector: labels.Set(funcLabels).AsSelector().String(), }) @@ -311,132 +311,136 @@ func (gpm *GenericPoolManager) RefreshFuncPods(ctx context.Context, logger *zap. 
} func (gpm *GenericPoolManager) AdoptExistingResources(ctx context.Context) { - envs, err := gpm.fissionClient.CoreV1().Environments(metav1.NamespaceAll).List(ctx, metav1.ListOptions{}) - if err != nil { - gpm.logger.Error("error getting environment list", zap.Error(err)) - return - } - - envMap := make(map[string]fv1.Environment, len(envs.Items)) + envMap := make(map[string]fv1.Environment) wg := &sync.WaitGroup{} - for i := range envs.Items { - env := envs.Items[i] - - if getEnvPoolSize(&env) > 0 { - wg.Add(1) - go func() { - defer wg.Done() - _, created, err := gpm.getPool(ctx, &env) - if err != nil { - gpm.logger.Error("adopt pool failed", zap.Error(err)) - } - if created { - gpm.logger.Info("created pool for the environment", zap.String("env", env.ObjectMeta.Name), zap.String("namespace", gpm.namespace)) - } - }() + for _, namespace := range utils.GetNamespaces() { + envs, err := gpm.fissionClient.CoreV1().Environments(namespace).List(ctx, metav1.ListOptions{}) + if err != nil { + gpm.logger.Error("error getting environment list", zap.Error(err)) + return } - // create environment map for later use - key := fmt.Sprintf("%v/%v", env.ObjectMeta.Namespace, env.ObjectMeta.Name) - envMap[key] = env + for i := range envs.Items { + env := envs.Items[i] + + if getEnvPoolSize(&env) > 0 { + wg.Add(1) + go func() { + defer wg.Done() + _, created, err := gpm.getPool(ctx, &env) + if err != nil { + gpm.logger.Error("adopt pool failed", zap.Error(err)) + } + if created { + gpm.logger.Info("created pool for the environment", zap.String("env", env.ObjectMeta.Name), zap.String("namespace", gpm.namespace)) + } + }() + } + + // create environment map for later use + key := fmt.Sprintf("%v/%v", env.ObjectMeta.Namespace, env.ObjectMeta.Name) + envMap[key] = env + } } l := map[string]string{ fv1.EXECUTOR_TYPE: string(fv1.ExecutorTypePoolmgr), } - podList, err := gpm.kubernetesClient.CoreV1().Pods(metav1.NamespaceAll).List(ctx, metav1.ListOptions{ - LabelSelector: 
labels.Set(l).AsSelector().String(), - }) - - if err != nil { - gpm.logger.Error("error getting pod list", zap.Error(err)) - return - } + for _, namespace := range utils.GetNamespaces() { + podList, err := gpm.kubernetesClient.CoreV1().Pods(namespace).List(ctx, metav1.ListOptions{ + LabelSelector: labels.Set(l).AsSelector().String(), + }) - for i := range podList.Items { - pod := &podList.Items[i] - if !utils.IsReadyPod(pod) { - continue + if err != nil { + gpm.logger.Error("error getting pod list", zap.Error(err)) + return } - wg.Add(1) - go func() { - defer wg.Done() + for i := range podList.Items { + pod := &podList.Items[i] + if !utils.IsReadyPod(pod) { + continue + } - // avoid too many requests arrive Kubernetes API server at the same time. - time.Sleep(time.Duration(rand.Intn(30)) * time.Millisecond) + wg.Add(1) + go func() { + defer wg.Done() - patch := fmt.Sprintf(`{"metadata":{"annotations":{"%v":"%v"}}}`, fv1.EXECUTOR_INSTANCEID_LABEL, gpm.instanceID) - pod, err = gpm.kubernetesClient.CoreV1().Pods(pod.Namespace).Patch(ctx, pod.Name, k8sTypes.StrategicMergePatchType, []byte(patch), metav1.PatchOptions{}) - if err != nil { - // just log the error since it won't affect the function serving - gpm.logger.Warn("error patching executor instance ID of pod", zap.Error(err), - zap.String("pod", pod.Name), zap.String("ns", pod.Namespace)) - return - } + // avoid too many requests arrive Kubernetes API server at the same time. 
+ time.Sleep(time.Duration(rand.Intn(30)) * time.Millisecond) - // for unspecialized pod, we only update its annotations - if pod.Labels["managed"] == "true" { - return - } + patch := fmt.Sprintf(`{"metadata":{"annotations":{"%v":"%v"}}}`, fv1.EXECUTOR_INSTANCEID_LABEL, gpm.instanceID) + pod, err = gpm.kubernetesClient.CoreV1().Pods(pod.Namespace).Patch(ctx, pod.Name, k8sTypes.StrategicMergePatchType, []byte(patch), metav1.PatchOptions{}) + if err != nil { + // just log the error since it won't affect the function serving + gpm.logger.Warn("error patching executor instance ID of pod", zap.Error(err), + zap.String("pod", pod.Name), zap.String("ns", pod.Namespace)) + return + } - fnName, ok1 := pod.Labels[fv1.FUNCTION_NAME] - fnNS, ok2 := pod.Labels[fv1.FUNCTION_NAMESPACE] - fnUID, ok3 := pod.Labels[fv1.FUNCTION_UID] - fnRV, ok4 := pod.Annotations[fv1.FUNCTION_RESOURCE_VERSION] - envName, ok5 := pod.Labels[fv1.ENVIRONMENT_NAME] - envNS, ok6 := pod.Labels[fv1.ENVIRONMENT_NAMESPACE] - svcHost, ok7 := pod.Annotations[fv1.ANNOTATION_SVC_HOST] - env, ok8 := envMap[fmt.Sprintf("%v/%v", envNS, envName)] - - if !(ok1 && ok2 && ok3 && ok4 && ok5 && ok6 && ok7 && ok8) { - gpm.logger.Warn("failed to adopt pod for function due to lack of necessary information", - zap.String("pod", pod.Name), zap.Any("labels", pod.Labels), zap.Any("annotations", pod.Annotations), - zap.String("env", env.ObjectMeta.Name)) - return - } + // for unspecialized pod, we only update its annotations + if pod.Labels["managed"] == "true" { + return + } - fsvc := fscache.FuncSvc{ - Name: pod.Name, - Function: &metav1.ObjectMeta{ - Name: fnName, - Namespace: fnNS, - UID: k8sTypes.UID(fnUID), - ResourceVersion: fnRV, - }, - Environment: &env, - Address: svcHost, - KubernetesObjects: []apiv1.ObjectReference{ - { - Kind: "pod", - Name: pod.Name, - APIVersion: pod.TypeMeta.APIVersion, - Namespace: pod.ObjectMeta.Namespace, - ResourceVersion: pod.ObjectMeta.ResourceVersion, - UID: pod.ObjectMeta.UID, - }, - }, - 
Executor: fv1.ExecutorTypePoolmgr, - Ctime: time.Now(), - Atime: time.Now(), - } + fnName, ok1 := pod.Labels[fv1.FUNCTION_NAME] + fnNS, ok2 := pod.Labels[fv1.FUNCTION_NAMESPACE] + fnUID, ok3 := pod.Labels[fv1.FUNCTION_UID] + fnRV, ok4 := pod.Annotations[fv1.FUNCTION_RESOURCE_VERSION] + envName, ok5 := pod.Labels[fv1.ENVIRONMENT_NAME] + envNS, ok6 := pod.Labels[fv1.ENVIRONMENT_NAMESPACE] + svcHost, ok7 := pod.Annotations[fv1.ANNOTATION_SVC_HOST] + env, ok8 := envMap[fmt.Sprintf("%v/%v", envNS, envName)] + + if !(ok1 && ok2 && ok3 && ok4 && ok5 && ok6 && ok7 && ok8) { + gpm.logger.Warn("failed to adopt pod for function due to lack of necessary information", + zap.String("pod", pod.Name), zap.Any("labels", pod.Labels), zap.Any("annotations", pod.Annotations), + zap.String("env", env.ObjectMeta.Name)) + return + } - _, err = gpm.fsCache.Add(fsvc) - if err != nil { - // If fsvc already exists we just skip the duplicate one. And let reaper to recycle the duplicate pods. - // This is for the case that there are multiple function pods for the same function due to unknown reason. - if !fscache.IsNameExistError(err) { - gpm.logger.Warn("failed to adopt pod for function", zap.Error(err), zap.String("pod", pod.Name)) + fsvc := fscache.FuncSvc{ + Name: pod.Name, + Function: &metav1.ObjectMeta{ + Name: fnName, + Namespace: fnNS, + UID: k8sTypes.UID(fnUID), + ResourceVersion: fnRV, + }, + Environment: &env, + Address: svcHost, + KubernetesObjects: []apiv1.ObjectReference{ + { + Kind: "pod", + Name: pod.Name, + APIVersion: pod.TypeMeta.APIVersion, + Namespace: pod.ObjectMeta.Namespace, + ResourceVersion: pod.ObjectMeta.ResourceVersion, + UID: pod.ObjectMeta.UID, + }, + }, + Executor: fv1.ExecutorTypePoolmgr, + Ctime: time.Now(), + Atime: time.Now(), } - return - } + _, err = gpm.fsCache.Add(fsvc) + if err != nil { + // If fsvc already exists we just skip the duplicate one. And let reaper to recycle the duplicate pods. 
+ // This is for the case that there are multiple function pods for the same function due to unknown reason. + if !fscache.IsNameExistError(err) { + gpm.logger.Warn("failed to adopt pod for function", zap.Error(err), zap.String("pod", pod.Name)) + } - gpm.logger.Info("adopt function pod", - zap.String("pod", pod.Name), zap.Any("labels", pod.Labels), zap.Any("annotations", pod.Annotations)) - }() + return + } + + gpm.logger.Info("adopt function pod", + zap.String("pod", pod.Name), zap.Any("labels", pod.Labels), zap.Any("annotations", pod.Annotations)) + }() + } } wg.Wait() diff --git a/pkg/executor/executortype/poolmgr/poolpodcontroller.go b/pkg/executor/executortype/poolmgr/poolpodcontroller.go index 9e6fd29a4d..d0a28fa878 100644 --- a/pkg/executor/executortype/poolmgr/poolpodcontroller.go +++ b/pkg/executor/executortype/poolmgr/poolpodcontroller.go @@ -263,7 +263,7 @@ func (p *PoolPodController) workerRun(ctx context.Context, name string, processF } func (p *PoolPodController) getEnvLister(namespace string) (flisterv1.EnvironmentLister, error) { - lister, ok := p.envLister[metav1.NamespaceAll] + lister, ok := p.envLister[namespace] if ok { return lister, nil } diff --git a/pkg/executor/reaper/reaper.go b/pkg/executor/reaper/reaper.go index 2c028e0cb5..bdc7669514 100644 --- a/pkg/executor/reaper/reaper.go +++ b/pkg/executor/reaper/reaper.go @@ -93,6 +93,7 @@ func CleanupDeployments(ctx context.Context, logger *zap.Logger, client kubernet // ignore err } } + return nil } @@ -120,6 +121,7 @@ func CleanupPods(ctx context.Context, logger *zap.Logger, client kubernetes.Inte // ignore err } } + return nil } @@ -147,6 +149,7 @@ func CleanupServices(ctx context.Context, logger *zap.Logger, client kubernetes. 
// ignore err } } + return nil } @@ -175,6 +178,7 @@ func CleanupHpa(ctx context.Context, logger *zap.Logger, client kubernetes.Inter // ignore err } } + return nil } @@ -186,6 +190,7 @@ func CleanupRoleBindings(ctx context.Context, logger *zap.Logger, client kuberne time.Sleep(cleanupRoleBindingInterval) logger.Debug("starting cleanupRoleBindings cycle") + // get all rolebindings ( just to be efficient, one call to kubernetes ) rbList, err := client.RbacV1().RoleBindings(metav1.NamespaceAll).List(ctx, metav1.ListOptions{}) if err != nil { diff --git a/pkg/router/functionReferenceResolver.go b/pkg/router/functionReferenceResolver.go index 09c71f9ba9..febb9b7720 100644 --- a/pkg/router/functionReferenceResolver.go +++ b/pkg/router/functionReferenceResolver.go @@ -122,9 +122,6 @@ func (frr *functionReferenceResolver) resolve(trigger fv1.HTTPTrigger) (*resolve } func (frr *functionReferenceResolver) getInformerByNamespace(namespace string) (k8sCache.SharedIndexInformer, error) { - if informer, ok := frr.funcInformer[metav1.NamespaceAll]; ok { - return informer, nil - } if informer, ok := frr.funcInformer[namespace]; ok { return informer, nil } diff --git a/pkg/storagesvc/archivePruner.go b/pkg/storagesvc/archivePruner.go index 275ccac44a..a08ebdd724 100644 --- a/pkg/storagesvc/archivePruner.go +++ b/pkg/storagesvc/archivePruner.go @@ -25,6 +25,7 @@ import ( "github.com/fission/fission/pkg/crd" "github.com/fission/fission/pkg/generated/clientset/versioned" + "github.com/fission/fission/pkg/utils" ) type ArchivePruner struct { @@ -82,33 +83,35 @@ func (pruner *ArchivePruner) getOrphanArchives(ctx context.Context) { var archiveID string // get all pkgs from kubernetes - pkgList, err := pruner.crdClient.CoreV1().Packages(metav1.NamespaceAll).List(ctx, metav1.ListOptions{}) - if err != nil { - pruner.logger.Error("error getting package list from kubernetes", zap.Error(err)) - return - } + for _, namespace := range utils.GetNamespaces() { + pkgList, err := 
pruner.crdClient.CoreV1().Packages(namespace).List(ctx, metav1.ListOptions{}) + if err != nil { + pruner.logger.Error("error getting package list from kubernetes", zap.Error(err)) + return + } - // extract archives referenced by these pkgs - for _, pkg := range pkgList.Items { - if pkg.Spec.Deployment.URL != "" { - archiveID, err = getQueryParamValue(pkg.Spec.Deployment.URL, "id") - if err != nil { - pruner.logger.Error("error extracting value of archiveID from deployment url", - zap.Error(err), - zap.String("url", pkg.Spec.Deployment.URL)) - return + // extract archives referenced by these pkgs + for _, pkg := range pkgList.Items { + if pkg.Spec.Deployment.URL != "" { + archiveID, err = getQueryParamValue(pkg.Spec.Deployment.URL, "id") + if err != nil { + pruner.logger.Error("error extracting value of archiveID from deployment url", + zap.Error(err), + zap.String("url", pkg.Spec.Deployment.URL)) + return + } + archivesRefByPkgs = append(archivesRefByPkgs, archiveID) } - archivesRefByPkgs = append(archivesRefByPkgs, archiveID) - } - if pkg.Spec.Source.URL != "" { - archiveID, err = getQueryParamValue(pkg.Spec.Source.URL, "id") - if err != nil { - pruner.logger.Error("error extracting value of archiveID from source url", - zap.Error(err), - zap.String("url", pkg.Spec.Source.URL)) - return + if pkg.Spec.Source.URL != "" { + archiveID, err = getQueryParamValue(pkg.Spec.Source.URL, "id") + if err != nil { + pruner.logger.Error("error extracting value of archiveID from source url", + zap.Error(err), + zap.String("url", pkg.Spec.Source.URL)) + return + } + archivesRefByPkgs = append(archivesRefByPkgs, archiveID) } - archivesRefByPkgs = append(archivesRefByPkgs, archiveID) } }