diff --git a/.github/workflows/integration-tests-frontend-multicluster-multi-primary.yml b/.github/workflows/integration-tests-frontend-multicluster-multi-primary.yml new file mode 100644 index 0000000000..fef72dcdf0 --- /dev/null +++ b/.github/workflows/integration-tests-frontend-multicluster-multi-primary.yml @@ -0,0 +1,83 @@ +name: Integration Tests Frontend + +on: + workflow_call: + inputs: + target_branch: + required: true + type: string + build_branch: + required: true + type: string + istio_version: + required: false + type: string + default: "" + +env: + TARGET_BRANCH: ${{ inputs.target_branch }} + +jobs: + integration_tests_frontend_multicluster_multi_primary: + name: Cypress integration tests + runs-on: ubuntu-20.04 + env: + # Copied from: https://github.com/bahmutov/cypress-gh-action-split-install/blob/ca3916d4e7240ebdc337825d2d78eb354855464b/.github/workflows/tests.yml#L7-L11 + # prevents extra Cypress installation progress messages + CI: 1 + # avoid warnings like "tput: No value for $TERM and no -T specified" + TERM: xterm + steps: + - name: Check out code + uses: actions/checkout@v4 + with: + ref: ${{ inputs.build_branch }} + + - name: Setup node + uses: actions/setup-node@v3 + with: + node-version: "18" + cache: yarn + cache-dependency-path: frontend/yarn.lock + + - name: Download go binary + uses: actions/download-artifact@v3 + with: + name: kiali + path: ~/go/bin/ + + - name: Ensure kiali binary is executable + run: chmod +x ~/go/bin/kiali + + - name: Download frontend build + uses: actions/download-artifact@v3 + with: + name: build + path: frontend/build + + # Need to install frontend dependencies to run cypress tests. + - name: Install frontend dependencies + working-directory: ./frontend + run: yarn install --frozen-lockfile + + - name: Run frontend multi-cluster integration tests + run: hack/run-integration-tests.sh --test-suite frontend-multi-primary $(if [ -n "${{ inputs.istio_version }}" ]; then echo "--istio-version ${{ inputs.istio_version }}"; fi) + + - name: Get debug info when integration tests fail + if: failure() + run: | + kubectl --context kind-east logs -l app.kubernetes.io/name=kiali --tail=-1 --all-containers -n istio-system + kubectl --context kind-east describe nodes + kubectl --context kind-east get pods -l app.kubernetes.io/name=kiali -n istio-system -o yaml + kubectl --context kind-east describe pods -n metallb-system + kubectl --context kind-east logs -p deployments/controller -n metallb-system + kubectl --context kind-east logs -p ds/speaker -n metallb-system + kubectl --context kind-east logs deployments/controller -n metallb-system + kubectl --context kind-east logs ds/speaker -n metallb-system + + - name: Upload cypress screenshots when tests fail + uses: actions/upload-artifact@v3 + if: failure() + with: + name: cypress-screenshots + path: frontend/cypress/screenshots diff --git a/.github/workflows/integration-tests-frontend-multicluster.yml b/.github/workflows/integration-tests-frontend-multicluster-primary-remote.yml similarity index 95% rename from .github/workflows/integration-tests-frontend-multicluster.yml rename to .github/workflows/integration-tests-frontend-multicluster-primary-remote.yml index 94b9c2e10a..a66cbfb7da 100644 --- a/.github/workflows/integration-tests-frontend-multicluster.yml +++ b/.github/workflows/integration-tests-frontend-multicluster-primary-remote.yml @@ -61,7 +61,7 @@ jobs: run: yarn install --frozen-lockfile - name: Run frontend multi-cluster integration tests - run: hack/run-integration-tests.sh --test-suite 
frontend-multi-cluster $(if [ -n "${{ inputs.istio_version }}" ]; then echo "--istio-version ${{ inputs.istio_version }}"; fi) + run: hack/run-integration-tests.sh --test-suite frontend-primary-remote $(if [ -n "${{ inputs.istio_version }}" ]; then echo "--istio-version ${{ inputs.istio_version }}"; fi) - name: Get debug info when integration tests fail if: failure() diff --git a/.github/workflows/kiali-ci.yml b/.github/workflows/kiali-ci.yml index 92c71d8c1c..3a60aef2f8 100644 --- a/.github/workflows/kiali-ci.yml +++ b/.github/workflows/kiali-ci.yml @@ -78,9 +78,17 @@ jobs: target_branch: ${{ needs.initialize.outputs.target-branch }} build_branch: ${{ needs.initialize.outputs.build-branch }} - integration_tests_frontend_multicluster: - name: Run frontend multicluster integration tests - uses: ./.github/workflows/integration-tests-frontend-multicluster.yml + integration_tests_frontend_multicluster_primary_remote: + name: Run frontend multicluster primary-remote integration tests + uses: ./.github/workflows/integration-tests-frontend-multicluster-primary-remote.yml + needs: [initialize, build_backend, build_frontend] + with: + target_branch: ${{ needs.initialize.outputs.target-branch }} + build_branch: ${{ needs.initialize.outputs.build-branch }} + + integration_tests_frontend_multicluster_multi_primary: + name: Run frontend multicluster multi-primary integration tests + uses: ./.github/workflows/integration-tests-frontend-multicluster-multi-primary.yml needs: [initialize, build_backend, build_frontend] with: target_branch: ${{ needs.initialize.outputs.target-branch }} diff --git a/business/apps_test.go b/business/apps_test.go index 2263ce8020..41c3e3ac6d 100644 --- a/business/apps_test.go +++ b/business/apps_test.go @@ -62,6 +62,7 @@ func TestGetAppListFromDeployments(t *testing.T) { func TestGetAppFromDeployments(t *testing.T) { assert := assert.New(t) + require := require.New(t) conf := config.NewConfig() conf.ExternalServices.CustomDashboards.Enabled = false @@ -91,7 +92,7 @@ func TestGetAppFromDeployments(t *testing.T) { criteria := AppCriteria{Namespace: "Namespace", AppName: "httpbin", Cluster: conf.KubernetesConfig.ClusterName} appDetails, appDetailsErr := svc.GetAppDetails(context.TODO(), criteria) - assert.NoError(appDetailsErr) + require.NoError(appDetailsErr) assert.Equal("Namespace", appDetails.Namespace.Name) assert.Equal("httpbin", appDetails.Name) @@ -137,8 +138,12 @@ func TestGetAppListFromReplicaSets(t *testing.T) { func TestGetAppFromReplicaSets(t *testing.T) { assert := assert.New(t) + + // Disabling CustomDashboards on Workload details testing + // otherwise this adds 10s to the test due to an http timeout. conf := config.NewConfig() - config.Set(conf) + conf.ExternalServices.CustomDashboards.Enabled = false + kubernetes.SetConfig(t, *conf) // Setup mocks objects := []runtime.Object{ diff --git a/business/controlplane_monitor.go b/business/controlplane_monitor.go new file mode 100644 index 0000000000..32543f93f9 --- /dev/null +++ b/business/controlplane_monitor.go @@ -0,0 +1,502 @@ +package business + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "strings" + "sync" + "time" + + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/apimachinery/pkg/util/wait" + + "github.com/kiali/kiali/config" + "github.com/kiali/kiali/kubernetes" + "github.com/kiali/kiali/kubernetes/cache" + "github.com/kiali/kiali/log" +) + +// ControlPlaneMonitor is an interface for the control plane monitor. 
+// This is an interface solely for testing purposes since we need to mock +// out portforwarding and polling. +type ControlPlaneMonitor interface { + PollIstiodForProxyStatus(ctx context.Context) + CanConnectToIstiod(client kubernetes.ClientInterface) (kubernetes.IstioComponentStatus, error) + CanConnectToIstiodForRevision(client kubernetes.ClientInterface, revision string) (kubernetes.IstioComponentStatus, error) + // RefreshIstioCache should update the kiali cache's istio related stores. + RefreshIstioCache(ctx context.Context) error +} + +func NewControlPlaneMonitor(cache cache.KialiCache, clientFactory kubernetes.ClientFactory, conf config.Config, meshService *MeshService) *controlPlaneMonitor { + return &controlPlaneMonitor{ + cache: cache, + clientFactory: clientFactory, + conf: conf, + pollingInterval: time.Duration(conf.ExternalServices.Istio.IstiodPollingIntervalSeconds) * time.Second, + meshService: meshService, + } +} + +// controlPlaneMonitor will periodically scrape the debug endpoint(s) of istiod. +// It scrapes a single pod from each controlplane. The list of controlplanes +// comes from the kialiCache. It will update the kialiCache with the info +// that it scrapes. +type controlPlaneMonitor struct { + // Where we store the proxy status. + cache cache.KialiCache + // Used for getting the Kiali Service Account clients for all clusters. + // Since these can change when clusters are added/removed we want to get + // these directly from the client factory rather than passing in a static list. + clientFactory kubernetes.ClientFactory + conf config.Config + meshService *MeshService + pollingInterval time.Duration +} + +// RefreshIstioCache will scrape the debug endpoint(s) of istiod a single time +// and update the kialiCache. The proxy status and the registry services are +// scraped from the debug endpoint. +func (p *controlPlaneMonitor) RefreshIstioCache(ctx context.Context) error { + log.Debug("Scraping istiod for debug info") + ctx, cancel := context.WithTimeout(ctx, p.pollingInterval) + defer cancel() + + mesh, err := p.meshService.GetMesh(ctx) + if err != nil { + return fmt.Errorf("unable to get mesh when refreshing istio cache: %s", err) + } + + // Get the list of controlplanes we are polling. + revisionsPerCluster := map[string][]string{} + for _, controlPlane := range mesh.ControlPlanes { + clusterName := controlPlane.Cluster.Name + revisionsPerCluster[clusterName] = append(revisionsPerCluster[clusterName], controlPlane.Revision) + } + + // Proxy status endpoint has unique results per controlplane whereas services/config are duplicated across + // all controlplanes for that cluster so we'll get the proxy status per controlplane e.g. from both istiod-rev-1 + // and istiod-rev-2 but the services will only be gotten from one of the istiods. + var proxyStatus []*kubernetes.ProxyStatus + registryStatus := make(map[string]*kubernetes.RegistryStatus) + for cluster, revisions := range revisionsPerCluster { + client := p.clientFactory.GetSAClient(cluster) + if client == nil { + log.Errorf("client for cluster [%s] does not exist", cluster) + // Even if one cluster is down we're going to continue to try and get results for the rest. + continue + } + + // Retry roughly once. Context set to timeout after p.interval should cancel before any subsequent retries. 
+ interval := p.pollingInterval / 2 + + for _, revision := range revisions { + pstatus, err := p.getProxyStatusWithRetry(ctx, interval, client, revision) + if err != nil { + log.Warningf("Unable to get proxy status from istiod for revision: [%s] and cluster: [%s]. Proxy status may be stale: %s", revision, client.ClusterInfo().Name, err) + continue + } + proxyStatus = append(proxyStatus, pstatus...) + } + + // Services can just be done once per cluster since these are shared across revisions + // Whereas the proxy status is per revision. + if len(revisions) > 0 { + // Since it doesn't matter what revision we choose, just choose the first one. + revision := revisions[0] + status := &kubernetes.RegistryStatus{} + services, err := p.getServicesWithRetry(ctx, interval, client, revision) + if err != nil { + log.Warningf("Unable to get registry services from istiod for revision: [%s] and cluster: [%s]. Registry services may be stale: %s", revision, client.ClusterInfo().Name, err) + continue + } + status.Services = services + registryStatus[cluster] = status + } + } + + p.cache.SetRegistryStatus(registryStatus) + p.cache.SetPodProxyStatus(proxyStatus) + + return nil +} + +func (p *controlPlaneMonitor) PollIstiodForProxyStatus(ctx context.Context) { + log.Debugf("Starting polling istiod(s) every %d seconds for proxy status", p.conf.ExternalServices.Istio.IstiodPollingIntervalSeconds) + + // Prime the pump once by calling refresh immediately here. Any errors are just logged + // because they could be transient and we'll try again on the next interval. + if err := p.RefreshIstioCache(ctx); err != nil { + log.Errorf("Unable to refresh istio cache: %s", err) + } + + go func() { + for { + select { + case <-ctx.Done(): + log.Debug("Stopping polling for istiod(s) proxy status") + return + case <-time.After(p.pollingInterval): + if err := p.RefreshIstioCache(ctx); err != nil { + log.Errorf("Unable to refresh istio cache: %s", err) + } + } + } + }() +} + +func (p *controlPlaneMonitor) getProxyStatusWithRetry(ctx context.Context, interval time.Duration, client kubernetes.ClientInterface, revision string) ([]*kubernetes.ProxyStatus, error) { + var ( + proxyStatus []*kubernetes.ProxyStatus + err error + ) + retryErr := wait.PollUntilContextCancel(ctx, interval, true, func(ctx context.Context) (bool, error) { + log.Tracef("Getting proxy status from istiod in cluster [%s] for revision [%s]", client.ClusterInfo().Name, revision) + var err error + proxyStatus, err = p.getProxyStatus(client, revision) + if err != nil { + return false, nil + } + + return true, nil + }) + if retryErr != nil { + log.Warningf("Error getting proxy status from istiod. Proxy status may be stale. Err: %v", err) + return nil, err + } + + return proxyStatus, nil +} + +func (p *controlPlaneMonitor) getServicesWithRetry(ctx context.Context, interval time.Duration, client kubernetes.ClientInterface, revision string) ([]*kubernetes.RegistryService, error) { + var ( + registryServices []*kubernetes.RegistryService + err error + ) + retryErr := wait.PollUntilContextCancel(ctx, interval, true, func(ctx context.Context) (bool, error) { + log.Tracef("Getting services from istiod in cluster [%s] for revision [%s]", client.ClusterInfo().Name, revision) + var err error + registryServices, err = p.getRegistryServices(client, revision) + if err != nil { + return false, nil + } + + return true, nil + }) + if retryErr != nil { + log.Warningf("Error getting proxy status from istiod. Proxy status may be stale. 
Err: %v", err) + return nil, err + } + + return registryServices, nil +} + +func joinURL(base, path string) string { + base = strings.TrimSuffix(base, "/") + path = strings.TrimPrefix(path, "/") + return base + "/" + path +} + +func getRequest(url string) ([]byte, error) { + ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) + defer cancel() + + req, err := http.NewRequestWithContext(ctx, http.MethodGet, url, nil) + if err != nil { + return nil, err + } + + resp, err := http.DefaultClient.Do(req) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + body, err := io.ReadAll(resp.Body) + if err != nil { + return nil, fmt.Errorf("unable to read response body when getting config from remote istiod. Err: %s", err) + } + + if resp.StatusCode != http.StatusOK { + return nil, fmt.Errorf("bad response when getting config from remote istiod. Status: %s. Body: %s", resp.Status, body) + } + + return body, err +} + +func (p *controlPlaneMonitor) getIstiodDebugStatus(client kubernetes.ClientInterface, revision string, debugPath string) (map[string][]byte, error) { + // Check if the kube-api has proxy access to pods in the istio-system + // https://github.com/kiali/kiali/issues/3494#issuecomment-772486224 + status, err := p.CanConnectToIstiodForRevision(client, revision) + if err != nil { + return nil, fmt.Errorf("unable to connect to Istiod pods on cluster [%s] for revision [%s]: %s", client.ClusterInfo().Name, revision, err.Error()) + } + + istiodReachable := false + for _, istiodStatus := range status { + if istiodStatus.Status != kubernetes.ComponentUnreachable { + istiodReachable = true + break + } + } + if !istiodReachable { + return nil, fmt.Errorf("unable to proxy Istiod pods. " + + "Make sure your Kubernetes API server has access to the Istio control plane through 8080 port") + } + + var healthyIstiods kubernetes.IstioComponentStatus + for _, istiod := range status { + if istiod.Status == kubernetes.ComponentHealthy { + healthyIstiods = append(healthyIstiods, istiod) + } + } + + wg := sync.WaitGroup{} + wg.Add(len(healthyIstiods)) + errChan := make(chan error, len(healthyIstiods)) + syncChan := make(chan map[string][]byte, len(healthyIstiods)) + + result := map[string][]byte{} + for _, istiod := range healthyIstiods { + go func(name, namespace string) { + defer wg.Done() + + // The 15014 port on Istiod is open for control plane monitoring. + // Here's the Istio doc page about the port usage by istio: + // https://istio.io/latest/docs/ops/deployment/requirements/#ports-used-by-istio + res, err := client.ForwardGetRequest(namespace, name, p.conf.ExternalServices.Istio.IstiodPodMonitoringPort, debugPath) + if err != nil { + errChan <- fmt.Errorf("%s: %s", name, err.Error()) + } else { + syncChan <- map[string][]byte{name: res} + } + }(istiod.Name, istiod.Namespace) + } + + wg.Wait() + close(errChan) + close(syncChan) + + errs := "" + for err := range errChan { + if errs != "" { + errs = errs + "; " + } + errs = errs + err.Error() + } + errs = "Error fetching the proxy-status in the following pods: " + errs + + for status := range syncChan { + for pilot, sync := range status { + result[pilot] = sync + } + } + + if len(result) > 0 { + return result, nil + } else { + return nil, errors.New(errs) + } +} + +// CanConnectToIstiod checks if Kiali can reach the istiod pod(s) via port +// fowarding through the k8s api server or via http if the registry is +// configured with a remote url. An error does not indicate that istiod +// cannot be reached. 
The kubernetes.IstioComponentStatus must be checked. +func (p *controlPlaneMonitor) CanConnectToIstiodForRevision(client kubernetes.ClientInterface, revision string) (kubernetes.IstioComponentStatus, error) { + if p.conf.ExternalServices.Istio.Registry != nil { + istiodURL := p.conf.ExternalServices.Istio.Registry.IstiodURL + // Being able to hit /debug doesn't necessarily mean we are authorized to hit the others. + url := joinURL(istiodURL, "/debug") + if _, err := getRequest(url); err != nil { + log.Warningf("Kiali can't connect to remote Istiod: %s", err) + return kubernetes.IstioComponentStatus{{Name: istiodURL, Status: kubernetes.ComponentUnreachable, IsCore: true}}, nil + } + return kubernetes.IstioComponentStatus{{Name: istiodURL, Status: kubernetes.ComponentHealthy, IsCore: true}}, nil + } + + kubeCache, err := p.cache.GetKubeCache(client.ClusterInfo().Name) + if err != nil { + return nil, err + } + + podLabels := map[string]string{ + "app": "istiod", + IstioRevisionLabel: revision, + } + + istiods, err := kubeCache.GetPods(p.conf.IstioNamespace, labels.Set(podLabels).String()) + if err != nil { + return nil, err + } + + healthyIstiods := make([]*corev1.Pod, 0, len(istiods)) + for i, istiod := range istiods { + if istiod.Status.Phase == corev1.PodRunning { + healthyIstiods = append(healthyIstiods, &istiods[i]) + } + } + + wg := sync.WaitGroup{} + wg.Add(len(healthyIstiods)) + syncChan := make(chan kubernetes.ComponentStatus, len(healthyIstiods)) + + for _, istiod := range healthyIstiods { + go func(name, namespace string) { + defer wg.Done() + var status kubernetes.ComponentStatus + // The 8080 port is not accessible from outside of the pod. However, it is used for kubernetes to do the live probes. + // Using the proxy method to make sure that K8s API has access to the Istio Control Plane namespace. + // By proxying one Istiod, we ensure that the following connection is allowed: + // Kiali -> K8s API (proxy) -> istiod + // This scenario is not obvious for private clusters (like GKE private cluster) + _, err := client.ForwardGetRequest(namespace, name, 8080, "/ready") + if err != nil { + log.Warningf("Unable to get ready status of istiod: %s/%s. Err: %s", namespace, name, err) + status = kubernetes.ComponentStatus{ + Name: name, + Namespace: namespace, + Status: kubernetes.ComponentUnreachable, + IsCore: true, + } + } else { + status = kubernetes.ComponentStatus{ + Name: name, + Namespace: namespace, + Status: kubernetes.ComponentHealthy, + IsCore: true, + } + } + syncChan <- status + }(istiod.Name, istiod.Namespace) + } + + wg.Wait() + close(syncChan) + ics := kubernetes.IstioComponentStatus{} + for componentStatus := range syncChan { + ics.Merge(kubernetes.IstioComponentStatus{componentStatus}) + } + + return ics, nil +} + +func (p *controlPlaneMonitor) CanConnectToIstiod(client kubernetes.ClientInterface) (kubernetes.IstioComponentStatus, error) { + if p.conf.ExternalServices.Istio.Registry != nil { + istiodURL := p.conf.ExternalServices.Istio.Registry.IstiodURL + // Being able to hit /debug doesn't necessarily mean we are authorized to hit the others. 
+ url := joinURL(istiodURL, "/debug") + if _, err := getRequest(url); err != nil { + log.Warningf("Kiali can't connect to remote Istiod: %s", err) + return kubernetes.IstioComponentStatus{{Name: istiodURL, Status: kubernetes.ComponentUnreachable, IsCore: true}}, nil + } + return kubernetes.IstioComponentStatus{{Name: istiodURL, Status: kubernetes.ComponentHealthy, IsCore: true}}, nil + } + + kubeCache, err := p.cache.GetKubeCache(client.ClusterInfo().Name) + if err != nil { + return nil, err + } + + // Find the rev label for the controlplane that is set in the config. + istiod, err := kubeCache.GetDeployment(p.conf.IstioNamespace, p.conf.ExternalServices.Istio.IstiodDeploymentName) + if err != nil { + return nil, err + } + + return p.CanConnectToIstiodForRevision(client, istiod.Labels[IstioRevisionLabel]) +} + +func parseProxyStatus(statuses map[string][]byte) ([]*kubernetes.ProxyStatus, error) { + var fullStatus []*kubernetes.ProxyStatus + for pilot, status := range statuses { + var ss []*kubernetes.ProxyStatus + err := json.Unmarshal(status, &ss) + if err != nil { + return nil, err + } + for _, s := range ss { + s.Pilot = pilot + } + fullStatus = append(fullStatus, ss...) + } + return fullStatus, nil +} + +func (p *controlPlaneMonitor) getProxyStatus(client kubernetes.ClientInterface, revision string) ([]*kubernetes.ProxyStatus, error) { + const synczPath = "/debug/syncz" + var result map[string][]byte + + if externalConf := p.conf.ExternalServices.Istio.Registry; externalConf != nil { + url := joinURL(externalConf.IstiodURL, synczPath) + r, err := getRequest(url) + if err != nil { + log.Errorf("Failed to get Istiod info from remote endpoint %s error: %s", synczPath, err) + return nil, err + } + result = map[string][]byte{"remote": r} + } else { + debugStatus, err := p.getIstiodDebugStatus(client, revision, synczPath) + if err != nil { + log.Errorf("Failed to call Istiod endpoint %s error: %s", synczPath, err) + return nil, err + } + result = debugStatus + } + return parseProxyStatus(result) +} + +func (p *controlPlaneMonitor) getRegistryServices(client kubernetes.ClientInterface, revision string) ([]*kubernetes.RegistryService, error) { + const registryzPath = "/debug/registryz" + var result map[string][]byte + + if externalConf := p.conf.ExternalServices.Istio.Registry; externalConf != nil { + url := joinURL(externalConf.IstiodURL, registryzPath) + r, err := getRequest(url) + if err != nil { + log.Errorf("Failed to get Istiod info from remote endpoint %s error: %s", registryzPath, err) + return nil, err + } + result = map[string][]byte{"remote": r} + } else { + debugStatus, err := p.getIstiodDebugStatus(client, revision, registryzPath) + if err != nil { + log.Errorf("Failed to call Istiod endpoint %s error: %s", registryzPath, err) + return nil, err + } + result = debugStatus + } + return parseRegistryServices(result) +} + +func parseRegistryServices(registries map[string][]byte) ([]*kubernetes.RegistryService, error) { + var fullRegistryServices []*kubernetes.RegistryService + isRegistryLoaded := false + for pilot, registry := range registries { + // skip reading registry configs multiple times in a case of multiple istiod pods + if isRegistryLoaded { + break + } + var rr []*kubernetes.RegistryService + err := json.Unmarshal(registry, &rr) + if err != nil { + log.Errorf("Error parsing RegistryServices results: %s", err) + return nil, err + } + for _, r := range rr { + r.Pilot = pilot + } + fullRegistryServices = append(fullRegistryServices, rr...) 
+ if len(rr) > 0 { + isRegistryLoaded = true + } + } + return fullRegistryServices, nil +} + +// Interface guards +var _ ControlPlaneMonitor = &controlPlaneMonitor{} diff --git a/business/controlplane_monitor_test.go b/business/controlplane_monitor_test.go new file mode 100644 index 0000000000..eb13b0c17d --- /dev/null +++ b/business/controlplane_monitor_test.go @@ -0,0 +1,488 @@ +package business + +import ( + "context" + "fmt" + "io" + "net/http" + "net/http/httptest" + "os" + "testing" + "time" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + apps_v1 "k8s.io/api/apps/v1" + core_v1 "k8s.io/api/core/v1" + meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + "github.com/kiali/kiali/config" + "github.com/kiali/kiali/kubernetes" + "github.com/kiali/kiali/kubernetes/kubetest" +) + +func TestRegistryServices(t *testing.T) { + assert := assert.New(t) + require := require.New(t) + + registryz := "../tests/data/registry/registry-registryz.json" + bRegistryz, err := os.ReadFile(registryz) + require.NoError(err) + + rRegistry := map[string][]byte{ + "istiod1": bRegistryz, + } + + registry, err2 := parseRegistryServices(rRegistry) + require.NoError(err2) + require.NotNil(registry) + + assert.Equal(79, len(registry)) + assert.Equal("*.msn.com", registry[0].Attributes.Name) +} + +type fakeForwarder struct { + kubernetes.ClientInterface + testURL string +} + +func (f *fakeForwarder) ForwardGetRequest(namespace, podName string, destinationPort int, path string) ([]byte, error) { + resp, err := http.Get(joinURL(f.testURL, path)) + if err != nil { + return nil, err + } + + defer resp.Body.Close() + body, err := io.ReadAll(resp.Body) + if err != nil { + return nil, err + } + + return body, nil +} + +func istiodTestServer(t *testing.T) *httptest.Server { + t.Helper() + testServer := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + var file string + switch r.URL.Path { + case "/debug/registryz": + file = "../tests/data/registry/registry-registryz.json" + case "/debug/syncz": + file = "../tests/data/registry/registry-syncz.json" + case "/debug": + w.WriteHeader(http.StatusOK) + return + case "/ready": + w.WriteHeader(http.StatusOK) + return + default: + w.WriteHeader(http.StatusInternalServerError) + t.Fatalf("Unexpected request path: %s", r.URL.Path) + return + } + if _, err := w.Write(kubernetes.ReadFile(t, file)); err != nil { + t.Fatalf("Error writing response: %s", err) + } + })) + t.Cleanup(testServer.Close) + return testServer +} + +func runningIstiodPod() *core_v1.Pod { + return &core_v1.Pod{ + ObjectMeta: meta_v1.ObjectMeta{ + Name: "istiod-123", + Namespace: "istio-system", + Labels: map[string]string{ + "app": "istiod", + IstioRevisionLabel: "default", + }, + }, + Status: core_v1.PodStatus{ + Phase: core_v1.PodRunning, + }, + } +} + +func TestCanConnectToIstiod(t *testing.T) { + assert := assert.New(t) + require := require.New(t) + + conf := config.NewConfig() + + testServer := istiodTestServer(t) + fakeForwarder := &fakeForwarder{ + ClientInterface: kubetest.NewFakeK8sClient( + runningIstiodPod(), + fakeIstiodDeployment(conf.KubernetesConfig.ClusterName, false), + fakeIstioConfigMap("default"), + &core_v1.Namespace{ObjectMeta: meta_v1.ObjectMeta{Name: "istio-system"}}, + ), + testURL: testServer.URL, + } + + cache := SetupBusinessLayer(t, fakeForwarder, *conf) + + cf := kubetest.NewK8SClientFactoryMock(fakeForwarder) + k8sclients := make(map[string]kubernetes.ClientInterface) + k8sclients[conf.KubernetesConfig.ClusterName] = 
fakeForwarder + mesh := NewWithBackends(k8sclients, k8sclients, nil, nil).Mesh + cpm := NewControlPlaneMonitor(cache, cf, *conf, &mesh) + + status, err := cpm.CanConnectToIstiod(fakeForwarder) + require.NoError(err) + require.Len(status, 1) + assert.Equal(kubernetes.ComponentHealthy, status[0].Status) +} + +func TestConnectToIstiodExternalURL(t *testing.T) { + assert := assert.New(t) + require := require.New(t) + + testServer := istiodTestServer(t) + + conf := config.NewConfig() + conf.ExternalServices.Istio.Registry = &config.RegistryConfig{ + IstiodURL: testServer.URL, + } + + k8s := kubetest.NewFakeK8sClient(runningIstiodPod()) + cache := SetupBusinessLayer(t, k8s, *conf) + + cf := kubetest.NewK8SClientFactoryMock(k8s) + k8sclients := make(map[string]kubernetes.ClientInterface) + k8sclients[conf.KubernetesConfig.ClusterName] = k8s + mesh := NewWithBackends(k8sclients, k8sclients, nil, nil).Mesh + cpm := NewControlPlaneMonitor(cache, cf, *conf, &mesh) + + status, err := cpm.CanConnectToIstiod(k8s) + require.NoError(err) + require.Len(status, 1) + assert.Equal(kubernetes.ComponentHealthy, status[0].Status) +} + +func TestConnectToIstiodWithRevisionExternalURL(t *testing.T) { + assert := assert.New(t) + require := require.New(t) + + testServer := istiodTestServer(t) + + conf := config.NewConfig() + conf.ExternalServices.Istio.Registry = &config.RegistryConfig{ + IstiodURL: testServer.URL, + } + + k8s := kubetest.NewFakeK8sClient(runningIstiodPod()) + cache := SetupBusinessLayer(t, k8s, *conf) + + cf := kubetest.NewK8SClientFactoryMock(k8s) + k8sclients := make(map[string]kubernetes.ClientInterface) + k8sclients[conf.KubernetesConfig.ClusterName] = k8s + mesh := NewWithBackends(k8sclients, k8sclients, nil, nil).Mesh + cpm := NewControlPlaneMonitor(cache, cf, *conf, &mesh) + + status, err := cpm.CanConnectToIstiodForRevision(k8s, "default") + require.NoError(err) + require.Len(status, 1) + assert.Equal(kubernetes.ComponentHealthy, status[0].Status) +} + +type badForwarder struct { + kubernetes.ClientInterface +} + +func (f *badForwarder) ForwardGetRequest(namespace, podName string, destinationPort int, path string) ([]byte, error) { + return nil, fmt.Errorf("unable to forward request") +} + +func TestCanConnectToUnreachableIstiod(t *testing.T) { + assert := assert.New(t) + require := require.New(t) + + conf := config.NewConfig() + + fakeForwarder := &badForwarder{ + ClientInterface: kubetest.NewFakeK8sClient( + runningIstiodPod(), + fakeIstiodDeployment(conf.KubernetesConfig.ClusterName, false), + fakeIstioConfigMap("default"), + &core_v1.Namespace{ObjectMeta: meta_v1.ObjectMeta{Name: "istio-system"}}, + ), + } + + cache := SetupBusinessLayer(t, fakeForwarder, *conf) + + cf := kubetest.NewK8SClientFactoryMock(fakeForwarder) + k8sclients := make(map[string]kubernetes.ClientInterface) + k8sclients[conf.KubernetesConfig.ClusterName] = fakeForwarder + mesh := NewWithBackends(k8sclients, k8sclients, nil, nil).Mesh + cpm := NewControlPlaneMonitor(cache, cf, *conf, &mesh) + + status, err := cpm.CanConnectToIstiod(fakeForwarder) + require.NoError(err) + require.Len(status, 1) + assert.Equal(kubernetes.ComponentUnreachable, status[0].Status) +} + +func fakeIstiodWithRevision(cluster string, revision string, manageExternal bool) *apps_v1.Deployment { + deployment := fakeIstiodDeployment(cluster, manageExternal) + deployment.Labels[IstioRevisionLabel] = revision + deployment.Name = "istiod-" + revision + return deployment +} + +func fakeIstioConfigMap(revision string) *core_v1.ConfigMap { + const 
configMapData = `accessLogFile: /dev/stdout +enableAutoMtls: true +rootNamespace: istio-system +trustDomain: cluster.local +` + configMapName := "istio" + if revision != "default" { + configMapName = "istio-" + revision + } + return &core_v1.ConfigMap{ + ObjectMeta: meta_v1.ObjectMeta{ + Name: configMapName, + Namespace: "istio-system", + }, + Data: map[string]string{"mesh": configMapData}, + } +} + +func fakeIstiodDeployment(cluster string, manageExternal bool) *apps_v1.Deployment { + deployment := &apps_v1.Deployment{ + ObjectMeta: meta_v1.ObjectMeta{ + Name: "istiod", + Namespace: "istio-system", + Labels: map[string]string{ + "app": "istiod", + IstioRevisionLabel: "default", + }, + }, + Spec: apps_v1.DeploymentSpec{ + Template: core_v1.PodTemplateSpec{ + Spec: core_v1.PodSpec{ + Containers: []core_v1.Container{ + { + Name: "discovery", + Env: []core_v1.EnvVar{ + { + Name: "CLUSTER_ID", + Value: cluster, + }, + }, + }, + }, + }, + }, + }, + } + if manageExternal { + deployment.Spec.Template.Spec.Containers[0].Env = append(deployment.Spec.Template.Spec.Containers[0].Env, core_v1.EnvVar{ + Name: "EXTERNAL_ISTIOD", + Value: "true", + }) + } + return deployment +} + +func TestRefreshIstioCache(t *testing.T) { + assert := assert.New(t) + require := require.New(t) + + conf := config.NewConfig() + conf.KubernetesConfig.ClusterName = "Kubernetes" + kubernetes.SetConfig(t, *conf) + + const configMapData = `accessLogFile: /dev/stdout +enableAutoMtls: true +rootNamespace: istio-system +trustDomain: cluster.local +` + istioConfigMap := &core_v1.ConfigMap{ + ObjectMeta: meta_v1.ObjectMeta{ + Name: "istio", + Namespace: "istio-system", + }, + Data: map[string]string{"mesh": configMapData}, + } + + k8s := kubetest.NewFakeK8sClient( + runningIstiodPod(), + fakeIstiodDeployment(conf.KubernetesConfig.ClusterName, true), + &core_v1.Namespace{ObjectMeta: meta_v1.ObjectMeta{Name: "istio-system"}}, + istioConfigMap, + ) + // RefreshIstioCache relies on this being set. 
+ k8s.KubeClusterInfo.Name = conf.KubernetesConfig.ClusterName + + testServer := istiodTestServer(t) + fakeForwarder := &fakeForwarder{ + ClientInterface: k8s, + testURL: testServer.URL, + } + + cache := SetupBusinessLayer(t, fakeForwarder, *conf) + + cf := kubetest.NewK8SClientFactoryMock(fakeForwarder) + k8sclients := make(map[string]kubernetes.ClientInterface) + k8sclients[conf.KubernetesConfig.ClusterName] = fakeForwarder + mesh := NewWithBackends(k8sclients, k8sclients, nil, nil).Mesh + cpm := NewControlPlaneMonitor(cache, cf, *conf, &mesh) + + assert.Nil(cache.GetRegistryStatus(conf.KubernetesConfig.ClusterName)) + err := cpm.RefreshIstioCache(context.TODO()) + require.NoError(err) + + registryServices := cache.GetRegistryStatus(conf.KubernetesConfig.ClusterName) + require.NotNil(registryServices) + + assert.Len(registryServices.Services, 79) + // This is a pod that exists in the test data at: "../tests/data/registry/registry-syncz.json" + podProxyStatus := cache.GetPodProxyStatus("Kubernetes", "beta", "b-client-8b97458bb-tghx9") + require.NotNil(podProxyStatus) + assert.Equal("Kubernetes", podProxyStatus.ClusterID) +} + +func TestCancelingContextEndsPolling(t *testing.T) { + assert := assert.New(t) + + conf := config.NewConfig() + kubernetes.SetConfig(t, *conf) + + k8s := kubetest.NewFakeK8sClient() + cache := SetupBusinessLayer(t, k8s, *conf) + + cf := kubetest.NewK8SClientFactoryMock(k8s) + k8sclients := make(map[string]kubernetes.ClientInterface) + k8sclients[conf.KubernetesConfig.ClusterName] = k8s + mesh := NewWithBackends(k8sclients, k8sclients, nil, nil).Mesh + cpm := NewControlPlaneMonitor(cache, cf, *conf, &mesh) + + ctx, cancel := context.WithCancel(context.Background()) + cancel() + cpm.PollIstiodForProxyStatus(ctx) + + assert.Nil(cache.GetRegistryStatus(conf.KubernetesConfig.ClusterName)) +} + +func TestPollingPopulatesCache(t *testing.T) { + require := require.New(t) + + conf := config.NewConfig() + kubernetes.SetConfig(t, *conf) + + const configMapData = `accessLogFile: /dev/stdout +enableAutoMtls: true +rootNamespace: istio-system +trustDomain: cluster.local +` + istioConfigMap := &core_v1.ConfigMap{ + ObjectMeta: meta_v1.ObjectMeta{ + Name: "istio", + Namespace: "istio-system", + }, + Data: map[string]string{"mesh": configMapData}, + } + + testServer := istiodTestServer(t) + + k8s := kubetest.NewFakeK8sClient( + runningIstiodPod(), + fakeIstiodDeployment(conf.KubernetesConfig.ClusterName, true), + &core_v1.Namespace{ObjectMeta: meta_v1.ObjectMeta{Name: "istio-system"}}, + istioConfigMap, + ) + // RefreshIstioCache relies on this being set. + k8s.KubeClusterInfo.Name = conf.KubernetesConfig.ClusterName + + fakeForwarder := &fakeForwarder{ + ClientInterface: k8s, + testURL: testServer.URL, + } + + cache := SetupBusinessLayer(t, fakeForwarder, *conf) + + cf := kubetest.NewK8SClientFactoryMock(fakeForwarder) + k8sclients := make(map[string]kubernetes.ClientInterface) + k8sclients[conf.KubernetesConfig.ClusterName] = fakeForwarder + mesh := NewWithBackends(k8sclients, k8sclients, nil, nil).Mesh + cpm := NewControlPlaneMonitor(cache, cf, *conf, &mesh) + // Make this really low so that we get something sooner. + cpm.pollingInterval = time.Millisecond * 1 + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + require.Nil(cache.GetRegistryStatus(conf.KubernetesConfig.ClusterName)) + cpm.PollIstiodForProxyStatus(ctx) + // Cache should be populated after PollIstiod returns because the + // pump gets primed before polling starts. 
+ require.NotNil(cache.GetRegistryStatus(conf.KubernetesConfig.ClusterName)) + + // Clear the registry to make sure it gets populated again through polling. + cache.SetRegistryStatus(nil) + for { + select { + case <-time.After(time.Millisecond * 300): + require.Fail("Timed out waiting for cache to be populated") + return + default: + if cache.GetRegistryStatus(conf.KubernetesConfig.ClusterName) != nil { + return + } + } + } +} + +func TestRefreshIstioCacheWithMultipleRevisions(t *testing.T) { + require := require.New(t) + + conf := config.NewConfig() + conf.KubernetesConfig.ClusterName = "Kubernetes" + kubernetes.SetConfig(t, *conf) + + defaultIstiod := fakeIstiodDeployment(conf.KubernetesConfig.ClusterName, false) + istiod_1_19 := fakeIstiodWithRevision(conf.KubernetesConfig.ClusterName, "1-19-0", false) + defaultPod := runningIstiodPod() + defaultPod.Labels[IstioRevisionLabel] = "default" + istiod_1_19_pod := runningIstiodPod() + istiod_1_19_pod.Labels[IstioRevisionLabel] = "1-19-0" + + k8s := kubetest.NewFakeK8sClient( + &core_v1.Namespace{ObjectMeta: meta_v1.ObjectMeta{Name: "istio-system"}}, + defaultIstiod, + istiod_1_19, + fakeIstioConfigMap("default"), + fakeIstioConfigMap("1-19-0"), + ) + // RefreshIstioCache relies on this being set. + k8s.KubeClusterInfo.Name = conf.KubernetesConfig.ClusterName + + testServer := istiodTestServer(t) + fakeForwarder := &fakeForwarder{ + ClientInterface: k8s, + testURL: testServer.URL, + } + + cache := SetupBusinessLayer(t, fakeForwarder, *conf) + + cf := kubetest.NewK8SClientFactoryMock(fakeForwarder) + k8sclients := make(map[string]kubernetes.ClientInterface) + k8sclients[conf.KubernetesConfig.ClusterName] = fakeForwarder + mesh := NewWithBackends(k8sclients, k8sclients, nil, nil).Mesh + cpm := NewControlPlaneMonitor(cache, cf, *conf, &mesh) + + defaultStatuses, err := cpm.CanConnectToIstiodForRevision(fakeForwarder, "default") + require.NoError(err) + for _, status := range defaultStatuses { + require.NotEqual(istiod_1_19_pod.Name, status.Name) + } + + istiod_1_19_statuses, err := cpm.CanConnectToIstiodForRevision(fakeForwarder, "1-19-0") + require.NoError(err) + for _, status := range istiod_1_19_statuses { + require.NotEqual(istiod_1_19_pod.Name, status.Name) + } +} diff --git a/business/fake_controlplane_monitor.go b/business/fake_controlplane_monitor.go new file mode 100644 index 0000000000..2b24f15b08 --- /dev/null +++ b/business/fake_controlplane_monitor.go @@ -0,0 +1,25 @@ +package business + +import ( + "context" + + "github.com/kiali/kiali/kubernetes" +) + +// FakeControlPlaneMonitor is used for testing and implements ControlPlaneMonitor. 
+type FakeControlPlaneMonitor struct { + status kubernetes.IstioComponentStatus +} + +func (f *FakeControlPlaneMonitor) PollIstiodForProxyStatus(ctx context.Context) {} +func (f *FakeControlPlaneMonitor) CanConnectToIstiod(client kubernetes.ClientInterface) (kubernetes.IstioComponentStatus, error) { + return f.status, nil +} + +func (f *FakeControlPlaneMonitor) CanConnectToIstiodForRevision(client kubernetes.ClientInterface, revision string) (kubernetes.IstioComponentStatus, error) { + return f.status, nil +} +func (f *FakeControlPlaneMonitor) RefreshIstioCache(ctx context.Context) error { return nil } + +// Interface guard +var _ ControlPlaneMonitor = &FakeControlPlaneMonitor{} diff --git a/business/istio_config.go b/business/istio_config.go index 769e52335c..839e11b687 100644 --- a/business/istio_config.go +++ b/business/istio_config.go @@ -29,10 +29,11 @@ import ( const allResources string = "*" type IstioConfigService struct { - userClients map[string]kubernetes.ClientInterface - config config.Config - kialiCache cache.KialiCache - businessLayer *Layer + userClients map[string]kubernetes.ClientInterface + config config.Config + kialiCache cache.KialiCache + businessLayer *Layer + controlPlaneMonitor ControlPlaneMonitor } type IstioConfigCriteria struct { @@ -603,16 +604,20 @@ func GetIstioAPI(resourceType string) bool { } // DeleteIstioConfigDetail deletes the given Istio resource -func (in *IstioConfigService) DeleteIstioConfigDetail(cluster, namespace, resourceType, name string) error { +func (in *IstioConfigService) DeleteIstioConfigDetail(ctx context.Context, cluster, namespace, resourceType, name string) error { var err error delOpts := meta_v1.DeleteOptions{} - ctx := context.TODO() userClient := in.userClients[cluster] if userClient == nil { return fmt.Errorf("K8s Client [%s] is not found or is not accessible for Kiali", cluster) } + kubeCache, err := in.kialiCache.GetKubeCache(cluster) + if err != nil { + return err + } + switch resourceType { case kubernetes.DestinationRules: err = userClient.Istio().NetworkingV1beta1().DestinationRules(namespace).Delete(ctx, name, delOpts) @@ -651,19 +656,25 @@ func (in *IstioConfigService) DeleteIstioConfigDetail(cluster, namespace, resour return err } - // Cache is stopped after a Create/Update/Delete operation to force a refresh - in.kialiCache.Refresh(namespace) + if in.config.ExternalServices.Istio.IstioAPIEnabled { + // Refreshing the istio cache in case something has changed with the registry services. Not sure if this is really needed. + if err := in.controlPlaneMonitor.RefreshIstioCache(ctx); err != nil { + log.Errorf("Error while refreshing Istio cache: %s", err) + } + } + + // We need to refresh the kube cache though at least until waiting for the object to be updated is implemented. 
+ kubeCache.Refresh(namespace) return nil } -func (in *IstioConfigService) UpdateIstioConfigDetail(cluster, namespace, resourceType, name, jsonPatch string) (models.IstioConfigDetails, error) { +func (in *IstioConfigService) UpdateIstioConfigDetail(ctx context.Context, cluster, namespace, resourceType, name, jsonPatch string) (models.IstioConfigDetails, error) { istioConfigDetail := models.IstioConfigDetails{} istioConfigDetail.Namespace = models.Namespace{Name: namespace} istioConfigDetail.ObjectType = resourceType patchOpts := meta_v1.PatchOptions{} - ctx := context.TODO() patchType := api_types.MergePatchType bytePatch := []byte(jsonPatch) @@ -672,7 +683,11 @@ func (in *IstioConfigService) UpdateIstioConfigDetail(cluster, namespace, resour return istioConfigDetail, fmt.Errorf("K8s Client [%s] is not found or is not accessible for Kiali", cluster) } - var err error + kubeCache, err := in.kialiCache.GetKubeCache(cluster) + if err != nil { + return istioConfigDetail, nil + } + switch resourceType { case kubernetes.DestinationRules: istioConfigDetail.DestinationRule = &networking_v1beta1.DestinationRule{} @@ -722,27 +737,33 @@ func (in *IstioConfigService) UpdateIstioConfigDetail(cluster, namespace, resour default: err = fmt.Errorf("object type not found: %v", resourceType) } + if err != nil { + return istioConfigDetail, err + } - // Cache is stopped after a Create/Update/Delete operation to force a refresh - kialiCache.Refresh(namespace) + // We need to refresh the kube cache though at least until waiting for the object to be updated is implemented. + kubeCache.Refresh(namespace) return istioConfigDetail, err } -func (in *IstioConfigService) CreateIstioConfigDetail(cluster, namespace, resourceType string, body []byte) (models.IstioConfigDetails, error) { +func (in *IstioConfigService) CreateIstioConfigDetail(ctx context.Context, cluster, namespace, resourceType string, body []byte) (models.IstioConfigDetails, error) { istioConfigDetail := models.IstioConfigDetails{} istioConfigDetail.Namespace = models.Namespace{Name: namespace} istioConfigDetail.ObjectType = resourceType createOpts := meta_v1.CreateOptions{} - ctx := context.TODO() userClient := in.userClients[cluster] if userClient == nil { return istioConfigDetail, fmt.Errorf("K8s Client [%s] is not found or is not accessible for Kiali", cluster) } - var err error + kubeCache, err := in.kialiCache.GetKubeCache(cluster) + if err != nil { + return istioConfigDetail, nil + } + switch resourceType { case kubernetes.DestinationRules: istioConfigDetail.DestinationRule = &networking_v1beta1.DestinationRule{} @@ -852,8 +873,15 @@ func (in *IstioConfigService) CreateIstioConfigDetail(cluster, namespace, resour default: err = fmt.Errorf("object type not found: %v", resourceType) } - // Cache is stopped after a Create/Update/Delete operation to force a refresh - in.kialiCache.Refresh(namespace) + + if in.config.ExternalServices.Istio.IstioAPIEnabled { + // Refreshing the istio cache in case something has changed with the registry services. Not sure if this is really needed. + if err := in.controlPlaneMonitor.RefreshIstioCache(ctx); err != nil { + log.Errorf("Error while refreshing Istio cache: %s", err) + } + } + // We need to refresh the kube cache though at least until waiting for the object to be updated is implemented. 
+ kubeCache.Refresh(namespace) return istioConfigDetail, err } diff --git a/business/istio_config_test.go b/business/istio_config_test.go index 991b0c4fa6..6c83d4b0ed 100644 --- a/business/istio_config_test.go +++ b/business/istio_config_test.go @@ -531,27 +531,28 @@ func TestDeleteIstioConfigDetails(t *testing.T) { k8sclients := make(map[string]kubernetes.ClientInterface) k8sclients[conf.KubernetesConfig.ClusterName] = k8s - configService := IstioConfigService{userClients: k8sclients, kialiCache: cache} + configService := IstioConfigService{userClients: k8sclients, kialiCache: cache, controlPlaneMonitor: poller} - err := configService.DeleteIstioConfigDetail(conf.KubernetesConfig.ClusterName, "test", "virtualservices", "reviews-to-delete") + err := configService.DeleteIstioConfigDetail(context.Background(), conf.KubernetesConfig.ClusterName, "test", "virtualservices", "reviews-to-delete") assert.Nil(err) } func TestUpdateIstioConfigDetails(t *testing.T) { assert := assert.New(t) + require := require.New(t) k8s := kubetest.NewFakeK8sClient(data.CreateEmptyVirtualService("reviews-to-update", "test", []string{"reviews"})) cache := SetupBusinessLayer(t, k8s, *config.NewConfig()) conf := config.Get() k8sclients := make(map[string]kubernetes.ClientInterface) k8sclients[conf.KubernetesConfig.ClusterName] = k8s - configService := IstioConfigService{userClients: k8sclients, kialiCache: cache} + configService := IstioConfigService{userClients: k8sclients, kialiCache: cache, controlPlaneMonitor: poller} - updatedVirtualService, err := configService.UpdateIstioConfigDetail(conf.KubernetesConfig.ClusterName, "test", "virtualservices", "reviews-to-update", "{}") + updatedVirtualService, err := configService.UpdateIstioConfigDetail(context.Background(), conf.KubernetesConfig.ClusterName, "test", "virtualservices", "reviews-to-update", "{}") + require.NoError(err) assert.Equal("test", updatedVirtualService.Namespace.Name) assert.Equal("virtualservices", updatedVirtualService.ObjectType) assert.Equal("reviews-to-update", updatedVirtualService.VirtualService.Name) - assert.Nil(err) } func TestCreateIstioConfigDetails(t *testing.T) { @@ -562,9 +563,9 @@ func TestCreateIstioConfigDetails(t *testing.T) { k8sclients := make(map[string]kubernetes.ClientInterface) k8sclients[conf.KubernetesConfig.ClusterName] = k8s - configService := IstioConfigService{userClients: k8sclients, kialiCache: cache} + configService := IstioConfigService{userClients: k8sclients, kialiCache: cache, controlPlaneMonitor: poller} - createVirtualService, err := configService.CreateIstioConfigDetail(conf.KubernetesConfig.ClusterName, "test", "virtualservices", []byte("{}")) + createVirtualService, err := configService.CreateIstioConfigDetail(context.Background(), conf.KubernetesConfig.ClusterName, "test", "virtualservices", []byte("{}")) assert.Equal("test", createVirtualService.Namespace.Name) assert.Equal("virtualservices", createVirtualService.ObjectType) // Name is now encoded in the payload of the virtualservice so, it modifies this test diff --git a/business/istio_status.go b/business/istio_status.go index c3a88c0fbe..54f4bb803b 100644 --- a/business/istio_status.go +++ b/business/istio_status.go @@ -16,10 +16,19 @@ import ( "github.com/kiali/kiali/util/httputil" ) +func NewIstioStatusService(userClients map[string]kubernetes.ClientInterface, businessLayer *Layer, cpm ControlPlaneMonitor) IstioStatusService { + return IstioStatusService{ + userClients: userClients, + businessLayer: businessLayer, + controlPlaneMonitor: cpm, + } +} + 
// SvcService deals with fetching istio/kubernetes services related content and convert to kiali model type IstioStatusService struct { - userClients map[string]kubernetes.ClientInterface - businessLayer *Layer + userClients map[string]kubernetes.ClientInterface + businessLayer *Layer + controlPlaneMonitor ControlPlaneMonitor } func (iss *IstioStatusService) GetStatus(ctx context.Context, cluster string) (kubernetes.IstioComponentStatus, error) { @@ -43,7 +52,7 @@ func (iss *IstioStatusService) GetStatus(ctx context.Context, cluster string) (k func (iss *IstioStatusService) getIstioComponentStatus(ctx context.Context, cluster string) (kubernetes.IstioComponentStatus, error) { // Fetching workloads from component namespaces - workloads, err := iss.getComponentNamespacesWorkloads(ctx) + workloads, err := iss.getComponentNamespacesWorkloads(ctx, cluster) if err != nil { return kubernetes.IstioComponentStatus{}, err } @@ -58,7 +67,7 @@ func (iss *IstioStatusService) getIstioComponentStatus(ctx context.Context, clus return kubernetes.IstioComponentStatus{}, fmt.Errorf("Cluster %s doesn't exist ", cluster) } - istiodStatus, err := k8s.CanConnectToIstiod() + istiodStatus, err := iss.controlPlaneMonitor.CanConnectToIstiod(k8s) if err != nil { return kubernetes.IstioComponentStatus{}, err } @@ -66,7 +75,7 @@ func (iss *IstioStatusService) getIstioComponentStatus(ctx context.Context, clus return deploymentStatus.Merge(istiodStatus), nil } -func (iss *IstioStatusService) getComponentNamespacesWorkloads(ctx context.Context) ([]*models.Workload, error) { +func (iss *IstioStatusService) getComponentNamespacesWorkloads(ctx context.Context, cluster string) ([]*models.Workload, error) { var wg sync.WaitGroup nss := map[string]bool{} @@ -86,7 +95,7 @@ func (iss *IstioStatusService) getComponentNamespacesWorkloads(ctx context.Conte defer wg.Done() var wls models.Workloads var err error - wls, err = iss.businessLayer.Workload.fetchWorkloads(ctx, n, "") + wls, err = iss.businessLayer.Workload.fetchWorkloadsFromCluster(ctx, cluster, n, "") wliChan <- wls errChan <- err }(ctx, n, wlChan, errChan) diff --git a/business/istio_status_test.go b/business/istio_status_test.go index d83535570e..de58546656 100644 --- a/business/istio_status_test.go +++ b/business/istio_status_test.go @@ -130,7 +130,9 @@ func mockAddOnsCalls(t *testing.T, objects []runtime.Object, isIstioReachable bo objects = append(objects, &osproject_v1.Project{ObjectMeta: meta_v1.ObjectMeta{Name: "istio-system"}}) // Mock k8s api calls - k8s := mockDeploymentCall(objects, isIstioReachable) + mockDeploymentCall(objects, isIstioReachable) + k8s := kubetest.NewFakeK8sClient(objects...) + k8s.OpenShift = true routes := mockAddOnCalls(defaultAddOnCalls(&grafanaCalls, &prometheusCalls)) httpServer := mockServer(t, routes) @@ -266,7 +268,7 @@ func TestGrafanaNotWorking(t *testing.T) { }), } objects = append(objects, &osproject_v1.Project{ObjectMeta: meta_v1.ObjectMeta{Name: "istio-system"}}) - k8s := mockDeploymentCall(objects, true) + mockDeploymentCall(objects, true) addOnsStetup := defaultAddOnCalls(&grafanaCalls, &prometheusCalls) addOnsStetup["grafana"] = addOnsSetup{ Url: "/grafana/mock", @@ -280,6 +282,9 @@ func TestGrafanaNotWorking(t *testing.T) { conf := addonAddMockUrls(httpServer.URL, config.NewConfig(), false) config.Set(conf) + k8s := kubetest.NewFakeK8sClient(objects...) 
+ k8s.OpenShift = true + // Set global cache var SetupBusinessLayer(t, k8s, *conf) @@ -563,8 +568,6 @@ func TestIstiodUnreachable(t *testing.T) { } k8s, grafanaCalls, promCalls := mockAddOnsCalls(t, objects, false, false) - k8s = &fakeIstiodConnecter{k8s, istioStatus} - conf := config.Get() conf.IstioLabels.AppLabelName = "app.kubernetes.io/name" conf.ExternalServices.Istio.ComponentStatuses = config.ComponentStatuses{ @@ -579,6 +582,7 @@ func TestIstiodUnreachable(t *testing.T) { // Set global cache var SetupBusinessLayer(t, k8s, *conf) + WithControlPlaneMonitor(&FakeControlPlaneMonitor{status: istioStatus}) clients := make(map[string]kubernetes.ClientInterface) clients[conf.KubernetesConfig.ClusterName] = k8s @@ -788,21 +792,8 @@ func mockFailingJaeger() (tracing.ClientInterface, error) { return tracing.ClientInterface(j), nil } -// func newFakeIstiodConnector() -type fakeIstiodConnecter struct { - kubernetes.ClientInterface - status kubernetes.IstioComponentStatus -} - -func (in *fakeIstiodConnecter) CanConnectToIstiod() (kubernetes.IstioComponentStatus, error) { - return in.status, nil -} - // Setup K8S api call to fetch Pods -func mockDeploymentCall(objects []runtime.Object, isIstioReachable bool) kubernetes.ClientInterface { - k8s := kubetest.NewFakeK8sClient(objects...) - k8s.OpenShift = true - +func mockDeploymentCall(objects []runtime.Object, isIstioReachable bool) { var istioStatus kubernetes.IstioComponentStatus if !isIstioReachable { for _, obj := range objects { @@ -819,7 +810,7 @@ func mockDeploymentCall(objects []runtime.Object, isIstioReachable bool) kuberne } } - return &fakeIstiodConnecter{k8s, istioStatus} + WithControlPlaneMonitor(&FakeControlPlaneMonitor{status: istioStatus}) } func fakeDeploymentWithStatus(name string, labels map[string]string, status apps_v1.DeploymentStatus) *apps_v1.Deployment { diff --git a/business/istio_validations.go b/business/istio_validations.go index 7ce2900b17..ec1fba5cea 100644 --- a/business/istio_validations.go +++ b/business/istio_validations.go @@ -79,17 +79,11 @@ func (in *IstioValidationsService) GetValidations(ctx context.Context, cluster, var rbacDetails kubernetes.RBACDetails var registryServices []*kubernetes.RegistryService - istioApiEnabled := config.Get().ExternalServices.Istio.IstioAPIEnabled - wg.Add(3) // We need to add these here to make sure we don't execute wg.Wait() before scheduler has started goroutines if service != "" { wg.Add(1) } - if istioApiEnabled { - wg.Add(1) - } - // We fetch without target service as some validations will require full-namespace details go in.fetchIstioConfigList(ctx, &istioConfigList, &mtlsDetails, &rbacDetails, cluster, namespace, errChan, &wg) @@ -105,10 +99,8 @@ func (in *IstioValidationsService) GetValidations(ctx context.Context, cluster, go in.fetchServices(ctx, &services, cluster, namespace, errChan, &wg) } - if istioApiEnabled { - // @TODO registry services for remote cluster - go in.fetchRegistryServices(®istryServices, errChan, &wg) - } + criteria := RegistryCriteria{AllNamespaces: true, Cluster: cluster} + registryServices = in.businessLayer.RegistryStatus.GetRegistryServices(criteria) wg.Wait() close(errChan) @@ -197,16 +189,13 @@ func (in *IstioValidationsService) GetIstioObjectValidations(ctx context.Context // Get all the Istio objects from a Namespace and all gateways from every namespace wg.Add(3) - if istioApiEnabled { - wg.Add(1) - } - go in.fetchIstioConfigList(ctx, &istioConfigList, &mtlsDetails, &rbacDetails, cluster, namespace, errChan, &wg) go 
in.fetchAllWorkloads(ctx, &workloadsPerNamespace, cluster, &namespaces, errChan, &wg) go in.fetchNonLocalmTLSConfigs(&mtlsDetails, cluster, errChan, &wg) if istioApiEnabled { - go in.fetchRegistryServices(®istryServices, errChan, &wg) + criteria := RegistryCriteria{AllNamespaces: true, Cluster: cluster} + registryServices = in.businessLayer.RegistryStatus.GetRegistryServices(criteria) } wg.Wait() @@ -573,20 +562,6 @@ func (in *IstioValidationsService) fetchNonLocalmTLSConfigs(mtlsDetails *kuberne } } -func (in *IstioValidationsService) fetchRegistryServices(rValue *[]*kubernetes.RegistryService, errChan chan error, wg *sync.WaitGroup) { - defer wg.Done() - criteria := RegistryCriteria{AllNamespaces: true} - registryServices, err := in.businessLayer.RegistryStatus.GetRegistryServices(criteria) - if err != nil { - select { - case errChan <- err: - default: - } - } else { - *rValue = registryServices - } -} - func (in *IstioValidationsService) isGatewayToNamespace() bool { mesh, err := in.businessLayer.Mesh.GetMesh(context.TODO()) if err != nil { diff --git a/business/istio_validations_test.go b/business/istio_validations_test.go index 633e85f5d9..f9ec1cf58f 100644 --- a/business/istio_validations_test.go +++ b/business/istio_validations_test.go @@ -33,12 +33,13 @@ func TestGetNamespaceValidations(t *testing.T) { validations, err := vs.GetValidations(context.TODO(), conf.KubernetesConfig.ClusterName, "test", "", "") require.NoError(err) - assert.NotEmpty(validations) + require.NotEmpty(validations) assert.True(validations[models.IstioValidationKey{ObjectType: "virtualservice", Namespace: "test", Name: "product-vs"}].Valid) } func TestGetAllValidations(t *testing.T) { assert := assert.New(t) + require := require.New(t) conf := config.NewConfig() config.Set(conf) @@ -46,7 +47,7 @@ func TestGetAllValidations(t *testing.T) { []string{"details.test.svc.cluster.local", "product.test.svc.cluster.local", "product2.test.svc.cluster.local", "customer.test.svc.cluster.local"}, "test", fakePods()) validations, _ := vs.GetValidations(context.TODO(), conf.KubernetesConfig.ClusterName, "", "", "") - assert.NotEmpty(validations) + require.NotEmpty(validations) assert.True(validations[models.IstioValidationKey{ObjectType: "virtualservice", Namespace: "test", Name: "product-vs"}].Valid) } @@ -296,13 +297,16 @@ func mockCombinedValidationService(t *testing.T, istioConfigList *models.IstioCo k8s := kubetest.NewFakeK8sClient(fakeIstioObjects...) 
- cache := SetupBusinessLayer(t, k8s, *config.NewConfig()) - cache.SetRegistryStatus(&kubernetes.RegistryStatus{ - Services: data.CreateFakeMultiRegistryServices(services, "test", "*"), + conf := config.NewConfig() + cache := SetupBusinessLayer(t, k8s, *conf) + cache.SetRegistryStatus(map[string]*kubernetes.RegistryStatus{ + conf.KubernetesConfig.ClusterName: { + Services: data.CreateFakeMultiRegistryServices(services, "test", "*"), + }, }) k8sclients := make(map[string]kubernetes.ClientInterface) - k8sclients[config.Get().KubernetesConfig.ClusterName] = k8s + k8sclients[conf.KubernetesConfig.ClusterName] = k8s return IstioValidationsService{userClients: k8sclients, businessLayer: NewWithBackends(k8sclients, k8sclients, nil, nil)} } diff --git a/business/layer.go b/business/layer.go index ee2fdc96ec..c3e270d6e2 100644 --- a/business/layer.go +++ b/business/layer.go @@ -6,7 +6,6 @@ import ( "github.com/kiali/kiali/config" "github.com/kiali/kiali/kubernetes" "github.com/kiali/kiali/kubernetes/cache" - "github.com/kiali/kiali/log" "github.com/kiali/kiali/prometheus" "github.com/kiali/kiali/tracing" ) @@ -15,24 +14,23 @@ import ( // A business layer is created per token/user. Any data that // needs to be saved across layers is saved in the Kiali Cache. type Layer struct { - App AppService - Health HealthService - IstioConfig IstioConfigService - IstioStatus IstioStatusService - IstioCerts IstioCertsService - Tracing TracingService - Mesh MeshService - Namespace NamespaceService - OpenshiftOAuth OpenshiftOAuthService - ProxyLogging ProxyLoggingService - ProxyStatus ProxyStatusService - RegistryStatus RegistryStatusService - RegistryStatuses map[string]RegistryStatusService // Key is the cluster name - Svc SvcService - TLS TLSService - TokenReview TokenReviewService - Validations IstioValidationsService - Workload WorkloadService + App AppService + Health HealthService + IstioConfig IstioConfigService + IstioStatus IstioStatusService + IstioCerts IstioCertsService + Tracing TracingService + Mesh MeshService + Namespace NamespaceService + OpenshiftOAuth OpenshiftOAuthService + ProxyLogging ProxyLoggingService + ProxyStatus ProxyStatusService + RegistryStatus RegistryStatusService + Svc SvcService + TLS TLSService + TokenReview TokenReviewService + Validations IstioValidationsService + Workload WorkloadService } // Global clientfactory and prometheus clients. @@ -41,43 +39,15 @@ var ( tracingClient tracing.ClientInterface kialiCache cache.KialiCache prometheusClient prometheus.ClientInterface + poller ControlPlaneMonitor ) -// sets the global kiali cache var. -func initKialiCache() error { - conf := config.Get() - - if excludedWorkloads == nil { - excludedWorkloads = make(map[string]bool) - for _, w := range conf.KubernetesConfig.ExcludeWorkloads { - excludedWorkloads[w] = true - } - } - - userClient, err := kubernetes.GetClientFactory() - if err != nil { - log.Errorf("Failed to create client factory. Err: %s", err) - return err - } - clientFactory = userClient - - log.Infof("Initializing Kiali Cache") - - cache, err := cache.NewKialiCache(clientFactory, *conf) - if err != nil { - log.Errorf("Error initializing Kiali Cache. Details: %s", err) - return err - } - +// Start sets the globals necessary for the business layer. +// TODO: Refactor out global vars. 
+func Start(cf kubernetes.ClientFactory, controlPlaneMonitor ControlPlaneMonitor, cache cache.KialiCache) { + clientFactory = cf kialiCache = cache - - return nil -} - -// Start initializes the Kiali Cache and sets the -// globals necessary for the business layer. -func Start() error { - return initKialiCache() + poller = controlPlaneMonitor } // Get the business.Layer @@ -132,8 +102,8 @@ func NewWithBackends(userClients map[string]kubernetes.ClientInterface, kialiSAC // TODO: Modify the k8s argument to other services to pass the whole k8s map if needed temporaryLayer.App = AppService{prom: prom, userClients: userClients, businessLayer: temporaryLayer} temporaryLayer.Health = HealthService{prom: prom, businessLayer: temporaryLayer, userClients: userClients} - temporaryLayer.IstioConfig = IstioConfigService{config: *conf, userClients: userClients, kialiCache: kialiCache, businessLayer: temporaryLayer} - temporaryLayer.IstioStatus = IstioStatusService{userClients: userClients, businessLayer: temporaryLayer} + temporaryLayer.IstioConfig = IstioConfigService{config: *conf, userClients: userClients, kialiCache: kialiCache, businessLayer: temporaryLayer, controlPlaneMonitor: poller} + temporaryLayer.IstioStatus = NewIstioStatusService(userClients, temporaryLayer, poller) temporaryLayer.IstioCerts = IstioCertsService{k8s: userClients[homeClusterName], businessLayer: temporaryLayer} temporaryLayer.Tracing = TracingService{loader: tracingClient, businessLayer: temporaryLayer} temporaryLayer.Namespace = NewNamespaceService(userClients, kialiSAClients, kialiCache, *conf) @@ -142,22 +112,12 @@ func NewWithBackends(userClients map[string]kubernetes.ClientInterface, kialiSAC temporaryLayer.ProxyStatus = ProxyStatusService{kialiSAClients: kialiSAClients, kialiCache: kialiCache, businessLayer: temporaryLayer} // Out of order because it relies on ProxyStatus temporaryLayer.ProxyLogging = ProxyLoggingService{userClients: userClients, proxyStatus: &temporaryLayer.ProxyStatus} - temporaryLayer.RegistryStatus = RegistryStatusService{k8s: userClients[homeClusterName], businessLayer: temporaryLayer} + temporaryLayer.RegistryStatus = RegistryStatusService{kialiCache: kialiCache} temporaryLayer.TLS = TLSService{userClients: userClients, kialiCache: kialiCache, businessLayer: temporaryLayer} temporaryLayer.Svc = SvcService{config: *conf, kialiCache: kialiCache, businessLayer: temporaryLayer, prom: prom, userClients: userClients} temporaryLayer.TokenReview = NewTokenReview(userClients[homeClusterName]) temporaryLayer.Validations = IstioValidationsService{userClients: userClients, businessLayer: temporaryLayer} temporaryLayer.Workload = *NewWorkloadService(userClients, prom, kialiCache, temporaryLayer, conf) - registryStatuses := make(map[string]RegistryStatusService) - for name, client := range userClients { - registryStatuses[name] = RegistryStatusService{k8s: client, businessLayer: temporaryLayer} - } - temporaryLayer.RegistryStatuses = registryStatuses - return temporaryLayer } - -func Stop() { - kialiCache.Stop() -} diff --git a/business/mesh.go b/business/mesh.go index 52dd627b31..29ff2448b9 100644 --- a/business/mesh.go +++ b/business/mesh.go @@ -35,9 +35,6 @@ const ( type Mesh struct { // ControlPlanes that share the same mesh ID. ControlPlanes []ControlPlane - - // ID is the ID of the mesh. - ID *string } // ControlPlane manages the dataplane for one or more kube clusters. 
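// Illustrative sketch, not part of the change above: how a caller (for example
// the server bootstrap) might wire the refactored Start, which now receives its
// dependencies instead of building them the way initKialiCache used to. Only
// Start's signature, kubernetes.GetClientFactory and cache.NewKialiCache come
// from this diff; the NewControlPlaneMonitor constructor name is an assumption.
func exampleStartup() error {
	conf := config.Get()

	cf, err := kubernetes.GetClientFactory()
	if err != nil {
		return err
	}

	kCache, err := cache.NewKialiCache(cf, *conf)
	if err != nil {
		return err
	}

	// Hypothetical constructor for the monitor defined in controlplane_monitor.go.
	cpm := NewControlPlaneMonitor(kCache, cf, *conf)

	// Sets the package globals (clientFactory, kialiCache, poller) in one place.
	Start(cf, cpm, kCache)
	return nil
}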
@@ -163,14 +160,6 @@ func (in *MeshService) GetMesh(ctx context.Context) (*Mesh, error) { } controlPlane.Config = *controlPlaneConfig - // Kiali only supports a single mesh. All controlplanes should share the same mesh id. - // Otherwise this is an error. - if mesh.ID == nil { - mesh.ID = &controlPlane.Config.DefaultConfig.MeshId - } else if *mesh.ID != controlPlane.Config.DefaultConfig.MeshId { - return nil, fmt.Errorf("multiple mesh ids found: [%s] and [%s]", *mesh.ID, controlPlane.Config.DefaultConfig.MeshId) - } - if containers := istiod.Spec.Template.Spec.Containers; len(containers) > 0 { for _, env := range istiod.Spec.Template.Spec.Containers[0].Env { switch { diff --git a/business/registry_status.go b/business/registry_status.go index 27e24a37b5..7bc1aaec43 100644 --- a/business/registry_status.go +++ b/business/registry_status.go @@ -1,40 +1,31 @@ package business import ( - "sync" - "k8s.io/apimachinery/pkg/labels" "github.com/kiali/kiali/kubernetes" + "github.com/kiali/kiali/kubernetes/cache" "github.com/kiali/kiali/log" ) -var refreshLock sync.Mutex - type RegistryStatusService struct { - k8s kubernetes.ClientInterface - businessLayer *Layer + kialiCache cache.KialiCache } type RegistryCriteria struct { // When AllNamespaces is true Namespace criteria is ignored // Note this flag is only supported in Registry queries AllNamespaces bool + Cluster string Namespace string ServiceName string ServiceSelector string } -func (in *RegistryStatusService) GetRegistryServices(criteria RegistryCriteria) ([]*kubernetes.RegistryService, error) { - if kialiCache == nil { - return nil, nil - } - if err := in.checkAndRefresh(); err != nil { - return nil, err - } - registryStatus := kialiCache.GetRegistryStatus() +func (in *RegistryStatusService) GetRegistryServices(criteria RegistryCriteria) []*kubernetes.RegistryService { + registryStatus := kialiCache.GetRegistryStatus(criteria.Cluster) registryServices := filterRegistryServices(registryStatus, criteria) - return registryServices, nil + return registryServices } func filterRegistryServices(registryStatus *kubernetes.RegistryStatus, criteria RegistryCriteria) []*kubernetes.RegistryService { @@ -68,56 +59,3 @@ func filterRegistryServices(registryStatus *kubernetes.RegistryStatus, criteria } return filteredRegistryServices } - -func (in *RegistryStatusService) checkAndRefresh() error { - if !kialiCache.CheckRegistryStatus() { - refreshLock.Lock() - if !kialiCache.CheckRegistryStatus() { - registryStatus, err := in.refreshRegistryStatus() - if err != nil { - refreshLock.Unlock() - return err - } - kialiCache.SetRegistryStatus(registryStatus) - log.Debugf("Lock acquired. Update the Registry") - } else { - log.Debugf("Lock acquired but registry updated. 
Doing nothing") - } - refreshLock.Unlock() - } - return nil -} - -func (in *RegistryStatusService) refreshRegistryStatus() (*kubernetes.RegistryStatus, error) { - registryServices, err := in.k8s.GetRegistryServices() - if err != nil { - registryServices, err = in.getRegistryServicesUsingKialiSA() - if err != nil { - return nil, err - } - } - - registryStatus := kubernetes.RegistryStatus{ - Services: registryServices, - } - - return ®istryStatus, nil -} - -func getSAClient() (kubernetes.ClientInterface, error) { - clientFactory, err := kubernetes.GetClientFactory() - if err != nil { - return nil, err - } - - return clientFactory.GetSAHomeClusterClient(), nil -} - -func (in *RegistryStatusService) getRegistryServicesUsingKialiSA() ([]*kubernetes.RegistryService, error) { - k8s, err := getSAClient() - if err != nil { - return nil, err - } - - return k8s.GetRegistryServices() -} diff --git a/business/services.go b/business/services.go index 6805a18ce7..ab940a6f1b 100644 --- a/business/services.go +++ b/business/services.go @@ -122,15 +122,9 @@ func (in *SvcService) getServiceListForCluster(ctx context.Context, criteria Ser return nil, err } - // TODO: Handle istio api registry. - // Only the home cluster will fetch the registry for now. This can change once Kiali supports - // talking to multiple istiods. - nFetches := 4 + nFetches := 3 if criteria.IncludeIstioResources { - nFetches = 5 - } - if !in.config.ExternalServices.Istio.IstioAPIEnabled || cluster != in.config.KubernetesConfig.ClusterName { - nFetches-- + nFetches = 4 } wg := sync.WaitGroup{} @@ -156,19 +150,12 @@ func (in *SvcService) getServiceListForCluster(ctx context.Context, criteria Ser }() if in.config.ExternalServices.Istio.IstioAPIEnabled && cluster == in.config.KubernetesConfig.ClusterName { - go func() { - defer wg.Done() - var err2 error - registryCriteria := RegistryCriteria{ - Namespace: criteria.Namespace, - ServiceSelector: criteria.ServiceSelector, - } - rSvcs, err2 = in.businessLayer.RegistryStatus.GetRegistryServices(registryCriteria) - if err2 != nil { - log.Errorf("Error fetching Registry Services per namespace %s: %s", criteria.Namespace, err2) - errChan <- err2 - } - }() + registryCriteria := RegistryCriteria{ + Namespace: criteria.Namespace, + ServiceSelector: criteria.ServiceSelector, + Cluster: cluster, + } + rSvcs = in.businessLayer.RegistryStatus.GetRegistryServices(registryCriteria) } go func() { @@ -494,7 +481,7 @@ func (in *SvcService) GetServiceDetails(ctx context.Context, cluster, namespace, wg := sync.WaitGroup{} // Max possible number of errors. It's ok if the buffer size exceeds the number of goroutines // in cases where istio api is disabled. - errChan := make(chan error, 9) + errChan := make(chan error, 8) labelsSelector := labels.Set(svc.Selectors).String() // If service doesn't have any selector, we can't know which are the pods and workloads applying. 
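// Illustrative sketch, not part of the change above: with RegistryStatusService
// reduced to a read over the per-cluster registry cache, callers pass the
// cluster in RegistryCriteria and no longer handle an error or spawn a
// goroutine. The namespace value below is only an example.
func exampleRegistryLookup(layer *Layer, conf *config.Config) {
	criteria := RegistryCriteria{
		Namespace: "bookinfo",                        // example namespace
		Cluster:   conf.KubernetesConfig.ClusterName, // registry status is cached per cluster
	}

	// An empty or missing registry for the cluster simply yields no services.
	for _, rSvc := range layer.RegistryStatus.GetRegistryServices(criteria) {
		log.Debugf("registry service: %s", rSvc.Attributes.Name)
	}
}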
@@ -521,19 +508,11 @@ func (in *SvcService) GetServiceDetails(ctx context.Context, cluster, namespace, }(ctx) if in.config.ExternalServices.Istio.IstioAPIEnabled { - wg.Add(1) - go func() { - defer wg.Done() - var err2 error - registryCriteria := RegistryCriteria{ - Namespace: namespace, - } - rSvcs, err2 = in.businessLayer.RegistryStatus.GetRegistryServices(registryCriteria) - if err2 != nil { - log.Errorf("Error fetching Registry Services per namespace %s: %s", registryCriteria.Namespace, err2) - errChan <- err2 - } - }() + registryCriteria := RegistryCriteria{ + Namespace: namespace, + Cluster: cluster, + } + rSvcs = in.businessLayer.RegistryStatus.GetRegistryServices(registryCriteria) } } @@ -734,27 +713,22 @@ func (in *SvcService) GetService(ctx context.Context, cluster, namespace, servic return models.Service{}, err } - var err error - var kSvc *core_v1.Service - var cache cache.KubeCache - - cache, err = in.kialiCache.GetKubeCache(cluster) + cache, err := in.kialiCache.GetKubeCache(cluster) if err != nil { return models.Service{}, err } svc := models.Service{} - kSvc, err = cache.GetService(namespace, service) + // First try to get the service from kube. + // If it doesn't exist, try to get it from the Istio Registry. + kSvc, err := cache.GetService(namespace, service) if err != nil { // Check if this service is in the Istio Registry criteria := RegistryCriteria{ Namespace: namespace, + Cluster: cluster, } - var rSvcs []*kubernetes.RegistryService - rSvcs, err = in.businessLayer.RegistryStatus.GetRegistryServices(criteria) - if err != nil { - return svc, err - } + rSvcs := in.businessLayer.RegistryStatus.GetRegistryServices(criteria) for _, rSvc := range rSvcs { if rSvc.Attributes.Name == service { svc.ParseRegistryService(rSvc) @@ -763,13 +737,13 @@ func (in *SvcService) GetService(ctx context.Context, cluster, namespace, servic } // Service not found in Kubernetes and Istio if svc.Name == "" { - err = kubernetes.NewNotFound(service, "Kiali", "Service") - return svc, err + return svc, kubernetes.NewNotFound(service, "Kiali", "Service") } } else { svc.Parse(kSvc) } - return svc, err + + return svc, nil } func (in *SvcService) getServiceValidations(services []core_v1.Service, deployments []apps_v1.Deployment, pods []core_v1.Pod) models.IstioValidations { diff --git a/business/services_test.go b/business/services_test.go index 0a5404ea93..f9791fb935 100644 --- a/business/services_test.go +++ b/business/services_test.go @@ -85,7 +85,7 @@ func TestParseRegistryServices(t *testing.T) { rServices := map[string][]byte{ "istiod1": bServicesz, } - registryServices, err2 := kubernetes.ParseRegistryServices(rServices) + registryServices, err2 := parseRegistryServices(rServices) assert.NoError(err2) assert.Equal(3, len(registryServices)) @@ -113,7 +113,7 @@ func TestFilterLocalIstioRegistry(t *testing.T) { rServices := map[string][]byte{ "istiod1": bServicesz, } - registryServices, err2 := kubernetes.ParseRegistryServices(rServices) + registryServices, err2 := parseRegistryServices(rServices) assert.NoError(err2) assert.Equal(true, filterIstioServiceByClusterId("istio-east", registryServices[0])) diff --git a/business/testing.go b/business/testing.go index 718cd602db..0ead938c91 100644 --- a/business/testing.go +++ b/business/testing.go @@ -19,10 +19,11 @@ import ( // SetWithBackends allows for specifying the ClientFactory and Prometheus clients to be used. // Mock friendly. Used only with tests. 
-func setWithBackends(cf kubernetes.ClientFactory, prom prometheus.ClientInterface, cache cache.KialiCache) { +func setWithBackends(cf kubernetes.ClientFactory, prom prometheus.ClientInterface, cache cache.KialiCache, cpm ControlPlaneMonitor) { clientFactory = cf prometheusClient = prom kialiCache = cache + poller = cpm } // SetupBusinessLayer mocks out some global variables in the business package @@ -33,7 +34,6 @@ func SetupBusinessLayer(t *testing.T, k8s kubernetes.ClientInterface, config con cf := kubetest.NewK8SClientFactoryMock(k8s) cache := cache.NewTestingCacheWithFactory(t, cf, config) - cache.SetRegistryStatus(&kubernetes.RegistryStatus{}) originalClientFactory := clientFactory originalPrometheusClient := prometheusClient @@ -44,7 +44,9 @@ func SetupBusinessLayer(t *testing.T, k8s kubernetes.ClientInterface, config con kialiCache = originalKialiCache }) - setWithBackends(cf, nil, cache) + cpm := &FakeControlPlaneMonitor{} + + setWithBackends(cf, nil, cache, cpm) return cache } @@ -58,6 +60,11 @@ func WithKialiCache(cache cache.KialiCache) { kialiCache = cache } +// WithControlPlaneMonitor is a testing func that lets you replace the global cpm var. +func WithControlPlaneMonitor(cpm ControlPlaneMonitor) { + poller = cpm +} + // FindOrFail will find an element in a slice or fail the test. func FindOrFail[T any](t *testing.T, s []T, f func(T) bool) T { t.Helper() diff --git a/business/workloads.go b/business/workloads.go index 1d7e15ad4c..532920198d 100644 --- a/business/workloads.go +++ b/business/workloads.go @@ -35,12 +35,18 @@ import ( ) func NewWorkloadService(userClients map[string]kubernetes.ClientInterface, prom prometheus.ClientInterface, cache cache.KialiCache, layer *Layer, config *config.Config) *WorkloadService { + excludedWorkloads := make(map[string]bool) + for _, w := range config.KubernetesConfig.ExcludeWorkloads { + excludedWorkloads[w] = true + } + return &WorkloadService{ - businessLayer: layer, - cache: cache, - config: config, - prom: prom, - userClients: userClients, + businessLayer: layer, + cache: cache, + config: config, + excludedWorkloads: excludedWorkloads, + prom: prom, + userClients: userClients, } } @@ -51,9 +57,10 @@ type WorkloadService struct { // The global kiali cache. This should be passed into the workload service rather than created inside of it. cache cache.KialiCache // The global kiali config. 
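// Illustrative sketch, not part of the change above: a test can now swap in the
// fake control plane monitor the same way the istiod status tests do earlier in
// this diff. The empty IstioComponentStatus is a placeholder.
func TestExampleUsesFakeControlPlaneMonitor(t *testing.T) {
	conf := config.NewConfig()
	k8s := kubetest.NewFakeK8sClient()

	SetupBusinessLayer(t, k8s, *conf)
	WithControlPlaneMonitor(&FakeControlPlaneMonitor{status: kubernetes.IstioComponentStatus{}})

	// ...exercise code that consults the control plane status here.
}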
- config *config.Config - prom prometheus.ClientInterface - userClients map[string]kubernetes.ClientInterface + config *config.Config + excludedWorkloads map[string]bool + prom prometheus.ClientInterface + userClients map[string]kubernetes.ClientInterface } type WorkloadCriteria struct { @@ -98,24 +105,20 @@ type LogOptions struct { core_v1.PodLogOptions } -var ( - excludedWorkloads map[string]bool - - // Matches an ISO8601 full date - severityRegexp = regexp.MustCompile(`(?i)ERROR|WARN|DEBUG|TRACE`) -) +// Matches an ISO8601 full date +var severityRegexp = regexp.MustCompile(`(?i)ERROR|WARN|DEBUG|TRACE`) -func isWorkloadIncluded(workload string) bool { - if excludedWorkloads == nil { +func (in *WorkloadService) isWorkloadIncluded(workload string) bool { + if in.excludedWorkloads == nil { return true } - return !excludedWorkloads[workload] + return !in.excludedWorkloads[workload] } // isWorkloadValid returns true if it is a known workload type and it is not configured as excluded -func isWorkloadValid(workloadType string) bool { +func (in *WorkloadService) isWorkloadValid(workloadType string) bool { _, knownWorkloadType := controllerOrder[workloadType] - return knownWorkloadType && isWorkloadIncluded(workloadType) + return knownWorkloadType && in.isWorkloadIncluded(workloadType) } // @TODO do validations per cluster @@ -148,7 +151,7 @@ func (in *WorkloadService) GetWorkloadList(ctx context.Context, criteria Workloa Validations: models.IstioValidations{}, } var ws models.Workloads - //var authpolicies []*security_v1beta1.AuthorizationPolicy + // var authpolicies []*security_v1beta1.AuthorizationPolicy var err error nFetches := 1 @@ -689,7 +692,7 @@ func (in *WorkloadService) fetchWorkloadsFromCluster(ctx context.Context, cluste defer wg.Done() var err error - if isWorkloadIncluded(kubernetes.ReplicationControllerType) { + if in.isWorkloadIncluded(kubernetes.ReplicationControllerType) { // No Cache for ReplicationControllers repcon, err = userClient.GetReplicationControllers(namespace) if err != nil { @@ -704,7 +707,7 @@ func (in *WorkloadService) fetchWorkloadsFromCluster(ctx context.Context, cluste defer wg.Done() var err error - if userClient.IsOpenShift() && isWorkloadIncluded(kubernetes.DeploymentConfigType) { + if userClient.IsOpenShift() && in.isWorkloadIncluded(kubernetes.DeploymentConfigType) { // No cache for DeploymentConfigs depcon, err = userClient.GetDeploymentConfigs(namespace) if err != nil { @@ -719,7 +722,7 @@ func (in *WorkloadService) fetchWorkloadsFromCluster(ctx context.Context, cluste defer wg.Done() var err error - if isWorkloadIncluded(kubernetes.StatefulSetType) { + if in.isWorkloadIncluded(kubernetes.StatefulSetType) { fulset, err = kubeCache.GetStatefulSets(namespace) if err != nil { log.Errorf("Error fetching StatefulSets per namespace %s: %s", namespace, err) @@ -733,7 +736,7 @@ func (in *WorkloadService) fetchWorkloadsFromCluster(ctx context.Context, cluste defer wg.Done() var err error - if isWorkloadIncluded(kubernetes.CronJobType) { + if in.isWorkloadIncluded(kubernetes.CronJobType) { // No cache for Cronjobs conjbs, err = userClient.GetCronJobs(namespace) if err != nil { @@ -748,7 +751,7 @@ func (in *WorkloadService) fetchWorkloadsFromCluster(ctx context.Context, cluste defer wg.Done() var err error - if isWorkloadIncluded(kubernetes.JobType) { + if in.isWorkloadIncluded(kubernetes.JobType) { // No cache for Jobs jbs, err = userClient.GetJobs(namespace) if err != nil { @@ -763,7 +766,7 @@ func (in *WorkloadService) fetchWorkloadsFromCluster(ctx 
context.Context, cluste defer wg.Done() var err error - if isWorkloadIncluded(kubernetes.DaemonSetType) { + if in.isWorkloadIncluded(kubernetes.DaemonSetType) { daeset, err = kialiCache.GetDaemonSets(namespace) if err != nil { log.Errorf("Error fetching DaemonSets per namespace %s: %s", namespace, err) @@ -784,7 +787,7 @@ func (in *WorkloadService) fetchWorkloadsFromCluster(ctx context.Context, cluste for _, pod := range pods { if len(pod.OwnerReferences) != 0 { for _, ref := range pod.OwnerReferences { - if ref.Controller != nil && *ref.Controller && isWorkloadIncluded(ref.Kind) { + if ref.Controller != nil && *ref.Controller && in.isWorkloadIncluded(ref.Kind) { if _, exist := controllers[ref.Name]; !exist { controllers[ref.Name] = ref.Kind } else { @@ -822,7 +825,7 @@ func (in *WorkloadService) fetchWorkloadsFromCluster(ctx context.Context, cluste if _, exist := controllers[ref.Name]; !exist { // For valid owner controllers, delete the child ReplicaSet and add the parent controller, // otherwise (for custom controllers), defer to the replica set. - if isWorkloadValid(ref.Kind) { + if in.isWorkloadValid(ref.Kind) { controllers[ref.Name] = ref.Kind delete(controllers, controllerName) } else { @@ -1271,7 +1274,7 @@ func (in *WorkloadService) fetchWorkload(ctx context.Context, criteria WorkloadC } var err error - if isWorkloadIncluded(kubernetes.ReplicationControllerType) { + if in.isWorkloadIncluded(kubernetes.ReplicationControllerType) { // No cache for ReplicationControllers repcon, err = client.GetReplicationControllers(criteria.Namespace) if err != nil { @@ -1290,7 +1293,7 @@ func (in *WorkloadService) fetchWorkload(ctx context.Context, criteria WorkloadC } var err error - if client.IsOpenShift() && isWorkloadIncluded(kubernetes.DeploymentConfigType) { + if client.IsOpenShift() && in.isWorkloadIncluded(kubernetes.DeploymentConfigType) { // No cache for deploymentConfigs depcon, err = client.GetDeploymentConfig(criteria.Namespace, criteria.WorkloadName) if err != nil { @@ -1308,7 +1311,7 @@ func (in *WorkloadService) fetchWorkload(ctx context.Context, criteria WorkloadC } var err error - if isWorkloadIncluded(kubernetes.StatefulSetType) { + if in.isWorkloadIncluded(kubernetes.StatefulSetType) { fulset, err = kialiCache.GetStatefulSet(criteria.Namespace, criteria.WorkloadName) if err != nil { fulset = nil @@ -1325,7 +1328,7 @@ func (in *WorkloadService) fetchWorkload(ctx context.Context, criteria WorkloadC } var err error - if isWorkloadIncluded(kubernetes.CronJobType) { + if in.isWorkloadIncluded(kubernetes.CronJobType) { // No cache for CronJobs conjbs, err = client.GetCronJobs(criteria.Namespace) if err != nil { @@ -1344,7 +1347,7 @@ func (in *WorkloadService) fetchWorkload(ctx context.Context, criteria WorkloadC } var err error - if isWorkloadIncluded(kubernetes.JobType) { + if in.isWorkloadIncluded(kubernetes.JobType) { // No cache for Jobs jbs, err = client.GetJobs(criteria.Namespace) if err != nil { @@ -1363,7 +1366,7 @@ func (in *WorkloadService) fetchWorkload(ctx context.Context, criteria WorkloadC } var err error - if isWorkloadIncluded(kubernetes.DaemonSetType) { + if in.isWorkloadIncluded(kubernetes.DaemonSetType) { ds, err = kialiCache.GetDaemonSet(criteria.Namespace, criteria.WorkloadName) if err != nil { ds = nil @@ -1384,7 +1387,7 @@ func (in *WorkloadService) fetchWorkload(ctx context.Context, criteria WorkloadC for _, pod := range pods { if len(pod.OwnerReferences) != 0 { for _, ref := range pod.OwnerReferences { - if ref.Controller != nil && *ref.Controller && 
isWorkloadIncluded(ref.Kind) { + if ref.Controller != nil && *ref.Controller && in.isWorkloadIncluded(ref.Kind) { if _, exist := controllers[ref.Name]; !exist { controllers[ref.Name] = ref.Kind } else { @@ -1422,7 +1425,7 @@ func (in *WorkloadService) fetchWorkload(ctx context.Context, criteria WorkloadC // For valid owner controllers, delete the child ReplicaSet and add the parent controller, // otherwise (for custom controllers), defer to the replica set. if _, exist := controllers[ref.Name]; !exist { - if isWorkloadValid(ref.Kind) { + if in.isWorkloadValid(ref.Kind) { controllers[ref.Name] = ref.Kind delete(controllers, controllerName) } else { @@ -1880,7 +1883,7 @@ func (in *WorkloadService) updateWorkload(ctx context.Context, cluster string, n go func(wkType string) { defer wg.Done() var err error - if isWorkloadIncluded(wkType) { + if in.isWorkloadIncluded(wkType) { err = userClient.UpdateWorkload(namespace, workloadName, wkType, jsonPatch, patchType) } if err != nil { diff --git a/business/workloads_test.go b/business/workloads_test.go index c2787f5883..f0e89116e0 100644 --- a/business/workloads_test.go +++ b/business/workloads_test.go @@ -145,8 +145,8 @@ func TestGetWorkloadListFromReplicationControllers(t *testing.T) { k8s.OpenShift = true SetupBusinessLayer(t, k8s, *config.NewConfig()) svc := setupWorkloadService(k8s, config.NewConfig()) + svc.excludedWorkloads = map[string]bool{} - excludedWorkloads = map[string]bool{} criteria := WorkloadCriteria{Namespace: "Namespace", IncludeIstioResources: false, IncludeHealth: false} workloadList, _ := svc.GetWorkloadList(context.TODO(), criteria) workloads := workloadList.Workloads @@ -184,8 +184,8 @@ func TestGetWorkloadListFromDeploymentConfigs(t *testing.T) { k8s.OpenShift = true SetupBusinessLayer(t, k8s, *config.NewConfig()) svc := setupWorkloadService(k8s, config.NewConfig()) + svc.excludedWorkloads = map[string]bool{} - excludedWorkloads = map[string]bool{} criteria := WorkloadCriteria{Namespace: "Namespace", IncludeIstioResources: false, IncludeHealth: false} workloadList, _ := svc.GetWorkloadList(context.TODO(), criteria) workloads := workloadList.Workloads @@ -223,8 +223,8 @@ func TestGetWorkloadListFromStatefulSets(t *testing.T) { k8s.OpenShift = true SetupBusinessLayer(t, k8s, *config.NewConfig()) svc := setupWorkloadService(k8s, config.NewConfig()) + svc.excludedWorkloads = map[string]bool{} - excludedWorkloads = map[string]bool{} criteria := WorkloadCriteria{Namespace: "Namespace", IncludeIstioResources: false, IncludeHealth: false} workloadList, _ := svc.GetWorkloadList(context.TODO(), criteria) workloads := workloadList.Workloads @@ -262,8 +262,8 @@ func TestGetWorkloadListFromDaemonSets(t *testing.T) { k8s.OpenShift = true SetupBusinessLayer(t, k8s, *config.NewConfig()) svc := setupWorkloadService(k8s, config.NewConfig()) + svc.excludedWorkloads = map[string]bool{} - excludedWorkloads = map[string]bool{} criteria := WorkloadCriteria{Namespace: "Namespace", IncludeIstioResources: false, IncludeHealth: false} workloadList, _ := svc.GetWorkloadList(context.TODO(), criteria) workloads := workloadList.Workloads @@ -394,6 +394,7 @@ func TestGetWorkloadFromDeployment(t *testing.T) { // Disabling CustomDashboards on Workload details testing conf := config.NewConfig() conf.ExternalServices.CustomDashboards.Enabled = false + kubernetes.SetConfig(t, *conf) // Setup mocks kubeObjs := []runtime.Object{ @@ -408,7 +409,7 @@ func TestGetWorkloadFromDeployment(t *testing.T) { } k8s := kubetest.NewFakeK8sClient(kubeObjs...) 
k8s.OpenShift = true - SetupBusinessLayer(t, k8s, *config.NewConfig()) + SetupBusinessLayer(t, k8s, *conf) svc := setupWorkloadService(k8s, conf) criteria := WorkloadCriteria{Cluster: conf.KubernetesConfig.ClusterName, Namespace: "Namespace", WorkloadName: "details-v1", WorkloadType: "", IncludeServices: false} @@ -426,8 +427,10 @@ func TestGetWorkloadWithInvalidWorkloadType(t *testing.T) { require := require.New(t) // Disabling CustomDashboards on Workload details testing + // otherwise this adds 10s to the test due to an http timeout. conf := config.NewConfig() conf.ExternalServices.CustomDashboards.Enabled = false + kubernetes.SetConfig(t, *conf) // Setup mocks kubeObjs := []runtime.Object{ @@ -444,7 +447,7 @@ func TestGetWorkloadWithInvalidWorkloadType(t *testing.T) { } k8s := kubetest.NewFakeK8sClient(kubeObjs...) k8s.OpenShift = true - SetupBusinessLayer(t, k8s, *config.NewConfig()) + SetupBusinessLayer(t, k8s, *conf) svc := setupWorkloadService(k8s, conf) criteria := WorkloadCriteria{Cluster: conf.KubernetesConfig.ClusterName, Namespace: "Namespace", WorkloadName: "details-v1", WorkloadType: "invalid", IncludeServices: false} @@ -462,8 +465,10 @@ func TestGetWorkloadFromPods(t *testing.T) { require := require.New(t) // Disabling CustomDashboards on Workload details testing + // otherwise this adds 10s to the test due to an http timeout. conf := config.NewConfig() conf.ExternalServices.CustomDashboards.Enabled = false + kubernetes.SetConfig(t, *conf) // Setup mocks kubeObjs := []runtime.Object{ @@ -479,7 +484,7 @@ func TestGetWorkloadFromPods(t *testing.T) { } k8s := kubetest.NewFakeK8sClient(kubeObjs...) k8s.OpenShift = true - SetupBusinessLayer(t, k8s, *config.NewConfig()) + SetupBusinessLayer(t, k8s, *conf) svc := setupWorkloadService(k8s, conf) criteria := WorkloadCriteria{Cluster: conf.KubernetesConfig.ClusterName, Namespace: "Namespace", WorkloadName: "custom-controller", WorkloadType: "", IncludeServices: false} @@ -701,8 +706,10 @@ func TestDuplicatedControllers(t *testing.T) { require := require.New(t) // Disabling CustomDashboards on Workload details testing + // otherwise this adds 10s to the test due to an http timeout. conf := config.NewConfig() conf.ExternalServices.CustomDashboards.Enabled = false + kubernetes.SetConfig(t, *conf) // Setup mocks kubeObjs := []runtime.Object{ @@ -727,11 +734,13 @@ func TestDuplicatedControllers(t *testing.T) { } k8s := kubetest.NewFakeK8sClient(kubeObjs...) 
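// Illustrative sketch, not part of the change above: workload exclusion now
// lives on the WorkloadService and is derived from the config handed to
// NewWorkloadService, rather than from a package-level global. "CronJob" is
// just an example exclusion.
func exampleWorkloadExclusion(userClients map[string]kubernetes.ClientInterface, prom prometheus.ClientInterface, kCache cache.KialiCache, layer *Layer) {
	conf := config.NewConfig()
	conf.KubernetesConfig.ExcludeWorkloads = []string{"CronJob"}

	svc := NewWorkloadService(userClients, prom, kCache, layer, conf)

	// Excluded kinds are skipped when building workload lists.
	_ = svc.isWorkloadIncluded("CronJob")    // false
	_ = svc.isWorkloadIncluded("Deployment") // true
}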
k8s.OpenShift = true - SetupBusinessLayer(t, k8s, *config.NewConfig()) + SetupBusinessLayer(t, k8s, *conf) svc := setupWorkloadService(k8s, conf) criteria := WorkloadCriteria{Namespace: "Namespace", IncludeIstioResources: false, IncludeHealth: false} - workloadList, _ := svc.GetWorkloadList(context.TODO(), criteria) + workloadList, err := svc.GetWorkloadList(context.TODO(), criteria) + require.NoError(err) + workloads := workloadList.Workloads criteria = WorkloadCriteria{Cluster: conf.KubernetesConfig.ClusterName, Namespace: "Namespace", WorkloadName: "duplicated-v1", WorkloadType: "", IncludeServices: false} diff --git a/config/config.go b/config/config.go index 2938a0bfd6..1590d9f2e5 100644 --- a/config/config.go +++ b/config/config.go @@ -247,9 +247,12 @@ type IstioConfig struct { IstioSidecarAnnotation string `yaml:"istio_sidecar_annotation,omitempty"` IstiodDeploymentName string `yaml:"istiod_deployment_name,omitempty"` IstiodPodMonitoringPort int `yaml:"istiod_pod_monitoring_port,omitempty"` - Registry *RegistryConfig `yaml:"registry,omitempty"` - RootNamespace string `yaml:"root_namespace,omitempty"` - UrlServiceVersion string `yaml:"url_service_version"` + // IstiodPollingIntervalSeconds is how often in seconds Kiali will poll istiod(s) for + // proxy status and registry services. Polling is not performed if IstioAPIEnabled is false. + IstiodPollingIntervalSeconds int `yaml:"istiod_polling_interval_seconds,omitempty"` + Registry *RegistryConfig `yaml:"registry,omitempty"` + RootNamespace string `yaml:"root_namespace,omitempty"` + UrlServiceVersion string `yaml:"url_service_version"` } type IstioCanaryRevision struct { @@ -645,6 +648,7 @@ func NewConfig() (c *Config) { IstioSidecarAnnotation: "sidecar.istio.io/status", IstiodDeploymentName: "istiod", IstiodPodMonitoringPort: 15014, + IstiodPollingIntervalSeconds: 20, RootNamespace: "istio-system", UrlServiceVersion: "http://istiod.istio-system:15014/version", GatewayAPIClasses: []GatewayAPIClass{ diff --git a/frontend/cypress/integration/common/istio_config.ts b/frontend/cypress/integration/common/istio_config.ts index a422a87880..7a105d7636 100644 --- a/frontend/cypress/integration/common/istio_config.ts +++ b/frontend/cypress/integration/common/istio_config.ts @@ -326,6 +326,34 @@ Then('user sees all the Istio Config objects in the bookinfo namespace', () => { }); }); +Then('user sees all the Istio Config objects in the bookinfo namespace for the {string} cluster', (cluster: string) => { + // Bookinfo Gateway + cy.getBySel(`VirtualItem_Cluster${cluster}_Nsbookinfo_gateway_bookinfo-gateway`); + // Bookinfo VS + cy.getBySel(`VirtualItem_Cluster${cluster}_Nsbookinfo_virtualservice_bookinfo`); +}); + +And('user sees Cluster information for Istio objects', () => { + // Gateways + cy.getBySel(`VirtualItem_Clustereast_Nsbookinfo_gateway_bookinfo-gateway`).contains( + 'td[data-label="Cluster"]', + 'east' + ); + cy.getBySel(`VirtualItem_Clusterwest_Nsbookinfo_gateway_bookinfo-gateway`).contains( + 'td[data-label="Cluster"]', + 'west' + ); + // VirtualServices + cy.getBySel(`VirtualItem_Clustereast_Nsbookinfo_virtualservice_bookinfo`).contains( + 'td[data-label="Cluster"]', + 'east' + ); + cy.getBySel(`VirtualItem_Clusterwest_Nsbookinfo_virtualservice_bookinfo`).contains( + 'td[data-label="Cluster"]', + 'west' + ); +}); + And('user sees Name information for Istio objects', () => { const object = 'bookinfo-gateway'; // There should be a table with a heading for each piece of information. 
diff --git a/frontend/cypress/integration/common/overview.ts b/frontend/cypress/integration/common/overview.ts index 507cca2934..da7fc5be1d 100644 --- a/frontend/cypress/integration/common/overview.ts +++ b/frontend/cypress/integration/common/overview.ts @@ -1,9 +1,8 @@ import { Before, Given, Then, When, And } from '@badeball/cypress-cucumber-preprocessor'; import { ensureKialiFinishedLoading } from './transition'; - -const CLUSTER1_CONTEXT = Cypress.env('CLUSTER1_CONTEXT') -const CLUSTER2_CONTEXT = Cypress.env('CLUSTER2_CONTEXT') +const CLUSTER1_CONTEXT = Cypress.env('CLUSTER1_CONTEXT'); +const CLUSTER2_CONTEXT = Cypress.env('CLUSTER2_CONTEXT'); Before(() => { // Focing to not stop cypress on unexpected errors not related to the tests. @@ -195,10 +194,14 @@ Then('there should be a {string} application indicator in the namespace', functi .should('exist'); }); -Then('there should be a {string} application indicator in the namespace in the {string} cluster', function (healthStatus: string, cluster:string) { - cy.get( - `[data-test=CardItem_${this.targetNamespace}_${cluster}] [data-test=overview-app-health]`).find('span').filter(`.icon-${healthStatus}`) - .should('exist'); +Then('there should be a {string} application indicator in the namespace in the {string} cluster', function ( + healthStatus: string, + cluster: string +) { + cy.get(`[data-test=CardItem_${this.targetNamespace}_${cluster}] [data-test=overview-app-health]`) + .find('span') + .filter(`.icon-${healthStatus}`) + .should('exist'); }); Then('the {string} application indicator should list the application', function (healthStatus: string) { @@ -222,17 +225,24 @@ Then('the {string} application indicator should list the application', function ).should('contain.text', this.targetApp); }); -Then('the {string} application indicator for the {string} cluster should list the application', function (healthStatus: string, cluster:string) { +Then('the {string} application indicator for the {string} cluster should list the application', function ( + healthStatus: string, + cluster: string +) { let healthIndicatorStatusKey = healthStatus; if (healthStatus === 'idle') { healthIndicatorStatusKey = 'not-ready'; } + cy.get(`[data-test=CardItem_${this.targetNamespace}_${cluster}] [data-test=overview-app-health]`) + .find('span') + .filter(`.icon-${healthStatus}`) + .trigger('mouseenter'); cy.get( - `[data-test=CardItem_${this.targetNamespace}_${cluster}] [data-test=overview-app-health]`).find('span').filter(`.icon-${healthStatus}`) - .trigger('mouseenter'); - cy.get( - `[aria-label='Overview status'] [data-test=${this.targetNamespace}-${healthIndicatorStatusKey}-${this.targetApp}]`).find('span').filter(`.icon-${healthStatus}`) + `[aria-label='Overview status'] [data-test=${this.targetNamespace}-${healthIndicatorStatusKey}-${this.targetApp}]` + ) + .find('span') + .filter(`.icon-${healthStatus}`) .should('exist'); cy.get( `[aria-label='Overview status'] [data-test=${this.targetNamespace}-${healthIndicatorStatusKey}-${this.targetApp}]` @@ -267,39 +277,45 @@ Then('the user sees information related to canary upgrades', view => { cy.get('[data-test="canary-upgrade"]').should('exist'); }); -Then('user sees the {string} cluster badge in the Kiali header', (name:string) =>{ +Then('user sees the {string} cluster badge in the Kiali header', (name: string) => { cy.get('[data-test="cluster-icon"]').contains(name).should('be.visible'); }); -And('user sees the {string} label in both {string} namespace cards', (label:string, ns:string) => { +And('user sees the 
{string} label in both {string} namespace cards', (label: string, ns: string) => { cy.get(`[data-test="CardItem_${ns}_east"]`).contains(label).should('be.visible'); cy.get(`[data-test="CardItem_${ns}_west"]`).contains(label).should('be.visible'); -}) +}); -And('the toggle on the right side of both {string} namespace cards exists', (ns:string) => { +And('the toggle on the right side of both {string} namespace cards exists', (ns: string) => { ensureKialiFinishedLoading(); cy.get(`[data-test="CardItem_${ns}_east"]`).find('[aria-label="Actions"]').should('exist'); cy.get(`[data-test="CardItem_${ns}_west"]`).find('[aria-label="Actions"]').should('exist'); }); -And('Istio config should not be available for the {string} {string}', (cluster:string, ns:string) => { +And('Istio config should not be available for the {string} {string}', (cluster: string, ns: string) => { cy.get(`[data-test="CardItem_${ns}_${cluster}"]`).contains('Istio config').siblings().contains('N/A'); -}); +}); -And('health should be different for {string} and {string} {string}', (cluster1:string, cluster2:string, ns:string) => { - if (ns == 'bookinfo'){ +And( + 'health should be different for {string} and {string} {string}', + (cluster1: string, cluster2: string, ns: string) => { + if (ns == 'bookinfo') { cy.get(`[data-test="CardItem_${ns}_${cluster1}"]`).find('[data-test="overview-type-app"]').contains(`5 app`); cy.get(`[data-test="CardItem_${ns}_${cluster2}"]`).find('[data-test="overview-type-app"]').contains(`4 app`); + } else { + cy.exec(`kubectl get pods -n ${ns} -l app --context ${CLUSTER1_CONTEXT} --no-headers | wc -l`).then(result => { + cy.get(`[data-test="CardItem_${ns}_${cluster1}"]`) + .find('[data-test="overview-type-app"]') + .contains(`${result.stdout} app`); + }); + cy.exec(`kubectl get pods -n ${ns} -l app --context ${CLUSTER2_CONTEXT} --no-headers | wc -l`).then(result => { + cy.get(`[data-test="CardItem_${ns}_${cluster2}"]`) + .find('[data-test="overview-type-app"]') + .contains(`${result.stdout} app`); + }); + } } - else { - cy.exec(`kubectl get pods -n ${ns} -l app --context ${CLUSTER1_CONTEXT} --no-headers | wc -l`).then((result) => { - cy.get(`[data-test="CardItem_${ns}_${cluster1}"]`).find('[data-test="overview-type-app"]').contains(`${result.stdout} app`); - }); - cy.exec(`kubectl get pods -n ${ns} -l app --context ${CLUSTER2_CONTEXT} --no-headers | wc -l`).then((result) => { - cy.get(`[data-test="CardItem_${ns}_${cluster2}"]`).find('[data-test="overview-type-app"]').contains(`${result.stdout} app`); - }); - } -}) +); And('user sees the {string} label in the {string} namespace card', (label: string, ns: string) => { cy.log(label); @@ -314,21 +330,29 @@ And('user does not see any cluster badge in the {string} namespace card', (ns: s }); }); -And('user sees the {string} label in the {string} {string} namespace card',(label:string, cluster:string, ns:string) =>{ - cy.get(`[data-test="CardItem_${ns}_${cluster}"]`).contains(label).should('be.visible'); -}); +And( + 'user sees the {string} label in the {string} {string} namespace card', + (label: string, cluster: string, ns: string) => { + cy.get(`[data-test="CardItem_${ns}_${cluster}"]`).contains(label).should('be.visible'); + } +); -And('user does not see the {string} label in the {string} {string} namespace card',(label:string, cluster:string, ns:string) =>{ - cy.get(`[data-test="CardItem_${ns}_${cluster}"]`).contains(label).should('not.exist'); -}); +And( + 'user does not see the {string} label in the {string} {string} namespace card', + (label: string, 
cluster: string, ns: string) => { + cy.get(`[data-test="CardItem_${ns}_${cluster}"]`).contains(label).should('not.exist'); + } +); -And('cluster badges for {string} and {string} cluster are visible in the LIST view', (cluster1:string, cluster2:string) =>{ - cy.get('[data-test="VirtualItem_bookinfo"]').find('#pfbadge-C').should('have.length',2); - cy.get('[data-test="VirtualItem_bookinfo"]').contains('east').should('be.visible'); - cy.get('[data-test="VirtualItem_bookinfo"]').contains('west').should('be.visible'); -}); +And( + 'cluster badges for {string} and {string} cluster are visible in the LIST view', + (cluster1: string, cluster2: string) => { + cy.getBySel(`VirtualItem_Cluster${cluster1}_bookinfo`).contains(cluster1).should('be.visible'); + cy.getBySel(`VirtualItem_Cluster${cluster2}_bookinfo`).contains(cluster2).should('be.visible'); + } +); -And('Control Plane metrics should be visible',() => { - cy.get('[data-test="VirtualItem_istio-system"]').find('[data-test="cpu-chart"]'); - cy.get('[data-test="VirtualItem_istio-system"]').find('[data-test="memory-chart"]'); +And('Control Plane metrics should be visible for cluster {string}', (cluster: string) => { + cy.getBySel(`VirtualItem_Cluster${cluster}_istio-system`).find('[data-test="cpu-chart"]'); + cy.getBySel(`VirtualItem_Cluster${cluster}_istio-system`).find('[data-test="memory-chart"]'); }); diff --git a/frontend/cypress/integration/common/sidecar_injection.ts b/frontend/cypress/integration/common/sidecar_injection.ts index 6f236223b4..99e38dcdab 100644 --- a/frontend/cypress/integration/common/sidecar_injection.ts +++ b/frontend/cypress/integration/common/sidecar_injection.ts @@ -242,14 +242,24 @@ When('I visit the overview page', function () { cy.contains('Inbound traffic', { matchCase: false }); // Make sure data finished loading, so avoid broken tests because of a re-render }); +// Only works for single cluster. 
When('I override the default automatic sidecar injection policy in the namespace to enabled', function () { cy.request('GET', '/api/status').should(response => { expect(response.status).to.equal(200); - cy.get('[data-test=overview-type-LIST]').should('be.visible').click(); - cy.get(`[data-test=VirtualItem_${this.targetNamespace}] button[aria-label=Actions]`).should('be.visible').click(); - cy.get(`[data-test=enable-${this.targetNamespace}-namespace-sidecar-injection]`).should('be.visible').click(); - cy.get('[data-test=confirm-traffic-policies]').should('be.visible').click(); + cy.request('/api/clusters').then(response => { + cy.wrap(response.isOkStatusCode).should('be.true'); + cy.wrap(response.body).should('have.length', 1); + const cluster = response.body[0].name; + + cy.getBySel('overview-type-LIST').should('be.visible').click(); + cy.get(`[data-test=VirtualItem_Cluster${cluster}_${this.targetNamespace}] button[aria-label=Actions]`) + .should('be.visible') + .click(); + cy.getBySel(`enable-${this.targetNamespace}-namespace-sidecar-injection`).should('be.visible').click(); + cy.getBySel('confirm-traffic-policies').should('be.visible').click(); + }); + ensureKialiFinishedLoading(); }); }); @@ -260,13 +270,21 @@ When( cy.request('GET', '/api/status').should(response => { expect(response.status).to.equal(200); - cy.get('[data-test=overview-type-LIST]').should('be.visible').click(); - cy.get(`[data-test=VirtualItem_${this.targetNamespace}] button[aria-label=Actions]`).should('be.visible').click(); - cy.get(`[data-test=${enabledOrDisabled}-${this.targetNamespace}-namespace-sidecar-injection]`) - .should('be.visible') - .click(); - cy.get('[data-test=confirm-traffic-policies]').should('be.visible').click(); - ensureKialiFinishedLoading(); + cy.request('/api/clusters').then(response => { + cy.wrap(response.isOkStatusCode).should('be.true'); + cy.wrap(response.body).should('have.length', 1); + const cluster = response.body[0].name; + + cy.getBySel('overview-type-LIST').should('be.visible').click(); + cy.get(`[data-test=VirtualItem_Cluster${cluster}_${this.targetNamespace}] button[aria-label=Actions]`) + .should('be.visible') + .click(); + cy.getBySel(`${enabledOrDisabled}-${this.targetNamespace}-namespace-sidecar-injection`) + .should('be.visible') + .click(); + cy.getBySel('confirm-traffic-policies').should('be.visible').click(); + ensureKialiFinishedLoading(); + }); }); } ); @@ -275,11 +293,19 @@ When('I remove override configuration for sidecar injection in the namespace', f cy.request('GET', '/api/status').should(response => { expect(response.status).to.equal(200); - cy.get('[data-test=overview-type-LIST]').should('be.visible').click(); - cy.get(`[data-test=VirtualItem_${this.targetNamespace}] button[aria-label=Actions]`).should('be.visible').click(); - cy.get(`[data-test=remove-${this.targetNamespace}-namespace-sidecar-injection]`).should('be.visible').click(); - cy.get('[data-test=confirm-traffic-policies]').should('be.visible').click(); - ensureKialiFinishedLoading(); + cy.request('/api/clusters').then(response => { + cy.wrap(response.isOkStatusCode).should('be.true'); + cy.wrap(response.body).should('have.length', 1); + const cluster = response.body[0].name; + + cy.getBySel('overview-type-LIST').should('be.visible').click(); + cy.get(`[data-test=VirtualItem_Cluster${cluster}_${this.targetNamespace}] button[aria-label=Actions]`) + .should('be.visible') + .click(); + cy.getBySel(`remove-${this.targetNamespace}-namespace-sidecar-injection`).should('be.visible').click(); + 
cy.getBySel('confirm-traffic-policies').should('be.visible').click(); + ensureKialiFinishedLoading(); + }); }); }); @@ -309,16 +335,29 @@ Then('I should see the override annotation for sidecar injection in the namespac expect(response.status).to.equal(200); const expectation = 'exist'; - cy.get(`[data-test=VirtualItem_${this.targetNamespace}]`) - .contains(`istio-injection=${enabled}`) - .should(expectation); + cy.request('/api/clusters').then(response => { + cy.wrap(response.isOkStatusCode).should('be.true'); + cy.wrap(response.body).should('have.length', 1); + const cluster = response.body[0].name; + cy.getBySel(`VirtualItem_Cluster${cluster}_${this.targetNamespace}`) + .contains(`istio-injection=${enabled}`) + .should(expectation); + }); }); }); Then('I should see no override annotation for sidecar injection in the namespace', function () { cy.request('GET', '/api/status').should(response => { expect(response.status).to.equal(200); - cy.get(`[data-test=VirtualItem_${this.targetNamespace}]`).contains(`istio-injection`).should('not.exist'); + cy.request('/api/clusters').then(response => { + cy.wrap(response.isOkStatusCode).should('be.true'); + cy.wrap(response.body).should('have.length', 1); + const cluster = response.body[0].name; + + cy.getBySel(`VirtualItem_Cluster${cluster}_${this.targetNamespace}`) + .contains(`istio-injection`) + .should('not.exist'); + }); }); }); diff --git a/frontend/cypress/integration/common/table.ts b/frontend/cypress/integration/common/table.ts index eca63e1fb1..66ac740611 100644 --- a/frontend/cypress/integration/common/table.ts +++ b/frontend/cypress/integration/common/table.ts @@ -185,6 +185,7 @@ export const ensureObjectsInTable = (...names: string[]) => { }); }; +// Only works for a single cluster. export const checkHealthIndicatorInTable = ( targetNamespace: string, targetType: string | null, @@ -195,7 +196,20 @@ export const checkHealthIndicatorInTable = ( ? `${targetNamespace}_${targetType}_${targetRowItemName}` : `${targetNamespace}_${targetRowItemName}`; - cy.get(`tr[data-test=VirtualItem_Ns${selector}]`).find('span').filter(`.icon-${healthStatus}`).should('exist'); + // cy.getBySel(`VirtualItem_Ns${selector}]`).find('span').filter(`.icon-${healthStatus}`).should('exist'); + // Fetch the cluster info from /api/clusters + // TODO: Move this somewhere else since other tests will most likely need this info as well. + // VirtualItem_Clustercluster-default_Nsbookinfo_details + // VirtualItem_Clustercluster-default_Nsbookinfo_productpage + cy.request('/api/clusters').then(response => { + cy.wrap(response.isOkStatusCode).should('be.true'); + cy.wrap(response.body).should('have.length', 1); + const cluster = response.body[0].name; + cy.getBySel(`VirtualItem_Cluster${cluster}_Ns${selector}`) + .find('span') + .filter(`.icon-${healthStatus}`) + .should('exist'); + }); }; export const checkHealthStatusInTable = ( @@ -208,11 +222,16 @@ export const checkHealthStatusInTable = ( ? 
`${targetNamespace}_${targetType}_${targetRowItemName}` : `${targetNamespace}_${targetRowItemName}`; - cy.get(`[data-test=VirtualItem_Ns${selector}] td:first-child span[class=pf-v5-c-icon__content]`).trigger( - 'mouseenter' - ); + cy.request('/api/clusters').then(response => { + cy.wrap(response.isOkStatusCode).should('be.true'); + cy.wrap(response.body).should('have.length', 1); + const cluster = response.body[0].name; + cy.get( + `[data-test=VirtualItem_Cluster${cluster}_Ns${selector}] td:first-child span[class=pf-v5-c-icon__content]` + ).trigger('mouseenter'); - cy.get(`[aria-label='Health indicator'] strong`).should('contain.text', healthStatus); + cy.get(`[aria-label='Health indicator'] strong`).should('contain.text', healthStatus); + }); }; And('an entry for {string} cluster should be in the table', (cluster: string) => { diff --git a/frontend/cypress/integration/common/wizard_request_routing.ts b/frontend/cypress/integration/common/wizard_request_routing.ts index e787e1f235..a606499104 100644 --- a/frontend/cypress/integration/common/wizard_request_routing.ts +++ b/frontend/cypress/integration/common/wizard_request_routing.ts @@ -30,21 +30,39 @@ Given('user opens the namespace {string} and {string} service details page', (na cy.visit(url + '/namespaces/' + namespace + '/services/' + service + '?refresh=0'); }); -Given('user opens the namespace {string} and the remote {string} service details page', (namespace: string, service: string) => { - cy.visit(url + '/namespaces/' + namespace + '/services/' + service + '?refresh=0&clusterName=west'); -}); - -When('user deletes Request Routing named {string} and the resource is no longer available in any cluster', (name:string) => { - cy.exec(`kubectl delete destinationrules.networking.istio.io ${name} -n bookinfo --context ${CLUSTER1_CONTEXT}`, { failOnNonZeroExit: false }); - cy.exec(`kubectl delete destinationrules.networking.istio.io ${name} -n bookinfo --context ${CLUSTER2_CONTEXT}`, { failOnNonZeroExit: false }); - cy.exec(`kubectl delete virtualservices.networking.istio.io ${name} -n bookinfo --context ${CLUSTER1_CONTEXT}`, { failOnNonZeroExit: false }); - cy.exec(`kubectl delete virtualservices.networking.istio.io ${name} -n bookinfo --context ${CLUSTER2_CONTEXT}`, { failOnNonZeroExit: false }); - ensureKialiFinishedLoading(); -}); +Given( + 'user opens the namespace {string} and the remote {string} service details page', + (namespace: string, service: string) => { + cy.visit(url + '/namespaces/' + namespace + '/services/' + service + '?refresh=0&clusterName=west'); + } +); + +When( + 'user deletes Request Routing named {string} and the resource is no longer available in any cluster', + (name: string) => { + cy.exec(`kubectl delete destinationrules.networking.istio.io ${name} -n bookinfo --context ${CLUSTER1_CONTEXT}`, { + failOnNonZeroExit: false + }); + cy.exec(`kubectl delete destinationrules.networking.istio.io ${name} -n bookinfo --context ${CLUSTER2_CONTEXT}`, { + failOnNonZeroExit: false + }); + cy.exec(`kubectl delete virtualservices.networking.istio.io ${name} -n bookinfo --context ${CLUSTER1_CONTEXT}`, { + failOnNonZeroExit: false + }); + cy.exec(`kubectl delete virtualservices.networking.istio.io ${name} -n bookinfo --context ${CLUSTER2_CONTEXT}`, { + failOnNonZeroExit: false + }); + ensureKialiFinishedLoading(); + } +); When('user deletes gateway named {string} and the resource is no longer available in any cluster', (name: string) => { - cy.exec(`kubectl delete gateway.networking.istio.io ${name} -n bookinfo --context 
${CLUSTER1_CONTEXT}`, { failOnNonZeroExit: false }); - cy.exec(`kubectl delete gateway.networking.istio.io ${name} -n bookinfo --context ${CLUSTER2_CONTEXT}`, { failOnNonZeroExit: false }); + cy.exec(`kubectl delete gateway.networking.istio.io ${name} -n bookinfo --context ${CLUSTER1_CONTEXT}`, { + failOnNonZeroExit: false + }); + cy.exec(`kubectl delete gateway.networking.istio.io ${name} -n bookinfo --context ${CLUSTER2_CONTEXT}`, { + failOnNonZeroExit: false + }); ensureKialiFinishedLoading(); }); @@ -78,14 +96,23 @@ When('user clicks in the {string} actions', (action: string) => { .should('not.exist'); }); -Then('user sees the generated {string} objects located in the {string} cluster',(svc:string, cluster:string) =>{ - cy.get(`[data-test="VirtualItem_Nsbookinfo_destinationrule_${svc}"]`).find('[data-label="Cluster"]').contains(cluster); - cy.get(`[data-test="VirtualItem_Nsbookinfo_virtualservice_${svc}"]`).find('[data-label="Cluster"]').contains(cluster); +Then('user sees the generated {string} objects located in the {string} cluster', (svc: string, cluster: string) => { + cy.getBySel(`VirtualItem_Cluster${cluster}_Nsbookinfo_destinationrule_${svc}`) + .find('[data-label="Cluster"]') + .contains(cluster); + cy.getBySel(`VirtualItem_Cluster${cluster}_Nsbookinfo_virtualservice_${svc}`) + .find('[data-label="Cluster"]') + .contains(cluster); }); -And('the {string} {string} should be listed in {string} {string} namespace', (type:string, svc:string, cluster:string, ns:string) => { - cy.get(`[data-test="VirtualItem_Ns${ns}_${type.toLowerCase()}_${svc}"]`).find('[data-label="Cluster"]').contains(cluster); -}) +And( + 'the {string} {string} should be listed in {string} {string} namespace', + (type: string, svc: string, cluster: string, ns: string) => { + cy.getBySel(`VirtualItem_Cluster${cluster}_Ns${ns}_${type.toLowerCase()}_${svc}`) + .find('[data-label="Cluster"]') + .contains(cluster); + } +); And('user sees the {string} wizard', (title: string) => { cy.get('div[aria-label="' + title + '"]'); diff --git a/frontend/cypress/integration/featureFiles/istio_config.feature b/frontend/cypress/integration/featureFiles/istio_config.feature index 5426a99b8e..c26b54a8cb 100644 --- a/frontend/cypress/integration/featureFiles/istio_config.feature +++ b/frontend/cypress/integration/featureFiles/istio_config.feature @@ -78,14 +78,16 @@ Feature: Kiali Istio Config page Then the user can create a "Sidecar" Istio object @multi-cluster - @skip + @multi-primary Scenario: See all Istio Config objects in the bookinfo namespace in the multi-cluster environment. 
Then the "Cluster" column "appears" - And user sees all the Istio Config objects from both clusters in the bookinfo namespace + And user sees all the Istio Config objects in the bookinfo namespace for the "east" cluster + And user sees all the Istio Config objects in the bookinfo namespace for the "west" cluster And user sees Name information for Istio objects And user sees Namespace information for Istio objects And user sees Type information for Istio objects And user sees Configuration information for Istio objects + And user sees Cluster information for Istio objects @skip @multi-cluster diff --git a/frontend/cypress/integration/featureFiles/overview.feature b/frontend/cypress/integration/featureFiles/overview.feature index 0efc9d5968..1772e43f61 100644 --- a/frontend/cypress/integration/featureFiles/overview.feature +++ b/frontend/cypress/integration/featureFiles/overview.feature @@ -173,9 +173,10 @@ Feature: Kiali Overview page Then user sees a "LIST" "bookinfo" namespace And the "Cluster" column "appears" And cluster badges for "east" and "west" cluster are visible in the LIST view - And Control Plane metrics should be visible + And Control Plane metrics should be visible for cluster "east" #this scenario refers to a bug (https://github.com/kiali/kiali/issues/6504) which is not resolved at the time of writing the scenario + # this scenario refers to a bug (https://github.com/kiali/kiali/issues/6504) @multi-cluster @skip Scenario: The new Cluster column should be visible and sortable when changing to list view @@ -183,3 +184,9 @@ Feature: Kiali Overview page Then the "Cluster" column "appears" And user sorts by "Cluster" desc Then the list is sorted by "Cluster" desc + + @multi-cluster + @multi-primary + Scenario: There should be two control plane cards for each cluster + Then user sees the "Control plane" label in the "east" "istio-system" namespace card + Then user sees the "Control plane" label in the "west" "istio-system" namespace card diff --git a/frontend/cypress/integration/featureFiles/wizard_istio_config.feature b/frontend/cypress/integration/featureFiles/wizard_istio_config.feature index ddda93b279..5bc59112af 100644 --- a/frontend/cypress/integration/featureFiles/wizard_istio_config.feature +++ b/frontend/cypress/integration/featureFiles/wizard_istio_config.feature @@ -1,7 +1,7 @@ @wizard-istio-config # don't change first line of this file - the tag is used for the test scripts to identify the test suite -Feature: Kiali Istio Config page +Feature: Kiali Istio Config wizard On the Istio Config page, an admin should see all the Istio Config objects. 
The admin should be able to filter for the Istio Config objects they are looking for diff --git a/frontend/cypress/support/commands.ts b/frontend/cypress/support/commands.ts index 8b25057ed0..81636f145d 100644 --- a/frontend/cypress/support/commands.ts +++ b/frontend/cypress/support/commands.ts @@ -195,18 +195,17 @@ Cypress.Commands.add('login', (username: string, password: string) => { haveCookie = true; }); } else if (auth_strategy === 'token') { - cy.exec('kubectl -n istio-system create token citest') - .then(result => { - cy.request({ - method: 'POST', - url: 'api/authenticate', - form: true, - body: { - token: result.stdout - } - }); - haveCookie = true; + cy.exec('kubectl -n istio-system create token citest').then(result => { + cy.request({ + method: 'POST', + url: 'api/authenticate', + form: true, + body: { + token: result.stdout + } }); + haveCookie = true; + }); } } else { cy.log('got an auth cookie, skipping login'); diff --git a/frontend/package.json b/frontend/package.json index 45c5b6d9a4..a7f9e81064 100644 --- a/frontend/package.json +++ b/frontend/package.json @@ -45,7 +45,8 @@ "cypress:run:selected": "cypress run -e TAGS=\"@selected\"", "cypress:run": "cypress run -e TAGS=\"@smoke\" && cypress run -e TAGS=\"not @crd-validation and not @multi-cluster and not @smoke\" && cypress run -e TAGS=\"@crd-validation and not @multi-cluster and not @smoke\"", "cypress:run:smoke": "cypress run -e TAGS=\"@smoke\"", - "cypress:run:multi-cluster": "cypress run -e TAGS=\"not @crd-validation and @multi-cluster\"", + "cypress:run:multi-cluster": "cypress run -e TAGS=\"not @crd-validation and @multi-cluster and not @multi-primary\"", + "cypress:run:multi-primary": "cypress run -e TAGS=\"not @crd-validation and @multi-cluster and @multi-primary\"", "cypress:run:junit": "cypress run --reporter cypress-multi-reporters --reporter-options configFile=reporter-config.json -e TAGS=\"not @crd-validation and not @multi-cluster\" && cypress run --reporter cypress-multi-reporters --reporter-options configFile=reporter-config.json -e TAGS=\"@crd-validation and not @multi-cluster\"", "cypress:delete:reports": "rm cypress/results/* || true", "cypress:combine:reports": "jrm cypress/results/combined-report.xml \"cypress/results/*.xml\"", diff --git a/frontend/src/components/VirtualList/VirtualItem.tsx b/frontend/src/components/VirtualList/VirtualItem.tsx index ed70ea0f3c..04c71cb7ec 100644 --- a/frontend/src/components/VirtualList/VirtualItem.tsx +++ b/frontend/src/components/VirtualList/VirtualItem.tsx @@ -58,11 +58,12 @@ export class VirtualItem extends React.Component diff --git a/hack/istio/multicluster/install-multi-primary.sh b/hack/istio/multicluster/install-multi-primary.sh index b144eeee34..af291532c2 100755 --- a/hack/istio/multicluster/install-multi-primary.sh +++ b/hack/istio/multicluster/install-multi-primary.sh @@ -73,7 +73,7 @@ create_remote_secret() { fi echo "Choosing to use: [${secretname}]" fi - REMOTE_SECRET="$("${ISTIOCTL}" x create-remote-secret --name "${clustername}" ${secretname})" + REMOTE_SECRET="$("${ISTIOCTL}" create-remote-secret --name "${clustername}" ${secretname})" if [ "$?" 
!= "0" ]; then echo "Failed to generate remote secret for cluster [${clustername}]" exit 1 @@ -199,6 +199,3 @@ source ${SCRIPT_DIR}/setup-tracing.sh # Install bookinfo across cluster if enabled source ${SCRIPT_DIR}/split-bookinfo.sh - -# Install Kiali in both clusters if enabled -source ${SCRIPT_DIR}/deploy-kiali.sh diff --git a/hack/run-integration-tests.sh b/hack/run-integration-tests.sh index 7bd90f79d9..3d0299425f 100755 --- a/hack/run-integration-tests.sh +++ b/hack/run-integration-tests.sh @@ -4,9 +4,17 @@ infomsg() { echo "[INFO] ${1}" } +# Suites +BACKEND="backend" +FRONTEND="frontend" +FRONTEND_PRIMARY_REMOTE="frontend-primary-remote" +FRONTEND_MULTI_PRIMARY="frontend-multi-primary" +# + ISTIO_VERSION="" -TEST_SUITE="backend" +TEST_SUITE="${BACKEND}" SETUP_ONLY="false" +TESTS_ONLY="false" # process command line args while [[ $# -gt 0 ]]; do @@ -16,14 +24,6 @@ while [[ $# -gt 0 ]]; do ISTIO_VERSION="${2}" shift;shift ;; - -ts|--test-suite) - TEST_SUITE="${2}" - if [ "${TEST_SUITE}" != "backend" -a "${TEST_SUITE}" != "frontend" -a "${TEST_SUITE}" != "frontend-multi-cluster" ]; then - echo "--test-suite option must be one of 'backend', 'frontend', or 'frontend-multi-cluster'" - exit 1 - fi - shift;shift - ;; -so|--setup-only) SETUP_ONLY="${2}" if [ "${SETUP_ONLY}" != "true" -a "${SETUP_ONLY}" != "false" ]; then @@ -32,18 +32,37 @@ while [[ $# -gt 0 ]]; do fi shift;shift ;; + -to|--tests-only) + TESTS_ONLY="${2}" + if [ "${TESTS_ONLY}" != "true" -a "${TESTS_ONLY}" != "false" ]; then + echo "--tests-only option must be one of 'true' or 'false'" + exit 1 + fi + shift;shift + ;; + -ts|--test-suite) + TEST_SUITE="${2}" + if [ "${TEST_SUITE}" != "${BACKEND}" -a "${TEST_SUITE}" != "${FRONTEND}" -a "${TEST_SUITE}" != "${FRONTEND_PRIMARY_REMOTE}" -a "${TEST_SUITE}" != "${FRONTEND_MULTI_PRIMARY}" ]; then + echo "--test-suite option must be one of '${BACKEND}', '${FRONTEND}', '${FRONTEND_PRIMARY_REMOTE}', or '${FRONTEND_MULTI_PRIMARY}'" + exit 1 + fi + shift;shift + ;; -h|--help) cat < Which Istio version to test with. For releases, specify "#.#.#". For dev builds, specify in the form "#.#-dev" Default: The latest release - -ts|--test-suite - Which test suite to run. - Default: backend -so|--setup-only If true, only setup the test environment and exit without running the tests. Default: false + -to|--tests-only + If true, only run the tests and skip the setup. + Default: false + -ts|--test-suite <${BACKEND}|${FRONTEND}|${FRONTEND_PRIMARY_REMOTE}|${FRONTEND_MULTI_PRIMARY}> + Which test suite to run. + Default: ${BACKEND} -h|--help: This message @@ -63,11 +82,17 @@ HELPMSG esac done +if [ "${SETUP_ONLY}" == "true" -a "${TESTS_ONLY}" == "true" ]; then + echo "ERROR: --setup-only and --tests-only cannot both be true. Aborting." + exit 1 +fi + # print out our settings for debug purposes cat < --mc|--multicluster - Whether to set up a multicluster environment. - Default: false +-mc|--multicluster <${MULTI_PRIMARY}|${PRIMARY_REMOTE}> + Whether to set up a multicluster environment + and which kind of multicluster environment to setup. 
+ Default: HELP } @@ -38,7 +42,14 @@ while [[ $# -gt 0 ]]; do -dorp|--docker-or-podman) DORP="$2"; shift;shift; ;; -h|--help) helpmsg; exit 1 ;; -iv|--istio-version) ISTIO_VERSION="$2"; shift;shift; ;; - -mc|--multicluster) MULTICLUSTER="$2"; shift;shift; ;; + -mc|--multicluster) + MULTICLUSTER="${2}" + if [ "${MULTICLUSTER}" != "${PRIMARY_REMOTE}" -a "${MULTICLUSTER}" != "${MULTI_PRIMARY}" ]; then + echo "--multicluster option must be one of '${PRIMARY_REMOTE}' or '${MULTI_PRIMARY}'" + exit 1 + fi + shift;shift + ;; *) echo "Unknown argument: [$key]. Aborting."; helpmsg; exit 1 ;; esac done @@ -49,7 +60,6 @@ set -e # set up some of our defaults AUTH_STRATEGY="${AUTH_STRATEGY:-anonymous}" DORP="${DORP:-docker}" -MULTICLUSTER="${MULTICLUSTER:-false}" # Defaults the branch to master unless it is already set TARGET_BRANCH="${TARGET_BRANCH:-master}" @@ -209,11 +219,15 @@ setup_kind_multicluster() { if [[ "${ISTIO_VERSION}" == *-dev ]]; then local hub_arg="--istio-hub default" fi - "${SCRIPT_DIR}"/istio/multicluster/install-primary-remote.sh --manage-kind true -dorp docker --istio-dir "${istio_dir}" ${hub_arg:-} + if [ "${MULTICLUSTER}" == "${MULTI_PRIMARY}" ]; then + "${SCRIPT_DIR}"/istio/multicluster/install-multi-primary.sh --manage-kind true -dorp docker --istio-dir "${istio_dir}" ${hub_arg:-} + elif [ "${MULTICLUSTER}" == "${PRIMARY_REMOTE}" ]; then + "${SCRIPT_DIR}"/istio/multicluster/install-primary-remote.sh --manage-kind true -dorp docker --istio-dir "${istio_dir}" ${hub_arg:-} + fi "${SCRIPT_DIR}"/istio/multicluster/deploy-kiali.sh --manage-kind true -dorp docker -kas anonymous -kudi true -kshc "${HELM_CHARTS_DIR}"/_output/charts/kiali-server-*.tgz } -if [ "${MULTICLUSTER}" == "true" ]; then +if [ -n "${MULTICLUSTER}" ]; then setup_kind_multicluster else setup_kind_singlecluster diff --git a/handlers/apps_test.go b/handlers/apps_test.go index de28252bd3..2127b61b7d 100644 --- a/handlers/apps_test.go +++ b/handlers/apps_test.go @@ -198,10 +198,16 @@ func setupAppMetricsEndpoint(t *testing.T) (*httptest.Server, *prometheustest.Pr return ts, xapi, k8s } -func setupAppListEndpoint(t *testing.T, k8s kubernetes.ClientInterface, conf config.Config) (*httptest.Server, *prometheustest.PromClientMock) { - prom := new(prometheustest.PromClientMock) - +func setupAppListEndpoint(t *testing.T, k8s kubernetes.ClientInterface, conf config.Config) *httptest.Server { business.SetupBusinessLayer(t, k8s, conf) + promMock := new(prometheustest.PromAPIMock) + promMock.SpyArgumentsAndReturnEmpty(func(mock.Arguments) {}) + prom, err := prometheus.NewClient() + if err != nil { + t.Fatal(err) + } + prom.Inject(promMock) + business.WithProm(prom) mr := mux.NewRouter() mr.HandleFunc("/api/namespaces/{namespace}/apps", http.HandlerFunc( @@ -218,7 +224,7 @@ func setupAppListEndpoint(t *testing.T, k8s kubernetes.ClientInterface, conf con ts := httptest.NewServer(mr) t.Cleanup(ts.Close) - return ts, prom + return ts } func newProject() *osproject_v1.Project { @@ -246,7 +252,7 @@ func TestAppsEndpoint(t *testing.T) { } k8s := kubetest.NewFakeK8sClient(kubeObjects...) 
k8s.OpenShift = true - ts, _ := setupAppListEndpoint(t, k8s, *cfg) + ts := setupAppListEndpoint(t, k8s, *cfg) url := ts.URL + "/api/namespaces/Namespace/apps" @@ -264,15 +270,18 @@ func TestAppDetailsEndpoint(t *testing.T) { assert := assert.New(t) require := require.New(t) - cfg := config.NewConfig() - cfg.ExternalServices.Istio.IstioAPIEnabled = false - config.Set(cfg) + // Disabling CustomDashboards on testing + // otherwise this adds 10s to the test due to an http timeout. + conf := config.NewConfig() + conf.ExternalServices.CustomDashboards.Enabled = false + conf.ExternalServices.Istio.IstioAPIEnabled = false + kubernetes.SetConfig(t, *conf) mockClock() proj := newProject() proj.Name = "Namespace" kubeObjects := []runtime.Object{proj} - for _, obj := range business.FakeDeployments(*cfg) { + for _, obj := range business.FakeDeployments(*conf) { o := obj kubeObjects = append(kubeObjects, &o) } @@ -282,7 +291,7 @@ func TestAppDetailsEndpoint(t *testing.T) { } k8s := kubetest.NewFakeK8sClient(kubeObjects...) k8s.OpenShift = true - ts, _ := setupAppListEndpoint(t, k8s, *cfg) + ts := setupAppListEndpoint(t, k8s, *conf) url := ts.URL + "/api/namespaces/Namespace/apps/httpbin" diff --git a/handlers/istio_config.go b/handlers/istio_config.go index 4cadc173e0..a27bf8b5e4 100644 --- a/handlers/istio_config.go +++ b/handlers/istio_config.go @@ -229,7 +229,7 @@ func IstioConfigDelete(w http.ResponseWriter, r *http.Request) { RespondWithError(w, http.StatusInternalServerError, "Services initialization error: "+err.Error()) return } - err = business.IstioConfig.DeleteIstioConfigDetail(cluster, namespace, objectType, object) + err = business.IstioConfig.DeleteIstioConfigDetail(r.Context(), cluster, namespace, objectType, object) if err != nil { handleErrorResponse(w, err) return @@ -265,7 +265,7 @@ func IstioConfigUpdate(w http.ResponseWriter, r *http.Request) { RespondWithError(w, http.StatusBadRequest, "Update request with bad update patch: "+err.Error()) } jsonPatch := string(body) - updatedConfigDetails, err := business.IstioConfig.UpdateIstioConfigDetail(cluster, namespace, objectType, object, jsonPatch) + updatedConfigDetails, err := business.IstioConfig.UpdateIstioConfigDetail(r.Context(), cluster, namespace, objectType, object, jsonPatch) if err != nil { handleErrorResponse(w, err) return @@ -301,7 +301,7 @@ func IstioConfigCreate(w http.ResponseWriter, r *http.Request) { RespondWithError(w, http.StatusBadRequest, "Create request could not be read: "+err.Error()) } - createdConfigDetails, err := business.IstioConfig.CreateIstioConfigDetail(cluster, namespace, objectType, body) + createdConfigDetails, err := business.IstioConfig.CreateIstioConfigDetail(r.Context(), cluster, namespace, objectType, body) if err != nil { handleErrorResponse(w, err) return diff --git a/handlers/services.go b/handlers/services.go index 00cff05c46..390761b786 100644 --- a/handlers/services.go +++ b/handlers/services.go @@ -54,8 +54,17 @@ func ServiceList(w http.ResponseWriter, r *http.Request) { p := serviceListParams{} p.extract(r) - criteria := business.ServiceCriteria{Namespace: p.Namespace, IncludeHealth: p.IncludeHealth, - IncludeIstioResources: p.IncludeIstioResources, IncludeOnlyDefinitions: p.IncludeOnlyDefinitions, RateInterval: "", QueryTime: p.QueryTime} + criteria := business.ServiceCriteria{ + // Purposefully leaving cluster out of Criteria because the frontend doesn't + // yet send the cluster param and the business service will iterate over all + // clusters if a cluster criteria is not provided which 
is what we want. + Namespace: p.Namespace, + IncludeHealth: p.IncludeHealth, + IncludeIstioResources: p.IncludeIstioResources, + IncludeOnlyDefinitions: p.IncludeOnlyDefinitions, + RateInterval: "", + QueryTime: p.QueryTime, + } // Get business layer business, err := getBusiness(r) diff --git a/kiali.go b/kiali.go index 5cea7e8ba2..9d2f1ac5e4 100644 --- a/kiali.go +++ b/kiali.go @@ -37,9 +37,11 @@ import ( "strings" "syscall" + "github.com/kiali/kiali/business" "github.com/kiali/kiali/business/authentication" "github.com/kiali/kiali/config" "github.com/kiali/kiali/kubernetes" + "github.com/kiali/kiali/kubernetes/cache" "github.com/kiali/kiali/log" "github.com/kiali/kiali/observability" "github.com/kiali/kiali/prometheus/internalmetrics" @@ -107,8 +109,31 @@ func main() { // prepare our internal metrics so Prometheus can scrape them internalmetrics.RegisterInternalMetrics() + // Create the business package dependencies. + clientFactory, err := kubernetes.GetClientFactory() + if err != nil { + log.Fatalf("Failed to create client factory. Err: %s", err) + } + + log.Info("Initializing Kiali Cache") + cache, err := cache.NewKialiCache(clientFactory, *cfg) + if err != nil { + log.Fatalf("Error initializing Kiali Cache. Details: %s", err) + } + defer cache.Stop() + + namespaceService := business.NewNamespaceService(clientFactory.GetSAClients(), clientFactory.GetSAClients(), cache, *cfg) + meshService := business.NewMeshService(clientFactory.GetSAClients(), cache, namespaceService, *cfg) + cpm := business.NewControlPlaneMonitor(cache, clientFactory, *cfg, &meshService) + + if cfg.ExternalServices.Istio.IstioAPIEnabled { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + cpm.PollIstiodForProxyStatus(ctx) + } + // Start listening to requests - server := server.NewServer() + server := server.NewServer(cpm, clientFactory, cache, *cfg) server.Start() // wait forever, or at least until we are told to exit diff --git a/kubernetes/cache/cache.go b/kubernetes/cache/cache.go index 236f0c9fc3..b15fe70f0a 100644 --- a/kubernetes/cache/cache.go +++ b/kubernetes/cache/cache.go @@ -11,6 +11,7 @@ import ( "github.com/kiali/kiali/kubernetes" "github.com/kiali/kiali/log" "github.com/kiali/kiali/models" + "github.com/kiali/kiali/store" ) // Istio uses caches for pods and controllers. @@ -39,9 +40,9 @@ type KialiCache interface { // Instead of using the interface directly for kube objects, use the GetKubeCache() method. KubeCache - NamespacesCache - ProxyStatusCache RegistryStatusCache + ProxyStatusCache + NamespacesCache } // namespaceCache caches namespaces according to their token. @@ -51,16 +52,10 @@ type namespaceCache struct { clusterNamespace map[string]map[string]models.Namespace // By cluster, by namespace name } -type podProxyStatus struct { - cluster string - namespace string - pod string - proxyStatus *kubernetes.ProxyStatus -} - type kialiCacheImpl struct { // Embedded for backward compatibility for business methods that just use one cluster. // All business methods should eventually use the multi-cluster cache. + // TODO: Get rid of embedding. 
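+	// The embedded cache continues to serve existing single-cluster call sites; per-cluster caches are kept in the kubeCache map and the multi-cluster status data in the stores declared below.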
KubeCache // Stops the background goroutines which refresh the cache's @@ -75,11 +70,10 @@ type kialiCacheImpl struct { tokenLock sync.RWMutex tokenNamespaces map[string]namespaceCache // TODO: Another option can be define here the namespaces by token/cluster tokenNamespaceDuration time.Duration - proxyStatusLock sync.RWMutex - proxyStatusNamespaces map[string]map[string]map[string]podProxyStatus - registryStatusLock sync.RWMutex - registryStatusCreated *time.Time - registryStatus *kubernetes.RegistryStatus + // ProxyStatusStore stores the proxy status and should be key'd off cluster + namespace + pod. + proxyStatusStore store.Store[*kubernetes.ProxyStatus] + // RegistryStatusStore stores the registry status and should be key'd off of the cluster name. + registryStatusStore store.Store[*kubernetes.RegistryStatus] // Info about the kube clusters that the cache knows about. clusters []kubernetes.Cluster @@ -91,14 +85,15 @@ func NewKialiCache(clientFactory kubernetes.ClientFactory, cfg config.Config) (K clientFactory: clientFactory, clientRefreshPollingPeriod: time.Duration(time.Second * 60), kubeCache: make(map[string]KubeCache), - proxyStatusNamespaces: make(map[string]map[string]map[string]podProxyStatus), refreshDuration: time.Duration(cfg.KubernetesConfig.CacheDuration) * time.Second, tokenNamespaces: make(map[string]namespaceCache), tokenNamespaceDuration: time.Duration(cfg.KubernetesConfig.CacheTokenNamespaceDuration) * time.Second, + proxyStatusStore: store.New[*kubernetes.ProxyStatus](), + registryStatusStore: store.New[*kubernetes.RegistryStatus](), } for cluster, client := range clientFactory.GetSAClients() { - cache, err := NewKubeCache(client, cfg, NewRegistryHandler(kialiCacheImpl.RefreshRegistryStatus)) + cache, err := NewKubeCache(client, cfg) if err != nil { log.Errorf("[Kiali Cache] Error creating kube cache for cluster: [%s]. Err: %v", cluster, err) return nil, err @@ -121,13 +116,9 @@ func NewKialiCache(clientFactory kubernetes.ClientFactory, cfg config.Config) (K // Starting background goroutines to: // 1. Refresh the cache's service account token - // 2. Poll for istiod's proxy status. // These will stop when the context is cancelled. // Starting goroutines after any errors are handled so as not to leak goroutines. ctx, cancel := context.WithCancel(context.Background()) - if cfg.ExternalServices.Istio.IstioAPIEnabled { - kialiCacheImpl.pollIstiodForProxyStatus(ctx) - } // Note that this only watches for changes to the home cluster's token since it is // expected that the remote cluster tokens will not change. However, that assumption diff --git a/kubernetes/cache/cache_handlers.go b/kubernetes/cache/cache_handlers.go deleted file mode 100644 index a83fb14844..0000000000 --- a/kubernetes/cache/cache_handlers.go +++ /dev/null @@ -1,46 +0,0 @@ -package cache - -import ( - "k8s.io/apimachinery/pkg/api/meta" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - - "github.com/kiali/kiali/log" -) - -type RegistryRefreshHandler struct { - refresh func() -} - -func NewRegistryHandler(refresh func()) RegistryRefreshHandler { - log.Infof("Adding a RegistryRefreshHandler") - return RegistryRefreshHandler{refresh: refresh} -} - -func (sh RegistryRefreshHandler) OnAdd(obj interface{}, isInInitialList bool) { - sh.refresh() -} - -func (sh RegistryRefreshHandler) OnUpdate(oldObj, newObj interface{}) { - var ( - oldMeta v1.Object - newMeta v1.Object - err error - ) - - if oldMeta, err = meta.Accessor(oldObj); err != nil { - log.Errorf("oldObj is not a valid kube object. 
Err: %s", err) - return - } - if newMeta, err = meta.Accessor(newObj); err != nil { - log.Errorf("newObj is not a valid kube object. Err: %s", err) - return - } - - if oldMeta.GetResourceVersion() != newMeta.GetResourceVersion() { - sh.refresh() - } -} - -func (sh RegistryRefreshHandler) OnDelete(obj interface{}) { - sh.refresh() -} diff --git a/kubernetes/cache/cache_handlers_test.go b/kubernetes/cache/cache_handlers_test.go deleted file mode 100644 index d1c9887877..0000000000 --- a/kubernetes/cache/cache_handlers_test.go +++ /dev/null @@ -1,89 +0,0 @@ -package cache - -import ( - "testing" - - "github.com/stretchr/testify/assert" - corev1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" -) - -type fakeRegistryStatus struct { - statusRefreshed bool -} - -func (f *fakeRegistryStatus) RefreshRegistryStatus() { - f.statusRefreshed = true -} - -func TestRegistryStatusUpdatedOnAdd(t *testing.T) { - assert := assert.New(t) - kialiCache := &fakeRegistryStatus{} - handler := NewRegistryHandler(kialiCache.RefreshRegistryStatus) - handler.OnAdd(&corev1.Service{}, false) - assert.True(kialiCache.statusRefreshed) -} - -func TestRegistryStatusUpdatedOnDelete(t *testing.T) { - assert := assert.New(t) - kialiCache := &fakeRegistryStatus{} - handler := NewRegistryHandler(kialiCache.RefreshRegistryStatus) - handler.OnDelete(&corev1.Service{}) - assert.True(kialiCache.statusRefreshed) -} - -func TestRegistryStatusUpdatedOnUpdate(t *testing.T) { - cases := map[string]struct { - old, new interface{} - expectedRefresh bool - }{ - "Same revision": { - old: &corev1.Service{ - ObjectMeta: metav1.ObjectMeta{ - ResourceVersion: "1", - }, - }, - new: &corev1.Service{ - ObjectMeta: metav1.ObjectMeta{ - ResourceVersion: "1", - }, - }, - expectedRefresh: false, - }, - "Different revision": { - old: &corev1.Service{ - ObjectMeta: metav1.ObjectMeta{ - ResourceVersion: "1", - }, - }, - new: &corev1.Service{ - ObjectMeta: metav1.ObjectMeta{ - ResourceVersion: "2", - }, - }, - expectedRefresh: true, - }, - "old object not typemeta": { - old: &struct{}{}, - new: &corev1.Service{}, - expectedRefresh: false, - }, - "new object not typemeta": { - old: &corev1.Service{}, - new: &struct{}{}, - expectedRefresh: false, - }, - } - - for name, tc := range cases { - t.Run(name, func(t *testing.T) { - assert := assert.New(t) - kialiCache := &fakeRegistryStatus{} - handler := NewRegistryHandler(kialiCache.RefreshRegistryStatus) - - handler.OnUpdate(tc.old, tc.new) - - assert.Equal(tc.expectedRefresh, kialiCache.statusRefreshed) - }) - } -} diff --git a/kubernetes/cache/cache_test.go b/kubernetes/cache/cache_test.go index ef360948f2..7ef5088d9b 100644 --- a/kubernetes/cache/cache_test.go +++ b/kubernetes/cache/cache_test.go @@ -38,7 +38,7 @@ func TestClientUpdatedWhenSAClientChanges(t *testing.T) { client := kubetest.NewFakeK8sClient() client.Token = "current-token" clientFactory := kubetest.NewK8SClientFactoryMock(client) - k8sCache, err := NewKubeCache(client, *conf, emptyHandler) + k8sCache, err := NewKubeCache(client, *conf) require.NoError(err) kubeCache := &fakeKubeCache{kubeCache: k8sCache} diff --git a/kubernetes/cache/kube_cache.go b/kubernetes/cache/kube_cache.go index 55740bd25b..29765ee118 100644 --- a/kubernetes/cache/kube_cache.go +++ b/kubernetes/cache/kube_cache.go @@ -147,14 +147,13 @@ type cacheLister struct { // kubeCache is a local cache of kube objects. Manages informers and listers. 
type kubeCache struct { - cacheLock sync.RWMutex - cfg config.Config - client kubernetes.ClientInterface - clusterCacheLister *cacheLister - clusterScoped bool - nsCacheLister map[string]*cacheLister - registryRefreshHandler RegistryRefreshHandler - refreshDuration time.Duration + cacheLock sync.RWMutex + cfg config.Config + client kubernetes.ClientInterface + clusterCacheLister *cacheLister + clusterScoped bool + nsCacheLister map[string]*cacheLister + refreshDuration time.Duration // Stops the cluster scoped informers when a refresh is necessary. // Close this channel to stop the cluster-scoped informers. stopClusterScopedChan chan struct{} @@ -163,7 +162,7 @@ type kubeCache struct { } // Starts all informers. These run until context is cancelled. -func NewKubeCache(kialiClient kubernetes.ClientInterface, cfg config.Config, refreshHandler RegistryRefreshHandler) (*kubeCache, error) { +func NewKubeCache(kialiClient kubernetes.ClientInterface, cfg config.Config) (*kubeCache, error) { refreshDuration := time.Duration(cfg.KubernetesConfig.CacheDuration) * time.Second c := &kubeCache{ @@ -172,9 +171,8 @@ func NewKubeCache(kialiClient kubernetes.ClientInterface, cfg config.Config, ref // Only when all namespaces are accessible should the cache be cluster scoped. // Otherwise, kiali may not have access to all namespaces since // the operator only grants clusterroles when all namespaces are accessible. - clusterScoped: cfg.AllNamespacesAccessible(), - registryRefreshHandler: refreshHandler, - refreshDuration: refreshDuration, + clusterScoped: cfg.AllNamespacesAccessible(), + refreshDuration: refreshDuration, } if c.clusterScoped { @@ -340,94 +338,42 @@ func (c *kubeCache) createIstioInformers(namespace string) istio.SharedInformerF if c.client.IsIstioAPI() { lister.authzLister = sharedInformers.Security().V1beta1().AuthorizationPolicies().Lister() lister.cachesSynced = append(lister.cachesSynced, sharedInformers.Security().V1beta1().AuthorizationPolicies().Informer().HasSynced) - _, error := sharedInformers.Security().V1beta1().AuthorizationPolicies().Informer().AddEventHandler(c.registryRefreshHandler) - if error != nil { - log.Errorf("[Kiali Cache] Failed to Add event handler in AuthorizationPolicies cache : %s", error) - } lister.destinationRuleLister = sharedInformers.Networking().V1beta1().DestinationRules().Lister() lister.cachesSynced = append(lister.cachesSynced, sharedInformers.Networking().V1beta1().DestinationRules().Informer().HasSynced) - _, error = sharedInformers.Networking().V1beta1().DestinationRules().Informer().AddEventHandler(c.registryRefreshHandler) - if error != nil { - log.Errorf("[Kiali Cache] Failed to Add event handler in DestinationRules cache : %s", error) - } lister.envoyFilterLister = sharedInformers.Networking().V1alpha3().EnvoyFilters().Lister() lister.cachesSynced = append(lister.cachesSynced, sharedInformers.Networking().V1alpha3().EnvoyFilters().Informer().HasSynced) - _, error = sharedInformers.Networking().V1alpha3().EnvoyFilters().Informer().AddEventHandler(c.registryRefreshHandler) - if error != nil { - log.Errorf("[Kiali Cache] Failed to Add event handler in EnvoyFilters cache : %s", error) - } lister.gatewayLister = sharedInformers.Networking().V1beta1().Gateways().Lister() lister.cachesSynced = append(lister.cachesSynced, sharedInformers.Networking().V1beta1().Gateways().Informer().HasSynced) - _, error = sharedInformers.Networking().V1beta1().Gateways().Informer().AddEventHandler(c.registryRefreshHandler) - if error != nil { - log.Errorf("[Kiali Cache] 
Failed to Add event handler in Gateways cache : %s", error) - } lister.peerAuthnLister = sharedInformers.Security().V1beta1().PeerAuthentications().Lister() lister.cachesSynced = append(lister.cachesSynced, sharedInformers.Security().V1beta1().PeerAuthentications().Informer().HasSynced) - _, error = sharedInformers.Security().V1beta1().PeerAuthentications().Informer().AddEventHandler(c.registryRefreshHandler) - if error != nil { - log.Errorf("[Kiali Cache] Failed to Add event handler in PeerAuthentications cache : %s", error) - } lister.requestAuthnLister = sharedInformers.Security().V1beta1().RequestAuthentications().Lister() lister.cachesSynced = append(lister.cachesSynced, sharedInformers.Security().V1beta1().RequestAuthentications().Informer().HasSynced) - _, error = sharedInformers.Security().V1beta1().RequestAuthentications().Informer().AddEventHandler(c.registryRefreshHandler) - if error != nil { - log.Errorf("[Kiali Cache] Failed to Add event handler in RequestAuthentications cache : %s", error) - } lister.serviceEntryLister = sharedInformers.Networking().V1beta1().ServiceEntries().Lister() lister.cachesSynced = append(lister.cachesSynced, sharedInformers.Networking().V1beta1().ServiceEntries().Informer().HasSynced) - _, error = sharedInformers.Networking().V1beta1().ServiceEntries().Informer().AddEventHandler(c.registryRefreshHandler) - if error != nil { - log.Errorf("[Kiali Cache] Failed to Add event handler in ServiceEntries cache : %s", error) - } lister.sidecarLister = sharedInformers.Networking().V1beta1().Sidecars().Lister() lister.cachesSynced = append(lister.cachesSynced, sharedInformers.Networking().V1beta1().Sidecars().Informer().HasSynced) - _, error = sharedInformers.Networking().V1beta1().Sidecars().Informer().AddEventHandler(c.registryRefreshHandler) - if error != nil { - log.Errorf("[Kiali Cache] Failed to Add event handler in Sidecars cache : %s", error) - } lister.telemetryLister = sharedInformers.Telemetry().V1alpha1().Telemetries().Lister() lister.cachesSynced = append(lister.cachesSynced, sharedInformers.Telemetry().V1alpha1().Telemetries().Informer().HasSynced) - _, error = sharedInformers.Telemetry().V1alpha1().Telemetries().Informer().AddEventHandler(c.registryRefreshHandler) - if error != nil { - log.Errorf("[Kiali Cache] Failed to Add event handler in Telemetries cache : %s", error) - } lister.virtualServiceLister = sharedInformers.Networking().V1beta1().VirtualServices().Lister() lister.cachesSynced = append(lister.cachesSynced, sharedInformers.Networking().V1beta1().VirtualServices().Informer().HasSynced) - _, error = sharedInformers.Networking().V1beta1().VirtualServices().Informer().AddEventHandler(c.registryRefreshHandler) - if error != nil { - log.Errorf("[Kiali Cache] Failed to Add event handler in VirtualServices cache : %s", error) - } lister.wasmPluginLister = sharedInformers.Extensions().V1alpha1().WasmPlugins().Lister() lister.cachesSynced = append(lister.cachesSynced, sharedInformers.Extensions().V1alpha1().WasmPlugins().Informer().HasSynced) - _, error = sharedInformers.Extensions().V1alpha1().WasmPlugins().Informer().AddEventHandler(c.registryRefreshHandler) - if error != nil { - log.Errorf("[Kiali Cache] Failed to Add event handler in WasmPlugins cache : %s", error) - } lister.workloadEntryLister = sharedInformers.Networking().V1beta1().WorkloadEntries().Lister() lister.cachesSynced = append(lister.cachesSynced, sharedInformers.Networking().V1beta1().WorkloadEntries().Informer().HasSynced) - _, error = 
sharedInformers.Networking().V1beta1().WorkloadEntries().Informer().AddEventHandler(c.registryRefreshHandler) - if error != nil { - log.Errorf("[Kiali Cache] Failed to Add event handler in WorkloadEntries cache : %s", error) - } lister.workloadGroupLister = sharedInformers.Networking().V1beta1().WorkloadGroups().Lister() lister.cachesSynced = append(lister.cachesSynced, sharedInformers.Networking().V1beta1().WorkloadGroups().Informer().HasSynced) - _, error = sharedInformers.Networking().V1beta1().WorkloadGroups().Informer().AddEventHandler(c.registryRefreshHandler) - if error != nil { - log.Errorf("[Kiali Cache] Failed to Add event handler in WorkloadGroups cache : %s", error) - } } return sharedInformers @@ -445,17 +391,9 @@ func (c *kubeCache) createGatewayInformers(namespace string) gateway.SharedInfor if c.client.IsGatewayAPI() { lister.k8sgatewayLister = sharedInformers.Gateway().V1beta1().Gateways().Lister() lister.cachesSynced = append(lister.cachesSynced, sharedInformers.Gateway().V1beta1().Gateways().Informer().HasSynced) - _, err := sharedInformers.Gateway().V1beta1().Gateways().Informer().AddEventHandler(c.registryRefreshHandler) - if err != nil { - log.Errorf("[Kiali Cache] Error adding Handler to Informer Gateways: %s", err.Error()) - } lister.k8shttprouteLister = sharedInformers.Gateway().V1beta1().HTTPRoutes().Lister() lister.cachesSynced = append(lister.cachesSynced, sharedInformers.Gateway().V1beta1().HTTPRoutes().Informer().HasSynced) - _, err = sharedInformers.Gateway().V1beta1().HTTPRoutes().Informer().AddEventHandler(c.registryRefreshHandler) - if err != nil { - log.Errorf("[Kiali Cache] Error adding Handler to Informer HTTPRoutes : %s", err.Error()) - } } return sharedInformers } @@ -491,14 +429,6 @@ func (c *kubeCache) createKubernetesInformers(namespace string) informers.Shared sharedInformers.Apps().V1().ReplicaSets().Informer().HasSynced, sharedInformers.Core().V1().ConfigMaps().Informer().HasSynced, ) - _, err := sharedInformers.Core().V1().Services().Informer().AddEventHandler(c.registryRefreshHandler) - if err != nil { - log.Errorf("Error adding Handler to Informer services: %s", err.Error()) - } - _, err = sharedInformers.Core().V1().Endpoints().Informer().AddEventHandler(c.registryRefreshHandler) - if err != nil { - log.Errorf("Error adding Handler to Informer Endpoints: %s", err.Error()) - } if c.clusterScoped { c.clusterCacheLister = lister diff --git a/kubernetes/cache/kube_cache_test.go b/kubernetes/cache/kube_cache_test.go index 900a9f0aac..52a8fcbbc4 100644 --- a/kubernetes/cache/kube_cache_test.go +++ b/kubernetes/cache/kube_cache_test.go @@ -23,8 +23,7 @@ const IstioAPIEnabled = true func newTestingKubeCache(t *testing.T, cfg *config.Config, objects ...runtime.Object) *kubeCache { t.Helper() - emptyRefreshHandler := NewRegistryHandler(func() {}) - kubeCache, err := NewKubeCache(kubetest.NewFakeK8sClient(objects...), *cfg, emptyRefreshHandler) + kubeCache, err := NewKubeCache(kubetest.NewFakeK8sClient(objects...), *cfg) if err != nil { t.Fatalf("Unable to create kube cache for testing. 
Err: %s", err) } @@ -308,10 +307,9 @@ func TestIstioAPIDisabled(t *testing.T) { ns := &core_v1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: "test"}} cfg := config.NewConfig() - emptyRefreshHandler := NewRegistryHandler(func() {}) fakeClient := kubetest.NewFakeK8sClient(ns) fakeClient.IstioAPIEnabled = false - kubeCache, err := NewKubeCache(fakeClient, *cfg, emptyRefreshHandler) + kubeCache, err := NewKubeCache(fakeClient, *cfg) if err != nil { t.Fatalf("Unable to create kube cache for testing. Err: %s", err) } diff --git a/kubernetes/cache/proxy_status.go b/kubernetes/cache/proxy_status.go index 729f055530..60e658c6fc 100644 --- a/kubernetes/cache/proxy_status.go +++ b/kubernetes/cache/proxy_status.go @@ -1,105 +1,44 @@ package cache import ( - "context" "strings" - "time" - - "k8s.io/apimachinery/pkg/util/wait" "github.com/kiali/kiali/kubernetes" - "github.com/kiali/kiali/log" ) -type ProxyStatusCache interface { - GetPodProxyStatus(cluster, namespace, pod string) *kubernetes.ProxyStatus +func proxyStatusKey(cluster, namespace, pod string) string { + return cluster + namespace + pod } -// pollIstiodForProxyStatus is a long running goroutine that will periodically poll istiod for proxy status. -// Polling stops when the stopCacheChan is closed. -func (c *kialiCacheImpl) pollIstiodForProxyStatus(ctx context.Context) { - log.Debug("[Kiali Cache] Starting polling istiod for proxy status") - go func() { - for { - select { - case <-ctx.Done(): - log.Debug("[Kiali Cache] Stopping polling for istiod proxy status") - return - case <-time.After(c.tokenNamespaceDuration): - // Get the proxy status from istiod with some retries. - // Wrapping this so we can defer cancel. - func() { - ctx, cancel := context.WithTimeout(ctx, c.tokenNamespaceDuration) - defer cancel() - - var ( - proxyStatus []*kubernetes.ProxyStatus - err error - ) - - interval := c.tokenNamespaceDuration / 2 - retryErr := wait.PollUntilContextCancel(ctx, interval, true, func(ctx context.Context) (bool, error) { - log.Trace("Getting proxy status from istiod") - proxyStatus, err = c.clientFactory.GetSAHomeClusterClient().GetProxyStatus() - if err != nil { - // TODO: Error checking could be done here to determine retry if GetProxyStatus provided that info. - return false, nil - } - - return true, nil - }) - if retryErr != nil { - log.Warningf("Error getting proxy status from istiod. Proxy status may be stale. Err: %v", err) - return - } - - c.setProxyStatus(proxyStatus) - }() - } - } - }() +type ProxyStatusCache interface { + SetPodProxyStatus([]*kubernetes.ProxyStatus) + GetPodProxyStatus(cluster, namespace, pod string) *kubernetes.ProxyStatus } -func (c *kialiCacheImpl) GetPodProxyStatus(cluster, namespace, pod string) *kubernetes.ProxyStatus { - defer c.proxyStatusLock.RUnlock() - c.proxyStatusLock.RLock() - if clusterProxyStatus, ok := c.proxyStatusNamespaces[cluster]; ok { - if nsProxyStatus, ok := clusterProxyStatus[namespace]; ok { - if podProxyStatus, ok := nsProxyStatus[pod]; ok { - return podProxyStatus.proxyStatus +func (c *kialiCacheImpl) SetPodProxyStatus(proxyStatus []*kubernetes.ProxyStatus) { + podProxyByID := make(map[string]*kubernetes.ProxyStatus) + for _, ps := range proxyStatus { + if ps != nil { + // Expected format . 
+ // "proxy": "control-7bcc64d69d-qzsdk.travel-control" + podId := strings.Split(ps.ProxyID, ".") + if len(podId) == 2 { + pod := podId[0] + ns := podId[1] + cluster := ps.ClusterID + key := proxyStatusKey(cluster, ns, pod) + podProxyByID[key] = ps } } } - return nil + c.proxyStatusStore.Replace(podProxyByID) } -func (c *kialiCacheImpl) setProxyStatus(proxyStatus []*kubernetes.ProxyStatus) { - defer c.proxyStatusLock.Unlock() - c.proxyStatusLock.Lock() - if len(proxyStatus) > 0 { - for _, ps := range proxyStatus { - if ps != nil { - // Expected format . - // "proxy": "control-7bcc64d69d-qzsdk.travel-control" - podId := strings.Split(ps.ProxyID, ".") - if len(podId) == 2 { - pod := podId[0] - ns := podId[1] - cluster := ps.ClusterID - if _, exist := c.proxyStatusNamespaces[cluster]; !exist { - c.proxyStatusNamespaces[cluster] = make(map[string]map[string]podProxyStatus) - } - if _, exist := c.proxyStatusNamespaces[cluster][ns]; !exist { - c.proxyStatusNamespaces[cluster][ns] = make(map[string]podProxyStatus) - } - c.proxyStatusNamespaces[cluster][ns][pod] = podProxyStatus{ - cluster: cluster, - namespace: ns, - pod: pod, - proxyStatus: ps, - } - } - } - } +func (c *kialiCacheImpl) GetPodProxyStatus(cluster, namespace, pod string) *kubernetes.ProxyStatus { + key := proxyStatusKey(cluster, namespace, pod) + proxyStatus, err := c.proxyStatusStore.Get(key) + if err != nil { + return nil } + return proxyStatus } diff --git a/kubernetes/cache/registry_status.go b/kubernetes/cache/registry_status.go index e87f9bd038..7d57121dcd 100644 --- a/kubernetes/cache/registry_status.go +++ b/kubernetes/cache/registry_status.go @@ -1,49 +1,29 @@ package cache import ( - "time" - "github.com/kiali/kiali/kubernetes" + "github.com/kiali/kiali/log" ) type ( RegistryStatusCache interface { - CheckRegistryStatus() bool - GetRegistryStatus() *kubernetes.RegistryStatus - SetRegistryStatus(registryStatus *kubernetes.RegistryStatus) - RefreshRegistryStatus() + GetRegistryStatus(cluster string) *kubernetes.RegistryStatus + SetRegistryStatus(registryStatus map[string]*kubernetes.RegistryStatus) } ) -func (c *kialiCacheImpl) CheckRegistryStatus() bool { - defer c.registryStatusLock.RUnlock() - c.registryStatusLock.RLock() - if c.registryStatusCreated == nil { - return false - } - if time.Since(*c.registryStatusCreated) > c.refreshDuration { - return false +func (c *kialiCacheImpl) GetRegistryStatus(cluster string) *kubernetes.RegistryStatus { + status, err := c.registryStatusStore.Get(cluster) + if err != nil { + // Ignoring any errors here because registry services are optional. Most likely any errors + // here are due to cache misses since populating the cache is handled asynchronously. + log.Tracef("Unable to get registry status for cluster [%s]. 
Err: %v", cluster, err) + return nil } - return true -} - -func (c *kialiCacheImpl) GetRegistryStatus() *kubernetes.RegistryStatus { - defer c.registryStatusLock.RUnlock() - c.registryStatusLock.RLock() - return c.registryStatus -} -func (c *kialiCacheImpl) SetRegistryStatus(registryStatus *kubernetes.RegistryStatus) { - defer c.registryStatusLock.Unlock() - c.registryStatusLock.Lock() - timeNow := time.Now() - c.registryStatusCreated = &timeNow - c.registryStatus = registryStatus + return status } -func (c *kialiCacheImpl) RefreshRegistryStatus() { - defer c.registryStatusLock.Unlock() - c.registryStatusLock.Lock() - c.registryStatusCreated = nil - c.registryStatus = nil +func (c *kialiCacheImpl) SetRegistryStatus(registryStatus map[string]*kubernetes.RegistryStatus) { + c.registryStatusStore.Replace(registryStatus) } diff --git a/kubernetes/cache/testing.go b/kubernetes/cache/testing.go index cabdd63c5c..885c6aef78 100644 --- a/kubernetes/cache/testing.go +++ b/kubernetes/cache/testing.go @@ -12,8 +12,6 @@ import ( "github.com/kiali/kiali/kubernetes/kubetest" ) -var emptyHandler = NewRegistryHandler(func() {}) - func newTestingCache(t *testing.T, cf kubernetes.ClientFactory, conf config.Config) KialiCache { t.Helper() // Disabling Istio API for tests. Otherwise the cache will try and poll the Istio endpoint diff --git a/kubernetes/istio.go b/kubernetes/istio.go index 23dea2984e..ca556c59e3 100644 --- a/kubernetes/istio.go +++ b/kubernetes/istio.go @@ -1,15 +1,10 @@ package kubernetes import ( - "context" "encoding/json" - "errors" "fmt" - "io" - "net/http" "regexp" "strings" - "sync" "time" api_networking_v1beta1 "istio.io/api/networking/v1beta1" @@ -17,7 +12,6 @@ import ( security_v1beta1 "istio.io/client-go/pkg/apis/security/v1beta1" istio "istio.io/client-go/pkg/clientset/versioned" core_v1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/labels" k8syaml "k8s.io/apimachinery/pkg/util/yaml" k8s_networking_v1beta1 "sigs.k8s.io/gateway-api/apis/v1beta1" gatewayapiclient "sigs.k8s.io/gateway-api/pkg/client/clientset/versioned" @@ -81,11 +75,8 @@ type IstioClientInterface interface { // GatewayAPI returns the gateway-api kube client. GatewayAPI() gatewayapiclient.Interface - CanConnectToIstiod() (IstioComponentStatus, error) - GetProxyStatus() ([]*ProxyStatus, error) GetConfigDump(namespace, podName string) (*ConfigDump, error) SetProxyLogLevel(namespace, podName, level string) error - GetRegistryServices() ([]*RegistryService, error) } func (in *K8SClient) Istio() istio.Interface { @@ -96,283 +87,13 @@ func (in *K8SClient) GatewayAPI() gatewayapiclient.Interface { return in.gatewayapi } -// CanConnectToIstiod checks if Kiali can reach the istiod pod(s) via port -// fowarding through the k8s api server or via http if the registry is -// configured with a remote url. An error does not indicate that istiod -// cannot be reached. The IstioComponentStatus must be checked. -func (in *K8SClient) CanConnectToIstiod() (IstioComponentStatus, error) { - cfg := config.Get() - - if cfg.ExternalServices.Istio.Registry != nil { - istiodURL := cfg.ExternalServices.Istio.Registry.IstiodURL - // Being able to hit /debug doesn't necessarily mean we are authorized to hit the others. 
- url := joinURL(istiodURL, "/debug") - if _, err := getRequest(url); err != nil { - log.Warningf("Kiali can't connect to remote Istiod: %s", err) - return IstioComponentStatus{{Name: istiodURL, Status: ComponentUnreachable, IsCore: true}}, nil - } - return IstioComponentStatus{{Name: istiodURL, Status: ComponentHealthy, IsCore: true}}, nil - } - - istiods, err := in.GetPods(cfg.IstioNamespace, labels.Set(map[string]string{"app": "istiod"}).String()) - if err != nil { - return nil, err - } - - healthyIstiods := make([]*core_v1.Pod, 0, len(istiods)) - for i, istiod := range istiods { - if istiod.Status.Phase == core_v1.PodRunning { - healthyIstiods = append(healthyIstiods, &istiods[i]) - } - } - - wg := sync.WaitGroup{} - wg.Add(len(healthyIstiods)) - syncChan := make(chan ComponentStatus, len(healthyIstiods)) - - for _, istiod := range healthyIstiods { - go func(name, namespace string) { - defer wg.Done() - var status ComponentStatus - // The 8080 port is not accessible from outside of the pod. However, it is used for kubernetes to do the live probes. - // Using the proxy method to make sure that K8s API has access to the Istio Control Plane namespace. - // By proxying one Istiod, we ensure that the following connection is allowed: - // Kiali -> K8s API (proxy) -> istiod - // This scenario is no obvious for private clusters (like GKE private cluster) - _, err := in.forwardGetRequest(namespace, name, 8080, "/ready") - if err != nil { - log.Warningf("Unable to get ready status of istiod: %s/%s. Err: %s", namespace, name, err) - status = ComponentStatus{ - Name: name, - Namespace: namespace, - Status: ComponentUnreachable, - IsCore: true, - } - } else { - status = ComponentStatus{ - Name: name, - Namespace: namespace, - Status: ComponentHealthy, - IsCore: true, - } - } - syncChan <- status - }(istiod.Name, istiod.Namespace) - } - - wg.Wait() - close(syncChan) - ics := IstioComponentStatus{} - for componentStatus := range syncChan { - ics.Merge(IstioComponentStatus{componentStatus}) - } - - return ics, nil -} - -func (in *K8SClient) getIstiodDebugStatus(debugPath string) (map[string][]byte, error) { - c := config.Get() - // Check if the kube-api has proxy access to pods in the istio-system - // https://github.com/kiali/kiali/issues/3494#issuecomment-772486224 - status, err := in.CanConnectToIstiod() - if err != nil { - return nil, err - } - - istiodReachable := false - for _, istiodStatus := range status { - if istiodStatus.Status != ComponentUnreachable { - istiodReachable = true - break - } - } - if !istiodReachable { - return nil, fmt.Errorf("unable to proxy Istiod pods. " + - "Make sure your Kubernetes API server has access to the Istio control plane through 8080 port") - } - - var healthyIstiods IstioComponentStatus - for _, istiod := range status { - if istiod.Status == ComponentHealthy { - healthyIstiods = append(healthyIstiods, istiod) - } - } - - wg := sync.WaitGroup{} - wg.Add(len(healthyIstiods)) - errChan := make(chan error, len(healthyIstiods)) - syncChan := make(chan map[string][]byte, len(healthyIstiods)) - - result := map[string][]byte{} - for _, istiod := range healthyIstiods { - go func(name, namespace string) { - defer wg.Done() - - // The 15014 port on Istiod is open for control plane monitoring. 
- // Here's the Istio doc page about the port usage by istio: - // https://istio.io/latest/docs/ops/deployment/requirements/#ports-used-by-istio - res, err := in.forwardGetRequest(namespace, name, c.ExternalServices.Istio.IstiodPodMonitoringPort, debugPath) - if err != nil { - errChan <- fmt.Errorf("%s: %s", name, err.Error()) - } else { - syncChan <- map[string][]byte{name: res} - } - }(istiod.Name, istiod.Namespace) - } - - wg.Wait() - close(errChan) - close(syncChan) - - errs := "" - for err := range errChan { - if errs != "" { - errs = errs + "; " - } - errs = errs + err.Error() - } - errs = "Error fetching the proxy-status in the following pods: " + errs - - for status := range syncChan { - for pilot, sync := range status { - result[pilot] = sync - } - } - - if len(result) > 0 { - return result, nil - } else { - return nil, errors.New(errs) - } -} - -func (in *K8SClient) GetProxyStatus() ([]*ProxyStatus, error) { - const synczPath = "/debug/syncz" - var result map[string][]byte - - if externalConf := config.Get().ExternalServices.Istio.Registry; externalConf != nil { - url := joinURL(externalConf.IstiodURL, synczPath) - r, err := getRequest(url) - if err != nil { - log.Errorf("Failed to get Istiod info from remote endpoint %s error: %s", synczPath, err) - return nil, err - } - result = map[string][]byte{"remote": r} - } else { - debugStatus, err := in.getIstiodDebugStatus(synczPath) - if err != nil { - log.Errorf("Failed to call Istiod endpoint %s error: %s", synczPath, err) - return nil, err - } - result = debugStatus - } - return parseProxyStatus(result) -} - -func (in *K8SClient) GetRegistryServices() ([]*RegistryService, error) { - const registryzPath = "/debug/registryz" - var result map[string][]byte - - if externalConf := config.Get().ExternalServices.Istio.Registry; externalConf != nil { - url := joinURL(externalConf.IstiodURL, registryzPath) - r, err := getRequest(url) - if err != nil { - log.Errorf("Failed to get Istiod info from remote endpoint %s error: %s", registryzPath, err) - return nil, err - } - result = map[string][]byte{"remote": r} - } else { - debugStatus, err := in.getIstiodDebugStatus(registryzPath) - if err != nil { - log.Tracef("Failed to call Istiod endpoint %s error: %s", registryzPath, err) - return nil, err - } - result = debugStatus - } - return ParseRegistryServices(result) -} - -func joinURL(base, path string) string { - base = strings.TrimSuffix(base, "/") - path = strings.TrimPrefix(path, "/") - return base + "/" + path -} - -func getRequest(url string) ([]byte, error) { - ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) - defer cancel() - - req, err := http.NewRequestWithContext(ctx, http.MethodGet, url, nil) - if err != nil { - return nil, err - } - - resp, err := http.DefaultClient.Do(req) - if err != nil { - return nil, err - } - defer resp.Body.Close() - - body, err := io.ReadAll(resp.Body) - if err != nil { - return nil, fmt.Errorf("unable to read response body when getting config from remote istiod. Err: %s", err) - } - - if resp.StatusCode != http.StatusOK { - return nil, fmt.Errorf("bad response when getting config from remote istiod. Status: %s. 
Body: %s", resp.Status, body) - } - - return body, err -} - -func parseProxyStatus(statuses map[string][]byte) ([]*ProxyStatus, error) { - var fullStatus []*ProxyStatus - for pilot, status := range statuses { - var ss []*ProxyStatus - err := json.Unmarshal(status, &ss) - if err != nil { - return nil, err - } - for _, s := range ss { - s.pilot = pilot - } - fullStatus = append(fullStatus, ss...) - } - return fullStatus, nil -} - -func ParseRegistryServices(registries map[string][]byte) ([]*RegistryService, error) { - var fullRegistryServices []*RegistryService - isRegistryLoaded := false - for pilot, registry := range registries { - // skip reading registry configs multiple times in a case of multiple istiod pods - if isRegistryLoaded { - break - } - var rr []*RegistryService - err := json.Unmarshal(registry, &rr) - if err != nil { - log.Errorf("Error parsing RegistryServices results: %s", err) - return nil, err - } - for _, r := range rr { - r.pilot = pilot - } - fullRegistryServices = append(fullRegistryServices, rr...) - if len(rr) > 0 { - isRegistryLoaded = true - } - } - return fullRegistryServices, nil -} - func (in *K8SClient) GetConfigDump(namespace, podName string) (*ConfigDump, error) { // Fetching the Config Dump from the pod's Envoy. // The port 15000 is open on each Envoy Sidecar (managed by Istio) to serve the Envoy Admin interface. // This port can only be accessed by inside the pod. // See the Istio's doc page about its port usage: // https://istio.io/latest/docs/ops/deployment/requirements/#ports-used-by-istio - resp, err := in.forwardGetRequest(namespace, podName, 15000, "/config_dump") + resp, err := in.ForwardGetRequest(namespace, podName, 15000, "/config_dump") if err != nil { log.Errorf("Error forwarding the /config_dump request: %v", err) return nil, err diff --git a/kubernetes/istio_internal_test.go b/kubernetes/istio_internal_test.go index 3b40472e86..dc408386d2 100644 --- a/kubernetes/istio_internal_test.go +++ b/kubernetes/istio_internal_test.go @@ -1,104 +1,15 @@ package kubernetes import ( - "net/http" - "net/http/httptest" - "os" - "strconv" - "strings" "testing" "github.com/stretchr/testify/assert" api_security_v1beta1 "istio.io/api/security/v1beta1" security_v1beta1 "istio.io/client-go/pkg/apis/security/v1beta1" - core_v1 "k8s.io/api/core/v1" - meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/client-go/kubernetes/fake" "github.com/kiali/kiali/config" - "github.com/kiali/kiali/util/httputil" ) -// Because config.Config is a global variable, we need to reset it after each test. -// This func will reset the config back to its previous value. -func setConfig(t *testing.T, newConfig config.Config) { - t.Helper() - previousConfig := *config.Get() - t.Cleanup(func() { - config.Set(&previousConfig) - }) - config.Set(&newConfig) -} - -// The port pool is a global variable so we need to reset it between tests. 
-func setPortPool(t *testing.T, addr string) { - t.Helper() - addrPieces := strings.Split(addr, ":") - port, err := strconv.Atoi(addrPieces[len(addrPieces)-1]) - if err != nil { - t.Fatalf("Error parsing port from address: %s", err) - } - - oldRange := httputil.Pool.PortRangeInit - oldBusyPort := httputil.Pool.LastBusyPort - oldSize := httputil.Pool.PortRangeSize - t.Cleanup(func() { - httputil.Pool.PortRangeInit = oldRange - httputil.Pool.LastBusyPort = oldBusyPort - httputil.Pool.PortRangeSize = oldSize - }) - httputil.Pool.PortRangeInit = port - httputil.Pool.LastBusyPort = port - 1 - httputil.Pool.PortRangeSize = 1 -} - -type fakePortForwarder struct{} - -func (f *fakePortForwarder) Start() error { return nil } -func (f *fakePortForwarder) Stop() {} - -func istiodTestServer(t *testing.T) *httptest.Server { - t.Helper() - testServer := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - var file string - switch r.URL.Path { - case "/debug/configz": - file = "../tests/data/registry/registry-configz.json" - case "/debug/registryz": - file = "../tests/data/registry/registry-registryz.json" - case "/debug/syncz": - file = "../tests/data/registry/registry-syncz.json" - case "/ready": - w.WriteHeader(http.StatusOK) - return - default: - w.WriteHeader(http.StatusInternalServerError) - t.Fatalf("Unexpected request path: %s", r.URL.Path) - return - } - if _, err := w.Write(ReadFile(t, file)); err != nil { - t.Fatalf("Error writing response: %s", err) - } - })) - t.Cleanup(testServer.Close) - return testServer -} - -func runningIstiodPod() *core_v1.Pod { - return &core_v1.Pod{ - ObjectMeta: meta_v1.ObjectMeta{ - Name: "istiod-123", - Namespace: "istio-system", - Labels: map[string]string{ - "app": "istiod", - }, - }, - Status: core_v1.PodStatus{ - Phase: core_v1.PodRunning, - }, - } -} - func TestFilterByHost(t *testing.T) { conf := config.NewConfig() config.Set(conf) @@ -246,163 +157,3 @@ func createPeerAuthn(name, namespace string, mtls *api_security_v1beta1.PeerAuth pa.Spec.Mtls = mtls return pa } - -func TestRegistryServices(t *testing.T) { - assert := assert.New(t) - - registryz := "../tests/data/registry/registry-registryz.json" - bRegistryz, err := os.ReadFile(registryz) - assert.NoError(err) - - rRegistry := map[string][]byte{ - "istiod1": bRegistryz, - } - - registry, err2 := ParseRegistryServices(rRegistry) - assert.NoError(err2) - assert.NotNil(registry) - - assert.Equal(79, len(registry)) - assert.Equal("*.msn.com", registry[0].Attributes.Name) -} - -func TestGetRegistryServices(t *testing.T) { - assert := assert.New(t) - - testServer := istiodTestServer(t) - setPortPool(t, testServer.URL) - - k8sClient := &K8SClient{ - k8s: fake.NewSimpleClientset(runningIstiodPod()), - getPodPortForwarderFunc: func(namespace, name, portMap string) (httputil.PortForwarder, error) { - return &fakePortForwarder{}, nil - }, - } - - _, err := k8sClient.GetRegistryServices() - assert.NoError(err) -} - -func TestGetRegistryServicesExternal(t *testing.T) { - assert := assert.New(t) - - testServer := istiodTestServer(t) - - conf := config.Get() - conf.ExternalServices.Istio.Registry = &config.RegistryConfig{ - IstiodURL: testServer.URL, - } - setConfig(t, *conf) - - k8sClient := &K8SClient{} - _, err := k8sClient.GetRegistryServices() - assert.NoError(err) -} - -func TestGetRegistryServicesExternalBadResponse(t *testing.T) { - assert := assert.New(t) - - testServer := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - 
w.WriteHeader(http.StatusUnauthorized) - })) - t.Cleanup(testServer.Close) - - conf := config.Get() - conf.ExternalServices.Istio.Registry = &config.RegistryConfig{ - IstiodURL: testServer.URL, - } - setConfig(t, *conf) - - k8sClient := &K8SClient{} - _, err := k8sClient.GetRegistryServices() - assert.Error(err) -} - -func TestGetProxyStatus(t *testing.T) { - assert := assert.New(t) - - testServer := istiodTestServer(t) - setPortPool(t, testServer.URL) - - k8sClient := &K8SClient{ - k8s: fake.NewSimpleClientset(runningIstiodPod()), - getPodPortForwarderFunc: func(namespace, name, portMap string) (httputil.PortForwarder, error) { - return &fakePortForwarder{}, nil - }, - } - - _, err := k8sClient.GetProxyStatus() - assert.NoError(err) -} - -func TestGetProxyStatusExternal(t *testing.T) { - assert := assert.New(t) - - testServer := istiodTestServer(t) - - conf := config.Get() - conf.ExternalServices.Istio.Registry = &config.RegistryConfig{ - IstiodURL: testServer.URL, - } - setConfig(t, *conf) - - k8sClient := &K8SClient{} - _, err := k8sClient.GetProxyStatus() - assert.NoError(err) -} - -func TestGetProxyStatusExternalBadResponse(t *testing.T) { - assert := assert.New(t) - - testServer := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - w.WriteHeader(http.StatusUnauthorized) - })) - t.Cleanup(testServer.Close) - - conf := config.Get() - conf.ExternalServices.Istio.Registry = &config.RegistryConfig{ - IstiodURL: testServer.URL, - } - setConfig(t, *conf) - - k8sClient := &K8SClient{} - _, err := k8sClient.GetProxyStatus() - assert.Error(err) -} - -func TestCanConnectToIstiodUnreachable(t *testing.T) { - assert := assert.New(t) - k8sClient := &K8SClient{ - k8s: fake.NewSimpleClientset(runningIstiodPod()), - getPodPortForwarderFunc: func(namespace, name, portMap string) (httputil.PortForwarder, error) { - return &fakePortForwarder{}, nil - }, - } - - status, err := k8sClient.CanConnectToIstiod() - assert.NoError(err) - - assert.Len(status, 1) - assert.Equal(ComponentUnreachable, status[0].Status) -} - -func TestCanConnectToIstiodReachable(t *testing.T) { - assert := assert.New(t) - k8sClient := &K8SClient{ - k8s: fake.NewSimpleClientset(runningIstiodPod()), - getPodPortForwarderFunc: func(namespace, name, portMap string) (httputil.PortForwarder, error) { - return &fakePortForwarder{}, nil - }, - } - ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - w.WriteHeader(http.StatusOK) - })) - t.Cleanup(ts.Close) - - setPortPool(t, ts.URL) - - status, err := k8sClient.CanConnectToIstiod() - assert.NoError(err) - - assert.Len(status, 1) -} diff --git a/kubernetes/kubernetes.go b/kubernetes/kubernetes.go index abfb3c3cfb..916c807110 100644 --- a/kubernetes/kubernetes.go +++ b/kubernetes/kubernetes.go @@ -21,7 +21,6 @@ import ( core_v1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/errors" meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/version" @@ -38,31 +37,20 @@ import ( type K8SClientInterface interface { // Kube returns the underlying kubernetes client. 
Kube() kubernetes.Interface - GetClusterServicesByLabels(labelsSelector string) ([]core_v1.Service, error) GetConfigMap(namespace, name string) (*core_v1.ConfigMap, error) GetCronJobs(namespace string) ([]batch_v1.CronJob, error) - GetDaemonSet(namespace string, name string) (*apps_v1.DaemonSet, error) - GetDaemonSets(namespace string) ([]apps_v1.DaemonSet, error) GetDeployment(namespace string, name string) (*apps_v1.Deployment, error) - GetDeployments(namespace string) ([]apps_v1.Deployment, error) GetDeploymentConfig(namespace string, name string) (*osapps_v1.DeploymentConfig, error) GetDeploymentConfigs(namespace string) ([]osapps_v1.DeploymentConfig, error) - GetEndpoints(namespace string, name string) (*core_v1.Endpoints, error) GetJobs(namespace string) ([]batch_v1.Job, error) GetNamespace(namespace string) (*core_v1.Namespace, error) GetNamespaces(labelSelector string) ([]core_v1.Namespace, error) GetPod(namespace, name string) (*core_v1.Pod, error) - GetPods(namespace, labelSelector string) ([]core_v1.Pod, error) GetReplicationControllers(namespace string) ([]core_v1.ReplicationController, error) - GetReplicaSets(namespace string) ([]apps_v1.ReplicaSet, error) GetSecret(namespace, name string) (*core_v1.Secret, error) GetSelfSubjectAccessReview(ctx context.Context, namespace, api, resourceType string, verbs []string) ([]*auth_v1.SelfSubjectAccessReview, error) - GetService(namespace string, name string) (*core_v1.Service, error) - GetServices(namespace string, selectorLabels map[string]string) ([]core_v1.Service, error) - GetServicesByLabels(namespace string, labelsSelector string) ([]core_v1.Service, error) - GetStatefulSet(namespace string, name string) (*apps_v1.StatefulSet, error) - GetStatefulSets(namespace string) ([]apps_v1.StatefulSet, error) GetTokenSubject(authInfo *api.AuthInfo) (string, error) + ForwardGetRequest(namespace, podName string, destinationPort int, path string) ([]byte, error) StreamPodLogs(namespace, name string, opts *core_v1.PodLogOptions) (io.ReadCloser, error) UpdateNamespace(namespace string, jsonPatch string) (*core_v1.Namespace, error) UpdateService(namespace string, name string, jsonPatch string, patchType string) error @@ -76,7 +64,7 @@ type OSClientInterface interface { UpdateProject(project string, jsonPatch string) (*osproject_v1.Project, error) } -func (in *K8SClient) forwardGetRequest(namespace, podName string, destinationPort int, path string) ([]byte, error) { +func (in *K8SClient) ForwardGetRequest(namespace, podName string, destinationPort int, path string) ([]byte, error) { localPort := httputil.Pool.GetFreePort() defer httputil.Pool.FreePort(localPort) @@ -252,55 +240,6 @@ func (in *K8SClient) IsIstioAPI() bool { return *in.isIstioAPI } -// GetServices returns a list of services for a given namespace. -// If selectorLabels is defined the list of services is filtered for those that matches Services selector labels. -// It returns an error on any problem. -// NOTE: The selectorLabels argument is NOT to find services matching the given labels. Assume selectorLabels are -// the labels of a Deployment. If this imaginary Deployment is selected by the Service (because of its Selector), then -// that service is returned; else it's omitted. 
-func (in *K8SClient) GetServices(namespace string, selectorLabels map[string]string) ([]core_v1.Service, error) { - var allServices []core_v1.Service - - if allServicesList, err := in.k8s.CoreV1().Services(namespace).List(in.ctx, emptyListOptions); err == nil { - allServices = allServicesList.Items - } else { - return []core_v1.Service{}, err - } - - if selectorLabels == nil { - return allServices, nil - } - var services []core_v1.Service - for _, svc := range allServices { - svcSelector := labels.Set(svc.Spec.Selector).AsSelector() - if !svcSelector.Empty() && svcSelector.Matches(labels.Set(selectorLabels)) { - services = append(services, svc) - } - } - return services, nil -} - -func (in *K8SClient) GetServicesByLabels(namespace string, labelsSelector string) ([]core_v1.Service, error) { - selector := meta_v1.ListOptions{LabelSelector: labelsSelector} - if allServicesList, err := in.k8s.CoreV1().Services(namespace).List(in.ctx, selector); err == nil { - return allServicesList.Items, nil - } else { - return []core_v1.Service{}, err - } -} - -func (in *K8SClient) GetDaemonSet(namespace string, name string) (*apps_v1.DaemonSet, error) { - return in.k8s.AppsV1().DaemonSets(namespace).Get(in.ctx, name, emptyGetOptions) -} - -func (in *K8SClient) GetDaemonSets(namespace string) ([]apps_v1.DaemonSet, error) { - if daeList, err := in.k8s.AppsV1().DaemonSets(namespace).List(in.ctx, emptyListOptions); err == nil { - return daeList.Items, nil - } else { - return []apps_v1.DaemonSet{}, err - } -} - // GetDeployment returns the definition of a specific deployment. // It returns an error on any problem. func (in *K8SClient) GetDeployment(namespace, name string) (*apps_v1.Deployment, error) { @@ -351,59 +290,6 @@ func (in *K8SClient) GetDeploymentConfigs(namespace string) ([]osapps_v1.Deploym return result.Items, nil } -// GetReplicaSets returns the cached ReplicaSets for the namespace. For any given RS for a given -// Owner (i.e. Deployment), only the most recent version of the RS will be included in the returned list. -// When an owning Deployment is configured with revisionHistoryLimit > 0, then k8s may return multiple -// versions of the RS for the same Deployment (current and older revisions). Note that it is still possible -// to have multiple RS for the same owner. In which case the most recent version of each is returned. 
-// see also: ../kubernetes.go -func (in *K8SClient) GetReplicaSets(namespace string) ([]apps_v1.ReplicaSet, error) { - if rsList, err := in.k8s.AppsV1().ReplicaSets(namespace).List(in.ctx, emptyListOptions); err == nil { - activeRSMap := map[string]apps_v1.ReplicaSet{} - for _, rs := range rsList.Items { - if len(rs.OwnerReferences) > 0 { - for _, ownerRef := range rs.OwnerReferences { - if ownerRef.Controller != nil && *ownerRef.Controller { - key := fmt.Sprintf("%s_%s_%s", ownerRef.Name, rs.Name, rs.ResourceVersion) - if currRS, ok := activeRSMap[key]; ok { - if currRS.CreationTimestamp.Time.Before(rs.CreationTimestamp.Time) { - activeRSMap[key] = rs - } - } else { - activeRSMap[key] = rs - } - } - } - } else { - // it is it's own controller - activeRSMap[rs.Name] = rs - } - } - - result := make([]apps_v1.ReplicaSet, len(activeRSMap)) - i := 0 - for _, activeRS := range activeRSMap { - result[i] = activeRS - i = i + 1 - } - return result, nil - } else { - return []apps_v1.ReplicaSet{}, err - } -} - -func (in *K8SClient) GetStatefulSet(namespace string, name string) (*apps_v1.StatefulSet, error) { - return in.k8s.AppsV1().StatefulSets(namespace).Get(in.ctx, name, emptyGetOptions) -} - -func (in *K8SClient) GetStatefulSets(namespace string) ([]apps_v1.StatefulSet, error) { - if ssList, err := in.k8s.AppsV1().StatefulSets(namespace).List(in.ctx, emptyListOptions); err == nil { - return ssList.Items, nil - } else { - return []apps_v1.StatefulSet{}, err - } -} - func (in *K8SClient) GetReplicationControllers(namespace string) ([]core_v1.ReplicationController, error) { if rcList, err := in.k8s.CoreV1().ReplicationControllers(namespace).List(in.ctx, emptyListOptions); err == nil { return rcList.Items, nil @@ -412,32 +298,6 @@ func (in *K8SClient) GetReplicationControllers(namespace string) ([]core_v1.Repl } } -// GetService returns the definition of a specific service. -// It returns an error on any problem. -func (in *K8SClient) GetService(namespace, name string) (*core_v1.Service, error) { - return in.k8s.CoreV1().Services(namespace).Get(in.ctx, name, emptyGetOptions) -} - -// GetEndpoints return the list of endpoint of a specific service. -// It returns an error on any problem. -func (in *K8SClient) GetEndpoints(namespace, name string) (*core_v1.Endpoints, error) { - return in.k8s.CoreV1().Endpoints(namespace).Get(in.ctx, name, emptyGetOptions) -} - -// GetPods returns the pods definitions for a given set of labels. -// An empty labelSelector will fetch all pods found per a namespace. -// It returns an error on any problem. -func (in *K8SClient) GetPods(namespace, labelSelector string) ([]core_v1.Pod, error) { - // An empty selector is ambiguous in the go client, could mean either "select all" or "select none" - // Here we assume empty == select all - // (see also https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors) - if pods, err := in.k8s.CoreV1().Pods(namespace).List(in.ctx, meta_v1.ListOptions{LabelSelector: labelSelector}); err == nil { - return pods.Items, nil - } else { - return []core_v1.Pod{}, err - } -} - // getPodPortForwarder returns a port-forwarder struct which represents an open server forwarding request to the // requested pod and port // namespace: name of the namespace where the pod lives in. 
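With forwardGetRequest exported as ForwardGetRequest on K8SClientInterface, code outside the kubernetes package (for example the new control plane monitor) can fetch istiod debug output through a port-forward. The snippet below is only a minimal sketch of such a caller; the namespace, pod name, debug port, and helper name are illustrative assumptions, not part of this change.

package example

import (
	"fmt"

	"github.com/kiali/kiali/kubernetes"
)

// dumpSyncz sketches how a consumer of K8SClientInterface might read an istiod
// debug endpoint via the newly exported ForwardGetRequest.
func dumpSyncz(client kubernetes.K8SClientInterface) error {
	// "istiod-123" and port 8080 are assumed values; adjust for the target mesh.
	body, err := client.ForwardGetRequest("istio-system", "istiod-123", 8080, "/debug/syncz")
	if err != nil {
		return err
	}
	fmt.Printf("/debug/syncz returned %d bytes\n", len(body))
	return nil
}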
diff --git a/kubernetes/kubetest/mock.go b/kubernetes/kubetest/mock.go index 09417ff00a..8c89ea01ad 100644 --- a/kubernetes/kubetest/mock.go +++ b/kubernetes/kubetest/mock.go @@ -181,6 +181,11 @@ func (o *K8SClientMock) GetTokenSubject(authInfo *api.AuthInfo) (string, error) return authInfo.Token, nil } +func (o *K8SClientMock) ForwardGetRequest(namespace, podName string, destinationPort int, path string) ([]byte, error) { + args := o.Called(namespace, podName, destinationPort, path) + return args.Get(0).([]byte), args.Error(1) +} + func (o *K8SClientMock) MockService(namespace, name string) { s := FakeService(namespace, name) o.On("GetService", namespace, name).Return(&s, nil) diff --git a/kubernetes/token_test.go b/kubernetes/token_test.go index 1de4f1e0bd..9e1b10b976 100644 --- a/kubernetes/token_test.go +++ b/kubernetes/token_test.go @@ -22,7 +22,7 @@ func TestIsTokenExpired(t *testing.T) { assert := assert.New(t) config := config.NewConfig() config.Deployment.RemoteSecretPath = t.TempDir() - setConfig(t, *config) + SetConfig(t, *config) DefaultServiceAccountPath = tmpFileTokenExpired @@ -40,7 +40,7 @@ func TestGetKialiToken(t *testing.T) { assert := assert.New(t) config := config.NewConfig() config.Deployment.RemoteSecretPath = t.TempDir() - setConfig(t, *config) + SetConfig(t, *config) DefaultServiceAccountPath = tmpFileGetToken data := "thisisarandomtoken" @@ -58,7 +58,7 @@ func TestGetKialiTokenRemoteCluster(t *testing.T) { config := config.NewConfig() config.Deployment.RemoteSecretPath = "testdata/remote-cluster-multiple-users.yaml" - setConfig(t, *config) + SetConfig(t, *config) tokenRead = time.Time{} token, err := GetKialiTokenForHomeCluster() diff --git a/kubernetes/types.go b/kubernetes/types.go index fe27dc6dd6..65600b14e8 100644 --- a/kubernetes/types.go +++ b/kubernetes/types.go @@ -193,7 +193,7 @@ type RBACDetails struct { } type ProxyStatus struct { - pilot string + Pilot string SyncStatus } @@ -214,7 +214,7 @@ type SyncStatus struct { } type RegistryService struct { - pilot string + Pilot string IstioService } diff --git a/server/server.go b/server/server.go index 78a3269b81..c3095bb0ef 100644 --- a/server/server.go +++ b/server/server.go @@ -13,22 +13,26 @@ import ( "github.com/kiali/kiali/business" "github.com/kiali/kiali/config" + "github.com/kiali/kiali/kubernetes" + "github.com/kiali/kiali/kubernetes/cache" "github.com/kiali/kiali/log" "github.com/kiali/kiali/observability" "github.com/kiali/kiali/routing" ) type Server struct { - httpServer *http.Server - router *mux.Router - tracer *sdktrace.TracerProvider + conf config.Config + controlPlaneMonitor business.ControlPlaneMonitor + clientFactory kubernetes.ClientFactory + httpServer *http.Server + kialiCache cache.KialiCache + router *mux.Router + tracer *sdktrace.TracerProvider } // NewServer creates a new server configured with the given settings. // Start and Stop it with the corresponding functions. 
-func NewServer() *Server { - conf := config.Get() - +func NewServer(controlPlaneMonitor business.ControlPlaneMonitor, clientFactory kubernetes.ClientFactory, cache cache.KialiCache, conf config.Config) *Server { // create a router that will route all incoming API server requests to different handlers router := routing.NewRouter() var tracingProvider *sdktrace.TracerProvider @@ -75,8 +79,12 @@ func NewServer() *Server { // return our new Server s := &Server{ - httpServer: httpServer, - router: router, + conf: conf, + clientFactory: clientFactory, + controlPlaneMonitor: controlPlaneMonitor, + httpServer: httpServer, + kialiCache: cache, + router: router, } if conf.Server.Observability.Tracing.Enabled && tracingProvider != nil { s.tracer = tracingProvider @@ -86,25 +94,18 @@ func NewServer() *Server { // Start HTTP server asynchronously. TLS may be active depending on the global configuration. func (s *Server) Start() { - // Start the business to initialize cache dependencies. - // The business cache should start before the server endpoint to ensure - // that the cache is ready before it's used by one of the server handlers. - if err := business.Start(); err != nil { - log.Errorf("Error starting business layer: %s", err) - panic(err) - } + business.Start(s.clientFactory, s.controlPlaneMonitor, s.kialiCache) - conf := config.Get() - log.Infof("Server endpoint will start at [%v%v]", s.httpServer.Addr, conf.Server.WebRoot) - log.Infof("Server endpoint will serve static content from [%v]", conf.Server.StaticContentRootDirectory) - secure := conf.Identity.CertFile != "" && conf.Identity.PrivateKeyFile != "" + log.Infof("Server endpoint will start at [%v%v]", s.httpServer.Addr, s.conf.Server.WebRoot) + log.Infof("Server endpoint will serve static content from [%v]", s.conf.Server.StaticContentRootDirectory) + secure := s.conf.Identity.CertFile != "" && s.conf.Identity.PrivateKeyFile != "" go func() { var err error if secure { log.Infof("Server endpoint will require https") log.Infof("Server will support protocols: %v", s.httpServer.TLSConfig.NextProtos) s.router.Use(secureHttpsMiddleware) - err = s.httpServer.ListenAndServeTLS(conf.Identity.CertFile, conf.Identity.PrivateKeyFile) + err = s.httpServer.ListenAndServeTLS(s.conf.Identity.CertFile, s.conf.Identity.PrivateKeyFile) } else { s.router.Use(plainHttpMiddleware) err = s.httpServer.ListenAndServe() @@ -113,7 +114,7 @@ func (s *Server) Start() { }() // Start the Metrics Server - if conf.Server.Observability.Metrics.Enabled { + if s.conf.Server.Observability.Metrics.Enabled { StartMetricsServer() } } @@ -121,7 +122,6 @@ func (s *Server) Start() { // Stop the HTTP server func (s *Server) Stop() { StopMetricsServer() - business.Stop() log.Infof("Server endpoint will stop at [%v]", s.httpServer.Addr) s.httpServer.Close() observability.StopTracer(s.tracer) diff --git a/server/server_test.go b/server/server_test.go index 7c6539b3bd..1149d7c640 100644 --- a/server/server_test.go +++ b/server/server_test.go @@ -22,6 +22,7 @@ import ( "github.com/kiali/kiali/config" "github.com/kiali/kiali/config/security" "github.com/kiali/kiali/kubernetes" + "github.com/kiali/kiali/kubernetes/cache" "github.com/kiali/kiali/util" ) @@ -40,7 +41,7 @@ func TestRootContextPath(t *testing.T) { testPort, err := getFreePort(testHostname) if err != nil { - t.Fatalf("Cannot get a free port to run tests on host [%v]", testHostname) + t.Fatalf("Cannot get a free port to run tests on host [%v], err [%s]", testHostname, err) } else { t.Logf("Will use free port [%v] on host [%v] 
for tests", testPort, testHostname) } @@ -57,14 +58,14 @@ func TestRootContextPath(t *testing.T) { conf.Server.StaticContentRootDirectory = tmpDir conf.Auth.Strategy = "anonymous" - // Set the global client factory. - kubernetes.NewTestingClientFactory(t) - serverURL := fmt.Sprintf("http://%v", testServerHostPort) config.Set(conf) - server := NewServer() + cf := kubernetes.NewTestingClientFactory(t) + cpm := &business.FakeControlPlaneMonitor{} + cache := cache.NewTestingCacheWithFactory(t, cf, *conf) + server := NewServer(cpm, cf, cache, *conf) server.Start() t.Logf("Started test http server: %v", serverURL) defer func() { @@ -102,9 +103,6 @@ func TestRootContextPath(t *testing.T) { } func TestAnonymousMode(t *testing.T) { - cf := kubernetes.NewTestingClientFactory(t) - business.SetWithBackends(cf, nil) - testPort, err := getFreePort(testHostname) if err != nil { t.Fatalf("Cannot get a free port to run tests on host [%v]", testHostname) @@ -126,7 +124,10 @@ func TestAnonymousMode(t *testing.T) { config.Set(conf) - server := NewServer() + cf := kubernetes.NewTestingClientFactory(t) + cpm := &business.FakeControlPlaneMonitor{} + cache := cache.NewTestingCacheWithFactory(t, cf, *conf) + server := NewServer(cpm, cf, cache, *conf) server.Start() t.Logf("Started test http server: %v", serverURL) defer func() { @@ -159,9 +160,6 @@ func TestAnonymousMode(t *testing.T) { } func TestSecureComm(t *testing.T) { - cf := kubernetes.NewTestingClientFactory(t) - business.SetWithBackends(cf, nil) - testPort, err := getFreePort(testHostname) if err != nil { t.Fatalf("Cannot get a free port to run tests on host [%v]", testHostname) @@ -215,7 +213,10 @@ func TestSecureComm(t *testing.T) { config.Set(conf) - server := NewServer() + cf := kubernetes.NewTestingClientFactory(t) + cpm := &business.FakeControlPlaneMonitor{} + cache := cache.NewTestingCacheWithFactory(t, cf, *conf) + server := NewServer(cpm, cf, cache, *conf) server.Start() t.Logf("Started test http server: %v", serverURL) defer func() { @@ -304,13 +305,14 @@ func TestTracingConfigured(t *testing.T) { conf.Auth.Strategy = "anonymous" // Set the global client factory. - kubernetes.NewTestingClientFactory(t) - + cf := kubernetes.NewTestingClientFactory(t) serverURL := fmt.Sprintf("http://%v", testServerHostPort) config.Set(conf) - server := NewServer() + cpm := &business.FakeControlPlaneMonitor{} + cache := cache.NewTestingCacheWithFactory(t, cf, *conf) + server := NewServer(cpm, cf, cache, *conf) server.Start() t.Logf("Started test http server: %v", serverURL) defer func() { diff --git a/store/store.go b/store/store.go new file mode 100644 index 0000000000..fbf16a225d --- /dev/null +++ b/store/store.go @@ -0,0 +1,32 @@ +package store + +import ( + "fmt" +) + +// NotFoundError is returned when a key is not found in the store. +type NotFoundError struct { + Key string +} + +func (e *NotFoundError) Error() string { + return fmt.Sprintf("key \"%s\" not found in store", e.Key) +} + +// Store is a generic key value store. Implementations should be safe for concurrent use. +// It is basically just a map with a lock. Does not yet implement Set so we don't have to +// worry about this thing growing without bound. +type Store[T any] interface { + // Get returns the value associated with the given key or an error. + Get(key string) (T, error) + // Replace replaces the contents of the store with the given map. + Replace(map[string]T) +} + +// New returns a new store safe for concurrent use. 
+func New[T any]() Store[T] {
+	return &threadSafeStore[T]{data: make(map[string]T)}
+}
+
+// Interface guard to ensure NotFoundError implements the error interface.
+var _ error = &NotFoundError{}
diff --git a/store/store_test.go b/store/store_test.go
new file mode 100644
index 0000000000..c5a82fc070
--- /dev/null
+++ b/store/store_test.go
@@ -0,0 +1,68 @@
+package store_test
+
+import (
+	"testing"
+
+	"github.com/stretchr/testify/require"
+
+	"github.com/kiali/kiali/store"
+)
+
+func TestGetKeyExists(t *testing.T) {
+	require := require.New(t)
+
+	testStore := store.New[int]()
+	testStore.Replace(map[string]int{"key1": 42})
+	value, err := testStore.Get("key1")
+
+	require.NoError(err)
+	require.Equal(42, value)
+}
+
+func TestGetNonExistentKeyFails(t *testing.T) {
+	require := require.New(t)
+
+	testStore := store.New[int]()
+	_, err := testStore.Get("nonexistent")
+	require.Error(err)
+	require.IsType(&store.NotFoundError{}, err)
+}
+
+func TestReplaceStoreContents(t *testing.T) {
+	require := require.New(t)
+
+	testStore := store.New[int]()
+	testStore.Replace(map[string]int{"key1": 42})
+
+	newData := map[string]int{"key2": 99, "key3": 100}
+	testStore.Replace(newData)
+
+	_, err := testStore.Get("key1")
+	require.Error(err)
+
+	value, err := testStore.Get("key2")
+	require.NoError(err)
+	require.Equal(99, value)
+
+	value, err = testStore.Get("key3")
+	require.NoError(err)
+	require.Equal(100, value)
+}
+
+func TestNotFoundErrorMessageIsNotEmpty(t *testing.T) {
+	require := require.New(t)
+
+	err := &store.NotFoundError{Key: "key1"}
+	require.NotEmpty(err.Error())
+}
+
+func TestReplaceWithEmptyKey(t *testing.T) {
+	require := require.New(t)
+
+	testStore := store.New[int]()
+	testStore.Replace(map[string]int{"": 1})
+
+	val, err := testStore.Get("")
+	require.NoError(err)
+	require.Equal(1, val)
+}
diff --git a/store/threadsafe_store.go b/store/threadsafe_store.go
new file mode 100644
index 0000000000..f531e77aff
--- /dev/null
+++ b/store/threadsafe_store.go
@@ -0,0 +1,36 @@
+package store
+
+import (
+	"sync"
+)
+
+// threadSafeStore implements the Store interface and is safe for concurrent use.
+type threadSafeStore[T any] struct {
+	lock sync.RWMutex
+	data map[string]T
+}
+
+// Get returns the value associated with the given key or an error.
+func (s *threadSafeStore[T]) Get(key string) (T, error) {
+	s.lock.RLock()
+	defer s.lock.RUnlock()
+	var (
+		v     T
+		found bool
+	)
+	v, found = s.data[key]
+	if !found {
+		return v, &NotFoundError{key}
+	}
+	return v, nil
+}
+
+// Replace replaces the contents of the store with the given map.
+func (s *threadSafeStore[T]) Replace(items map[string]T) {
+	s.lock.Lock()
+	defer s.lock.Unlock()
+	s.data = items
+}
+
+// Interface guard to ensure threadSafeStore implements the Store interface.
+var _ Store[any] = &threadSafeStore[any]{}
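For reviewers new to the store package introduced above, a minimal usage sketch follows. The value type and keys are illustrative only; note that Replace is deliberately the only write operation, so contents are swapped wholesale rather than accumulated key by key.

package main

import (
	"errors"
	"fmt"

	"github.com/kiali/kiali/store"
)

func main() {
	// A store of proxy versions keyed by cluster name (illustrative data).
	versions := store.New[string]()

	// Replace swaps the whole contents; there is intentionally no per-key Set.
	versions.Replace(map[string]string{"east": "1.18.2"})

	if v, err := versions.Get("east"); err == nil {
		fmt.Println("east:", v)
	}

	// Missing keys come back as *store.NotFoundError.
	_, err := versions.Get("west")
	var notFound *store.NotFoundError
	fmt.Println("west missing:", errors.As(err, &notFound))
}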