From e9986eb581987b5722daac1f10b4fe4172856211 Mon Sep 17 00:00:00 2001
From: ricoberger
Date: Sat, 1 May 2021 22:51:22 +0200
Subject: [PATCH] Improve caching logic

This commit changes the behaviour of the current caching logic for teams and
the topology graph. We are using a similar logic to the one introduced for
the plugin templates. This means that the teams and topology data isn't
generated in an additional goroutine anymore; instead, we get the data on the
first request and then cache it for the defined cache duration.
---
 CHANGELOG.md                                |   1 +
 docs/resources/templates.md                 |   2 +-
 pkg/api/plugins/clusters/cluster/cluster.go |  21 +--
 pkg/api/plugins/clusters/clusters.go        | 170 +++++++++---------
 pkg/api/plugins/clusters/teams.go           | 117 +++++++------
 pkg/api/plugins/clusters/topology.go        | 181 ++++++++++++--------
 6 files changed, 269 insertions(+), 223 deletions(-)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index 6f4330f09..8efc5da95 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -33,6 +33,7 @@ NOTE: As semantic versioning states all 0.y.z releases can contain breaking chan
 - [#53](https://github.com/kobsio/kobs/pull/53): Improve Jaeger plugin, by allow filtering of services and operations and adding several actions for traces.
 - [#55](https://github.com/kobsio/kobs/pull/55): Allow a user to add a tag from a span as filter in the Jaeger plugin.
 - [#57](https://github.com/kobsio/kobs/pull/57): Visualize the offset of spans in the Jaeger plugin.
+- [#61](https://github.com/kobsio/kobs/pull/61): Improve the caching logic by generating the teams and the topology graph only when they are requested and not via an additional goroutine.
 
 ## [v0.2.0](https://github.com/kobsio/kobs/releases/tag/v0.2.0) (2021-04-23)
 
diff --git a/docs/resources/templates.md b/docs/resources/templates.md
index f17b575de..b04de5d2d 100644
--- a/docs/resources/templates.md
+++ b/docs/resources/templates.md
@@ -22,7 +22,7 @@ In the following you can found the specification for the Template CRD.
 | name | string | The name of the variable. | Yes |
 | description | string | A description for the variable. | Yes |
 
-## Example
+## Examples
 
 !!! note
     We collect several templates in the [`deploy/templates`](https://github.com/kobsio/kobs/blob/main/deploy/templates) folder. If you have a template, which can also be useful for others, feel free to add it to this folder.
diff --git a/pkg/api/plugins/clusters/cluster/cluster.go b/pkg/api/plugins/clusters/cluster/cluster.go
index 796e18779..11776ab7b 100644
--- a/pkg/api/plugins/clusters/cluster/cluster.go
+++ b/pkg/api/plugins/clusters/cluster/cluster.go
@@ -36,17 +36,10 @@ type Cluster struct {
 	applicationClientset *applicationClientsetVersioned.Clientset
 	teamClientset        *teamClientsetVersioned.Clientset
 	templateClientset    *templateClientsetVersioned.Clientset
-	options              Options
 	name                 string
 	crds                 []*clustersProto.CRD
 }
 
-// Options contains various options, which could be set for a cluster. For example a user can set the cache duration for
-// loaded manifest files and the names of the datasources, which should be used within a cluster.
-type Options struct {
-	cacheDuration time.Duration
-}
-
 // Cache implements a simple caching layer, for the loaded manifest files. The goal of the caching layer is to return
 // the manifests faster to the user.
 type Cache struct {
@@ -54,16 +47,6 @@
 	namespaces          []string
 	namespacesLastFetch time.Time
 }
 
-// SetOptions is used to set the options for a cluster.
The options are not set during the creation of a cluster, so -// that we do not have to pass around the options through different functions. -// We also do not know the datasources befor the cluster name is determined, so that we loop through all loaded clusters -// and connect the datasource names with the correct cluster. -func (c *Cluster) SetOptions(cacheDuration time.Duration) { - c.options = Options{ - cacheDuration: cacheDuration, - } -} - // GetName returns the name of the cluster. func (c *Cluster) GetName() string { return c.name @@ -77,10 +60,10 @@ func (c *Cluster) GetCRDs() []*clustersProto.CRD { // GetNamespaces returns all namespaces for the cluster. To reduce the latency and the number of API calls, we are // "caching" the namespaces. This means that if a new namespace is created in a cluster, this namespaces is only shown // after the configured cache duration. -func (c *Cluster) GetNamespaces(ctx context.Context) ([]string, error) { +func (c *Cluster) GetNamespaces(ctx context.Context, cacheDuration time.Duration) ([]string, error) { log.WithFields(logrus.Fields{"last fetch": c.cache.namespacesLastFetch}).Tracef("Last namespace fetch.") - if c.cache.namespacesLastFetch.After(time.Now().Add(-1 * c.options.cacheDuration)) { + if c.cache.namespacesLastFetch.After(time.Now().Add(-1 * cacheDuration)) { log.WithFields(logrus.Fields{"cluster": c.name}).Debugf("Return namespaces from cache.") return c.cache.namespaces, nil diff --git a/pkg/api/plugins/clusters/clusters.go b/pkg/api/plugins/clusters/clusters.go index 5a4ff47b7..30da36cb7 100644 --- a/pkg/api/plugins/clusters/clusters.go +++ b/pkg/api/plugins/clusters/clusters.go @@ -12,7 +12,6 @@ import ( "github.com/kobsio/kobs/pkg/api/plugins/clusters/cluster" clustersProto "github.com/kobsio/kobs/pkg/api/plugins/clusters/proto" "github.com/kobsio/kobs/pkg/api/plugins/clusters/provider" - teamProto "github.com/kobsio/kobs/pkg/api/plugins/team/proto" templateProto "github.com/kobsio/kobs/pkg/api/plugins/template/proto" "github.com/sirupsen/logrus" @@ -23,28 +22,37 @@ import ( var ( log = logrus.WithFields(logrus.Fields{"package": "clusters"}) - cacheDurationNamespaces string - cacheDurationTopology string - cacheDurationTeams string + cacheDurationNamespaces time.Duration + cacheDurationTopology time.Duration + cacheDurationTeams time.Duration cacheDurationTemplates time.Duration forbiddenResources []string ) // init is used to define all command-line flags for the clusters package. 
func init() { - defaultCacheDurationNamespaces := "5m" + defaultCacheDurationNamespaces := time.Duration(5 * time.Minute) if os.Getenv("KOBS_CLUSTERS_CACHE_DURATION_NAMESPACES") != "" { - defaultCacheDurationNamespaces = os.Getenv("KOBS_CLUSTERS_CACHE_DURATION_NAMESPACES") + parsedCacheDurationNamespaces, err := time.ParseDuration(os.Getenv("KOBS_CLUSTERS_CACHE_DURATION_NAMESPACES")) + if err == nil { + defaultCacheDurationNamespaces = parsedCacheDurationNamespaces + } } - defaultCacheDurationTopology := "60m" + defaultCacheDurationTopology := time.Duration(60 * time.Minute) if os.Getenv("KOBS_CLUSTERS_CACHE_DURATION_TOPOLOGY") != "" { - defaultCacheDurationTopology = os.Getenv("KOBS_CLUSTERS_CACHE_DURATION_TOPOLOGY") + parsedCacheDurationTopology, err := time.ParseDuration(os.Getenv("KOBS_CLUSTERS_CACHE_DURATION_TOPOLOGY")) + if err == nil { + defaultCacheDurationTopology = parsedCacheDurationTopology + } } - defaultCacheDurationTeams := "60m" + defaultCacheDurationTeams := time.Duration(60 * time.Minute) if os.Getenv("KOBS_CLUSTERS_CACHE_DURATION_TEAMS") != "" { - defaultCacheDurationTeams = os.Getenv("KOBS_CLUSTERS_CACHE_DURATION_TEAMS") + parsedCacheDurationTeams, err := time.ParseDuration(os.Getenv("KOBS_CLUSTERS_CACHE_DURATION_TEAMS")) + if err == nil { + defaultCacheDurationTeams = parsedCacheDurationTeams + } } defaultCacheDurationTemplates := time.Duration(60 * time.Minute) @@ -60,9 +68,9 @@ func init() { defaultForbiddenResources = strings.Split(os.Getenv("KOBS_CLUSTERS_FORBIDDEN_RESOURCES"), ",") } - flag.StringVar(&cacheDurationNamespaces, "clusters.cache-duration.namespaces", defaultCacheDurationNamespaces, "The duration, for how long requests to get the list of namespaces should be cached.") - flag.StringVar(&cacheDurationTopology, "clusters.cache-duration.topology", defaultCacheDurationTopology, "The duration, for how long the topology data should be cached.") - flag.StringVar(&cacheDurationTeams, "clusters.cache-duration.teams", defaultCacheDurationTeams, "The duration, for how long the teams data should be cached.") + flag.DurationVar(&cacheDurationNamespaces, "clusters.cache-duration.namespaces", defaultCacheDurationNamespaces, "The duration, for how long requests to get the list of namespaces should be cached.") + flag.DurationVar(&cacheDurationTopology, "clusters.cache-duration.topology", defaultCacheDurationTopology, "The duration, for how long the topology data should be cached.") + flag.DurationVar(&cacheDurationTeams, "clusters.cache-duration.teams", defaultCacheDurationTeams, "The duration, for how long the teams data should be cached.") flag.DurationVar(&cacheDurationTemplates, "clusters.cache-duration.templates", defaultCacheDurationTemplates, "The duration, for how long the templates data should be cached.") flag.StringArrayVar(&forbiddenResources, "clusters.forbidden-resources", defaultForbiddenResources, "A list of resources, which can not be accessed via kobs.") } @@ -88,17 +96,23 @@ type Config struct { type Clusters struct { clustersProto.UnimplementedClustersServer clusters []*cluster.Cluster - edges []*clustersProto.Edge - nodes []*clustersProto.Node - teams []Team cache Cache } type Cache struct { + topology Topology + topologyLastFetch time.Time + teams []Team + teamsLastFetch time.Time templates []*templateProto.Template templatesLastFetch time.Time } +type Topology struct { + edges []*clustersProto.Edge + nodes []*clustersProto.Node +} + func (c *Clusters) getCluster(name string) *cluster.Cluster { for _, cl := range c.clusters { if cl.GetName() == 
name { @@ -148,7 +162,7 @@ func (c *Clusters) GetNamespaces(ctx context.Context, getNamespacesRequest *clus return nil, fmt.Errorf("invalid cluster name") } - clusterNamespaces, err := cluster.GetNamespaces(ctx) + clusterNamespaces, err := cluster.GetNamespaces(ctx, cacheDurationNamespaces) if err != nil { return nil, err } @@ -172,7 +186,7 @@ func (c *Clusters) GetNamespaces(ctx context.Context, getNamespacesRequest *clus return uniqueNamespaces[i] < uniqueNamespaces[j] }) - log.WithFields(logrus.Fields{"namespaces": uniqueNamespaces}).Tracef("GetNamespaces") + log.WithFields(logrus.Fields{"namespaces": len(uniqueNamespaces)}).Tracef("GetNamespaces") return &clustersProto.GetNamespacesResponse{ Namespaces: uniqueNamespaces, @@ -337,20 +351,34 @@ func (c *Clusters) GetApplication(ctx context.Context, getApplicationRequest *cl func (c *Clusters) GetTeams(ctx context.Context, getTeamsRequest *clustersProto.GetTeamsRequest) (*clustersProto.GetTeamsResponse, error) { log.Tracef("GetTeams") - var teams []*teamProto.Team + if c.cache.teamsLastFetch.After(time.Now().Add(-1 * cacheDurationTeams)) { + return &clustersProto.GetTeamsResponse{ + Teams: transformCachedTeams(c.cache.teams), + }, nil + } + + if c.cache.teams == nil { + teams := getTeams(ctx, c.clusters) + if teams != nil { + c.cache.teamsLastFetch = time.Now() + c.cache.teams = teams + } - for _, team := range c.teams { - teams = append(teams, &teamProto.Team{ - Name: team.Name, - Description: team.Description, - Logo: team.Logo, - }) + return &clustersProto.GetTeamsResponse{ + Teams: transformCachedTeams(teams), + }, nil } - log.WithFields(logrus.Fields{"count": len(teams)}).Tracef("GetTeams") + go func() { + teams := getTeams(ctx, c.clusters) + if teams != nil { + c.cache.teamsLastFetch = time.Now() + c.cache.teams = teams + } + }() return &clustersProto.GetTeamsResponse{ - Teams: teams, + Teams: transformCachedTeams(c.cache.teams), }, nil } @@ -365,7 +393,15 @@ func (c *Clusters) GetTeams(ctx context.Context, getTeamsRequest *clustersProto. func (c *Clusters) GetTeam(ctx context.Context, getTeamRequest *clustersProto.GetTeamRequest) (*clustersProto.GetTeamResponse, error) { log.WithFields(logrus.Fields{"name": getTeamRequest.Name}).Tracef("GetTeam") - teamShort := getTeamData(c.teams, getTeamRequest.Name) + if c.cache.teams == nil { + teams := getTeams(ctx, c.clusters) + if teams != nil { + c.cache.teamsLastFetch = time.Now() + c.cache.teams = teams + } + } + + teamShort := getTeamData(c.cache.teams, getTeamRequest.Name) if teamShort == nil { return nil, fmt.Errorf("invalid team name") } @@ -444,50 +480,42 @@ func (c *Clusters) GetTemplates(ctx context.Context, getTemplatesRequest *cluste // GetApplicationsTopology returns the topology for the given list of clusters and namespaces. We add an additional node // for each cluster and namespace. These nodes are used to group the applications by the cluster and namespace. 
func (c *Clusters) GetApplicationsTopology(ctx context.Context, getApplicationsTopologyRequest *clustersProto.GetApplicationsTopologyRequest) (*clustersProto.GetApplicationsTopologyResponse, error) { - var edges []*clustersProto.Edge - var nodes []*clustersProto.Node - - for _, clusterName := range getApplicationsTopologyRequest.Clusters { - nodes = append(nodes, &clustersProto.Node{ - Id: clusterName, - Label: clusterName, - Type: "cluster", - Parent: "", - Cluster: clusterName, - Namespace: "", - Name: "", - }) - - for _, namespace := range getApplicationsTopologyRequest.Namespaces { - nodes = append(nodes, &clustersProto.Node{ - Id: clusterName + "-" + namespace, - Label: namespace, - Type: "namespace", - Parent: clusterName, - Cluster: clusterName, - Namespace: namespace, - Name: "", - }) + log.Tracef("GetApplicationsTopology") - for _, edge := range c.edges { - if (edge.SourceCluster == clusterName && edge.SourceNamespace == namespace) || (edge.TargetCluster == clusterName && edge.TargetNamespace == namespace) { - edges = appendEdgeIfMissing(edges, edge) - } - } - } + if c.cache.topologyLastFetch.After(time.Now().Add(-1 * cacheDurationTopology)) { + edges, nodes := generateTopology(c.cache.topology, getApplicationsTopologyRequest.Clusters, getApplicationsTopologyRequest.Namespaces) + return &clustersProto.GetApplicationsTopologyResponse{ + Edges: edges, + Nodes: nodes, + }, nil } - for _, edge := range edges { - for _, node := range c.nodes { - if node.Id == edge.Source || node.Id == edge.Target { - nodes = appendNodeIfMissing(nodes, node) - } + if c.cache.topology.nodes == nil { + topology := getTopology(ctx, c.clusters) + if topology.nodes != nil { + c.cache.topologyLastFetch = time.Now() + c.cache.topology = topology } + + edges, nodes := generateTopology(topology, getApplicationsTopologyRequest.Clusters, getApplicationsTopologyRequest.Namespaces) + + return &clustersProto.GetApplicationsTopologyResponse{ + Edges: edges, + Nodes: nodes, + }, nil } + go func() { + topology := getTopology(ctx, c.clusters) + if topology.nodes != nil { + c.cache.topologyLastFetch = time.Now() + c.cache.topology = topology + } + }() + return &clustersProto.GetApplicationsTopologyResponse{ - Edges: edges, - Nodes: nodes, + Edges: c.cache.topology.edges, + Nodes: c.cache.topology.nodes, }, nil } @@ -508,21 +536,9 @@ func Load(config Config) (*Clusters, error) { } } - d, err := time.ParseDuration(cacheDurationNamespaces) - if err != nil { - return nil, err - } - - for _, c := range clusters { - c.SetOptions(d) - } - cs := &Clusters{ clusters: clusters, } - go cs.generateTopology() - go cs.generateTeams() - return cs, nil } diff --git a/pkg/api/plugins/clusters/teams.go b/pkg/api/plugins/clusters/teams.go index 9881df43c..76caef348 100644 --- a/pkg/api/plugins/clusters/teams.go +++ b/pkg/api/plugins/clusters/teams.go @@ -2,7 +2,9 @@ package clusters import ( "context" - "time" + + "github.com/kobsio/kobs/pkg/api/plugins/clusters/cluster" + teamProto "github.com/kobsio/kobs/pkg/api/plugins/team/proto" "github.com/sirupsen/logrus" ) @@ -22,75 +24,64 @@ type Application struct { Name string } -// generateTeams is used to generate a list of teams. This list contains the cluster, namespace and name where we can -// found the Team CR. It also contains the description and logo for the team, because we need them for the overview -// page. Last but not least it contains a list of applications, with the cluster, namespace and name, so that we can -// retrieve the complete Application CR on request. 
+// getTeams is used to generate a list of teams. This list contains the cluster, namespace and name where we can find
+// the Team CR. It also contains the description and logo for the team, because we need them for the overview page. Last
+// but not least it contains a list of applications, with the cluster, namespace and name, so that we can retrieve the
+// complete Application CR on request.
 // To generate this list we get all teams and applications from all clusters. Then we add the team to this list, when
 // the team with the same name doesn't already exists. This means the name of the Team CR must be unique across clusters
 // and namespaces. Finally we loop through all applications and check if the application contains the team name in the
 // teams property. If this is the case we add the application to the team.
-func (c *Clusters) generateTeams() {
-	sleep, err := time.ParseDuration(cacheDurationTeams)
-	if err != nil {
-		log.WithError(err).Errorf("Invalide cache duration for teams, use default cache duration of 60m")
-		sleep = time.Duration(60 * time.Minute)
-	}
+func getTeams(ctx context.Context, cs []*cluster.Cluster) []Team {
+	log.Tracef("Fetch teams")
 
-	for {
-		time.Sleep(60 * time.Second)
-		log.Infof("Generate teams")
-		ctx := context.Background()
+	var cachedTeams []Team
 
-		var cachedTeams []Team
+	for _, c := range cs {
+		clusterName := c.GetName()
 
-		for _, c := range c.clusters {
-			clusterName := c.GetName()
+		teams, err := c.GetTeams(ctx)
+		if err != nil {
+			log.WithError(err).WithFields(logrus.Fields{"cluster": clusterName}).Errorf("Could not get teams")
+			continue
+		}
 
-			teams, err := c.GetTeams(ctx)
-			if err != nil {
-				log.WithError(err).WithFields(logrus.Fields{"cluster": clusterName}).Errorf("Could not get teams")
-				continue
-			}
+		applications, err := c.GetApplications(ctx, "")
+		if err != nil {
+			log.WithError(err).WithFields(logrus.Fields{"cluster": clusterName}).Errorf("Could not get applications")
+			continue
+		}
 
-			applications, err := c.GetApplications(ctx, "")
-			if err != nil {
-				log.WithError(err).WithFields(logrus.Fields{"cluster": clusterName}).Errorf("Could not get applications")
+		for _, team := range teams {
+			// Skip the team if it already exists, because teams must be unique across clusters and namespaces.
 				continue
 			}
 
-			for _, team := range teams {
-				// Skip the team if it already exists, because teams must be unique accross clusters and namespaces.
-				if doesTeamExists(cachedTeams, team.Name) {
-					continue
-				}
 
-				var teamApplications []Application
-				for _, application := range applications {
-					if containsTeam(application.Teams, team.Name) {
-						teamApplications = append(teamApplications, Application{
-							Cluster:   application.Cluster,
-							Namespace: application.Namespace,
-							Name:      application.Name,
-						})
-					}
+			var teamApplications []Application
+			for _, application := range applications {
+				if containsTeam(application.Teams, team.Name) {
+					teamApplications = append(teamApplications, Application{
+						Cluster:   application.Cluster,
+						Namespace: application.Namespace,
+						Name:      application.Name,
+					})
 				}
-
-				cachedTeams = append(cachedTeams, Team{
-					Cluster:      team.Cluster,
-					Namespace:    team.Namespace,
-					Name:         team.Name,
-					Description:  team.Description,
-					Logo:         team.Logo,
-					Applications: teamApplications,
-				})
 			}
-		}
-
-		c.teams = cachedTeams
 
-		time.Sleep(sleep)
+			cachedTeams = append(cachedTeams, Team{
+				Cluster:      team.Cluster,
+				Namespace:    team.Namespace,
+				Name:         team.Name,
+				Description:  team.Description,
+				Logo:         team.Logo,
+				Applications: teamApplications,
+			})
+		}
 	}
+
+	log.WithFields(logrus.Fields{"teams": len(cachedTeams)}).Tracef("Fetched teams")
+	return cachedTeams
 }
 
 // doesTeamExists checks if the given team name exists in a slice of teams.
@@ -125,3 +116,21 @@
 
 	return nil
 }
+
+// transformCachedTeams converts the cached slice of teams to a slice of teams which can be used as the return value of
+// the GetTeams function.
+func transformCachedTeams(cachedTeams []Team) []*teamProto.Team {
+	var teams []*teamProto.Team
+
+	for _, team := range cachedTeams {
+		teams = append(teams, &teamProto.Team{
+			Name:        team.Name,
+			Description: team.Description,
+			Logo:        team.Logo,
+		})
+	}
+
+	log.WithFields(logrus.Fields{"count": len(teams)}).Tracef("GetTeams")
+
+	return teams
+}
diff --git a/pkg/api/plugins/clusters/topology.go b/pkg/api/plugins/clusters/topology.go
index 9bdbe82dd..56522be7c 100644
--- a/pkg/api/plugins/clusters/topology.go
+++ b/pkg/api/plugins/clusters/topology.go
@@ -2,98 +2,89 @@ package clusters
 
 import (
 	"context"
-	"time"
 
+	"github.com/kobsio/kobs/pkg/api/plugins/clusters/cluster"
 	clustersProto "github.com/kobsio/kobs/pkg/api/plugins/clusters/proto"
 
 	"github.com/sirupsen/logrus"
 )
 
-// generateTopology generates the topology for all applications. To generate the edges and nodes for the topology we
-// have to get all applications accross al configured clusters. Then we add each application as node and each dependency
-// of an application as edge to the topology.
+// getTopology generates the topology for all applications. To generate the edges and nodes for the topology we have to
+// get all applications across all configured clusters. Then we add each application as node and each dependency of an
+// application as edge to the topology.
 // We also set a parent "cluster-namespace" for each application, so that we can group applications by cluster and
 // namespace in the topology graph.
The corresponding clusters and namespaces are added in the "GetApplicationsTopology" // call -func (c *Clusters) generateTopology() { - sleep, err := time.ParseDuration(cacheDurationTopology) - if err != nil { - log.WithError(err).Errorf("Invalide cache duration for topology, use default cache duration of 60m") - sleep = time.Duration(60 * time.Minute) - } - - for { - time.Sleep(60 * time.Second) - log.Infof("Generate topology") - ctx := context.Background() +func getTopology(ctx context.Context, cs []*cluster.Cluster) Topology { + log.Tracef("Fetch topology") - var edges []*clustersProto.Edge - var nodes []*clustersProto.Node + var edges []*clustersProto.Edge + var nodes []*clustersProto.Node - for _, c := range c.clusters { - clusterName := c.GetName() + for _, c := range cs { + clusterName := c.GetName() - applications, err := c.GetApplications(ctx, "") - if err != nil { - log.WithError(err).WithFields(logrus.Fields{"cluster": clusterName}).Errorf("Could not get applications") - continue - } + applications, err := c.GetApplications(ctx, "") + if err != nil { + log.WithError(err).WithFields(logrus.Fields{"cluster": clusterName}).Errorf("Could not get applications") + continue + } - for _, application := range applications { - nodes = append(nodes, &clustersProto.Node{ - Id: clusterName + "-" + application.Namespace + "-" + application.Name, - Label: application.Name, - Type: "application", - Parent: clusterName + "-" + application.Namespace, - Cluster: clusterName, - Namespace: application.Namespace, - Name: application.Name, - }) + for _, application := range applications { + nodes = append(nodes, &clustersProto.Node{ + Id: clusterName + "-" + application.Namespace + "-" + application.Name, + Label: application.Name, + Type: "application", + Parent: clusterName + "-" + application.Namespace, + Cluster: clusterName, + Namespace: application.Namespace, + Name: application.Name, + }) + + for _, dependency := range application.Dependencies { + dependencyCluster := dependency.Cluster + if dependencyCluster == "" { + dependencyCluster = application.Cluster + } - for _, dependency := range application.Dependencies { - dependencyCluster := dependency.Cluster - if dependencyCluster == "" { - dependencyCluster = application.Cluster - } - - dependencyNamespace := dependency.Namespace - if dependencyNamespace == "" { - dependencyNamespace = application.Namespace - } - - dependencyName := dependency.Name - - edges = append(edges, &clustersProto.Edge{ - Label: application.Name + " → " + dependencyName, - Type: "dependency", - Source: clusterName + "-" + application.Namespace + "-" + application.Name, - SourceCluster: application.Cluster, - SourceNamespace: application.Namespace, - SourceName: application.Name, - Target: dependencyCluster + "-" + dependencyNamespace + "-" + dependencyName, - TargetCluster: dependencyCluster, - TargetNamespace: dependencyNamespace, - TargetName: dependencyName, - Description: dependency.Description, - }) + dependencyNamespace := dependency.Namespace + if dependencyNamespace == "" { + dependencyNamespace = application.Namespace } - } - } - // Loop through all edges and remove the edge, when the source or target node doesn't exists. This is needed, so - // that we only have edges were the source and target nodes exists, because the topology component in the React - // UI will crash when it founds an edge but no corresponding node. 
-		var filterEdges []*clustersProto.Edge
-		for _, edge := range edges {
-			if doesNodeExists(nodes, edge.Source) && doesNodeExists(nodes, edge.Target) {
-				filterEdges = append(filterEdges, edge)
+				dependencyName := dependency.Name
+
+				edges = append(edges, &clustersProto.Edge{
+					Label:           application.Name + " → " + dependencyName,
+					Type:            "dependency",
+					Source:          clusterName + "-" + application.Namespace + "-" + application.Name,
+					SourceCluster:   application.Cluster,
+					SourceNamespace: application.Namespace,
+					SourceName:      application.Name,
+					Target:          dependencyCluster + "-" + dependencyNamespace + "-" + dependencyName,
+					TargetCluster:   dependencyCluster,
+					TargetNamespace: dependencyNamespace,
+					TargetName:      dependencyName,
+					Description:     dependency.Description,
+				})
 			}
 		}
+	}
 
-		c.edges = filterEdges
-		c.nodes = nodes
+	// Loop through all edges and remove the edge, when the source or target node doesn't exist. This is needed, so
+	// that we only have edges where the source and target nodes exist, because the topology component in the React
+	// UI will crash when it finds an edge but no corresponding node.
+	var filterEdges []*clustersProto.Edge
+	for _, edge := range edges {
+		if doesNodeExists(nodes, edge.Source) && doesNodeExists(nodes, edge.Target) {
+			filterEdges = append(filterEdges, edge)
+		}
+	}
 
-		time.Sleep(sleep)
+	log.WithFields(logrus.Fields{"edges": len(filterEdges), "nodes": len(nodes)}).Tracef("Fetched topology")
+	return Topology{
+		edges: filterEdges,
+		nodes: nodes,
 	}
 }
 
@@ -129,3 +120,49 @@
 
 	return false
 }
+
+// generateTopology generates the topology chart for the requested clusters and namespaces.
+func generateTopology(topology Topology, clusters, namespaces []string) ([]*clustersProto.Edge, []*clustersProto.Node) {
+	var edges []*clustersProto.Edge
+	var nodes []*clustersProto.Node
+
+	for _, clusterName := range clusters {
+		nodes = append(nodes, &clustersProto.Node{
+			Id:        clusterName,
+			Label:     clusterName,
+			Type:      "cluster",
+			Parent:    "",
+			Cluster:   clusterName,
+			Namespace: "",
+			Name:      "",
+		})
+
+		for _, namespace := range namespaces {
+			nodes = append(nodes, &clustersProto.Node{
+				Id:        clusterName + "-" + namespace,
+				Label:     namespace,
+				Type:      "namespace",
+				Parent:    clusterName,
+				Cluster:   clusterName,
+				Namespace: namespace,
+				Name:      "",
+			})
+
+			for _, edge := range topology.edges {
+				if (edge.SourceCluster == clusterName && edge.SourceNamespace == namespace) || (edge.TargetCluster == clusterName && edge.TargetNamespace == namespace) {
+					edges = appendEdgeIfMissing(edges, edge)
+				}
+			}
+		}
+	}
+
+	for _, edge := range edges {
+		for _, node := range topology.nodes {
+			if node.Id == edge.Source || node.Id == edge.Target {
+				nodes = appendNodeIfMissing(nodes, node)
+			}
+		}
+	}
+
+	return edges, nodes
+}
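Illustrative sketch (not part of the patch itself): the request-time caching used above boils down to serving the cached data while the last fetch is younger than the configured cache duration, and fetching otherwise. The standalone Go program below shows that pattern with hypothetical names (teamsCache, get, fetch); the mutex only keeps the example safe for concurrent callers, and the background-refresh goroutine the patch starts once the cache has expired is omitted for brevity.

package main

import (
	"fmt"
	"sync"
	"time"
)

// teamsCache holds the last fetched teams and the time of that fetch. All
// names here are hypothetical and only illustrate the caching pattern.
type teamsCache struct {
	mu        sync.Mutex
	teams     []string
	lastFetch time.Time
}

// get returns the cached teams as long as they are younger than maxAge and
// fetches them otherwise, remembering the fetch time for the next call.
func (c *teamsCache) get(maxAge time.Duration, fetch func() []string) []string {
	c.mu.Lock()
	defer c.mu.Unlock()

	// Same check as in the patch: the cache is still valid when the last
	// fetch happened after "now - maxAge".
	if c.lastFetch.After(time.Now().Add(-1 * maxAge)) {
		return c.teams
	}

	c.teams = fetch()
	c.lastFetch = time.Now()
	return c.teams
}

func main() {
	var c teamsCache

	fetch := func() []string {
		fmt.Println("fetching teams")
		return []string{"team-a", "team-b"}
	}

	// The first call fetches the data, the second one is served from the cache.
	fmt.Println(c.get(time.Minute, fetch))
	fmt.Println(c.get(time.Minute, fetch))
}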