/
cache.go
122 lines (109 loc) · 3.33 KB
/
cache.go
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
package kubeinstall
import (
"context"
"sync"
"github.com/wx-chevalier/go-utils/installutils/kuberesource"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/client-go/rest"
)
/*
Cache contains a snapshot of all installed resources.
Starts with a snapshot of everything in cluster.
Warning: takes about 30-45s (in testing) to initialize this cache
*/
type Cache struct {
	// access guards resources. It is created in the locked state by
	// NewCache() and released by Init() once the initial snapshot is loaded.
	access sync.RWMutex
	// resources is the cached snapshot, indexed by resource key.
	// Nil until Init() runs — writes before Init() would panic.
	resources kuberesource.UnstructuredResourcesByKey
}
// NewCache returns a Cache whose lock starts held; Init() must be called to
// populate the cache and release the lock before any other method is used.
func NewCache() *Cache {
	// Lock the mutex in place on the returned struct. The previous version
	// locked a local RWMutex and copied it into the struct literal, which
	// copies a locked lock (flagged by `go vet` copylocks).
	c := &Cache{}
	c.access.Lock()
	return c
}
// getClusterResources lists the cluster's current resources (subject to
// filterFuncs) and keeps only those that getInstalledResource accepts,
// rewriting each kept resource in place with its installed representation.
func (c *Cache) getClusterResources(ctx context.Context, cfg *rest.Config,
	filterFuncs ...kuberesource.FilterResource) (kuberesource.UnstructuredResources, error) {
	clusterState, err := kuberesource.GetClusterResources(ctx, cfg, filterFuncs...)
	if err != nil {
		return nil, err
	}
	// Filter drops any resource for which the callback returns true.
	installed := clusterState.Filter(func(res *unstructured.Unstructured) bool {
		converted, convErr := getInstalledResource(res)
		if convErr != nil {
			// Not recognized as an installed resource — exclude it.
			return true
		}
		// Normalize the kept resource in place to its installed form.
		*res = *converted
		return false
	})
	return installed, nil
}
/*
Init initializes the cache with a snapshot of the current cluster and
releases the lock taken by NewCache(). Call it exactly once per Cache.
Returns the error from the cluster scan, if any; the lock is released
either way so readers are not deadlocked.
*/
func (c *Cache) Init(ctx context.Context, cfg *rest.Config, filterFuncs ...kuberesource.FilterResource) error {
	// unlock cache after sync is complete
	defer c.access.Unlock()
	// Seed a non-nil map first: if the scan below fails, c.resources would
	// otherwise stay nil and a later Set() would panic writing to a nil map.
	c.resources = make(kuberesource.UnstructuredResourcesByKey)
	currentResources, err := c.getClusterResources(ctx, cfg, filterFuncs...)
	if err != nil {
		return err
	}
	c.resources = currentResources.ByKey()
	return nil
}
/*
Refresh replaces the cached snapshot with the cluster's current state.
The write lock is held only while swapping in the new snapshot, so reads
may proceed during the (slow) cluster scan.
*/
func (c *Cache) Refresh(ctx context.Context, cfg *rest.Config, filterFuncs ...kuberesource.FilterResource) error {
	snapshot, err := c.getClusterResources(ctx, cfg, filterFuncs...)
	if err != nil {
		return err
	}
	c.access.Lock()
	c.resources = snapshot.ByKey()
	c.access.Unlock()
	return nil
}
// List returns all cached resources as a flat list.
func (c *Cache) List() kuberesource.UnstructuredResources {
	c.access.RLock()
	all := c.resources.List()
	c.access.RUnlock()
	return all
}
// Get returns the cached resource for key, or nil if none is cached.
func (c *Cache) Get(key kuberesource.ResourceKey) *unstructured.Unstructured {
	c.access.RLock()
	res := c.resources[key]
	c.access.RUnlock()
	return res
}
// Set inserts or overwrites obj in the cache, keyed by kuberesource.Key.
// NOTE(review): writes to the map — panics if called before Init() has
// populated it; confirm callers always Init first.
func (c *Cache) Set(obj *unstructured.Unstructured) {
	key := kuberesource.Key(obj)
	c.access.Lock()
	c.resources[key] = obj
	c.access.Unlock()
}
// Delete removes obj from the cache, keyed by kuberesource.Key.
// Deleting a key that is absent is a no-op.
func (c *Cache) Delete(obj *unstructured.Unstructured) {
	key := kuberesource.Key(obj)
	c.access.Lock()
	delete(c.resources, key)
	c.access.Unlock()
}
/*
DefaultFilters speeds up the cache init by filtering out resource types the
installer never manages (see ignoreTypesForInstall). A filter returning true
excludes that resource type from the scan.
*/
var DefaultFilters = []kuberesource.FilterResource{
	func(resource schema.GroupVersionResource) bool {
		for _, ignoredType := range ignoreTypesForInstall {
			// GroupVersionResource is a comparable struct of three strings;
			// direct equality is equivalent to comparing String() output and
			// avoids building two strings per comparison.
			if resource == ignoredType {
				return true
			}
		}
		return false
	},
}
// types the installer should ignore and the cache should skip
// (high-churn or cluster-managed types that are never installed directly).
var ignoreTypesForInstall = []schema.GroupVersionResource{
	// core/v1 types
	{Resource: "events", Version: "v1", Group: ""},
	{Resource: "endpoints", Version: "v1", Group: ""},
	{Resource: "nodes", Version: "v1", Group: ""},
	// aggregated API registrations (both served versions)
	{Resource: "apiservices", Version: "v1beta1", Group: "apiregistration.k8s.io"},
	{Resource: "apiservices", Version: "v1", Group: "apiregistration.k8s.io"},
	// events.k8s.io variant of events
	{Resource: "events", Version: "v1beta1", Group: "events.k8s.io"},
}