cachesize.go

/*
Copyright 2014 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

// Package cachesize makes the watch cache size of resources configurable
// via the --watch-cache-sizes flag of kube-apiserver.
package cachesize

import (
	"strconv"
	"strings"

	"github.com/golang/glog"

	"k8s.io/apiserver/pkg/registry/generic/registry"
)
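
// Resource identifies a resource whose watch cache size can be configured,
// by the (mostly lowercase, plural) name used in the constants below.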
type Resource string

const (
	APIServices                Resource = "apiservices"
	CertificateSigningRequests Resource = "certificatesigningrequests"
	ClusterRoles               Resource = "clusterroles"
	ClusterRoleBindings        Resource = "clusterrolebindings"
	ConfigMaps                 Resource = "configmaps"
	Controllers                Resource = "controllers"
	Daemonsets                 Resource = "daemonsets"
	Deployments                Resource = "deployments"
	Endpoints                  Resource = "endpoints"
	HorizontalPodAutoscalers   Resource = "horizontalpodautoscalers"
	Ingress                    Resource = "ingress"
	PodDisruptionBudget        Resource = "poddisruptionbudgets"
	StatefulSet                Resource = "statefulset"
	Jobs                       Resource = "jobs"
	LimitRanges                Resource = "limitranges"
	Namespaces                 Resource = "namespaces"
	NetworkPolicys             Resource = "networkpolicies"
	Nodes                      Resource = "nodes"
	PersistentVolumes          Resource = "persistentvolumes"
	PersistentVolumeClaims     Resource = "persistentvolumeclaims"
	Pods                       Resource = "pods"
	PodSecurityPolicies        Resource = "podsecuritypolicies"
	PodTemplates               Resource = "podtemplates"
	Replicasets                Resource = "replicasets"
	ResourceQuotas             Resource = "resourcequotas"
	CronJobs                   Resource = "cronjobs"
	Roles                      Resource = "roles"
	RoleBindings               Resource = "rolebindings"
	Secrets                    Resource = "secrets"
	ServiceAccounts            Resource = "serviceaccounts"
	Services                   Resource = "services"
	StorageClasses             Resource = "storageclasses"
)

// TODO: This shouldn't be a global variable.
var watchCacheSizes map[Resource]int

func init() {
	watchCacheSizes = make(map[Resource]int)
}
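
// InitializeWatchCacheSizes seeds the watch cache sizes of selected resources
// from the expected RAM capacity (in MB) of the machine running kube-apiserver.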
func InitializeWatchCacheSizes(expectedRAMCapacityMB int) {
	// This heuristic tries to infer the maximum number of nodes in the
	// cluster from the memory capacity and sets cache sizes based on that
	// value.
	// From our documentation, we officially recommend 120GB machines for
	// 2000 nodes, and we scale from that point. Thus we assume ~60MB of
	// capacity per node.
	// TODO: Revisit this heuristic.
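	// As a worked example of the heuristic: a machine with 120000 MB of RAM
	// gives clusterSize = 120000/60 = 2000, which sizes the pods watch cache
	// below at 50*2000 = 100000 entries.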
	clusterSize := expectedRAMCapacityMB / 60

	// We should specify the cache size for a given resource only if it is
	// supposed to have a non-default value.
	//
	// TODO: Figure out which resources should have non-default values.
	watchCacheSizes[Controllers] = maxInt(5*clusterSize, 100)
	watchCacheSizes[Endpoints] = maxInt(10*clusterSize, 1000)
	watchCacheSizes[Nodes] = maxInt(5*clusterSize, 1000)
	watchCacheSizes[Pods] = maxInt(50*clusterSize, 1000)
	watchCacheSizes[Services] = maxInt(5*clusterSize, 1000)
	watchCacheSizes[APIServices] = maxInt(5*clusterSize, 1000)
}
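
// SetWatchCacheSizes applies per-resource overrides given as "resource#size"
// entries (for example "pods#1000"); malformed entries are logged and skipped.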
func SetWatchCacheSizes(cacheSizes []string) {
	for _, c := range cacheSizes {
		tokens := strings.Split(c, "#")
		if len(tokens) != 2 {
			glog.Errorf("invalid value of watch cache size: %s", c)
			continue
		}
		size, err := strconv.Atoi(tokens[1])
		if err != nil {
			glog.Errorf("invalid size in watch cache size entry %s: %v", c, err)
			continue
		}
		watchCacheSizes[Resource(strings.ToLower(tokens[0]))] = size
	}
}
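
// GetWatchCacheSizeByResource returns the configured watch cache size for the
// given resource, or registry.DefaultWatchCacheSize if none was set.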
// TODO: this should use schema.GroupResource for lookups.
func GetWatchCacheSizeByResource(resource string) int {
	if value, found := watchCacheSizes[Resource(resource)]; found {
		return value
	}
	return registry.DefaultWatchCacheSize
}
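
// maxInt returns the larger of a and b.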
func maxInt(a, b int) int {
	if a > b {
		return a
	}
	return b
}