/*
Copyright 2015 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package lifecycle

import (
	"fmt"
	"io"
	"time"

	"github.com/golang/glog"

	"k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime/schema"
	"k8s.io/apimachinery/pkg/util/sets"
	"k8s.io/apiserver/pkg/admission"
	utilcache "k8s.io/apiserver/pkg/util/cache"
	"k8s.io/client-go/util/clock"
	"k8s.io/kubernetes/pkg/api"
	"k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
	informers "k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion"
	corelisters "k8s.io/kubernetes/pkg/client/listers/core/internalversion"
	kubeapiserveradmission "k8s.io/kubernetes/pkg/kubeapiserver/admission"
)

const (
	// PluginName is the name of this admission plug-in.
	PluginName = "NamespaceLifecycle"

	// forceLiveLookupTTL is how long a namespace stays in the force live lookup cache before expiration.
	forceLiveLookupTTL = 30 * time.Second

	// missingNamespaceWait is how long to wait for a missing namespace before re-checking the cache (and then doing a live lookup).
	// this accomplishes two things:
	// 1. It allows a watch-fed cache time to observe a namespace creation event
	// 2. It allows time for a namespace creation to distribute to members of a storage cluster,
	//    so the live lookup has a better chance of succeeding even if it isn't performed against the leader.
	missingNamespaceWait = 50 * time.Millisecond
)

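// init registers the plugin with the admission framework under PluginName,
// seeding it with the default set of immortal namespaces.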
func init() {
	admission.RegisterPlugin(PluginName, func(config io.Reader) (admission.Interface, error) {
		return NewLifecycle(sets.NewString(metav1.NamespaceDefault, metav1.NamespaceSystem, metav1.NamespacePublic))
	})
}

// lifecycle is an implementation of admission.Interface.
// It enforces life-cycle constraints around a Namespace depending on its Phase.
type lifecycle struct {
	*admission.Handler
	client             internalclientset.Interface
	immortalNamespaces sets.String
	namespaceLister    corelisters.NamespaceLister
	// forceLiveLookupCache holds a list of entries for namespaces that we have a strong reason to believe are stale in our local cache.
	// if a namespace is in this cache, then we will ignore our local state and always fetch latest from api server.
	forceLiveLookupCache *utilcache.LRUExpireCache
}

type forceLiveLookupEntry struct {
	expiry time.Time
}

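// Compile-time checks that *lifecycle accepts informer factory and client set injection.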
var _ = kubeapiserveradmission.WantsInternalKubeInformerFactory(&lifecycle{})
var _ = kubeapiserveradmission.WantsInternalKubeClientSet(&lifecycle{})
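
// makeNamespaceKey builds a minimal Namespace object carrying only the given name,
// suitable for use as a lookup key.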
func makeNamespaceKey(namespace string) *api.Namespace {
	return &api.Namespace{
		ObjectMeta: metav1.ObjectMeta{
			Name:      namespace,
			Namespace: "",
		},
	}
}

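// Admit enforces namespace lifecycle rules: it rejects deletion of immortal namespaces
// and refuses to create new content in namespaces that do not exist or are terminating.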
func (l *lifecycle) Admit(a admission.Attributes) error {
	// prevent deletion of immortal namespaces
	if a.GetOperation() == admission.Delete && a.GetKind().GroupKind() == api.Kind("Namespace") && l.immortalNamespaces.Has(a.GetName()) {
		return errors.NewForbidden(a.GetResource().GroupResource(), a.GetName(), fmt.Errorf("this namespace may not be deleted"))
	}
	// if we're here, then we've already passed authentication, so we're allowed to do what we're trying to do
	// if we're here, then the API server has found a route, which means that if we have a non-empty namespace
	// it's a namespaced resource.
	if len(a.GetNamespace()) == 0 || a.GetKind().GroupKind() == api.Kind("Namespace") {
		// if a namespace is deleted, we want to prevent all further creates into it
		// while it is undergoing termination. to reduce incidences where the cache
		// is slow to update, we add the namespace into a force live lookup list to ensure
		// we are not looking at stale state.
		if a.GetOperation() == admission.Delete {
			l.forceLiveLookupCache.Add(a.GetName(), true, forceLiveLookupTTL)
		}
		return nil
	}
	// always allow access review checks. Returning status about the namespace would be leaking information
	if isAccessReview(a) {
		return nil
	}

	// we need to wait for our caches to warm
	if !l.WaitForReady() {
		return admission.NewForbidden(a, fmt.Errorf("not yet ready to handle request"))
	}
	var (
		exists bool
		err    error
	)

	namespace, err := l.namespaceLister.Get(a.GetNamespace())
	if err != nil {
		if !errors.IsNotFound(err) {
			return errors.NewInternalError(err)
		}
	} else {
		exists = true
	}
	if !exists && a.GetOperation() == admission.Create {
		// give the cache time to observe the namespace before rejecting a create.
		// this helps when creating a namespace and immediately creating objects within it.
		time.Sleep(missingNamespaceWait)
		namespace, err = l.namespaceLister.Get(a.GetNamespace())
		switch {
		case errors.IsNotFound(err):
			// no-op
		case err != nil:
			return errors.NewInternalError(err)
		default:
			exists = true
		}
		if exists {
			glog.V(4).Infof("found %s in cache after waiting", a.GetNamespace())
		}
	}
	// forceLiveLookup if true will skip looking at local cache state and instead always make a live call to server.
	forceLiveLookup := false
	if _, ok := l.forceLiveLookupCache.Get(a.GetNamespace()); ok {
		// we think the namespace was marked for deletion, but our local cache says it is still active;
		// in that case, force a live lookup rather than trust potentially stale state.
		forceLiveLookup = exists && namespace.Status.Phase == api.NamespaceActive
	}
	// refuse to operate on non-existent namespaces
	if !exists || forceLiveLookup {
		// as a last resort, make a call directly to storage
		namespace, err = l.client.Core().Namespaces().Get(a.GetNamespace(), metav1.GetOptions{})
		switch {
		case errors.IsNotFound(err):
			return err
		case err != nil:
			return errors.NewInternalError(err)
		}

		glog.V(4).Infof("found %s via storage lookup", a.GetNamespace())
	}
	// ensure that we're not trying to create objects in terminating namespaces
	if a.GetOperation() == admission.Create {
		if namespace.Status.Phase != api.NamespaceTerminating {
			return nil
		}

		// TODO: This should probably not be a 403
		return admission.NewForbidden(a, fmt.Errorf("unable to create new content in namespace %s because it is being terminated.", a.GetNamespace()))
	}

	return nil
}

// NewLifecycle creates a new namespace lifecycle admission control handler.
func NewLifecycle(immortalNamespaces sets.String) (admission.Interface, error) {
	return newLifecycleWithClock(immortalNamespaces, clock.RealClock{})
}

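// newLifecycleWithClock is like NewLifecycle, but allows injecting a clock so that
// expiry of the force live lookup cache can be controlled, for example in tests.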
func newLifecycleWithClock(immortalNamespaces sets.String, clock utilcache.Clock) (admission.Interface, error) {
	forceLiveLookupCache := utilcache.NewLRUExpireCacheWithClock(100, clock)
	return &lifecycle{
		Handler:              admission.NewHandler(admission.Create, admission.Update, admission.Delete),
		immortalNamespaces:   immortalNamespaces,
		forceLiveLookupCache: forceLiveLookupCache,
	}, nil
}

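// SetInternalKubeInformerFactory wires in the namespace lister and marks the
// plugin ready once the namespace informer has synced.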
func (l *lifecycle) SetInternalKubeInformerFactory(f informers.SharedInformerFactory) {
	namespaceInformer := f.Core().InternalVersion().Namespaces()
	l.namespaceLister = namespaceInformer.Lister()
	l.SetReadyFunc(namespaceInformer.Informer().HasSynced)
}

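// SetInternalKubeClientSet injects the client used for live namespace lookups.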
func (l *lifecycle) SetInternalKubeClientSet(client internalclientset.Interface) {
	l.client = client
}

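// Validate ensures that both the namespace lister and the client have been wired in.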
func (l *lifecycle) Validate() error {
	if l.namespaceLister == nil {
		return fmt.Errorf("missing namespaceLister")
	}
	if l.client == nil {
		return fmt.Errorf("missing client")
	}
	return nil
}

// accessReviewResources are resources which give a view into permissions in a namespace. Users must be allowed to create these
// resources because returning "not found" errors allows someone to search for the "people I'm going to fire in 2017" namespace.
var accessReviewResources = map[schema.GroupResource]bool{
	schema.GroupResource{Group: "authorization.k8s.io", Resource: "localsubjectaccessreviews"}: true,
}

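// isAccessReview reports whether the request targets one of the access review
// resources that are admitted regardless of namespace state.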
func isAccessReview(a admission.Attributes) bool {
	return accessReviewResources[a.GetResource().GroupResource()]
}