-
Notifications
You must be signed in to change notification settings - Fork 2.9k
/
resolve.go
304 lines (263 loc) · 10.4 KB
/
resolve.go
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package policy
import (
"github.com/sirupsen/logrus"
"github.com/cilium/cilium/pkg/identity"
"github.com/cilium/cilium/pkg/policy/trafficdirection"
)
// selectorPolicy is a structure which contains the resolved policy for a
// particular Identity across all layers (L3, L4, and L7), with the policy
// still determined in terms of EndpointSelectors.
//
// All Endpoints sharing the same Identity share one selectorPolicy; it is
// distilled into per-endpoint map state by DistillPolicy.
type selectorPolicy struct {
	// Revision is the revision of the policy repository used to generate
	// this selectorPolicy.
	Revision uint64

	// SelectorCache managing selectors in L4Policy.
	SelectorCache *SelectorCache

	// L4Policy contains the computed L4 and L7 policy.
	L4Policy L4Policy

	// IngressPolicyEnabled specifies whether this policy contains any policy
	// at ingress.
	IngressPolicyEnabled bool

	// EgressPolicyEnabled specifies whether this policy contains any policy
	// at egress.
	EgressPolicyEnabled bool
}
// Attach forwards the attach call to the contained L4Policy, passing the
// given PolicyContext through to it.
func (p *selectorPolicy) Attach(ctx PolicyContext) {
	p.L4Policy.Attach(ctx)
}
// EndpointPolicy is a structure which contains the resolved policy across all
// layers (L3, L4, and L7), distilled against a set of identities.
type EndpointPolicy struct {
	// Note that all Endpoints sharing the same identity will be
	// referring to a shared selectorPolicy!
	*selectorPolicy

	// policyMapState contains the state of this policy as it relates to the
	// datapath. In the future, this will be factored out of this object to
	// decouple the policy as it relates to the datapath vs. its userspace
	// representation.
	// It maps each Key to the proxy port if proxy redirection is needed.
	// Proxy port 0 indicates no proxy redirection.
	// All fields within the Key and the proxy port must be in host byte-order.
	// Must only be accessed with PolicyOwner (aka Endpoint) lock taken.
	policyMapState MapState

	// policyMapChanges collects pending changes to the PolicyMapState;
	// they are transferred to the caller via ConsumeMapChanges.
	policyMapChanges MapChanges

	// PolicyOwner describes any type which consumes this EndpointPolicy object.
	PolicyOwner PolicyOwner
}
// PolicyOwner is anything which consumes a EndpointPolicy (in practice, an
// Endpoint).
type PolicyOwner interface {
	// GetID returns the owner's numeric identifier.
	GetID() uint64
	// LookupRedirectPortBuildLocked returns the currently allocated proxy
	// port for the given direction/protocol/port, or 0 if none is allocated.
	// NOTE(review): "BuildLocked" suggests the owner's build mutex must be
	// held — confirm against the implementer.
	LookupRedirectPortBuildLocked(ingress bool, protocol string, port uint16) uint16
	// HasBPFPolicyMap reports whether the owner has a BPF policy map.
	HasBPFPolicyMap() bool
	// GetNamedPort resolves a named port to a numeric port for the given
	// direction and protocol.
	GetNamedPort(ingress bool, name string, proto uint8) uint16
	// PolicyDebug emits a debug log message with structured fields.
	PolicyDebug(fields logrus.Fields, msg string)
}
// newSelectorPolicy returns an empty selectorPolicy stub.
func newSelectorPolicy(selectorCache *SelectorCache) *selectorPolicy {
	// Start from the zero revision with an empty L4 policy; the caller fills
	// in the real policy later.
	sp := &selectorPolicy{}
	sp.Revision = 0
	sp.SelectorCache = selectorCache
	sp.L4Policy = NewL4Policy(0)
	return sp
}
// insertUser adds a user to the L4Policy so that incremental
// updates of the L4Policy may be forwarded.
func (p *selectorPolicy) insertUser(user *EndpointPolicy) {
	p.L4Policy.insertUser(user)
}
// removeUser removes a user from the L4Policy so the EndpointPolicy
// can be freed when not needed any more.
func (p *selectorPolicy) removeUser(user *EndpointPolicy) {
	p.L4Policy.removeUser(user)
}
// Detach releases resources held by a selectorPolicy to enable
// successful eventual GC. Note that the selectorPolicy itself is not
// modified in any way, so that it can be used concurrently.
func (p *selectorPolicy) Detach() {
	p.L4Policy.Detach(p.SelectorCache)
}
// DistillPolicy filters down the specified selectorPolicy (which acts
// upon selectors) into a set of concrete map entries based on the
// SelectorCache. These can subsequently be plumbed into the datapath.
//
// Called without holding the Selector cache or Repository locks.
// PolicyOwner (aka Endpoint) is also unlocked during this call,
// but the Endpoint's build mutex is held.
func (p *selectorPolicy) DistillPolicy(policyOwner PolicyOwner, isHost bool) *EndpointPolicy {
	calculatedPolicy := &EndpointPolicy{
		selectorPolicy: p,
		policyMapState: NewMapState(nil),
		PolicyOwner:    policyOwner,
	}

	// When a direction has no policy at all, it defaults to allow-all for
	// that direction.
	if !p.IngressPolicyEnabled || !p.EgressPolicyEnabled {
		calculatedPolicy.policyMapState.allowAllIdentities(
			!p.IngressPolicyEnabled, !p.EgressPolicyEnabled)
	}

	// Register the new EndpointPolicy as a receiver of delta
	// updates. Any updates happening after this, but before
	// computeDesiredL4PolicyMapEntries() call finishes may
	// already be applied to the PolicyMapState, specifically:
	//
	// - policyMapChanges may contain an addition of an entry that
	//   is already added to the PolicyMapState
	//
	// - policyMapChanges may contain a deletion of an entry that
	//   has already been deleted from PolicyMapState
	p.insertUser(calculatedPolicy)

	// Must come after the 'insertUser()' above to guarantee
	// PolicyMapChanges will contain all changes that are applied
	// after the computation of PolicyMapState has started.
	p.SelectorCache.mutex.RLock()
	calculatedPolicy.toMapState()
	if !isHost {
		calculatedPolicy.policyMapState.determineAllowLocalhostIngress()
	}
	p.SelectorCache.mutex.RUnlock()

	return calculatedPolicy
}
// GetPolicyMap gets the policy map state as the interface
// MapState.
func (p *EndpointPolicy) GetPolicyMap() MapState {
	return p.policyMapState
}
// SetPolicyMap sets the policy map state as the interface
// MapState. If the main argument is nil, then this method
// will initialize a new MapState object for the caller.
func (p *EndpointPolicy) SetPolicyMap(ms MapState) {
	// A nil argument means "reset to an empty map state".
	if ms != nil {
		p.policyMapState = ms
		return
	}
	p.policyMapState = NewMapState(nil)
}
// Detach removes EndpointPolicy references from selectorPolicy
// to allow the EndpointPolicy to be GC'd.
// PolicyOwner (aka Endpoint) is also locked during this call.
func (p *EndpointPolicy) Detach() {
	p.selectorPolicy.removeUser(p)
}
// toMapState transforms the EndpointPolicy.L4Policy into
// the datapath-friendly format inside EndpointPolicy.PolicyMapState.
// Called with selectorcache locked for reading.
// Called without holding the Repository lock.
// PolicyOwner (aka Endpoint) is also unlocked during this call,
// but the Endpoint's build mutex is held.
func (p *EndpointPolicy) toMapState() {
	// Distill both directions into the shared policyMapState.
	p.L4Policy.Ingress.toMapState(p)
	p.L4Policy.Egress.toMapState(p)
}
// toMapState transforms the L4DirectionPolicy into
// the datapath-friendly format inside EndpointPolicy.PolicyMapState.
// Called with selectorcache locked for reading.
// Called without holding the Repository lock.
// PolicyOwner (aka Endpoint) is also unlocked during this call,
// but the Endpoint's build mutex is held.
func (l4policy L4DirectionPolicy) toMapState(p *EndpointPolicy) {
	for _, l4 := range l4policy.PortRules {
		// The proxy port is looked up lazily, at most once per filter, and
		// cached in 'proxyport' for every entry generated by this filter.
		lookupDone := false
		proxyport := uint16(0)
		l4.toMapState(p, l4policy.features, func(keyFromFilter Key, entry *MapStateEntry) bool {
			// Fix up the proxy port for entries that need proxy redirection
			if entry.IsRedirectEntry() {
				if !lookupDone {
					// only lookup once for each filter
					// Use 'destPort' from the key as it is already resolved
					// from a named port if needed.
					proxyport = p.PolicyOwner.LookupRedirectPortBuildLocked(l4.Ingress, string(l4.Protocol), keyFromFilter.DestPort)
					lookupDone = true
				}
				entry.ProxyPort = proxyport
				// If the currently allocated proxy port is 0, this is a new
				// redirect, for which no port has been allocated yet. Ignore
				// it for now. This will be configured by
				// UpdateRedirects() once the port has been allocated.
				// NOTE(review): this assumes IsRedirectEntry() reports
				// ProxyPort != 0, so assigning a zero lookup result above
				// flips it to false here — confirm against MapStateEntry.
				if !entry.IsRedirectEntry() {
					return false
				}
			}
			return true
		}, ChangeState{})
	}
}
// getProxyPortFunc returns the proxy port to use for the given L4Filter;
// 'ok' is false when no port could be obtained.
type getProxyPortFunc func(*L4Filter) (proxyPort uint16, ok bool)
// UpdateRedirects updates redirects in the EndpointPolicy's PolicyMapState by using the provided
// function to obtain a proxy port number to use. Changes to 'p.PolicyMapState' are collected in
// 'adds' and 'updated' so that they can be reverted when needed.
//
// 'ingress' selects which direction's policy is updated: true for Ingress,
// false for Egress.
func (p *EndpointPolicy) UpdateRedirects(ingress bool, getProxyPort getProxyPortFunc, changes ChangeState) {
	// BUG FIX: the direction selection was inverted — the Egress policy was
	// chosen when 'ingress' was true. Default to Ingress and switch to
	// Egress only when 'ingress' is false.
	l4policy := &p.L4Policy.Ingress
	if !ingress {
		l4policy = &p.L4Policy.Egress
	}
	l4policy.updateRedirects(p, getProxyPort, changes)
}
// updateRedirects applies the proxy port obtained via 'getProxyPort' to every
// redirect entry generated by the redirect filters in this direction's
// PortRules. Filters whose L4 is denied (when deny rules are present) and
// filters for which no proxy port is available are skipped. Resulting map
// changes are collected in 'changes'.
func (l4policy L4DirectionPolicy) updateRedirects(p *EndpointPolicy, getProxyPort getProxyPortFunc, changes ChangeState) {
	// Selectorcache needs to be locked for toMapState (GetLabels()) call
	p.SelectorCache.mutex.RLock()
	defer p.SelectorCache.mutex.RUnlock()

	for _, l4 := range l4policy.PortRules {
		if l4.IsRedirect() {
			// Check if we are denying this specific L4 first regardless the L3, if there are any deny policies
			if l4policy.features.contains(denyRules) && p.policyMapState.deniesL4(p.PolicyOwner, l4) {
				continue
			}

			// No port allocated (yet) for this filter; leave it for a later
			// update round.
			redirectPort, ok := getProxyPort(l4)
			if !ok {
				continue
			}

			// Set the proxy port in the policy map.
			l4.toMapState(p, l4policy.features, func(_ Key, entry *MapStateEntry) bool {
				if entry.IsRedirectEntry() {
					entry.ProxyPort = redirectPort
				}
				return true
			}, changes)
		}
	}
}
// ConsumeMapChanges transfers the changes from MapChanges to the caller,
// locking the selector cache to make sure concurrent identity updates
// have completed.
// PolicyOwner (aka Endpoint) is also locked during this call.
func (p *EndpointPolicy) ConsumeMapChanges() (adds, deletes Keys) {
	p.selectorPolicy.SelectorCache.mutex.Lock()
	defer p.selectorPolicy.SelectorCache.mutex.Unlock()
	// Combine the feature flags of both directions so the consumer sees
	// everything that may apply to the pending changes.
	features := p.selectorPolicy.L4Policy.Ingress.features | p.selectorPolicy.L4Policy.Egress.features
	return p.policyMapChanges.consumeMapChanges(p.policyMapState, features, p.SelectorCache)
}
// AllowsIdentity returns whether the specified policy allows
// ingress and egress traffic for the specified numeric security identity.
// If the 'secID' is zero, it will check if all traffic is allowed.
//
// Returning true for either return value indicates all traffic is allowed.
func (p *EndpointPolicy) AllowsIdentity(identity identity.NumericIdentity) (ingress, egress bool) {
	key := Key{
		Identity: uint32(identity),
	}

	// allowedAt reports whether the map state holds a non-deny entry for the
	// given traffic direction.
	allowedAt := func(dir uint8) bool {
		key.TrafficDirection = dir
		v, exists := p.policyMapState.Get(key)
		return exists && !v.IsDeny
	}

	// A direction with no policy enforcement allows all traffic; otherwise
	// consult the policy map for this identity.
	ingress = !p.IngressPolicyEnabled || allowedAt(trafficdirection.Ingress.Uint8())
	egress = !p.EgressPolicyEnabled || allowedAt(trafficdirection.Egress.Uint8())

	return ingress, egress
}
// NewEndpointPolicy returns an empty EndpointPolicy stub.
func NewEndpointPolicy(repo *Repository) *EndpointPolicy {
	// Wire up a fresh selector policy backed by the repository's selector
	// cache, plus an empty map state.
	ep := &EndpointPolicy{}
	ep.selectorPolicy = newSelectorPolicy(repo.GetSelectorCache())
	ep.policyMapState = NewMapState(nil)
	return ep
}