Disable peering queries by default in proxycfg #17731

Merged · 1 commit · Jun 14, 2023
7 changes: 7 additions & 0 deletions .changelog/17731.txt
@@ -0,0 +1,7 @@
```release-note:breaking-change
connect: Disable peering by default in connect proxies for Consul 1.13. This change was made to prevent inefficient polling
queries from having a negative impact on server performance. Peering in Consul 1.13 is an experimental feature and is not
recommended for use in production environments. If you still wish to use the experimental peering feature, ensure
[`peering.enabled = true`](https://developer.hashicorp.com/consul/docs/v1.13.x/agent/config/config-files#peering_enabled)
is set on all clients and servers.
```
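For operators who still want the experimental feature after upgrading, the flag has to be present in the agent configuration of every server and client. A minimal sketch of the relevant stanza (HCL agent config, per the `peering.enabled` setting linked above):

```hcl
# Opt back in to the experimental cluster-peering feature on Consul 1.13.
# With this change, connect proxies skip all peering watches unless the
# agents they register with are configured with enabled = true.
peering {
  enabled = true
}
```

The JSON form of the same setting is `{"peering": {"enabled": true}}`; either way it must be set on all clients and servers, not only the servers.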
1 change: 1 addition & 0 deletions agent/agent.go
@@ -646,6 +646,7 @@ func (a *Agent) Start(ctx context.Context) error {
},
TLSConfigurator: a.tlsConfigurator,
IntentionDefaultAllow: intentionDefaultAllow,
PeeringEnabled: a.config.PeeringEnabled,
})
if err != nil {
return err
44 changes: 24 additions & 20 deletions agent/proxycfg/connect_proxy.go
@@ -46,16 +46,18 @@ func (s *handlerConnectProxy) initialize(ctx context.Context) (ConfigSnapshot, e
return snap, err
}

err = s.dataSources.TrustBundleList.Notify(ctx, &cachetype.TrustBundleListRequest{
Request: &pbpeering.TrustBundleListByServiceRequest{
ServiceName: s.proxyCfg.DestinationServiceName,
Namespace: s.proxyID.NamespaceOrDefault(),
Partition: s.proxyID.PartitionOrDefault(),
},
QueryOptions: structs.QueryOptions{Token: s.token},
}, peeringTrustBundlesWatchID, s.ch)
if err != nil {
return snap, err
if s.peeringEnabled {
err = s.dataSources.TrustBundleList.Notify(ctx, &cachetype.TrustBundleListRequest{
Request: &pbpeering.TrustBundleListByServiceRequest{
ServiceName: s.proxyCfg.DestinationServiceName,
Namespace: s.proxyID.NamespaceOrDefault(),
Partition: s.proxyID.PartitionOrDefault(),
},
QueryOptions: structs.QueryOptions{Token: s.token},
}, peeringTrustBundlesWatchID, s.ch)
if err != nil {
return snap, err
}
}

// Watch the leaf cert
@@ -112,13 +114,15 @@ func (s *handlerConnectProxy) initialize(ctx context.Context) (ConfigSnapshot, e
if err != nil {
return snap, err
}
err = s.dataSources.PeeredUpstreams.Notify(ctx, &structs.PartitionSpecificRequest{
QueryOptions: structs.QueryOptions{Token: s.token},
Datacenter: s.source.Datacenter,
EnterpriseMeta: s.proxyID.EnterpriseMeta,
}, peeredUpstreamsID, s.ch)
if err != nil {
return snap, err
if s.peeringEnabled {
err = s.dataSources.PeeredUpstreams.Notify(ctx, &structs.PartitionSpecificRequest{
QueryOptions: structs.QueryOptions{Token: s.token},
Datacenter: s.source.Datacenter,
EnterpriseMeta: s.proxyID.EnterpriseMeta,
}, peeredUpstreamsID, s.ch)
if err != nil {
return snap, err
}
}
// We also infer upstreams from destinations (egress points)
err = s.dataSources.IntentionUpstreamsDestination.Notify(ctx, &structs.ServiceSpecificRequest{
@@ -194,7 +198,7 @@ func (s *handlerConnectProxy) initialize(ctx context.Context) (ConfigSnapshot, e
fallthrough

case "":
if u.DestinationPeer != "" {
if u.DestinationPeer != "" && s.peeringEnabled {
// NOTE: An upstream that points to a peer by definition will
// only ever watch a single catalog query, so a map key of just
// "UID" is sufficient to cover the peer data watches here.
@@ -285,7 +289,7 @@ func (s *handlerConnectProxy) handleUpdate(ctx context.Context, u UpdateEvent, s
snap.ConnectProxy.UpstreamPeerTrustBundles.Set(peer, resp.Bundle)
}

case u.CorrelationID == peeringTrustBundlesWatchID:
case u.CorrelationID == peeringTrustBundlesWatchID && s.peeringEnabled:
resp, ok := u.Result.(*pbpeering.TrustBundleListByServiceResponse)
if !ok {
return fmt.Errorf("invalid type for response: %T", u.Result)
@@ -303,7 +307,7 @@
snap.ConnectProxy.Intentions = resp
snap.ConnectProxy.IntentionsSet = true

case u.CorrelationID == peeredUpstreamsID:
case u.CorrelationID == peeredUpstreamsID && s.peeringEnabled:
resp, ok := u.Result.(*structs.IndexedPeeredServiceList)
if !ok {
return fmt.Errorf("invalid type for response %T", u.Result)
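The pattern in this file is uniform: every peering-related watch registered in `initialize()` and every peering correlation ID handled in `handleUpdate()` is now guarded by `s.peeringEnabled`, so a proxy on a peering-disabled agent never starts (or reacts to) those blocking queries. A self-contained Go sketch of the idea — the names below are illustrative, not the actual proxycfg types:

```go
package main

import (
	"context"
	"fmt"
)

// notifyFn stands in for a data-source Notify call: it registers a long-lived
// blocking query whose results are delivered on ch under a correlation ID.
type notifyFn func(ctx context.Context, correlationID string, ch chan<- string) error

// initWatches registers the watches for one proxy. Peering watches are skipped
// entirely when the feature flag is off, so the servers never see the
// corresponding polling queries at all.
func initWatches(ctx context.Context, peeringEnabled bool, ch chan<- string, notify notifyFn) error {
	if peeringEnabled {
		if err := notify(ctx, "peering-trust-bundles", ch); err != nil {
			return fmt.Errorf("registering peering watch: %w", err)
		}
	}
	// Non-peering watches (leaf certs, intentions, upstreams, ...) are
	// always registered.
	return notify(ctx, "intentions", ch)
}

func main() {
	ch := make(chan string, 8)
	notify := func(_ context.Context, id string, _ chan<- string) error {
		fmt.Println("registered watch:", id)
		return nil
	}
	// With peering disabled, only the non-peering watch is registered.
	if err := initWatches(context.Background(), false, ch, notify); err != nil {
		panic(err)
	}
}
```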
3 changes: 3 additions & 0 deletions agent/proxycfg/manager.go
@@ -75,6 +75,8 @@ type ManagerConfig struct {
// information to proxies that need to make intention decisions on their
// own.
IntentionDefaultAllow bool

PeeringEnabled bool
}

// NewManager constructs a Manager.
@@ -137,6 +139,7 @@ func (m *Manager) Register(id ProxyID, ns *structs.NodeService, source ProxySour
source: m.Source,
dnsConfig: m.DNSConfig,
intentionDefaultAllow: m.IntentionDefaultAllow,
peeringEnabled: m.PeeringEnabled,
}
if m.TLSConfigurator != nil {
stateConfig.serverSNIFn = m.TLSConfigurator.ServerSNI
45 changes: 27 additions & 18 deletions agent/proxycfg/mesh_gateway.go
@@ -32,17 +32,24 @@ func (s *handlerMeshGateway) initialize(ctx context.Context) (ConfigSnapshot, er
}

// Watch for all peer trust bundles we may need.
err = s.dataSources.TrustBundleList.Notify(ctx, &cachetype.TrustBundleListRequest{
Request: &pbpeering.TrustBundleListByServiceRequest{
Kind: string(structs.ServiceKindMeshGateway),
ServiceName: s.service,
Namespace: s.proxyID.NamespaceOrDefault(),
Partition: s.proxyID.PartitionOrDefault(),
},
QueryOptions: structs.QueryOptions{Token: s.token},
}, peeringTrustBundlesWatchID, s.ch)
if err != nil {
return snap, err
if s.peeringEnabled {
err = s.dataSources.TrustBundleList.Notify(ctx, &cachetype.TrustBundleListRequest{
Request: &pbpeering.TrustBundleListByServiceRequest{
Kind: string(structs.ServiceKindMeshGateway),
ServiceName: s.service,
Namespace: s.proxyID.NamespaceOrDefault(),
Partition: s.proxyID.PartitionOrDefault(),
},
QueryOptions: structs.QueryOptions{Token: s.token},
}, peeringTrustBundlesWatchID, s.ch)
if err != nil {
return snap, err
}
} else {
// Initialize these fields even when peering is not enabled, so that the mesh gateway
// returns the proper value in calls to `Valid()`
snap.MeshGateway.PeeringTrustBundles = make([]*pbpeering.PeeringTrustBundle, 0)
snap.MeshGateway.PeeringTrustBundlesSet = true
}

wildcardEntMeta := s.proxyID.WithWildcardNamespace()
@@ -448,14 +455,16 @@ func (s *handlerMeshGateway) handleUpdate(ctx context.Context, u UpdateEvent, sn
snap.MeshGateway.Leaf = leaf

case peeringTrustBundlesWatchID:
resp, ok := u.Result.(*pbpeering.TrustBundleListByServiceResponse)
if !ok {
return fmt.Errorf("invalid type for response: %T", u.Result)
}
if len(resp.Bundles) > 0 {
snap.MeshGateway.PeeringTrustBundles = resp.Bundles
if s.peeringEnabled {
resp, ok := u.Result.(*pbpeering.TrustBundleListByServiceResponse)
if !ok {
return fmt.Errorf("invalid type for response: %T", u.Result)
}
if len(resp.Bundles) > 0 {
snap.MeshGateway.PeeringTrustBundles = resp.Bundles
}
snap.MeshGateway.PeeringTrustBundlesSet = true
}
snap.MeshGateway.PeeringTrustBundlesSet = true

case meshConfigEntryID:
resp, ok := u.Result.(*structs.ConfigEntryResponse)
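Note the `else` branch in `initialize()` above: with peering disabled, the trust-bundle watch is never registered, so the handler pre-populates an empty bundle list and marks `PeeringTrustBundlesSet` so that `Valid()` still reports correctly even though no watch will ever fill those fields. A minimal sketch of that pattern with hypothetical types (not the real `ConfigSnapshot`):

```go
package main

import "fmt"

// gatewaySnapshot mimics the "data plus data-arrived flag" shape used by
// proxycfg snapshots: valid() only returns true once every watched input
// has reported at least once.
type gatewaySnapshot struct {
	trustBundles    []string
	trustBundlesSet bool
	rootsSet        bool
}

func (s *gatewaySnapshot) valid() bool {
	return s.trustBundlesSet && s.rootsSet
}

func main() {
	snap := &gatewaySnapshot{rootsSet: true}

	peeringEnabled := false
	if !peeringEnabled {
		// The watch that would normally set these fields is never registered,
		// so initialize them up front to keep valid() achievable.
		snap.trustBundles = make([]string, 0)
		snap.trustBundlesSet = true
	}

	fmt.Println("snapshot valid:", snap.valid()) // prints: snapshot valid: true
}
```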
1 change: 1 addition & 0 deletions agent/proxycfg/state.go
@@ -54,6 +54,7 @@ type stateConfig struct {
dnsConfig DNSConfig
serverSNIFn ServerSNIFunc
intentionDefaultAllow bool
peeringEnabled bool
}

// state holds all the state needed to maintain the config for a registered
12 changes: 7 additions & 5 deletions agent/proxycfg/state_test.go
@@ -441,9 +441,10 @@ func TestState_WatchesAndUpdates(t *testing.T) {
type testCase struct {
// the state to operate on. the logger, source, cache,
// ctx and cancel fields will be filled in by the test
ns structs.NodeService
sourceDC string
stages []verificationStage
ns structs.NodeService
sourceDC string
stages []verificationStage
peeringEnabled bool
}

newConnectProxyCase := func(meshGatewayProxyConfigValue structs.MeshGatewayMode) testCase {
@@ -747,7 +748,6 @@ func TestState_WatchesAndUpdates(t *testing.T) {
rootsWatchID: genVerifyDCSpecificWatch("dc1"),
exportedServiceListWatchID: genVerifyDCSpecificWatch("dc1"),
meshConfigEntryID: genVerifyMeshConfigWatch("dc1"),
peeringTrustBundlesWatchID: genVerifyTrustBundleListWatchForMeshGateway(""),
},
verifySnapshot: func(t testing.TB, snap *ConfigSnapshot) {
require.False(t, snap.Valid(), "gateway without root is not valid")
@@ -827,7 +827,6 @@ func TestState_WatchesAndUpdates(t *testing.T) {
rootsWatchID: genVerifyDCSpecificWatch("dc1"),
exportedServiceListWatchID: genVerifyDCSpecificWatch("dc1"),
meshConfigEntryID: genVerifyMeshConfigWatch("dc1"),
peeringTrustBundlesWatchID: genVerifyTrustBundleListWatchForMeshGateway(""),
},
events: []UpdateEvent{
rootWatchEvent(),
@@ -2715,6 +2714,7 @@ func TestState_WatchesAndUpdates(t *testing.T) {
},
},
"transparent-proxy-initial-with-peers": {
peeringEnabled: true,
ns: structs.NodeService{
Kind: structs.ServiceKindConnectProxy,
ID: "api-proxy",
@@ -2964,6 +2964,7 @@ func TestState_WatchesAndUpdates(t *testing.T) {
"connect-proxy": newConnectProxyCase(structs.MeshGatewayModeDefault),
"connect-proxy-mesh-gateway-local": newConnectProxyCase(structs.MeshGatewayModeLocal),
"connect-proxy-with-peers": {
peeringEnabled: true,
ns: structs.NodeService{
Kind: structs.ServiceKindConnectProxy,
ID: "web-sidecar-proxy",
@@ -3141,6 +3142,7 @@ func TestState_WatchesAndUpdates(t *testing.T) {
Domain: "consul.",
AltDomain: "alt.consul.",
},
peeringEnabled: tc.peeringEnabled,
}
wr := recordWatches(&sc)
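In the test table, `peeringEnabled` defaults to false, so most existing cases now exercise the disabled-by-default path (hence the dropped `peeringTrustBundlesWatchID` expectations in the mesh-gateway cases), and only the `*-with-peers` cases opt in. A generic sketch of that table-test shape — not the actual Consul harness:

```go
// Save as watch_registration_test.go and run with `go test`.
package example

import "testing"

// expectedWatches returns the watch IDs a handler would register, mirroring
// how the real test derives its expectations from the per-case flag.
func expectedWatches(peeringEnabled bool) []string {
	watches := []string{"roots", "leaf", "intentions"}
	if peeringEnabled {
		watches = append(watches, "peering-trust-bundles")
	}
	return watches
}

func TestWatchRegistration(t *testing.T) {
	cases := map[string]struct {
		peeringEnabled bool
		wantWatches    int
	}{
		"default (peering disabled)": {wantWatches: 3},
		"with peers":                 {peeringEnabled: true, wantWatches: 4},
	}
	for name, tc := range cases {
		t.Run(name, func(t *testing.T) {
			got := expectedWatches(tc.peeringEnabled)
			if len(got) != tc.wantWatches {
				t.Fatalf("expected %d watches, got %d (%v)", tc.wantWatches, len(got), got)
			}
		})
	}
}
```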

2 changes: 2 additions & 0 deletions agent/proxycfg/testing.go
@@ -719,6 +719,7 @@ func testConfigSnapshotFixture(
nsFn func(ns *structs.NodeService),
serverSNIFn ServerSNIFunc,
updates []UpdateEvent,
peeringEnabled bool,
) *ConfigSnapshot {
const token = ""

@@ -761,6 +762,7 @@
},
serverSNIFn: serverSNIFn,
intentionDefaultAllow: false, // TODO: make configurable
peeringEnabled: peeringEnabled,
}
testConfigSnapshotFixtureEnterprise(&config)
s, err := newServiceInstanceFromNodeService(ProxyID{ServiceID: ns.CompoundServiceID()}, ns, token)
12 changes: 6 additions & 6 deletions agent/proxycfg/testing_connect_proxy.go
@@ -13,7 +13,7 @@ import (
)

// TestConfigSnapshot returns a fully populated snapshot
func TestConfigSnapshot(t testing.T, nsFn func(ns *structs.NodeService), extraUpdates []UpdateEvent) *ConfigSnapshot {
func TestConfigSnapshot(t testing.T, nsFn func(ns *structs.NodeService), extraUpdates []UpdateEvent, peeringEnabled bool) *ConfigSnapshot {
roots, leaf := TestCerts(t)

// no entries implies we'll get a default chain
@@ -84,7 +84,7 @@ func TestConfigSnapshot(t testing.T, nsFn func(ns *structs.NodeService), extraUp
},
Meta: nil,
TaggedAddresses: nil,
}, nsFn, nil, testSpliceEvents(baseEvents, extraUpdates))
}, nsFn, nil, testSpliceEvents(baseEvents, extraUpdates), peeringEnabled)
}

// TestConfigSnapshotDiscoveryChain returns a fully populated snapshot using a discovery chain
@@ -155,7 +155,7 @@ func TestConfigSnapshotDiscoveryChain(
},
Meta: nil,
TaggedAddresses: nil,
}, nsFn, nil, testSpliceEvents(baseEvents, extraUpdates))
}, nsFn, nil, testSpliceEvents(baseEvents, extraUpdates), false)
}

func TestConfigSnapshotExposeConfig(t testing.T, nsFn func(ns *structs.NodeService)) *ConfigSnapshot {
@@ -210,7 +210,7 @@ func TestConfigSnapshotExposeConfig(t testing.T, nsFn func(ns *structs.NodeServi
},
Meta: nil,
TaggedAddresses: nil,
}, nsFn, nil, baseEvents)
}, nsFn, nil, baseEvents, false)
}

func TestConfigSnapshotExposeChecks(t testing.T) *ConfigSnapshot {
@@ -237,7 +237,7 @@ func TestConfigSnapshotExposeChecks(t testing.T) *ConfigSnapshot {
}},
},
},
)
false)
}

func TestConfigSnapshotGRPCExposeHTTP1(t testing.T) *ConfigSnapshot {
@@ -286,5 +286,5 @@
CorrelationID: svcChecksWatchIDPrefix + structs.ServiceIDString("grpc", nil),
Result: []structs.CheckType{},
},
})
}, false)
}
2 changes: 1 addition & 1 deletion agent/proxycfg/testing_ingress_gateway.go
@@ -99,7 +99,7 @@ func TestConfigSnapshotIngressGateway(
Address: "1.2.3.4",
Meta: nil,
TaggedAddresses: nil,
}, nsFn, nil, testSpliceEvents(baseEvents, extraUpdates))
}, nsFn, nil, testSpliceEvents(baseEvents, extraUpdates), true)
}

func TestConfigSnapshotIngressGatewaySDS_GatewayLevel_MixedTLS(t testing.T) *ConfigSnapshot {
4 changes: 2 additions & 2 deletions agent/proxycfg/testing_mesh_gateway.go
@@ -461,7 +461,7 @@ func TestConfigSnapshotMeshGateway(t testing.T, variant string, nsFn func(ns *st
Port: 443,
},
},
}, nsFn, nil, testSpliceEvents(baseEvents, extraUpdates))
}, nsFn, nil, testSpliceEvents(baseEvents, extraUpdates), false)
}

func TestConfigSnapshotPeeredMeshGateway(t testing.T, variant string, nsFn func(ns *structs.NodeService), extraUpdates []UpdateEvent) *ConfigSnapshot {
@@ -737,5 +737,5 @@ func TestConfigSnapshotPeeredMeshGateway(t testing.T, variant string, nsFn func(
Port: 443,
},
},
}, nsFn, nil, testSpliceEvents(baseEvents, extraUpdates))
}, nsFn, nil, testSpliceEvents(baseEvents, extraUpdates), true)
}
4 changes: 2 additions & 2 deletions agent/proxycfg/testing_peering.go
@@ -106,7 +106,7 @@ func TestConfigSnapshotPeering(t testing.T) *ConfigSnapshot {
},
},
},
})
}, true)
}

func TestConfigSnapshotPeeringTProxy(t testing.T) *ConfigSnapshot {
@@ -247,5 +247,5 @@
},
},
},
})
}, true)
}
4 changes: 2 additions & 2 deletions agent/proxycfg/testing_terminating_gateway.go
@@ -321,7 +321,7 @@ func TestConfigSnapshotTerminatingGateway(t testing.T, populateServices bool, ns
Port: 443,
},
},
}, nsFn, nil, testSpliceEvents(baseEvents, extraUpdates))
}, nsFn, nil, testSpliceEvents(baseEvents, extraUpdates), false)
}

func TestConfigSnapshotTerminatingGatewayDestinations(t testing.T, populateDestinations bool, extraUpdates []UpdateEvent) *ConfigSnapshot {
@@ -520,7 +520,7 @@ func TestConfigSnapshotTerminatingGatewayDestinations(t testing.T, populateDesti
Port: 443,
},
},
}, nil, nil, testSpliceEvents(baseEvents, extraUpdates))
}, nil, nil, testSpliceEvents(baseEvents, extraUpdates), false)
}

func TestConfigSnapshotTerminatingGatewayServiceSubsets(t testing.T) *ConfigSnapshot {