Remove health plugin and management cluster (#24361)
* Remove health plugin

* remove mgmt cluster

* Fully cleanup

* More cleanup
howardjohn committed Jun 4, 2020
1 parent c89d5cb commit 19c8e17
Showing 30 changed files with 29 additions and 1,244 deletions.
2 changes: 0 additions & 2 deletions mixer/test/client/gateway/gateway_test.go
@@ -245,9 +245,7 @@ func (mock) GetService(_ host.Name) (*model.Service, error) { return nil, nil }
func (mock) InstancesByPort(_ *model.Service, _ int, _ labels.Collection) ([]*model.ServiceInstance, error) {
return nil, nil
}
func (mock) ManagementPorts(_ string) model.PortList { return nil }
func (mock) Services() ([]*model.Service, error) { return nil, nil }
func (mock) WorkloadHealthCheckInfo(_ string) model.ProbeList { return nil }
func (mock) GetIstioServiceAccounts(_ *model.Service, ports []int) []string { return nil }

const (
2 changes: 0 additions & 2 deletions mixer/test/client/pilotplugin/pilotplugin_test.go
@@ -318,9 +318,7 @@ func (mock) GetService(_ host.Name) (*model.Service, error) { return nil, nil }
func (mock) InstancesByPort(_ *model.Service, _ int, _ labels.Collection) ([]*model.ServiceInstance, error) {
return nil, nil
}
func (mock) ManagementPorts(_ string) model.PortList { return nil }
func (mock) Services() ([]*model.Service, error) { return nil, nil }
func (mock) WorkloadHealthCheckInfo(_ string) model.ProbeList { return nil }
func (mock) GetIstioServiceAccounts(_ *model.Service, ports []int) []string { return nil }

const (
2 changes: 0 additions & 2 deletions mixer/test/client/pilotplugin_mtls/pilotplugin_mtls_test.go
@@ -326,9 +326,7 @@ func (mock) GetService(_ host.Name) (*model.Service, error) { return nil, nil }
func (mock) InstancesByPort(_ *model.Service, _ int, _ labels.Collection) ([]*model.ServiceInstance, error) {
return nil, nil
}
func (mock) ManagementPorts(_ string) model.PortList { return nil }
func (mock) Services() ([]*model.Service, error) { return nil, nil }
func (mock) WorkloadHealthCheckInfo(_ string) model.ProbeList { return nil }
func (mock) GetIstioServiceAccounts(_ *model.Service, ports []int) []string { return nil }

const (
2 changes: 0 additions & 2 deletions mixer/test/client/pilotplugin_tcp/pilotplugin_tcp_test.go
@@ -195,9 +195,7 @@ func (mock) GetService(_ host.Name) (*model.Service, error) { return nil, nil }
func (mock) InstancesByPort(_ *model.Service, _ int, _ labels.Collection) ([]*model.ServiceInstance, error) {
return nil, nil
}
func (mock) ManagementPorts(_ string) model.PortList { return nil }
func (mock) Services() ([]*model.Service, error) { return nil, nil }
func (mock) WorkloadHealthCheckInfo(_ string) model.ProbeList { return nil }
func (mock) GetIstioServiceAccounts(_ *model.Service, ports []int) []string { return nil }

const (
13 changes: 0 additions & 13 deletions pilot/pkg/model/service.go
@@ -434,19 +434,6 @@ type ServiceDiscovery interface {

GetProxyWorkloadLabels(*Proxy) (labels.Collection, error)

// ManagementPorts lists set of management ports associated with an IPv4 address.
// These management ports are typically used by the platform for out of band management
// tasks such as health checks, etc. In a scenario where the proxy functions in the
// transparent mode (traps all traffic to and from the service instance IP address),
// the configuration generated for the proxy will not manipulate traffic destined for
// the management ports
ManagementPorts(addr string) PortList

// WorkloadHealthCheckInfo lists set of probes associated with an IPv4 address.
// These probes are used by the platform to identify requests that are performing
// health checks.
WorkloadHealthCheckInfo(addr string) ProbeList

// GetIstioServiceAccounts returns a list of service accounts looked up from
// the specified service hostname and ports.
// Deprecated - service account tracking moved to XdsServer, incremental.
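
For reference, here is a minimal sketch of how the narrowed ServiceDiscovery interface reads once the two methods above are gone. It is reconstructed only from the hunks in this commit (this one plus the mock implementations in the mixer tests), uses simplified stand-in types instead of the real pilot/pkg/model, host, and labels types, and omits any interface members not visible in this diff.

// Sketch only: reconstructed from the hunks in this commit; stand-in types,
// not the real pilot/pkg/model definitions.
package sketch

type (
	Service          struct{ Hostname string }
	ServiceInstance  struct{}
	Proxy            struct{ IPAddresses []string }
	LabelsCollection []map[string]string
)

type ServiceDiscovery interface {
	Services() ([]*Service, error)
	GetService(hostname string) (*Service, error)
	InstancesByPort(svc *Service, port int, lbls LabelsCollection) ([]*ServiceInstance, error)
	GetProxyWorkloadLabels(*Proxy) (LabelsCollection, error)
	// Deprecated - service account tracking moved to XdsServer, incremental.
	GetIstioServiceAccounts(svc *Service, ports []int) []string

	// Removed by this commit:
	//   ManagementPorts(addr string) PortList
	//   WorkloadHealthCheckInfo(addr string) ProbeList
}
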
22 changes: 2 additions & 20 deletions pilot/pkg/networking/core/v1alpha3/cluster.go
@@ -105,13 +105,7 @@ func (configgen *ConfigGeneratorImpl) BuildClusters(proxy *model.Proxy, push *mo
// DO NOT CALL PLUGINS for these two clusters.
outboundClusters = append(outboundClusters, cb.buildBlackHoleCluster(), cb.buildDefaultPassthroughCluster())
outboundClusters = envoyfilter.ApplyClusterPatches(networking.EnvoyFilter_SIDECAR_OUTBOUND, proxy, push, outboundClusters)
// Let ServiceDiscovery decide which IP and Port are used for management if
// there are multiple IPs
managementPorts := make([]*model.Port, 0)
for _, ip := range proxy.IPAddresses {
managementPorts = append(managementPorts, push.ManagementPorts(ip)...)
}
inboundClusters := configgen.buildInboundClusters(proxy, push, instances, managementPorts)
inboundClusters := configgen.buildInboundClusters(proxy, push, instances)
// Pass through clusters for inbound traffic. These cluster bind loopback-ish src address to access node local service.
inboundClusters = append(inboundClusters, cb.buildInboundPassthroughClusters()...)
inboundClusters = envoyfilter.ApplyClusterPatches(networking.EnvoyFilter_SIDECAR_INBOUND, proxy, push, inboundClusters)
@@ -328,10 +322,9 @@ func buildInboundLocalityLbEndpoints(bind string, port uint32) []*endpoint.Local
}

func (configgen *ConfigGeneratorImpl) buildInboundClusters(proxy *model.Proxy,
push *model.PushContext, instances []*model.ServiceInstance, managementPorts []*model.Port) []*cluster.Cluster {
push *model.PushContext, instances []*model.ServiceInstance) []*cluster.Cluster {

clusters := make([]*cluster.Cluster, 0)
cb := NewClusterBuilder(proxy, push)

// The inbound clusters for a node depends on whether the node has a SidecarScope with inbound listeners
// or not. If the node has a sidecarscope with ingress listeners, we only return clusters corresponding
@@ -370,17 +363,6 @@ func (configgen *ConfigGeneratorImpl) buildInboundClusters(proxy *model.Proxy,
have[instance.ServicePort] = true
}
}

// Add a passthrough cluster for traffic to management ports (health check ports)
for _, port := range managementPorts {
clusterName := model.BuildSubsetKey(model.TrafficDirectionInbound, port.Name,
ManagementClusterHostname, port.Port)
localityLbEndpoints := buildInboundLocalityLbEndpoints(actualLocalHost, uint32(port.Port))
mgmtCluster := cb.buildDefaultCluster(clusterName, cluster.Cluster_STATIC, localityLbEndpoints,
model.TrafficDirectionInbound, nil, false)
setUpstreamProtocol(proxy, mgmtCluster, port, model.TrafficDirectionInbound)
clusters = append(clusters, mgmtCluster)
}
} else {
rule := sidecarScope.Config.Spec.(*networking.Sidecar)
for _, ingressListener := range rule.Ingress {
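
As a quick illustration of the narrowed call above, this sketch mirrors the post-change shape with simplified stand-in types (it is not the real pilot/pkg/networking code): buildInboundClusters no longer takes a managementPorts slice, BuildClusters no longer walks proxy.IPAddresses to collect them, and no inbound passthrough clusters are emitted for health-check ports.

// Sketch only: stand-in types; the real implementation also handles
// sidecar-scope ingress listeners, cluster builders, and protocol setup.
package sketch

type (
	Proxy           struct{ IPAddresses []string }
	PushContext     struct{}
	Port            struct{ Name string; Number int }
	ServiceInstance struct{ ServicePort *Port }
	Cluster         struct{ Name string }
)

// Post-change shape: the old fourth parameter, managementPorts []*Port, is gone,
// so every cluster built here comes from the proxy's service instances.
func buildInboundClusters(proxy *Proxy, push *PushContext, instances []*ServiceInstance) []*Cluster {
	clusters := make([]*Cluster, 0, len(instances))
	seen := map[int]bool{} // rough analogue of the have[instance.ServicePort] dedup above
	for _, inst := range instances {
		if seen[inst.ServicePort.Number] {
			continue
		}
		seen[inst.ServicePort.Number] = true
		clusters = append(clusters, &Cluster{Name: "inbound|" + inst.ServicePort.Name})
	}
	return clusters
}
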
4 changes: 2 additions & 2 deletions pilot/pkg/networking/core/v1alpha3/cluster_test.go
@@ -1840,7 +1840,7 @@ func TestBuildInboundClustersDefaultCircuitBreakerThresholds(t *testing.T) {
},
}

clusters := configgen.buildInboundClusters(proxy, env.PushContext, instances, []*model.Port{servicePort})
clusters := configgen.buildInboundClusters(proxy, env.PushContext, instances)
g.Expect(len(clusters)).ShouldNot(Equal(0))

for _, cluster := range clusters {
@@ -1981,7 +1981,7 @@ func TestBuildInboundClustersPortLevelCircuitBreakerThresholds(t *testing.T) {
}

env := c.newEnv(serviceDiscovery, configStore)
clusters := configgen.buildInboundClusters(proxy, env.PushContext, instances, []*model.Port{servicePort})
clusters := configgen.buildInboundClusters(proxy, env.PushContext, instances)
g.Expect(len(clusters)).ShouldNot(Equal(0))

for _, cluster := range clusters {
146 changes: 0 additions & 146 deletions pilot/pkg/networking/core/v1alpha3/fakes/fake_service_discovery.gen.go

Some generated files are not rendered by default.

2 changes: 1 addition & 1 deletion pilot/pkg/networking/core/v1alpha3/httproute_test.go
@@ -818,7 +818,7 @@ func testSidecarRDSVHosts(t *testing.T, services []*model.Service,
p := &fakePlugin{}
configgen := NewConfigGenerator([]plugin.Plugin{p})

env := buildListenerEnvWithVirtualServices(services, virtualServices, nil)
env := buildListenerEnvWithVirtualServices(services, virtualServices)

if err := env.PushContext.InitContext(&env, nil, nil); err != nil {
t.Fatalf("failed to initialize push context")
71 changes: 0 additions & 71 deletions pilot/pkg/networking/core/v1alpha3/listener.go
@@ -459,7 +459,6 @@ func (configgen *ConfigGeneratorImpl) buildSidecarListeners(push *model.PushCont
// Any build order change need a careful code review
builder.buildSidecarInboundListeners(configgen).
buildSidecarOutboundListeners(configgen).
buildManagementListeners(configgen).
buildHTTPProxyListener(configgen).
buildVirtualOutboundListener(configgen).
buildVirtualInboundListener(configgen)
@@ -1771,76 +1770,6 @@ func (configgen *ConfigGeneratorImpl) onVirtualOutboundListener(
return ipTablesListener
}

// buildSidecarInboundMgmtListeners creates inbound TCP only listeners for the management ports on
// server (inbound). Management port listeners are slightly different from standard Inbound listeners
// in that, they do not have mixer filters nor do they have inbound auth.
// N.B. If a given management port is same as the service instance's endpoint port
// the pod will fail to start in Kubernetes, because the mixer service tries to
// lookup the service associated with the Pod. Since the pod is yet to be started
// and hence not bound to the service), the service lookup fails causing the mixer
// to fail the health check call. This results in a vicious cycle, where kubernetes
// restarts the unhealthy pod after successive failed health checks, and the mixer
// continues to reject the health checks as there is no service associated with
// the pod.
// So, if a user wants to use kubernetes probes with Istio, she should ensure
// that the health check ports are distinct from the service ports.
func buildSidecarInboundMgmtListeners(node *model.Proxy, push *model.PushContext, managementPorts model.PortList, managementIP string) []*listener.Listener {
listeners := make([]*listener.Listener, 0, len(managementPorts))
// assumes that inbound connections/requests are sent to the endpoint address
for _, mPort := range managementPorts {
switch mPort.Protocol {
case protocol.HTTP, protocol.HTTP2, protocol.GRPC, protocol.GRPCWeb, protocol.TCP,
protocol.HTTPS, protocol.TLS, protocol.Mongo, protocol.Redis, protocol.MySQL:

instance := &model.ServiceInstance{
Service: &model.Service{
Hostname: ManagementClusterHostname,
},
ServicePort: mPort,
Endpoint: &model.IstioEndpoint{
Address: managementIP,
EndpointPort: uint32(mPort.Port),
},
}
listenerOpts := buildListenerOpts{
bind: managementIP,
port: mPort.Port,
filterChainOpts: []*filterChainOpts{{
networkFilters: buildInboundNetworkFilters(push, node, instance),
}},
// No user filters for the management unless we introduce new listener matches
skipUserFilters: true,
proxy: node,
push: push,
}
l := buildListener(listenerOpts)
l.TrafficDirection = core.TrafficDirection_INBOUND
mutable := &istionetworking.MutableObjects{
Listener: l,
FilterChains: []istionetworking.FilterChain{{}},
}
pluginParams := &plugin.InputParams{
ListenerProtocol: istionetworking.ListenerProtocolTCP,
ListenerCategory: networking.EnvoyFilter_SIDECAR_OUTBOUND,
Push: push,
Node: node,
Port: mPort,
}
// TODO: should we call plugins for the admin port listeners too? We do everywhere else we construct listeners.
if err := buildCompleteFilterChain(pluginParams, mutable, listenerOpts); err != nil {
log.Warna("buildSidecarInboundMgmtListeners ", err.Error())
} else {
listeners = append(listeners, l)
}
default:
log.Warnf("Unsupported inbound protocol %v for management port %#v",
mPort.Protocol, mPort)
}
}

return listeners
}

// httpListenerOpts are options for an HTTP listener
type httpListenerOpts struct {
routeConfig *route.RouteConfiguration
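
For orientation, the first hunk of this file's diff shows the sidecar listener build chain simply dropping the buildManagementListeners step (the step that invoked the buildSidecarInboundMgmtListeners function removed above); the other steps are untouched. The stub below only makes that ordering concrete and compilable; the real builder and its configgen arguments live in pilot/pkg/networking/core/v1alpha3.

// Sketch only: a stub builder showing the post-change build order from the
// hunk at the top of this file's diff.
package sketch

type ListenerBuilder struct{ order []string }

func (b *ListenerBuilder) add(step string) *ListenerBuilder {
	b.order = append(b.order, step)
	return b
}

func (b *ListenerBuilder) buildSidecarInboundListeners() *ListenerBuilder  { return b.add("sidecar inbound") }
func (b *ListenerBuilder) buildSidecarOutboundListeners() *ListenerBuilder { return b.add("sidecar outbound") }
func (b *ListenerBuilder) buildHTTPProxyListener() *ListenerBuilder        { return b.add("http proxy") }
func (b *ListenerBuilder) buildVirtualOutboundListener() *ListenerBuilder  { return b.add("virtual outbound") }
func (b *ListenerBuilder) buildVirtualInboundListener() *ListenerBuilder   { return b.add("virtual inbound") }

// buildOrder mirrors buildSidecarListeners after this commit: the
// buildManagementListeners step that used to sit between the outbound and
// HTTP proxy listeners is no longer in the chain.
func buildOrder() []string {
	b := &ListenerBuilder{}
	b.buildSidecarInboundListeners().
		buildSidecarOutboundListeners().
		buildHTTPProxyListener().
		buildVirtualOutboundListener().
		buildVirtualInboundListener()
	return b.order
}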
