Switch from using target pools to backend services
upodroid committed Feb 19, 2024
1 parent 3c3188e commit 5c91426
Showing 24 changed files with 411 additions and 181 deletions.
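
For GCE clusters, the practical effect is that the Kubernetes API load balancer is now built from a health check, a backend service attached to the control-plane instance group managers, and a forwarding rule, rather than a target pool, and the cluster spec's loadBalancer.class field gains Regional and Global values on GCE. A rough sketch of how a cluster opts in, mirroring the integration-test fixtures updated at the end of this diff (the apiVersion, kind, and cluster name here are assumed boilerplate, not part of the commit):

# Hypothetical kops cluster manifest excerpt. Only spec.api.loadBalancer.class
# reflects this commit's fixtures; the surrounding framing is assumed.
apiVersion: kops.k8s.io/v1alpha2
kind: Cluster
metadata:
  name: example.k8s.local
spec:
  api:
    loadBalancer:
      class: Regional   # new on GCE; Global is also accepted
      type: Public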
4 changes: 2 additions & 2 deletions cloudmock/gce/mockcompute/api.go
@@ -137,11 +137,11 @@ func (c *MockClient) HTTPHealthChecks() gce.HttpHealthChecksClient {
 	return c.httpHealthChecksClient
 }

-func (c *MockClient) RegionHealthChecks() gce.RegionHealthChecksClient {
+func (c *MockClient) HealthChecks() gce.HealthChecksClient {
 	return c.healthCheckClient
 }

-func (c *MockClient) RegionBackendServices() gce.RegionBackendServiceClient {
+func (c *MockClient) BackendServices() gce.BackendServiceClient {
 	return c.backendServiceClient
 }
2 changes: 1 addition & 1 deletion cloudmock/gce/mockcompute/backend_services.go
@@ -31,7 +31,7 @@ type backendServiceClient struct {
 	sync.Mutex
 }

-var _ gce.RegionBackendServiceClient = &backendServiceClient{}
+var _ gce.BackendServiceClient = &backendServiceClient{}

 func newBackendServiceClient() *backendServiceClient {
 	return &backendServiceClient{
2 changes: 1 addition & 1 deletion cloudmock/gce/mockcompute/health_checks.go
@@ -31,7 +31,7 @@ type healthCheckClient struct {
 	sync.Mutex
 }

-var _ gce.RegionHealthChecksClient = &healthCheckClient{}
+var _ gce.HealthChecksClient = &healthCheckClient{}

 func newHealthCheckClient() *healthCheckClient {
 	return &healthCheckClient{
2 changes: 1 addition & 1 deletion cmd/kops/create_cluster.go
@@ -384,7 +384,7 @@ func NewCmdCreateCluster(f *util.Factory, out io.Writer) *cobra.Command {
 	cmd.Flags().StringVar(&options.APILoadBalancerClass, "api-loadbalancer-class", options.APILoadBalancerClass, "Class of load balancer for the Kubernetes API (AWS only): classic or network")
 	cmd.Flags().MarkDeprecated("api-loadbalancer-class", "network load balancer should be used for all newly created clusters")
 	cmd.RegisterFlagCompletionFunc("api-loadbalancer-class", func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) {
-		return []string{"classic", "network"}, cobra.ShellCompDirectiveNoFileComp
+		return []string{"classic", "network", "regional", "global"}, cobra.ShellCompDirectiveNoFileComp
 	})
 	cmd.Flags().StringVar(&options.APILoadBalancerType, "api-loadbalancer-type", options.APILoadBalancerType, "Type of load balancer for the Kubernetes API: public or internal")
 	cmd.RegisterFlagCompletionFunc("api-loadbalancer-type", func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) {
6 changes: 4 additions & 2 deletions pkg/apis/kops/cluster.go
@@ -505,8 +505,10 @@ const (
 type LoadBalancerClass string

 const (
-	LoadBalancerClassClassic LoadBalancerClass = "Classic"
-	LoadBalancerClassNetwork LoadBalancerClass = "Network"
+	LoadBalancerClassClassic  LoadBalancerClass = "Classic"
+	LoadBalancerClassNetwork  LoadBalancerClass = "Network"
+	LoadBalancerClassRegional LoadBalancerClass = "Regional"
+	LoadBalancerClassGlobal   LoadBalancerClass = "Global"
 )

 type AccessLogSpec struct {
3 changes: 0 additions & 3 deletions pkg/apis/kops/validation/validation.go
@@ -224,9 +224,6 @@ func validateClusterSpec(spec *kops.ClusterSpec, c *kops.Cluster, fieldPath *fie
 	lbSpec := spec.API.LoadBalancer
 	lbPath := fieldPath.Child("api", "loadBalancer")
 	if spec.GetCloudProvider() != kops.CloudProviderAWS {
-		if lbSpec.Class != "" {
-			allErrs = append(allErrs, field.Forbidden(lbPath.Child("class"), "class is only supported on AWS"))
-		}
 		if lbSpec.IdleTimeoutSeconds != nil {
 			allErrs = append(allErrs, field.Forbidden(lbPath.Child("idleTimeoutSeconds"), "idleTimeoutSeconds is only supported on AWS"))
 		}
5 changes: 5 additions & 0 deletions pkg/model/context.go
@@ -301,6 +301,11 @@ func (b *KopsModelContext) UseNetworkLoadBalancer() bool {
 	return b.Cluster.Spec.API.LoadBalancer.Class == kops.LoadBalancerClassNetwork
 }

+// UseRegionalLoadBalancer checks if we are using Regional LoadBalancer
+func (b *KopsModelContext) UseRegionalLoadBalancer() bool {
+	return b.Cluster.Spec.API.LoadBalancer.Class == kops.LoadBalancerClassRegional
+}
+
 // UseSSHKey returns true if SSHKeyName from the cluster spec is set to a nonempty string
 // or there is an SSH public key provisioned in the key store.
 func (b *KopsModelContext) UseSSHKey() bool {
78 changes: 57 additions & 21 deletions pkg/model/gcemodel/api_loadbalancer.go
@@ -40,34 +40,54 @@ var _ fi.CloudupModelBuilder = &APILoadBalancerBuilder{}
 // createPublicLB validates the existence of a target pool with the given name,
 // and creates an IP address and forwarding rule pointing to that target pool.
 func (b *APILoadBalancerBuilder) createPublicLB(c *fi.CloudupModelBuilderContext) error {
-	healthCheck := &gcetasks.HTTPHealthcheck{
-		Name:      s(b.NameForHealthcheck("api")),
+	region := ""
+	if b.UseRegionalLoadBalancer() {
+		for _, subnet := range b.Cluster.Spec.Networking.Subnets {
+			if subnet.Region != "" {
+				region = subnet.Region
+			}
+		}
+	}
+
+	hc := &gcetasks.HealthCheck{
+		Name:        s(b.NameForHealthCheck("api")),
 		Port:        i64(wellknownports.KubeAPIServerHealthCheck),
 		RequestPath: s("/healthz"),
 		Lifecycle:   b.Lifecycle,
+		Region:      region,
 	}
-	c.AddTask(healthCheck)

-	// TODO: point target pool to instance group managers, as done in internal LB.
-	targetPool := &gcetasks.TargetPool{
-		Name:        s(b.NameForTargetPool("api")),
-		HealthCheck: healthCheck,
-		Lifecycle:   b.Lifecycle,
+	c.AddTask(hc)
+	var igms []*gcetasks.InstanceGroupManager
+	for _, ig := range b.InstanceGroups {
+		if ig.Spec.Role != kops.InstanceGroupRoleControlPlane {
+			continue
+		}
+		if len(ig.Spec.Zones) > 1 {
+			return fmt.Errorf("instance group %q has %d zones, which is not yet supported for GCP", ig.GetName(), len(ig.Spec.Zones))
+		}
+		if len(ig.Spec.Zones) == 0 {
+			return fmt.Errorf("instance group %q must specify exactly one zone", ig.GetName())
+		}
+		zone := ig.Spec.Zones[0]
+		igms = append(igms, &gcetasks.InstanceGroupManager{Name: s(gce.NameForInstanceGroupManager(b.Cluster.ObjectMeta.Name, ig.ObjectMeta.Name, zone)), Zone: s(zone)})
 	}
-	c.AddTask(targetPool)

-	poolHealthCheck := &gcetasks.PoolHealthCheck{
-		Name:        s(b.NameForPoolHealthcheck("api")),
-		Healthcheck: healthCheck,
-		Pool:        targetPool,
-		Lifecycle:   b.Lifecycle,
+	bs := &gcetasks.BackendService{
+		Name:                  s(b.NameForBackendService("api")),
+		Protocol:              s("TCP"),
+		HealthChecks:          []*gcetasks.HealthCheck{hc},
+		Lifecycle:             b.Lifecycle,
+		LoadBalancingScheme:   s("EXTERNAL"),
+		Region:                region,
+		InstanceGroupManagers: igms,
 	}
-	c.AddTask(poolHealthCheck)
+	c.AddTask(bs)

 	ipAddress := &gcetasks.Address{
 		Name: s(b.NameForIPAddress("api")),

 		Lifecycle:         b.Lifecycle,
+		Region:            region,
 		WellKnownServices: []wellknownservices.WellKnownService{wellknownservices.KubeAPIServer},
 	}
 	c.AddTask(ipAddress)
@@ -78,8 +98,9 @@ func (b *APILoadBalancerBuilder) createPublicLB(c *fi.CloudupModelBuilderContext
 		Name:      s(b.NameForForwardingRule("api")),
 		Lifecycle: b.Lifecycle,
 		PortRange: s(strconv.Itoa(wellknownports.KubeAPIServer) + "-" + strconv.Itoa(wellknownports.KubeAPIServer)),
-		TargetPool: targetPool,
+		BackendService: bs,
 		IPAddress: ipAddress,
+		Region: region,
 		IPProtocol: "TCP",
 		LoadBalancingScheme: s("EXTERNAL"),
 		Labels: map[string]string{
@@ -94,9 +115,10 @@ func (b *APILoadBalancerBuilder) createPublicLB(c *fi.CloudupModelBuilderContext
 		Name:      s(b.NameForForwardingRule("kops-controller")),
 		Lifecycle: b.Lifecycle,
 		PortRange: s(strconv.Itoa(wellknownports.KopsControllerPort) + "-" + strconv.Itoa(wellknownports.KopsControllerPort)),
-		TargetPool: targetPool,
+		BackendService: bs,
 		IPAddress: ipAddress,
 		IPProtocol: "TCP",
+		Region: region,
 		LoadBalancingScheme: s("EXTERNAL"),
 		Labels: map[string]string{
 			clusterLabel.Key: clusterLabel.Value,
@@ -155,10 +177,20 @@ func (b *APILoadBalancerBuilder) addFirewallRules(c *fi.CloudupModelBuilderConte
 func (b *APILoadBalancerBuilder) createInternalLB(c *fi.CloudupModelBuilderContext) error {
 	clusterLabel := gce.LabelForCluster(b.ClusterName())

+	// internal Loadbalancers are always regional
+	region := ""
+	for _, subnet := range b.Cluster.Spec.Networking.Subnets {
+		if subnet.Region != "" {
+			region = subnet.Region
+		}
+	}
+
 	hc := &gcetasks.HealthCheck{
-		Name:      s(b.NameForHealthCheck("api")),
-		Port:      wellknownports.KubeAPIServer,
-		Lifecycle: b.Lifecycle,
+		Name:        s(b.NameForHealthCheck("api")),
+		Port:        i64(wellknownports.KubeAPIServer),
+		RequestPath: s("/healthz"),
+		Region:      region,
+		Lifecycle:   b.Lifecycle,
 	}
 	c.AddTask(hc)
 	var igms []*gcetasks.InstanceGroupManager
@@ -181,6 +213,7 @@ func (b *APILoadBalancerBuilder) createInternalLB(c *fi.CloudupModelBuilderConte
 		HealthChecks:          []*gcetasks.HealthCheck{hc},
 		Lifecycle:             b.Lifecycle,
 		LoadBalancingScheme:   s("INTERNAL"),
+		Region:                region,
 		InstanceGroupManagers: igms,
 	}
 	c.AddTask(bs)
@@ -209,6 +242,7 @@ func (b *APILoadBalancerBuilder) createInternalLB(c *fi.CloudupModelBuilderConte
 		Subnetwork: subnet,

 		WellKnownServices: []wellknownservices.WellKnownService{wellknownservices.KubeAPIServer},
+		Region:            region,
 		Lifecycle:         b.Lifecycle,
 	}
 	c.AddTask(ipAddress)
@@ -222,6 +256,7 @@ func (b *APILoadBalancerBuilder) createInternalLB(c *fi.CloudupModelBuilderConte
 		IPProtocol:          "TCP",
 		LoadBalancingScheme: s("INTERNAL"),
 		Network:             network,
+		Region:              region,
 		Subnetwork:          subnet,
 		Labels: map[string]string{
 			clusterLabel.Key: clusterLabel.Value,
@@ -240,6 +275,7 @@ func (b *APILoadBalancerBuilder) createInternalLB(c *fi.CloudupModelBuilderConte
 		IPProtocol:          "TCP",
 		LoadBalancingScheme: s("INTERNAL"),
 		Network:             network,
+		Region:              region,
 		Subnetwork:          subnet,
 		Labels: map[string]string{
 			clusterLabel.Key: clusterLabel.Value,
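The public and internal builders now converge on the same HealthCheck, BackendService, and ForwardingRule task chain; the public path only resolves a region when the Regional class is selected, while the internal path always derives its region from the cluster's subnets, as the comment above notes. A cluster exercising the internal path would declare something like the following excerpt (hypothetical; type: Internal is a pre-existing kops option, shown only for contrast):

spec:
  api:
    loadBalancer:
      type: Internal   # provisioned as a regional internal backend service on GCE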
17 changes: 0 additions & 17 deletions pkg/model/gcemodel/autoscalinggroup.go
@@ -309,23 +309,6 @@ func (b *AutoscalingGroupModelBuilder) Build(c *fi.CloudupModelBuilderContext) e
 			InstanceTemplate:            instanceTemplate,
 			ListManagedInstancesResults: "PAGINATED",
 		}
-
-		// Attach masters to load balancer if we're using one
-		switch ig.Spec.Role {
-		case kops.InstanceGroupRoleControlPlane:
-			if b.UseLoadBalancerForAPI() {
-				lbSpec := b.Cluster.Spec.API.LoadBalancer
-				if lbSpec != nil {
-					switch lbSpec.Type {
-					case kops.LoadBalancerTypePublic:
-						t.TargetPools = append(t.TargetPools, b.LinkToTargetPool("api"))
-					case kops.LoadBalancerTypeInternal:
-						klog.Warningf("Not hooking the instance group manager up to anything.")
-					}
-				}
-			}
-		}

 		c.AddTask(t)
 	}
 }
4 changes: 4 additions & 0 deletions pkg/model/gcemodel/context.go
@@ -96,6 +96,10 @@ func (c *GCEModelContext) LinkToTargetPool(id string) *gcetasks.TargetPool {
 	return &gcetasks.TargetPool{Name: s(c.NameForTargetPool(id))}
 }

+func (c *GCEModelContext) LinkToBackendService(id string) *gcetasks.BackendService {
+	return &gcetasks.BackendService{Name: s(c.NameForBackendService(id))}
+}
+
 func (c *GCEModelContext) NameForTargetPool(id string) string {
 	return c.SafeSuffixedObjectName(id)
 }
58 changes: 49 additions & 9 deletions pkg/resources/gce/gce.go
@@ -511,7 +511,11 @@ func deleteForwardingRule(cloud fi.Cloud, r *resources.Resource) error {
 		return err
 	}

-	op, err := c.Compute().ForwardingRules().Delete(ctx, u.Project, u.Region, u.Name)
+	region := ""
+	if !u.Global {
+		region = u.Region
+	}
+	op, err := c.Compute().ForwardingRules().Delete(u.Project, region, u.Name)
 	if err != nil {
 		if gce.IsNotFound(err) {
 			klog.Infof("ForwardingRule not found, assuming deleted: %q", t.SelfLink)
@@ -828,9 +832,14 @@ func (d *clusterDiscoveryGCE) listAddresses() ([]*resources.Resource, error) {

 	addrs, err := c.Compute().Addresses().List(ctx, c.Project(), c.Region())
 	if err != nil {
-		return nil, fmt.Errorf("error listing Addresses: %v", err)
+		return nil, fmt.Errorf("error listing regional Addresses: %v", err)
 	}
+	globalAddrs, err := c.Compute().Addresses().List(ctx, c.Project(), "")
+	if err != nil {
+		return nil, fmt.Errorf("error listing global Addresses: %v", err)
+	}

+	addrs = append(addrs, globalAddrs...)
 	for _, a := range addrs {
 		if !d.matchesClusterName(a.Name) {
 			klog.V(8).Infof("Skipping Address with name %q", a.Name)
@@ -861,8 +870,12 @@ func deleteAddress(cloud fi.Cloud, r *resources.Resource) error {
 	if err != nil {
 		return err
 	}
+	region := ""
+	if !u.Global {
+		region = u.Region
+	}

-	op, err := c.Compute().Addresses().Delete(u.Project, u.Region, u.Name)
+	op, err := c.Compute().Addresses().Delete(u.Project, region, u.Name)
 	if err != nil {
 		if gce.IsNotFound(err) {
 			klog.Infof("Address not found, assuming deleted: %q", t.SelfLink)
@@ -1079,15 +1092,26 @@ func containsOnlyListedIGMs(svc *compute.BackendService, igms []*resources.Resou

 func (d *clusterDiscoveryGCE) listBackendServices() ([]*resources.Resource, error) {
 	c := d.gceCloud
+	// list global backendservices first
+	svcs, err := c.Compute().BackendServices().List(context.Background(), c.Project(), "")
+	if err != nil {
+		if gce.IsNotFound(err) {
+			klog.Infof("backend services not found, assuming none exist in project: %q region: %q", c.Project(), c.Region())
+			return nil, nil
+		}
+		return nil, fmt.Errorf("failed to list global backend services: %w", err)
+	}

-	svcs, err := c.Compute().RegionBackendServices().List(context.Background(), c.Project(), c.Region())
+	// list regional backendservices as well
+	regionalsvcs, err := c.Compute().BackendServices().List(context.Background(), c.Project(), c.Region())
 	if err != nil {
 		if gce.IsNotFound(err) {
 			klog.Infof("backend services not found, assuming none exist in project: %q region: %q", c.Project(), c.Region())
 			return nil, nil
 		}
-		return nil, fmt.Errorf("Failed to list backend services: %w", err)
+		return nil, fmt.Errorf("failed to list regional backend services: %w", err)
 	}
+	svcs = append(svcs, regionalsvcs...)
 	// TODO: cache, for efficiency, if needed.
 	// Find all relevant backend services by finding all the cluster's IGMs, and then
 	// listing all backend services in the project / region, then selecting
@@ -1104,7 +1128,15 @@ func (d *clusterDiscoveryGCE) listBackendServices() ([]*resources.Resource, erro
 			ID:   svc.Name,
 			Type: typeBackendService,
 			Deleter: func(cloud fi.Cloud, r *resources.Resource) error {
-				op, err := c.Compute().RegionBackendServices().Delete(c.Project(), c.Region(), svc.Name)
+				u, err := gce.ParseGoogleCloudURL(svc.SelfLink)
+				if err != nil {
+					return err
+				}
+				region := ""
+				if !u.Global {
+					region = u.Region
+				}
+				op, err := c.Compute().BackendServices().Delete(c.Project(), region, svc.Name)
 				if err != nil {
 					return err
 				}
@@ -1139,12 +1171,20 @@ func (d *clusterDiscoveryGCE) listHealthchecks() ([]*resources.Resource, error)
 	}
 	var hcResources []*resources.Resource
 	for hc := range hcs {
+		u, err := gce.ParseGoogleCloudURL(hc)
+		if err != nil {
+			return nil, err
+		}
+		region := ""
+		if !u.Global {
+			region = u.Region
+		}
 		hcResources = append(hcResources, &resources.Resource{
-			Name: gce.LastComponent(hc),
-			ID:   gce.LastComponent(hc),
+			Name: u.Name,
+			ID:   u.Name,
 			Type: typeHealthcheck,
 			Deleter: func(cloud fi.Cloud, r *resources.Resource) error {
-				op, err := c.Compute().RegionHealthChecks().Delete(c.Project(), c.Region(), gce.LastComponent(hc))
+				op, err := c.Compute().HealthChecks().Delete(u.Project, region, u.Name)
 				if err != nil {
 					return err
 				}
@@ -6,6 +6,7 @@ metadata:
 spec:
   api:
     loadBalancer:
+      class: Regional
       type: Public
   authorization:
     rbac: {}
@@ -6,6 +6,7 @@ metadata:
 spec:
   api:
     loadBalancer:
+      class: Regional
       type: Public
   authorization:
     rbac: {}