gce: when using only ipv6, we don't need a CNI to set up the network. #15930

Closed
wants to merge 1 commit into from
94 changes: 92 additions & 2 deletions cmd/kops-controller/controllers/gceipam.go
@@ -18,14 +18,18 @@ package controllers

import (
"context"
"encoding/json"
"fmt"
"net"
"net/url"
"strings"

"github.com/go-logr/logr"
"google.golang.org/api/compute/v1"
corev1 "k8s.io/api/core/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
corev1client "k8s.io/client-go/kubernetes/typed/core/v1"
"k8s.io/klog/v2"
ctrl "sigs.k8s.io/controller-runtime"
@@ -37,8 +41,9 @@ import (
func NewGCEIPAMReconciler(mgr manager.Manager) (*GCEIPAMReconciler, error) {
klog.Info("starting gce ipam controller")
r := &GCEIPAMReconciler{
client: mgr.GetClient(),
log: ctrl.Log.WithName("controllers").WithName("gce-ipam"),
client: mgr.GetClient(),
fieldOwner: "kops-controller",
log: ctrl.Log.WithName("controllers").WithName("gce-ipam"),
}

coreClient, err := corev1client.NewForConfig(mgr.GetConfig())
@@ -61,6 +66,9 @@ type GCEIPAMReconciler struct {
// client is the controller-runtime client
client client.Client

// fieldOwner is the field-manager owner for fields that we apply
fieldOwner string

// log is a logr
log logr.Logger

@@ -139,6 +147,46 @@ func (r *GCEIPAMReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ct
}
}

if len(node.Spec.PodCIDRs) != 0 {
Review comment from @aojea (Member), Sep 19, 2023:
You can make this simpler: this field is already validated in the apiserver (https://github.com/kubernetes/kubernetes/blob/54d42e6a658f84ea66593d3107f41db6931a5fbf/pkg/apis/core/validation/validation.go#L4247-L4281), so if there are two values, they are IPv4 and IPv6.

Suggested change:
-	if len(node.Spec.PodCIDRs) != 0 {
+	// needs import: netutils "k8s.io/utils/net"
+	if len(node.Spec.PodCIDRs) != 1 {
+		return ctrl.Result{}, nil
+	}
+	if netutils.IsIPv4CIDR(node.Spec.PodCIDRs[0]) {
+		return ctrl.Result{}, nil
+	}
+	...
+	patch condition here to unset NetworkUnavailable
+	...

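For reference, a compiling version of that simplification could look like the sketch below. It is an illustration of the reviewer's idea rather than code from this PR, and it assumes the string-based helpers in k8s.io/utils/net (imported as netutils), since node.Spec.PodCIDRs is a slice of strings:

	// Sketch only: relies on the apiserver validation that node.Spec.PodCIDRs has
	// at most two entries and, when there are two, one per IP family.
	if len(node.Spec.PodCIDRs) != 1 {
		// Either nothing has been allocated yet, or the node is dual-stack (IPv4 + IPv6).
		return ctrl.Result{}, nil
	}
	if netutils.IsIPv4CIDRString(node.Spec.PodCIDRs[0]) {
		// IPv4-only node: a CNI/route is still required.
		return ctrl.Result{}, nil
	}
	// Single IPv6 podCIDR: clear the NetworkUnavailable condition here
	// (see patchStatusCondition further down).

The PR's implementation, which inspects every entry explicitly, continues below.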
allIPv6 := true

for _, podCIDR := range node.Spec.PodCIDRs {
_, cidr, err := net.ParseCIDR(podCIDR)
if err != nil {
klog.Warning("failed to parse podCIDR %q", podCIDR)
allIPv6 = false
continue
}

// Classify the CIDR by address family, treating IPv4-mapped IPv6 addresses as IPv6
if cidr.IP.To4() != nil && !strings.Contains(podCIDR, ":") {
// ipv4
allIPv6 = false
} else {
// ipv6
}
}

if allIPv6 {
// IPv6 does not require a route to be set up, so clear the node's NetworkUnavailable condition
for _, condition := range node.Status.Conditions {
if condition.Type == "NetworkUnavailable" {
if condition.Status == corev1.ConditionTrue {
newCondition := metav1.Condition{
Message: "Node has IPv6",
Review comment (Member):
Suggested change:
-	Message: "Node has IPv6",
+	Message: "Node is IPv6-only",

Status: metav1.ConditionFalse,
Reason: "RouteCreated",
Type: "NetworkUnavailable",
}
if err := patchStatusCondition(ctx, r.client, node, r.fieldOwner, newCondition); err != nil {
return ctrl.Result{}, fmt.Errorf("updating NetworkUnavailable condition: %w", err)
}
}
}
}
}
}

return ctrl.Result{}, nil
}

@@ -147,3 +195,45 @@ func (r *GCEIPAMReconciler) SetupWithManager(mgr ctrl.Manager) error {
For(&corev1.Node{}).
Complete(r)
}

type statusConditions struct {
Conditions []metav1.Condition `json:"conditions,omitempty"`
}

type objectStatusPatch struct {
APIVersion string `json:"apiVersion"`
Kind string `json:"kind"`
Metadata metav1.ObjectMeta `json:"metadata"`
Status statusConditions `json:"status,omitempty"`
}

// patchStatusCondition server-side-applies the node status to set the specified status condition.
func patchStatusCondition(ctx context.Context, kube client.Client, obj client.Object, fieldOwner string, condition metav1.Condition) error {
apiVersion, kind := obj.GetObjectKind().GroupVersionKind().ToAPIVersionAndKind()

klog.Infof("setting condition %v on %v %q", condition, kind, obj.GetName())
patch := &objectStatusPatch{
APIVersion: apiVersion,
Kind: kind,
Metadata: metav1.ObjectMeta{
Name: obj.GetName(),
Namespace: obj.GetNamespace(),
},
Status: statusConditions{
Conditions: []metav1.Condition{condition},
},
}

patchJSON, err := json.Marshal(patch)
if err != nil {
return fmt.Errorf("error building patch: %w", err)
}

klog.V(2).Infof("sending patch for %v %q: %q", kind, obj.GetName(), string(patchJSON))

if err := kube.Status().Patch(ctx, obj, client.RawPatch(types.ApplyPatchType, patchJSON), client.ForceOwnership, client.FieldOwner(fieldOwner)); err != nil {
return fmt.Errorf("applying patch to %v %v: %w", kind, obj.GetName(), err)
}

return nil
}
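
For illustration, the apply patch built above would marshal to roughly the following JSON for the NetworkUnavailable condition set in Reconcile (the node name is a placeholder, and zero-valued fields that are always serialized, such as creationTimestamp and lastTransitionTime, are left out for brevity):

	{
	  "apiVersion": "v1",
	  "kind": "Node",
	  "metadata": { "name": "<node-name>" },
	  "status": {
	    "conditions": [
	      {
	        "type": "NetworkUnavailable",
	        "status": "False",
	        "reason": "RouteCreated",
	        "message": "Node has IPv6"
	      }
	    ]
	  }
	}

Because this is a server-side apply with a dedicated field owner, kops-controller only takes ownership of the fields present in the patch, and re-applying the same condition is effectively a no-op.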