-
Notifications
You must be signed in to change notification settings - Fork 165
/
common.go
128 lines (107 loc) · 3.51 KB
/
common.go
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
// SPDX-License-Identifier: Apache-2.0
// SPDX-FileCopyrightText: 2021-Present The Zarf Authors
// Package k8s provides a client for interacting with a Kubernetes cluster.
package k8s
import (
"context"
"fmt"
"time"
"github.com/go-logr/logr/funcr"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/kubernetes"
"k8s.io/klog/v2"
// Include the cloud auth plugins
_ "k8s.io/client-go/plugin/pkg/client/auth"
"k8s.io/client-go/rest"
"k8s.io/client-go/tools/clientcmd"
pkgkubernetes "github.com/defenseunicorns/pkg/kubernetes"
)
const (
	// ZarfManagedByLabel is used to denote Zarf manages the lifecycle of a resource.
	ZarfManagedByLabel = "app.kubernetes.io/managed-by"
	// AgentLabel is used to give instructions to the Zarf agent.
	AgentLabel = "zarf.dev/agent"
)
// New creates a new K8s client.
//
// It redirects klog output (emitted internally by client-go) to the provided
// logger, connects to the currently active kube context, and builds a watcher
// for the resulting config. Returns a wrapped error if the cluster connection
// or watcher construction fails.
func New(logger Log) (*K8s, error) {
	// Route client-go's klog messages through the caller-supplied logger.
	klog.SetLogger(funcr.New(func(_, args string) {
		logger(args)
	}, funcr.Options{}))

	config, clientset, err := connect()
	if err != nil {
		return nil, fmt.Errorf("failed to connect to k8s cluster: %w", err)
	}

	watcher, err := pkgkubernetes.WatcherForConfig(config)
	if err != nil {
		// Wrap for context, consistent with the connect() error above.
		return nil, fmt.Errorf("failed to create watcher for k8s cluster: %w", err)
	}

	return &K8s{
		RestConfig: config,
		Clientset:  clientset,
		Watcher:    watcher,
		Log:        logger,
	}, nil
}
// WaitForHealthyCluster checks for an available K8s cluster every second until timeout.
//
// A cluster is considered healthy once it reports at least one node and at
// least one pod in the 'Succeeded' or 'Running' phase. Returns a wrapped
// context error if ctx is cancelled or times out before the cluster is healthy.
func (k *K8s) WaitForHealthyCluster(ctx context.Context) error {
	const waitDuration = 1 * time.Second

	// Fire immediately on the first iteration; subsequent polls wait waitDuration.
	timer := time.NewTimer(0)
	defer timer.Stop()

	for {
		select {
		case <-ctx.Done():
			return fmt.Errorf("error waiting for cluster to report healthy: %w", ctx.Err())
		case <-timer.C:
			// Lazily (re)establish the connection if one is not already held.
			if k.RestConfig == nil || k.Clientset == nil {
				config, clientset, err := connect()
				if err != nil {
					// %v, not %w: %w is only meaningful in fmt.Errorf; a
					// printf-style logger would render it as %!w(...).
					k.Log("Cluster connection not available yet: %v", err)
					timer.Reset(waitDuration)
					continue
				}

				k.RestConfig = config
				k.Clientset = clientset
			}

			// Make sure there is at least one running Node
			nodeList, err := k.Clientset.CoreV1().Nodes().List(ctx, metav1.ListOptions{})
			if err != nil || len(nodeList.Items) < 1 {
				k.Log("No nodes reporting healthy yet: %v\n", err)
				timer.Reset(waitDuration)
				continue
			}

			// Get the cluster pod list
			pods, err := k.Clientset.CoreV1().Pods(corev1.NamespaceAll).List(ctx, metav1.ListOptions{})
			if err != nil {
				// %v, not %w: see note above — Log is not fmt.Errorf.
				k.Log("Could not get the pod list: %v", err)
				timer.Reset(waitDuration)
				continue
			}

			// Check that at least one pod is in the 'succeeded' or 'running' state
			for _, pod := range pods.Items {
				if pod.Status.Phase == corev1.PodSucceeded || pod.Status.Phase == corev1.PodRunning {
					return nil
				}
			}

			k.Log("No pods reported 'succeeded' or 'running' state yet.")
			timer.Reset(waitDuration)
		}
	}
}
// connect builds a REST config and clientset from the currently active kube
// context, resolved exactly the way "kubectl" resolves it when no extra flags
// like "--kubeconfig" are passed: the default client-go loading rules, which
// honor the KUBECONFIG environment variable.
func connect() (*rest.Config, *kubernetes.Clientset, error) {
	loadingRules := clientcmd.NewDefaultClientConfigLoadingRules()
	overrides := &clientcmd.ConfigOverrides{}

	// Defer the actual kubeconfig load until ClientConfig() is invoked.
	cfg, err := clientcmd.NewNonInteractiveDeferredLoadingClientConfig(loadingRules, overrides).ClientConfig()
	if err != nil {
		return nil, nil, err
	}

	cs, err := kubernetes.NewForConfig(cfg)
	if err != nil {
		return nil, nil, err
	}

	return cfg, cs, nil
}