-
Notifications
You must be signed in to change notification settings - Fork 1.6k
/
kubectl_forwarder.go
348 lines (307 loc) · 11.2 KB
/
kubectl_forwarder.go
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
/*
Copyright 2019 The Skaffold Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package portforward
import (
"bufio"
"bytes"
"context"
"fmt"
"io"
"os"
"sort"
"strings"
"sync/atomic"
"time"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/util/intstr"
"github.com/GoogleContainerTools/skaffold/v2/pkg/skaffold/kubectl"
kubernetesclient "github.com/GoogleContainerTools/skaffold/v2/pkg/skaffold/kubernetes/client"
"github.com/GoogleContainerTools/skaffold/v2/pkg/skaffold/output"
"github.com/GoogleContainerTools/skaffold/v2/pkg/skaffold/output/log"
schemautil "github.com/GoogleContainerTools/skaffold/v2/pkg/skaffold/schema/util"
"github.com/GoogleContainerTools/skaffold/v2/pkg/skaffold/util"
)
// EntryForwarder is the lifecycle contract for port-forwarding entries:
// Start is called once with the writer for user-facing messages, after
// which individual entries can be forwarded and later terminated.
type EntryForwarder interface {
	Start(io.Writer)
	Forward(parentCtx context.Context, pfe *portForwardEntry) error
	Terminate(p *portForwardEntry)
}
// KubectlForwarder forwards ports by shelling out to `kubectl port-forward`.
type KubectlForwarder struct {
	// started is set to 1 (atomically) by Start; forward refuses to run before then.
	started int32
	// out receives user-facing status messages (port taken / recovered).
	out io.Writer
	// kubectl is the CLI wrapper used to spawn port-forward commands.
	kubectl *kubectl.CLI
}
// NewKubectlForwarder returns a new KubectlForwarder that shells out
// through the given kubectl CLI wrapper.
func NewKubectlForwarder(cli *kubectl.CLI) *KubectlForwarder {
	forwarder := &KubectlForwarder{}
	forwarder.kubectl = cli
	return forwarder
}
// For testing: these package-level seams are swapped out by unit tests.
var (
	// isPortFree reports whether a local port is available.
	isPortFree = util.IsPortFree
	// findNewestPodForSvc resolves a service to its newest backing pod.
	findNewestPodForSvc = findNewestPodForService
	// deferFunc runs when the forwarding goroutine exits.
	deferFunc = func() {}
	// waitPortNotFree is the retry delay while the local port is taken.
	waitPortNotFree = 5 * time.Second
	// waitErrorLogs is the poll interval for scanning kubectl's output.
	waitErrorLogs = 1 * time.Second
)
// Start records the writer used for user-facing messages and marks the
// forwarder as ready, allowing Forward calls to proceed.
func (k *KubectlForwarder) Start(out io.Writer) {
	k.out = out
	atomic.StoreInt32(&k.started, 1)
}
// Forward port-forwards a pod using kubectl port-forward in the background.
// It kills the command on errors in the kubectl port-forward log,
// restarts the command if it was not cancelled by skaffold, and
// retries in case the port is taken.
func (k *KubectlForwarder) Forward(parentCtx context.Context, pfe *portForwardEntry) error {
	errChan := make(chan error, 1)
	go k.forward(parentCtx, pfe, errChan)

	var name string
	if pfe != nil {
		name = pfe.resource.Name
	}
	logger := log.Entry(parentCtx)
	logger.Tracef("KubectlForwarder.Forward(%s): waiting on errChan", name)

	// Whichever comes first wins: the background forwarder reporting a
	// result, or the parent context being cancelled (treated as success).
	select {
	case err := <-errChan:
		logger.Tracef("KubectlForwarder.Forward(%s): got error on errChan, returning: %+v", name, err)
		return err
	case <-parentCtx.Done():
		logger.Tracef("KubectlForwarder.Forward(%s): parentCtx canceled, returning nil error", name)
		return nil
	}
}
// forward runs `kubectl port-forward` for pfe in a retry loop until the entry
// is terminated or the context is cancelled. Results are reported on errChan:
// a command-failure error, or nil once forwarding is established / cancelled.
// Sends to errChan are non-blocking where the caller may have gone away.
//
// Fix: the per-iteration context derived via context.WithCancel was never
// cancelled when cmd.Start() failed or when cmd.Wait() returned nil — leaking
// the derived context and, on a clean kubectl exit, leaving the previous
// iteration's monitorLogs goroutine running for every restart. cancel() is
// now called on both retry paths (after the parent-cancellation check, which
// must inspect ctx.Err() before the context is cancelled locally).
func (k *KubectlForwarder) forward(ctx context.Context, pfe *portForwardEntry, errChan chan error) {
	if atomic.LoadInt32(&k.started) == 0 {
		errChan <- fmt.Errorf("Forward() called before kubectl forwarder was started")
		return
	}
	var notifiedUser bool
	defer deferFunc()

	for {
		pfe.terminationLock.Lock()
		if pfe.terminated {
			log.Entry(ctx).Debugf("port forwarding %v was cancelled...", pfe)
			pfe.terminationLock.Unlock()
			errChan <- nil
			return
		}
		pfe.terminationLock.Unlock()

		if !isPortFree(util.Loopback, pfe.localPort) {
			// Assuming that Skaffold brokered ports don't overlap, this has to be an external process that started
			// since the dev loop kicked off. We are notifying the user in the hope that they can fix it
			output.Red.Fprintf(k.out, "failed to port forward %v, port %d is taken, retrying...\n", pfe, pfe.localPort)
			notifiedUser = true
			time.Sleep(waitPortNotFree)
			continue
		}

		if notifiedUser {
			output.Green.Fprintf(k.out, "port forwarding %v recovered on port %d\n", pfe, pfe.localPort)
			notifiedUser = false
		}

		// Derive a per-attempt context so Terminate (via pfe.cancel) and the
		// log monitor can kill just this kubectl invocation.
		ctx, cancel := context.WithCancel(ctx)
		pfe.cancel = cancel

		args := portForwardArgs(ctx, k.kubectl.KubeContext, pfe)
		var buf bytes.Buffer
		cmd := k.kubectl.CommandWithStrictCancellation(ctx, "port-forward", args...)
		cmd.Stdout = &buf
		cmd.Stderr = &buf

		log.Entry(ctx).Debugf("Running command: %s", cmd.Args)
		if err := cmd.Start(); err != nil {
			if ctx.Err() == context.Canceled {
				log.Entry(ctx).Debugf("couldn't start %v due to context cancellation", pfe)
				return
			}
			// Retry on exit at Start()
			log.Entry(ctx).Debugf("error starting port forwarding %v: %s, output: %s", pfe, err, buf.String())
			// Release this attempt's derived context before retrying;
			// a fresh one is created on the next iteration.
			cancel()
			time.Sleep(500 * time.Millisecond)
			continue
		}

		// Kill kubectl on port forwarding error logs
		go k.monitorLogs(ctx, &buf, cmd, pfe, errChan)
		if err := cmd.Wait(); err != nil {
			if ctx.Err() == context.Canceled {
				log.Entry(ctx).Debugf("terminated %v due to context cancellation", pfe)
				return
			}
			// To make sure that the log monitor gets cleared up
			cancel()
			s := buf.String()
			log.Entry(ctx).Debugf("port forwarding %v got terminated: %s, output: %s", pfe, err, s)
			if !strings.Contains(s, "address already in use") {
				select {
				case errChan <- fmt.Errorf("port forwarding %v got terminated: output: %s", pfe, s):
				default:
				}
			}
			time.Sleep(500 * time.Millisecond)
			continue
		}
		// kubectl exited cleanly: stop this attempt's log monitor and release
		// the derived context before looping to restart the forward.
		cancel()
	}
}
// portForwardArgs assembles the kubectl arguments for port-forwarding pfe.
// Services are normally resolved to their newest backing pod (see
// https://github.com/GoogleContainerTools/skaffold/issues/4522); setting
// SKAFFOLD_DISABLE_SERVICE_FORWARDING — or failing to resolve a pod —
// falls back to letting kubectl target the resource directly.
func portForwardArgs(ctx context.Context, kubeContext string, pfe *portForwardEntry) []string {
	args := []string{"--pod-running-timeout", "1s", "--namespace", pfe.resource.Namespace}

	_, disableServiceForwarding := os.LookupEnv("SKAFFOLD_DISABLE_SERVICE_FORWARDING")
	forwardedToPod := false
	if pfe.resource.Type == "service" && !disableServiceForwarding {
		// Services need special handling: https://github.com/GoogleContainerTools/skaffold/issues/4522
		if podName, remotePort, err := findNewestPodForSvc(ctx, kubeContext, pfe.resource.Namespace, pfe.resource.Name, pfe.resource.Port); err == nil {
			args = append(args, fmt.Sprintf("pod/%s", podName), fmt.Sprintf("%d:%d", pfe.localPort, remotePort))
			forwardedToPod = true
		} else {
			// fall back below and let kubectl try to handle it
			log.Entry(ctx).Warnf("could not map pods to service %s/%s/%s: %v", pfe.resource.Namespace, pfe.resource.Name, pfe.resource.Port.String(), err)
		}
	}
	if !forwardedToPod {
		args = append(args, fmt.Sprintf("%s/%s", pfe.resource.Type, pfe.resource.Name), fmt.Sprintf("%d:%s", pfe.localPort, pfe.resource.Port.String()))
	}

	if pfe.resource.Address != "" && pfe.resource.Address != util.Loopback {
		args = append(args, "--address", pfe.resource.Address)
	}
	return args
}
// Terminate terminates an existing kubectl port-forward command using SIGTERM:
// it cancels the entry's in-flight kubectl invocation (if any) and marks the
// entry as terminated so the retry loop stops, all under the termination lock.
func (*KubectlForwarder) Terminate(p *portForwardEntry) {
	log.Entry(context.TODO()).Debugf("Terminating port-forward %v", p)
	p.terminationLock.Lock()
	defer p.terminationLock.Unlock()

	if cancel := p.cancel; cancel != nil {
		cancel()
	}
	p.terminated = true
}
// monitorLogs polls the combined output of a kubectl port-forward command.
// When it sees a known error line it terminates the command so the caller's
// retry loop can re-establish the forward (reporting the failure on errCh);
// a "Forwarding from" line signals success with a non-blocking nil send.
func (*KubectlForwarder) monitorLogs(ctx context.Context, logs io.Reader, cmd *kubectl.Cmd, p *portForwardEntry, errCh chan error) {
	ticker := time.NewTicker(waitErrorLogs)
	defer ticker.Stop()

	reader := bufio.NewReader(logs)
	for {
		select {
		case <-ctx.Done():
			return
		case <-ticker.C:
			line, _ := reader.ReadString('\n')
			if line == "" {
				continue
			}
			log.Entry(ctx).Tracef("[port-forward] %s", line)

			sawError := strings.Contains(line, "error forwarding port") ||
				strings.Contains(line, "unable to forward") ||
				strings.Contains(line, "error upgrading connection")
			if sawError {
				// kubectl is having an error. retry the command
				log.Entry(ctx).Tracef("killing port forwarding %v", p)
				if killErr := cmd.Terminate(); killErr != nil {
					log.Entry(ctx).Tracef("failed to kill port forwarding %v, err: %s", p, killErr)
				}
				select {
				case errCh <- fmt.Errorf("port forwarding %v got terminated: output: %s", p, line):
				default:
				}
				return
			}
			if strings.Contains(line, "Forwarding from") {
				select {
				case errCh <- nil:
				default:
				}
			}
		}
	}
}
// findNewestPodForService queries the cluster to find a pod that fulfills the given service, giving
// preference to pods that were most recently created. This is in contrast to the selection algorithm
// used by kubectl (see https://github.com/GoogleContainerTools/skaffold/issues/4522 for details).
// It returns the chosen pod's name and resolved container port.
func findNewestPodForService(ctx context.Context, kubeContext, ns, serviceName string, servicePort schemautil.IntOrString) (string, int, error) {
	client, err := kubernetesclient.Client(kubeContext)
	if err != nil {
		return "", -1, fmt.Errorf("getting Kubernetes client: %w", err)
	}
	svc, err := client.CoreV1().Services(ns).Get(ctx, serviceName, metav1.GetOptions{})
	if err != nil {
		return "", -1, fmt.Errorf("getting service %s/%s: %w", ns, serviceName, err)
	}
	svcPort, err := findServicePort(*svc, servicePort)
	if err != nil {
		return "", -1, err
	}

	// Look for pods with matching selectors and that are not terminated.
	// We cannot use field selectors as they are only supported in 1.16
	// https://github.com/flant/shell-operator/blob/8fa3c3b8cfeb1ddb37b070b7a871561fdffe788b/HOOKS.md#fieldselector
	selector := labels.Set(svc.Spec.Selector).AsSelector().String()
	podsList, err := client.CoreV1().Pods(ns).List(ctx, metav1.ListOptions{LabelSelector: selector})
	if err != nil {
		return "", -1, fmt.Errorf("listing pods: %w", err)
	}
	var candidates []corev1.Pod
	for _, pod := range podsList.Items {
		switch pod.Status.Phase {
		case corev1.PodPending, corev1.PodRunning:
			candidates = append(candidates, pod)
		}
	}
	sort.Slice(candidates, newestPodsFirst(candidates))

	if log.IsTraceLevelEnabled() {
		var names []string
		for _, p := range candidates {
			names = append(names, fmt.Sprintf("(pod:%q phase:%v created:%v)", p.Name, p.Status.Phase, p.CreationTimestamp))
		}
		log.Entry(ctx).Tracef("service %s/%s maps to %d pods: %v", serviceName, servicePort.String(), len(candidates), names)
	}

	for _, p := range candidates {
		if targetPort := findTargetPort(svcPort, p); targetPort > 0 {
			log.Entry(ctx).Debugf("Forwarding service %s/%s to pod %s/%d", serviceName, servicePort.String(), p.Name, targetPort)
			return p.Name, targetPort, nil
		}
	}
	return "", -1, fmt.Errorf("no pods match service %s/%s", serviceName, servicePort.String())
}
// newestPodsFirst returns a less-func for sort.Slice that orders pods from
// most recently created to oldest.
func newestPodsFirst(pods []corev1.Pod) func(int, int) bool {
	return func(i, j int) bool {
		// "i sorts before j" when j was created earlier than i.
		return pods[j].CreationTimestamp.Time.Before(pods[i].CreationTimestamp.Time)
	}
}
// findServicePort locates the ServicePort on svc matching servicePort,
// which may be specified either by number or by name.
func findServicePort(svc corev1.Service, servicePort schemautil.IntOrString) (corev1.ServicePort, error) {
	for _, candidate := range svc.Spec.Ports {
		matchByNumber := servicePort.Type == schemautil.Int && candidate.Port == int32(servicePort.IntVal)
		matchByName := servicePort.Type == schemautil.String && candidate.Name == servicePort.StrVal
		if matchByNumber || matchByName {
			return candidate, nil
		}
	}
	return corev1.ServicePort{}, fmt.Errorf("service %q does not expose port %s", svc.Name, servicePort.String())
}
// findTargetPort resolves the container port that svcPort targets on pod.
// Numeric target ports are returned directly; named target ports are looked
// up in the pod's container specs. Returns -1 when nothing matches.
func findTargetPort(svcPort corev1.ServicePort, pod corev1.Pod) int {
	if svcPort.TargetPort.Type == intstr.Int {
		return svcPort.TargetPort.IntValue()
	}
	wanted := svcPort.TargetPort.StrVal
	for _, container := range pod.Spec.Containers {
		for _, port := range container.Ports {
			if port.Name == wanted {
				return int(port.ContainerPort)
			}
		}
	}
	return -1
}