forked from Harwayne/knative-gcp
/
pool.go
133 lines (115 loc) · 3.2 KB
/
pool.go
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
/*
Copyright 2020 Google LLC
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package handler
import (
"context"
"net/http"
"strconv"
"sync"
"time"
"github.com/google/knative-gcp/pkg/logging"
"go.uber.org/zap"
)
const (
	// DefaultHealthCheckPort is the default port for checking sync pool health.
	// Callers of StartSyncPool may override it via the healthCheckPort argument.
	DefaultHealthCheckPort = 8080
)
// SyncPool is a handler pool that can be re-synced on demand.
type SyncPool interface {
	// SyncOnce syncs the handler pool once. It is called once at startup by
	// StartSyncPool and again on every sync signal received by watch.
	SyncOnce(ctx context.Context) error
}
// healthChecker reports the sync pool's health over HTTP based on how
// recently the pool last synced successfully.
type healthChecker struct {
	// mux guards lastReportTime, which is written by reportHealth and read
	// by lastTime from different goroutines.
	mux sync.RWMutex
	// lastReportTime is the time of the most recent successful sync report.
	lastReportTime time.Time
	// maxStaleDuration is how stale lastReportTime may be before the checker
	// reports unhealthy; zero means never stale.
	maxStaleDuration time.Duration
	// port is the TCP port the health check HTTP server listens on.
	port int
}
// reportHealth records the current time as the most recent healthy sync.
func (c *healthChecker) reportHealth() {
	now := time.Now()
	c.mux.Lock()
	c.lastReportTime = now
	c.mux.Unlock()
}
// lastTime returns the time of the most recent health report.
func (c *healthChecker) lastTime() time.Time {
	c.mux.RLock()
	t := c.lastReportTime
	c.mux.RUnlock()
	return t
}
// start runs the health check HTTP server until ctx is canceled, then
// gracefully shuts the server down. It blocks until shutdown completes.
func (c *healthChecker) start(ctx context.Context) {
	// Mark the pool healthy at startup so probes don't fail before the
	// first sync signal arrives.
	c.reportHealth()
	srv := &http.Server{
		Addr:    ":" + strconv.Itoa(c.port),
		Handler: c,
	}
	go func() {
		logging.FromContext(ctx).Info("Starting the sync pool health checker...")
		if err := srv.ListenAndServe(); err != nil && err != http.ErrServerClosed {
			logging.FromContext(ctx).Error("the sync pool health checker has stopped unexpectedly", zap.Error(err))
		}
	}()

	<-ctx.Done()
	// ctx is already canceled at this point; passing it to Shutdown would
	// make the graceful shutdown return immediately with context.Canceled
	// instead of draining in-flight requests. Use a fresh context with a
	// short deadline instead.
	shutdownCtx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()
	if err := srv.Shutdown(shutdownCtx); err != nil {
		logging.FromContext(ctx).Error("failed to shutdown the sync pool health checker", zap.Error(err))
	}
}
// ServeHTTP serves the health check endpoint at /healthz. It responds 200 OK
// while the last sync report is within maxStaleDuration, 503 once it is
// stale, and 404 for any other path.
func (c *healthChecker) ServeHTTP(w http.ResponseWriter, req *http.Request) {
	if req.URL.Path != "/healthz" {
		w.WriteHeader(http.StatusNotFound)
		return
	}
	// Zero maxStaleDuration means infinite.
	if c.maxStaleDuration == 0 {
		w.WriteHeader(http.StatusOK)
		return
	}
	// time.Since is the idiomatic (and vet-clean) form of time.Now().Sub.
	if time.Since(c.lastTime()) > c.maxStaleDuration {
		w.WriteHeader(http.StatusServiceUnavailable)
		return
	}
	w.WriteHeader(http.StatusOK)
}
// StartSyncPool performs an initial sync of the pool, starts the health
// check server, and (when syncSignal is non-nil) watches for further sync
// signals. It returns the pool on success, or the initial sync error.
func StartSyncPool(
	ctx context.Context,
	syncPool SyncPool,
	syncSignal <-chan struct{},
	maxStaleDuration time.Duration,
	healthCheckPort int,
) (SyncPool, error) {
	// The initial sync must succeed before anything else starts.
	if err := syncPool.SyncOnce(ctx); err != nil {
		return nil, err
	}

	checker := &healthChecker{
		maxStaleDuration: maxStaleDuration,
		port:             healthCheckPort,
	}
	go checker.start(ctx)

	// A nil signal channel means the caller does not want continuous syncing.
	if syncSignal != nil {
		go watch(ctx, syncPool, syncSignal, checker)
	}
	return syncPool, nil
}
// watch re-syncs the pool each time syncSignal fires and reports health on
// success. It runs until ctx is canceled.
func watch(ctx context.Context, syncPool SyncPool, syncSignal <-chan struct{}, c *healthChecker) {
	for {
		select {
		case <-ctx.Done():
			return
		case <-syncSignal:
			err := syncPool.SyncOnce(ctx)
			if err != nil {
				// Currently we don't really expect errors from SyncOnce.
				logging.FromContext(ctx).Error("failed to sync handlers pool on watch signal", zap.Error(err))
				continue
			}
			logging.FromContext(ctx).Debug("successfully synced handlers pool on watch signal")
			c.reportHealth()
		}
	}
}