/
handler.go
213 lines (196 loc) · 6.21 KB
/
handler.go
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
package haproxy
import (
	"os"
	"os/exec"
	"path/filepath"
	"sort"
	"strings"
	"text/template"
	"time"

	log "github.com/sirupsen/logrus"
	"github.com/vsk8s/k8router/pkg/config"
	"github.com/vsk8s/k8router/pkg/state"
)
// Handler takes care of assembling all cluster state and then uses it to write a HAProxy config
type Handler struct {
// Parsed template for the HAProxy config drop-in
template *template.Template
// Reference to application configuration (paths, IPs, certificates)
config config.Config
// Channel with state updates from clusters; consumed by the event loop
updates chan state.ClusterState
// Channel internally used to stop our goroutine (see Start/Stop)
stop chan bool
// Map of cluster name to that cluster's most recent state
clusterState map[string]state.ClusterState
// Number of changes since the last time we wrote everything to disk;
// the event loop only rebuilds/rewrites when this is > 0
numChanges int
// Current cluster state prepared for templating
templateInfo TemplateInfo
// Debug use only: Can be used to be notified when stuff is written to disk.
// When non-nil, the systemctl reload in updateConfig is skipped.
debugFileEventChannel chan bool
}
// Init initializes a new Handler from the given update channel and application
// configuration. It parses the HAProxy config template from
// config.HAProxyTemplatePath and returns an error if parsing fails.
func Init(updates chan state.ClusterState, config config.Config) (*Handler, error) {
	// Custom template functions must be registered *before* parsing: a template
	// referencing StringJoin would otherwise fail to parse with
	// "function not defined". The template name must match the file's base name
	// so that Execute on the ParseFiles result renders the right template.
	parsedTemplate, err := template.New(filepath.Base(config.HAProxyTemplatePath)).
		Funcs(template.FuncMap{"StringJoin": strings.Join}).
		ParseFiles(config.HAProxyTemplatePath)
	if err != nil {
		return nil, err
	}
	return &Handler{
		updates:      updates,
		numChanges:   0,
		template:     parsedTemplate,
		clusterState: make(map[string]state.ClusterState),
		config:       config,
		stop:         make(chan bool),
	}, nil
}
// updateConfig writes the current templateInfo to the HAProxy drop-in path and,
// outside of tests, asks systemd to reload HAProxy. Any failure is fatal.
func (h *Handler) updateConfig() {
	log.Debug("Writing myConfigFile")
	// TODO: Respect file mode setting
	myConfigFile, err := os.OpenFile(h.config.HAProxyDropinPath, os.O_TRUNC|os.O_CREATE|os.O_WRONLY, 0644)
	if err != nil {
		log.WithField("path", h.config.HAProxyDropinPath).WithError(err).Fatal(
			"Couldn't open haproxy dropin path for writing")
	}
	if err = h.template.Execute(myConfigFile, h.templateInfo); err != nil {
		// Best-effort close before exiting; log.Fatal terminates the process.
		_ = myConfigFile.Close()
		log.WithError(err).Fatal("Couldn't template haproxy myConfigFile")
	}
	// Close (and thereby flush) the file *before* reloading HAProxy so the
	// daemon never reads a half-written config. The file was previously
	// leaked on every rewrite.
	if err = myConfigFile.Close(); err != nil {
		log.WithError(err).Fatal("Couldn't close haproxy dropin file")
	}
	// TODO: Replace with systemd API
	if h.debugFileEventChannel == nil {
		// We're not debugging/testing
		err = exec.Command("sudo", "/bin/systemctl", "reload", "haproxy.service").Run()
		if err != nil {
			log.WithError(err).Fatal("Couldn't reload haproxy")
		}
	}
}
// rebuildConfig regenerates h.templateInfo from the current cluster state and
// the configured certificates.
func (h *Handler) rebuildConfig() {
	/* The HAProxy config we write works (simplified) like this:
	 * * There is a frontend that splits request according to SNI
	 * * For each certificate, we have a backend where those SNI request go to and another frontend with that cert
	 * * Each of these frontends does "normal" host-style case distinction and then
	 * * routes to a combination of backends
	 */
	// Step 1: For each Ingress, which backends does it have?
	hostToClusters := map[string][]string{}
	for _, cluster := range h.clusterState {
		for _, ingress := range cluster.Ingresses {
			for _, host := range ingress.Hosts {
				hostToClusters[host] = append(hostToClusters[host], cluster.Name)
			}
		}
	}
	// Step 2: Make a map of backend combinations to ips and of hosts to backend combinations
	hostToBackend := map[string]string{}
	backendCombinationList := map[string][]Backend{}
	for host, clusters := range hostToClusters {
		// Sort so that the same set of clusters always yields the same key,
		// regardless of map iteration order.
		sort.Strings(clusters)
		key := strings.Join(clusters, "-")
		if _, ok := backendCombinationList[key]; !ok {
			// We haven't seen this particular backend combination yet
			var backends []Backend
			for _, cluster := range clusters {
				for _, backend := range h.clusterState[cluster].Backends {
					backends = append(backends, Backend{
						IP:   backend.IP,
						Name: backend.Name,
					})
				}
			}
			backendCombinationList[key] = backends
		}
		hostToBackend[host] = key
	}
	cfg := TemplateInfo{
		SniList:                make(map[string]SniDetail),
		BackendCombinationList: backendCombinationList,
		HostToBackend:          hostToBackend,
		IPs:                    h.config.IPs,
	}
	// Step 3: Which SNIs do we have in our certs (first frontend and it's backends)
	localForwardPort := 12345 // TODO(uubk): Make configurable
	hostToCert := map[string]string{}
	for _, cert := range h.config.Certificates {
		// For each host: Figure out whether we actually have a backend there
		var actuallyUsedHosts []string
		isWildcard := false
		for _, host := range cert.Domains {
			if strings.Contains(host, "*") {
				isWildcard = true
				// Match any known host under the wildcard's suffix,
				// e.g. "*.example.org" -> suffix ".example.org"
				suffix := strings.Trim(host, "*")
				for actualHost := range hostToBackend {
					if strings.HasSuffix(actualHost, suffix) {
						actuallyUsedHosts = append(actuallyUsedHosts, actualHost)
						hostToCert[actualHost] = cert.Name
					}
				}
			} else {
				if _, ok := hostToBackend[host]; ok {
					actuallyUsedHosts = append(actuallyUsedHosts, host)
					hostToCert[host] = cert.Name
				}
			}
		}
		currentCert := SniDetail{
			Domains:          actuallyUsedHosts,
			IsWildcard:       isWildcard,
			Path:             cert.Cert,
			LocalForwardPort: localForwardPort,
		}
		cfg.SniList[cert.Name] = currentCert
		localForwardPort++
		if isWildcard {
			// NOTE(review): if several certs contain wildcards, the last one
			// iterated wins here — confirm that is intended.
			cfg.DefaultWildcardCert = cert.Name
		}
	}
	// Check whether we have hosts without certs
	for host := range hostToBackend {
		if _, ok := hostToCert[host]; !ok {
			log.WithField("host", host).Warning("Host skipped because it is not covered by any certificate!")
		}
	}
	h.templateInfo = cfg
}
// eventLoop is the handler's main loop: it folds incoming cluster-state updates
// into clusterState and, once per second, rebuilds and writes the HAProxy
// config if anything changed. It returns when a value arrives on h.stop.
func (h *Handler) eventLoop() {
	updateTicks := time.NewTicker(1 * time.Second)
	// Release the ticker's resources when the loop exits (it was previously
	// never stopped).
	defer updateTicks.Stop()
	for {
		select {
		case <-h.stop:
			log.Debug("Returning from event loop after stop request")
			return
		case event := <-h.updates:
			// Only count the event as a change if it actually differs from the
			// state we already hold for that cluster.
			stateObj := h.clusterState[event.Name]
			if !state.IsClusterStateEquivalent(&stateObj, &event) {
				h.clusterState[event.Name] = event
				h.numChanges++
			}
		case <-updateTicks.C:
			if h.numChanges > 0 {
				// There is something to do
				h.numChanges = 0
				log.WithField("clusterState", h.clusterState).Debug("Rebuilding config")
				h.rebuildConfig()
				log.WithField("templateInfo", h.templateInfo).Debug("Templating config")
				h.updateConfig()
				if h.debugFileEventChannel != nil {
					h.debugFileEventChannel <- true
				}
			}
		}
	}
}
// Start handling events by launching the event loop in a background goroutine.
// It returns immediately; use Stop to terminate the loop.
func (h *Handler) Start() {
go h.eventLoop()
}
// Stop handling events by signalling the event loop to return.
// NOTE(review): this send blocks until the loop receives it; calling Stop when
// the loop is not running (or calling it twice) would block forever — confirm
// callers only invoke it once after Start.
func (h *Handler) Stop() {
h.stop <- true
}