main.go
package main

import (
	"flag"

	"github.com/golang/glog"
	"github.com/mittwald/kube-httpcache/controller"
	"github.com/mittwald/kube-httpcache/signaller"
	"github.com/mittwald/kube-httpcache/watcher"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/rest"
	"k8s.io/client-go/tools/clientcmd"
)
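
// opts holds all command-line options for kube-httpcache.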
var opts KubeHTTPProxyFlags
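
// init redirects glog output to stderr, which suits running in a container.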
func init() {
	flag.Set("logtostderr", "true")
}
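
// main wires together the endpoint watchers, the VCL template watcher,
// the signaller, and the Varnish controller.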
func main() {
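	// Parse command-line flags and log the effective configuration.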
	opts.Parse()
	glog.Infof("running kube-httpcache with following options: %+v", opts)

	var config *rest.Config
	var err error
	var client kubernetes.Interface
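
	// Use in-cluster configuration unless a kubeconfig file was given
	// on the command line.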
	if opts.Kubernetes.Config == "" {
		glog.Infof("using in-cluster configuration")
		config, err = rest.InClusterConfig()
	} else {
		glog.Infof("using configuration from '%s'", opts.Kubernetes.Config)
		config, err = clientcmd.BuildConfigFromFlags("", opts.Kubernetes.Config)
	}
	if err != nil {
		panic(err)
	}
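
	// NewForConfigOrDie panics when no client can be built from the configuration.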
	client = kubernetes.NewForConfigOrDie(config)
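
	// Watch the frontend service's endpoints if requested; the channels
	// remain nil (and never fire) when watching is disabled.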
	var frontendUpdates chan *watcher.EndpointConfig
	var frontendErrors chan error
	if opts.Frontend.Watch {
		frontendWatcher := watcher.NewEndpointWatcher(
			client,
			opts.Frontend.Namespace,
			opts.Frontend.Service,
			opts.Frontend.PortName,
			opts.Kubernetes.RetryBackoff,
		)
		frontendUpdates, frontendErrors = frontendWatcher.Run()
	}
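
	// Do the same for the backend service whose endpoints Varnish caches.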
	var backendUpdates chan *watcher.EndpointConfig
	var backendErrors chan error
	if opts.Backend.Watch {
		backendWatcher := watcher.NewEndpointWatcher(
			client,
			opts.Backend.Namespace,
			opts.Backend.Service,
			opts.Backend.PortName,
			opts.Kubernetes.RetryBackoff,
		)
		backendUpdates, backendErrors = backendWatcher.Run()
	}
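
	// Watch the VCL template for changes (optionally by polling).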
	templateWatcher := watcher.MustNewTemplateWatcher(opts.Varnish.VCLTemplate, opts.Varnish.VCLTemplatePoll)
	templateUpdates, templateErrors := templateWatcher.Run()
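
	// Start the signaller endpoint if enabled; it broadcasts cache
	// invalidation requests to the Varnish instances.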
	var varnishSignaller *signaller.Signaller
	var varnishSignallerErrors chan error
	if opts.Signaller.Enable {
		varnishSignaller = signaller.NewSignaller(
			opts.Signaller.Address,
			opts.Signaller.Port,
			opts.Signaller.WorkersCount,
			opts.Signaller.MaxRetries,
			opts.Signaller.RetryBackoff,
		)
		varnishSignallerErrors = varnishSignaller.GetErrors()
		go func() {
			// Use a local err here: writing to main's err from this
			// goroutine would be a data race.
			if err := varnishSignaller.Run(); err != nil {
				panic(err)
			}
		}()
	}
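
	// Fan in errors from all watchers and the signaller. Reading from a
	// nil channel blocks forever, so disabled components are simply never
	// selected.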
	go func() {
		for {
			select {
			case err := <-frontendErrors:
				glog.Errorf("error while watching frontends: %s", err.Error())
			case err := <-backendErrors:
				glog.Errorf("error while watching backends: %s", err.Error())
			case err := <-templateErrors:
				glog.Errorf("error while watching template changes: %s", err.Error())
			case err := <-varnishSignallerErrors:
				glog.Errorf("error while running varnish signaller: %s", err.Error())
			}
		}
	}()
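
	// Wire everything into the Varnish controller, which renders the VCL
	// template and manages the Varnish process.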
	varnishController, err := controller.NewVarnishController(
		opts.Varnish.SecretFile,
		opts.Varnish.Storage,
		opts.Frontend.Address,
		opts.Frontend.Port,
		opts.Admin.Address,
		opts.Admin.Port,
		frontendUpdates,
		backendUpdates,
		templateUpdates,
		varnishSignaller,
		opts.Varnish.VCLTemplate,
	)
	if err != nil {
		panic(err)
	}
	err = varnishController.Run()
	if err != nil {
		panic(err)
	}
}