package main

import (
	"context"
	"flag"
	"os"
	"sync"
	"time"

	"github.com/kelseyhightower/envconfig"
	"github.com/rancher/submariner/pkg/cableengine"
	"github.com/rancher/submariner/pkg/cableengine/ipsec"
	submarinerClientset "github.com/rancher/submariner/pkg/client/clientset/versioned"
	submarinerInformers "github.com/rancher/submariner/pkg/client/informers/externalversions"
	"github.com/rancher/submariner/pkg/controllers/datastoresyncer"
	"github.com/rancher/submariner/pkg/controllers/tunnel"
	"github.com/rancher/submariner/pkg/datastore"
	subk8s "github.com/rancher/submariner/pkg/datastore/kubernetes"
	"github.com/rancher/submariner/pkg/datastore/phpapi"
	"github.com/rancher/submariner/pkg/signals"
	"github.com/rancher/submariner/pkg/types"
	"github.com/rancher/submariner/pkg/util"
	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	kubeInformers "k8s.io/client-go/informers"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/kubernetes/scheme"
	"k8s.io/client-go/rest"
	"k8s.io/client-go/tools/clientcmd"
	"k8s.io/client-go/tools/leaderelection"
	"k8s.io/client-go/tools/leaderelection/resourcelock"
	"k8s.io/client-go/tools/record"
	"k8s.io/klog"
)

var (
	localMasterUrl  string
	localKubeconfig string
)

func init() {
	flag.StringVar(&localKubeconfig, "kubeconfig", "", "Path to kubeconfig of local cluster. Only required if out-of-cluster.")
	flag.StringVar(&localMasterUrl, "master", "", "The address of the Kubernetes API server. Overrides any value in kubeconfig. Only required if out-of-cluster.")
}
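
// main loads the engine configuration from SUBMARINER_* environment
// variables and the kubeconfig flags, builds the Kubernetes and Submariner
// clientsets, and then runs the controllers under leader election so that
// only one engine replica is active at a time.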
func main() {
	klog.InitFlags(nil)
	flag.Parse()

	klog.V(2).Info("Starting submariner")

	// Set up signals so we handle the first shutdown signal gracefully.
	stopCh := signals.SetupSignalHandler()

	var ss types.SubmarinerSpecification
	err := envconfig.Process("submariner", &ss)
	if err != nil {
		klog.Fatal(err)
	}

	cfg, err := clientcmd.BuildConfigFromFlags(localMasterUrl, localKubeconfig)
	if err != nil {
		klog.Fatalf("Error building kubeconfig: %s", err.Error())
	}

	kubeClient, err := kubernetes.NewForConfig(cfg)
	if err != nil {
		klog.Fatalf("Error building kubernetes clientset: %s", err.Error())
	}

	submarinerClient, err := submarinerClientset.NewForConfig(cfg)
	if err != nil {
		klog.Fatalf("Error building submariner clientset: %s", err.Error())
	}

	// Both informer factories are scoped to the engine's own namespace and
	// resync every 30 seconds.
	kubeInformerFactory := kubeInformers.NewSharedInformerFactoryWithOptions(kubeClient, time.Second*30,
		kubeInformers.WithNamespace(ss.Namespace))
	submarinerInformerFactory := submarinerInformers.NewSharedInformerFactoryWithOptions(submarinerClient, time.Second*30,
		submarinerInformers.WithNamespace(ss.Namespace))
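
	// start contains everything that should only run on the elected leader:
	// the IPsec cable engine, the tunnel controller, and the datastore
	// syncer all share the same stop channel and are waited on together.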
	start := func(context.Context) {
		var ce cableengine.CableEngine

		localCluster, err := util.GetLocalCluster(ss)
		if err != nil {
			klog.Fatalf("Fatal error occurred while retrieving local cluster: %v", err)
		}

		localEndpoint, err := util.GetLocalEndpoint(ss.ClusterId, "ipsec", nil, ss.NatEnabled,
			append(ss.ServiceCidr, ss.ClusterCidr...))
		if err != nil {
			klog.Fatalf("Fatal error occurred while retrieving local endpoint: %v", err)
		}

		ce = ipsec.NewEngine(append(ss.ClusterCidr, ss.ServiceCidr...), localCluster, localEndpoint)

		tunnelController := tunnel.NewTunnelController(ss.Namespace, ce, kubeClient, submarinerClient,
			submarinerInformerFactory.Submariner().V1().Endpoints())

		// Select the central broker datastore used to exchange cluster and
		// endpoint information between clusters.
		var ds datastore.Datastore
		switch ss.Broker {
		case "phpapi":
			secure, err := util.ParseSecure(ss.Token)
			if err != nil {
				klog.Fatalf("Error parsing secure token: %v", err)
			}

			ds = phpapi.NewPHPAPI(secure.ApiKey)
		case "k8s":
			ds = subk8s.NewK8sDatastore(ss.ClusterId, stopCh)
		default:
			klog.Fatalf("Invalid broker backend %q was specified", ss.Broker)
		}

		klog.V(6).Info("Creating new datastore syncer")
		dsSyncer := datastoresyncer.NewDatastoreSyncer(ss.ClusterId, ss.Namespace, kubeClient, submarinerClient,
			submarinerInformerFactory.Submariner().V1().Clusters(), submarinerInformerFactory.Submariner().V1().Endpoints(),
			ds, ss.ColorCodes, localCluster, localEndpoint)

		kubeInformerFactory.Start(stopCh)
		submarinerInformerFactory.Start(stopCh)

		klog.V(4).Info("Starting controllers")

		var wg sync.WaitGroup
		wg.Add(3)

		go func() {
			defer wg.Done()
			ce.StartEngine(true)
		}()

		go func() {
			defer wg.Done()
			if err := tunnelController.Run(stopCh); err != nil {
				klog.Fatalf("Error running tunnel controller: %s", err.Error())
			}
		}()

		go func() {
			defer wg.Done()
			if err := dsSyncer.Run(stopCh); err != nil {
				klog.Fatalf("Error running datastoresyncer controller: %s", err.Error())
			}
		}()

		wg.Wait()
	}
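
	// Only the replica that wins leader election runs start; events emitted
	// during the election campaign are attributed to the
	// submariner-controller component.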
	leClient, err := kubernetes.NewForConfig(rest.AddUserAgent(cfg, "leader-election"))
	if err != nil {
		klog.Fatal(err)
	}

	eventBroadcaster := record.NewBroadcaster()
	eventBroadcaster.StartLogging(klog.V(4).Infof)
	recorder := eventBroadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: "submariner-controller"})

	startLeaderElection(leClient, recorder, start)
	klog.Fatal("All controllers stopped or exited. Stopping main loop")
}
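
// startLeaderElection campaigns for a ConfigMap-based lock named
// "submariner-engine-lock" in the submariner namespace. The replica that
// acquires the lock runs the supplied callback; if leadership is ever lost,
// the process exits so it can be restarted cleanly.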
func startLeaderElection(leaderElectionClient kubernetes.Interface, recorder record.EventRecorder, run func(ctx context.Context)) {
	id, err := os.Hostname()
	if err != nil {
		klog.Fatalf("Error getting hostname: %s", err.Error())
	}

	// Lock required for leader election.
	rl := resourcelock.ConfigMapLock{
		ConfigMapMeta: metav1.ObjectMeta{
			Namespace: "submariner",
			Name:      "submariner-engine-lock",
		},
		Client: leaderElectionClient.CoreV1(),
		LockConfig: resourcelock.ResourceLockConfig{
			Identity:      id + "-submariner-engine",
			EventRecorder: recorder,
		},
	}

	leaderelection.RunOrDie(context.TODO(), leaderelection.LeaderElectionConfig{
		Lock:          &rl,
		LeaseDuration: 15 * time.Second,
		RenewDeadline: 10 * time.Second,
		RetryPeriod:   3 * time.Second,
		Callbacks: leaderelection.LeaderCallbacks{
			OnStartedLeading: run,
			OnStoppedLeading: func() {
				klog.Fatal("Leader election lost")
			},
		},
	})
}